Diffstat (limited to 'ipc/sem.c')
-rw-r--r-- | ipc/sem.c | 308 |
1 file changed, 155 insertions, 153 deletions
@@ -80,7 +80,7 @@ | |||
80 | #include <linux/audit.h> | 80 | #include <linux/audit.h> |
81 | #include <linux/capability.h> | 81 | #include <linux/capability.h> |
82 | #include <linux/seq_file.h> | 82 | #include <linux/seq_file.h> |
83 | #include <linux/mutex.h> | 83 | #include <linux/rwsem.h> |
84 | #include <linux/nsproxy.h> | 84 | #include <linux/nsproxy.h> |
85 | 85 | ||
86 | #include <asm/uaccess.h> | 86 | #include <asm/uaccess.h> |
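The only change in this hunk is the header swap from <linux/mutex.h> to <linux/rwsem.h>; it anticipates the locking conversion applied in the hunks below, where the per-namespace ipc_ids mutex becomes an rw_semaphore so that read-only paths (e.g. SEM_INFO) can run concurrently while creation and removal still serialize. A minimal sketch of that pattern, using a hypothetical stand-in struct rather than the real ipc_ids layout:

#include <linux/rwsem.h>

/* hypothetical stand-in for the relevant part of struct ipc_ids */
struct ids_sketch {
        struct rw_semaphore rw_mutex;
};

static void read_side(struct ids_sketch *ids)
{
        down_read(&ids->rw_mutex);      /* was mutex_lock(); readers may now overlap */
        /* ... read-only walk of the id space ... */
        up_read(&ids->rw_mutex);        /* was mutex_unlock() */
}

static void write_side(struct ids_sketch *ids)
{
        down_write(&ids->rw_mutex);     /* create/remove still excludes everyone */
        /* ... add or remove an id ... */
        up_write(&ids->rw_mutex);
}
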
@@ -88,18 +88,14 @@ | |||
88 | 88 | ||
89 | #define sem_ids(ns) (*((ns)->ids[IPC_SEM_IDS])) | 89 | #define sem_ids(ns) (*((ns)->ids[IPC_SEM_IDS])) |
90 | 90 | ||
91 | #define sem_lock(ns, id) ((struct sem_array*)ipc_lock(&sem_ids(ns), id)) | ||
92 | #define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm) | 91 | #define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm) |
93 | #define sem_rmid(ns, id) ((struct sem_array*)ipc_rmid(&sem_ids(ns), id)) | 92 | #define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid) |
94 | #define sem_checkid(ns, sma, semid) \ | 93 | #define sem_buildid(id, seq) ipc_buildid(id, seq) |
95 | ipc_checkid(&sem_ids(ns),&sma->sem_perm,semid) | ||
96 | #define sem_buildid(ns, id, seq) \ | ||
97 | ipc_buildid(&sem_ids(ns), id, seq) | ||
98 | 94 | ||
99 | static struct ipc_ids init_sem_ids; | 95 | static struct ipc_ids init_sem_ids; |
100 | 96 | ||
101 | static int newary(struct ipc_namespace *, key_t, int, int); | 97 | static int newary(struct ipc_namespace *, struct ipc_params *); |
102 | static void freeary(struct ipc_namespace *ns, struct sem_array *sma, int id); | 98 | static void freeary(struct ipc_namespace *, struct sem_array *); |
103 | #ifdef CONFIG_PROC_FS | 99 | #ifdef CONFIG_PROC_FS |
104 | static int sysvipc_sem_proc_show(struct seq_file *s, void *it); | 100 | static int sysvipc_sem_proc_show(struct seq_file *s, void *it); |
105 | #endif | 101 | #endif |
@@ -129,7 +125,7 @@ static void __sem_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids) | |||
129 | ns->sc_semopm = SEMOPM; | 125 | ns->sc_semopm = SEMOPM; |
130 | ns->sc_semmni = SEMMNI; | 126 | ns->sc_semmni = SEMMNI; |
131 | ns->used_sems = 0; | 127 | ns->used_sems = 0; |
132 | ipc_init_ids(ids, ns->sc_semmni); | 128 | ipc_init_ids(ids); |
133 | } | 129 | } |
134 | 130 | ||
135 | int sem_init_ns(struct ipc_namespace *ns) | 131 | int sem_init_ns(struct ipc_namespace *ns) |
@@ -146,20 +142,24 @@ int sem_init_ns(struct ipc_namespace *ns) | |||
146 | 142 | ||
147 | void sem_exit_ns(struct ipc_namespace *ns) | 143 | void sem_exit_ns(struct ipc_namespace *ns) |
148 | { | 144 | { |
149 | int i; | ||
150 | struct sem_array *sma; | 145 | struct sem_array *sma; |
146 | int next_id; | ||
147 | int total, in_use; | ||
151 | 148 | ||
152 | mutex_lock(&sem_ids(ns).mutex); | 149 | down_write(&sem_ids(ns).rw_mutex); |
153 | for (i = 0; i <= sem_ids(ns).max_id; i++) { | 150 | |
154 | sma = sem_lock(ns, i); | 151 | in_use = sem_ids(ns).in_use; |
152 | |||
153 | for (total = 0, next_id = 0; total < in_use; next_id++) { | ||
154 | sma = idr_find(&sem_ids(ns).ipcs_idr, next_id); | ||
155 | if (sma == NULL) | 155 | if (sma == NULL) |
156 | continue; | 156 | continue; |
157 | 157 | ipc_lock_by_ptr(&sma->sem_perm); | |
158 | freeary(ns, sma, i); | 158 | freeary(ns, sma); |
159 | total++; | ||
159 | } | 160 | } |
160 | mutex_unlock(&sem_ids(ns).mutex); | 161 | up_write(&sem_ids(ns).rw_mutex); |
161 | 162 | ||
162 | ipc_fini_ids(ns->ids[IPC_SEM_IDS]); | ||
163 | kfree(ns->ids[IPC_SEM_IDS]); | 163 | kfree(ns->ids[IPC_SEM_IDS]); |
164 | ns->ids[IPC_SEM_IDS] = NULL; | 164 | ns->ids[IPC_SEM_IDS] = NULL; |
165 | } | 165 | } |
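The rewritten sem_exit_ns() above is the first user of the new IDR-backed id space: instead of walking a fixed-size array up to max_id, it probes ascending ids and stops once it has visited as many entries as the namespace reports in use. The idiom, pulled out of the interleaved diff for readability (ids stands for the namespace's struct ipc_ids, with rw_mutex already held for writing):

struct kern_ipc_perm *ipcp;
int next_id, total;
int in_use = ids->in_use;

for (total = 0, next_id = 0; total < in_use; next_id++) {
        ipcp = idr_find(&ids->ipcs_idr, next_id);       /* NULL for unused slots */
        if (ipcp == NULL)
                continue;
        /* ... lock and operate on the entry ... */
        total++;
}
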
@@ -173,6 +173,42 @@ void __init sem_init (void) | |||
173 | } | 173 | } |
174 | 174 | ||
175 | /* | 175 | /* |
176 | * This routine is called in the paths where the rw_mutex is held to protect | ||
177 | * access to the idr tree. | ||
178 | */ | ||
179 | static inline struct sem_array *sem_lock_check_down(struct ipc_namespace *ns, | ||
180 | int id) | ||
181 | { | ||
182 | struct kern_ipc_perm *ipcp = ipc_lock_check_down(&sem_ids(ns), id); | ||
183 | |||
184 | return container_of(ipcp, struct sem_array, sem_perm); | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * sem_lock_(check_) routines are called in the paths where the rw_mutex | ||
189 | * is not held. | ||
190 | */ | ||
191 | static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id) | ||
192 | { | ||
193 | struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id); | ||
194 | |||
195 | return container_of(ipcp, struct sem_array, sem_perm); | ||
196 | } | ||
197 | |||
198 | static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns, | ||
199 | int id) | ||
200 | { | ||
201 | struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id); | ||
202 | |||
203 | return container_of(ipcp, struct sem_array, sem_perm); | ||
204 | } | ||
205 | |||
206 | static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) | ||
207 | { | ||
208 | ipc_rmid(&sem_ids(ns), &s->sem_perm); | ||
209 | } | ||
210 | |||
211 | /* | ||
176 | * Lockless wakeup algorithm: | 212 | * Lockless wakeup algorithm: |
177 | * Without the check/retry algorithm a lockless wakeup is possible: | 213 | * Without the check/retry algorithm a lockless wakeup is possible: |
178 | * - queue.status is initialized to -EINTR before blocking. | 214 | * - queue.status is initialized to -EINTR before blocking. |
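The wrappers added above return whatever ipc_lock()/ipc_lock_check() hand back, converted with container_of(). The lock helpers now report failure as an ERR_PTR()-encoded value instead of NULL, and because sem_perm is the first member of struct sem_array the zero-offset container_of() leaves that encoded value intact, so callers can test the sem_array pointer directly. The caller pattern used throughout the later hunks, sketched:

static int with_sem_locked(struct ipc_namespace *ns, int semid)
{
        struct sem_array *sma;

        sma = sem_lock_check(ns, semid);        /* or sem_lock()/sem_lock_check_down() */
        if (IS_ERR(sma))
                return PTR_ERR(sma);            /* e.g. -EINVAL or -EIDRM, no bare NULL test */

        /* ... per-set lock held; the _check variants have also validated the sequence number ... */

        sem_unlock(sma);
        return 0;
}
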
@@ -206,12 +242,23 @@ void __init sem_init (void) | |||
206 | */ | 242 | */ |
207 | #define IN_WAKEUP 1 | 243 | #define IN_WAKEUP 1 |
208 | 244 | ||
209 | static int newary (struct ipc_namespace *ns, key_t key, int nsems, int semflg) | 245 | /** |
246 | * newary - Create a new semaphore set | ||
247 | * @ns: namespace | ||
248 | * @params: ptr to the structure that contains key, semflg and nsems | ||
249 | * | ||
250 | * Called with sem_ids.rw_mutex held (as a writer) | ||
251 | */ | ||
252 | |||
253 | static int newary(struct ipc_namespace *ns, struct ipc_params *params) | ||
210 | { | 254 | { |
211 | int id; | 255 | int id; |
212 | int retval; | 256 | int retval; |
213 | struct sem_array *sma; | 257 | struct sem_array *sma; |
214 | int size; | 258 | int size; |
259 | key_t key = params->key; | ||
260 | int nsems = params->u.nsems; | ||
261 | int semflg = params->flg; | ||
215 | 262 | ||
216 | if (!nsems) | 263 | if (!nsems) |
217 | return -EINVAL; | 264 | return -EINVAL; |
@@ -236,14 +283,14 @@ static int newary (struct ipc_namespace *ns, key_t key, int nsems, int semflg) | |||
236 | } | 283 | } |
237 | 284 | ||
238 | id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); | 285 | id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); |
239 | if(id == -1) { | 286 | if (id < 0) { |
240 | security_sem_free(sma); | 287 | security_sem_free(sma); |
241 | ipc_rcu_putref(sma); | 288 | ipc_rcu_putref(sma); |
242 | return -ENOSPC; | 289 | return id; |
243 | } | 290 | } |
244 | ns->used_sems += nsems; | 291 | ns->used_sems += nsems; |
245 | 292 | ||
246 | sma->sem_id = sem_buildid(ns, id, sma->sem_perm.seq); | 293 | sma->sem_perm.id = sem_buildid(id, sma->sem_perm.seq); |
247 | sma->sem_base = (struct sem *) &sma[1]; | 294 | sma->sem_base = (struct sem *) &sma[1]; |
248 | /* sma->sem_pending = NULL; */ | 295 | /* sma->sem_pending = NULL; */ |
249 | sma->sem_pending_last = &sma->sem_pending; | 296 | sma->sem_pending_last = &sma->sem_pending; |
@@ -252,48 +299,56 @@ static int newary (struct ipc_namespace *ns, key_t key, int nsems, int semflg) | |||
252 | sma->sem_ctime = get_seconds(); | 299 | sma->sem_ctime = get_seconds(); |
253 | sem_unlock(sma); | 300 | sem_unlock(sma); |
254 | 301 | ||
255 | return sma->sem_id; | 302 | return sma->sem_perm.id; |
303 | } | ||
304 | |||
305 | |||
306 | /* | ||
307 | * Called with sem_ids.rw_mutex and ipcp locked. | ||
308 | */ | ||
309 | static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg) | ||
310 | { | ||
311 | struct sem_array *sma; | ||
312 | |||
313 | sma = container_of(ipcp, struct sem_array, sem_perm); | ||
314 | return security_sem_associate(sma, semflg); | ||
256 | } | 315 | } |
257 | 316 | ||
258 | asmlinkage long sys_semget (key_t key, int nsems, int semflg) | 317 | /* |
318 | * Called with sem_ids.rw_mutex and ipcp locked. | ||
319 | */ | ||
320 | static inline int sem_more_checks(struct kern_ipc_perm *ipcp, | ||
321 | struct ipc_params *params) | ||
259 | { | 322 | { |
260 | int id, err = -EINVAL; | ||
261 | struct sem_array *sma; | 323 | struct sem_array *sma; |
324 | |||
325 | sma = container_of(ipcp, struct sem_array, sem_perm); | ||
326 | if (params->u.nsems > sma->sem_nsems) | ||
327 | return -EINVAL; | ||
328 | |||
329 | return 0; | ||
330 | } | ||
331 | |||
332 | asmlinkage long sys_semget(key_t key, int nsems, int semflg) | ||
333 | { | ||
262 | struct ipc_namespace *ns; | 334 | struct ipc_namespace *ns; |
335 | struct ipc_ops sem_ops; | ||
336 | struct ipc_params sem_params; | ||
263 | 337 | ||
264 | ns = current->nsproxy->ipc_ns; | 338 | ns = current->nsproxy->ipc_ns; |
265 | 339 | ||
266 | if (nsems < 0 || nsems > ns->sc_semmsl) | 340 | if (nsems < 0 || nsems > ns->sc_semmsl) |
267 | return -EINVAL; | 341 | return -EINVAL; |
268 | mutex_lock(&sem_ids(ns).mutex); | ||
269 | |||
270 | if (key == IPC_PRIVATE) { | ||
271 | err = newary(ns, key, nsems, semflg); | ||
272 | } else if ((id = ipc_findkey(&sem_ids(ns), key)) == -1) { /* key not used */ | ||
273 | if (!(semflg & IPC_CREAT)) | ||
274 | err = -ENOENT; | ||
275 | else | ||
276 | err = newary(ns, key, nsems, semflg); | ||
277 | } else if (semflg & IPC_CREAT && semflg & IPC_EXCL) { | ||
278 | err = -EEXIST; | ||
279 | } else { | ||
280 | sma = sem_lock(ns, id); | ||
281 | BUG_ON(sma==NULL); | ||
282 | if (nsems > sma->sem_nsems) | ||
283 | err = -EINVAL; | ||
284 | else if (ipcperms(&sma->sem_perm, semflg)) | ||
285 | err = -EACCES; | ||
286 | else { | ||
287 | int semid = sem_buildid(ns, id, sma->sem_perm.seq); | ||
288 | err = security_sem_associate(sma, semflg); | ||
289 | if (!err) | ||
290 | err = semid; | ||
291 | } | ||
292 | sem_unlock(sma); | ||
293 | } | ||
294 | 342 | ||
295 | mutex_unlock(&sem_ids(ns).mutex); | 343 | sem_ops.getnew = newary; |
296 | return err; | 344 | sem_ops.associate = sem_security; |
345 | sem_ops.more_checks = sem_more_checks; | ||
346 | |||
347 | sem_params.key = key; | ||
348 | sem_params.flg = semflg; | ||
349 | sem_params.u.nsems = nsems; | ||
350 | |||
351 | return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params); | ||
297 | } | 352 | } |
298 | 353 | ||
299 | /* Manage the doubly linked list sma->sem_pending as a FIFO: | 354 | /* Manage the doubly linked list sma->sem_pending as a FIFO: |
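The open-coded body deleted from sys_semget() above is not dropped: its key handling moves into the generic ipcget() helper in ipc/util.c, with the semaphore-specific steps supplied through the getnew/associate/more_checks callbacks. The function below is a simplified reconstruction of that dispatch for orientation only, not the real helper; find_locked_by_key() is a hypothetical stand-in for the key lookup, and locking and id bookkeeping are trimmed.

static int ipcget_sketch(struct ipc_namespace *ns, struct ipc_ids *ids,
                         struct ipc_ops *ops, struct ipc_params *params)
{
        struct kern_ipc_perm *ipcp;
        int flg = params->flg;
        int err;

        down_write(&ids->rw_mutex);

        /* hypothetical stand-in: look up the key, return the entry locked (or NULL) */
        ipcp = (params->key == IPC_PRIVATE) ? NULL
                                            : find_locked_by_key(ids, params->key);
        if (ipcp == NULL) {
                /* IPC_PRIVATE, or an unused key */
                if (params->key != IPC_PRIVATE && !(flg & IPC_CREAT))
                        err = -ENOENT;
                else
                        err = ops->getnew(ns, params);          /* newary() for semaphores */
        } else {
                if ((flg & IPC_CREAT) && (flg & IPC_EXCL))
                        err = -EEXIST;
                else {
                        err = ops->more_checks(ipcp, params);   /* sem_more_checks() */
                        if (!err && ipcperms(ipcp, flg))
                                err = -EACCES;
                        if (!err)
                                err = ops->associate(ipcp, flg);/* sem_security() */
                        if (!err)
                                err = ipcp->id;                 /* hand back the ipc id */
                }
                ipc_unlock(ipcp);
        }

        up_write(&ids->rw_mutex);
        return err;
}

With that in place, sys_semget() only fills in sem_ops and sem_params and returns whatever ipcget() produces, as the right-hand column of the hunk shows.
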
@@ -487,15 +542,14 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum) | |||
487 | return semzcnt; | 542 | return semzcnt; |
488 | } | 543 | } |
489 | 544 | ||
490 | /* Free a semaphore set. freeary() is called with sem_ids.mutex locked and | 545 | /* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked |
491 | * the spinlock for this semaphore set hold. sem_ids.mutex remains locked | 546 | * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex |
492 | * on exit. | 547 | * remains locked on exit. |
493 | */ | 548 | */ |
494 | static void freeary (struct ipc_namespace *ns, struct sem_array *sma, int id) | 549 | static void freeary(struct ipc_namespace *ns, struct sem_array *sma) |
495 | { | 550 | { |
496 | struct sem_undo *un; | 551 | struct sem_undo *un; |
497 | struct sem_queue *q; | 552 | struct sem_queue *q; |
498 | int size; | ||
499 | 553 | ||
500 | /* Invalidate the existing undo structures for this semaphore set. | 554 | /* Invalidate the existing undo structures for this semaphore set. |
501 | * (They will be freed without any further action in exit_sem() | 555 | * (They will be freed without any further action in exit_sem() |
@@ -518,12 +572,11 @@ static void freeary (struct ipc_namespace *ns, struct sem_array *sma, int id) | |||
518 | q = n; | 572 | q = n; |
519 | } | 573 | } |
520 | 574 | ||
521 | /* Remove the semaphore set from the ID array*/ | 575 | /* Remove the semaphore set from the IDR */ |
522 | sma = sem_rmid(ns, id); | 576 | sem_rmid(ns, sma); |
523 | sem_unlock(sma); | 577 | sem_unlock(sma); |
524 | 578 | ||
525 | ns->used_sems -= sma->sem_nsems; | 579 | ns->used_sems -= sma->sem_nsems; |
526 | size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem); | ||
527 | security_sem_free(sma); | 580 | security_sem_free(sma); |
528 | ipc_rcu_putref(sma); | 581 | ipc_rcu_putref(sma); |
529 | } | 582 | } |
@@ -576,7 +629,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum, | |||
576 | seminfo.semmnu = SEMMNU; | 629 | seminfo.semmnu = SEMMNU; |
577 | seminfo.semmap = SEMMAP; | 630 | seminfo.semmap = SEMMAP; |
578 | seminfo.semume = SEMUME; | 631 | seminfo.semume = SEMUME; |
579 | mutex_lock(&sem_ids(ns).mutex); | 632 | down_read(&sem_ids(ns).rw_mutex); |
580 | if (cmd == SEM_INFO) { | 633 | if (cmd == SEM_INFO) { |
581 | seminfo.semusz = sem_ids(ns).in_use; | 634 | seminfo.semusz = sem_ids(ns).in_use; |
582 | seminfo.semaem = ns->used_sems; | 635 | seminfo.semaem = ns->used_sems; |
@@ -584,8 +637,8 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum, | |||
584 | seminfo.semusz = SEMUSZ; | 637 | seminfo.semusz = SEMUSZ; |
585 | seminfo.semaem = SEMAEM; | 638 | seminfo.semaem = SEMAEM; |
586 | } | 639 | } |
587 | max_id = sem_ids(ns).max_id; | 640 | max_id = ipc_get_maxid(&sem_ids(ns)); |
588 | mutex_unlock(&sem_ids(ns).mutex); | 641 | up_read(&sem_ids(ns).rw_mutex); |
589 | if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) | 642 | if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) |
590 | return -EFAULT; | 643 | return -EFAULT; |
591 | return (max_id < 0) ? 0: max_id; | 644 | return (max_id < 0) ? 0: max_id; |
@@ -595,14 +648,9 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum, | |||
595 | struct semid64_ds tbuf; | 648 | struct semid64_ds tbuf; |
596 | int id; | 649 | int id; |
597 | 650 | ||
598 | if(semid >= sem_ids(ns).entries->size) | ||
599 | return -EINVAL; | ||
600 | |||
601 | memset(&tbuf,0,sizeof(tbuf)); | ||
602 | |||
603 | sma = sem_lock(ns, semid); | 651 | sma = sem_lock(ns, semid); |
604 | if(sma == NULL) | 652 | if (IS_ERR(sma)) |
605 | return -EINVAL; | 653 | return PTR_ERR(sma); |
606 | 654 | ||
607 | err = -EACCES; | 655 | err = -EACCES; |
608 | if (ipcperms (&sma->sem_perm, S_IRUGO)) | 656 | if (ipcperms (&sma->sem_perm, S_IRUGO)) |
@@ -612,7 +660,9 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum, | |||
612 | if (err) | 660 | if (err) |
613 | goto out_unlock; | 661 | goto out_unlock; |
614 | 662 | ||
615 | id = sem_buildid(ns, semid, sma->sem_perm.seq); | 663 | id = sma->sem_perm.id; |
664 | |||
665 | memset(&tbuf, 0, sizeof(tbuf)); | ||
616 | 666 | ||
617 | kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); | 667 | kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); |
618 | tbuf.sem_otime = sma->sem_otime; | 668 | tbuf.sem_otime = sma->sem_otime; |
@@ -642,16 +692,12 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, | |||
642 | ushort* sem_io = fast_sem_io; | 692 | ushort* sem_io = fast_sem_io; |
643 | int nsems; | 693 | int nsems; |
644 | 694 | ||
645 | sma = sem_lock(ns, semid); | 695 | sma = sem_lock_check(ns, semid); |
646 | if(sma==NULL) | 696 | if (IS_ERR(sma)) |
647 | return -EINVAL; | 697 | return PTR_ERR(sma); |
648 | 698 | ||
649 | nsems = sma->sem_nsems; | 699 | nsems = sma->sem_nsems; |
650 | 700 | ||
651 | err=-EIDRM; | ||
652 | if (sem_checkid(ns,sma,semid)) | ||
653 | goto out_unlock; | ||
654 | |||
655 | err = -EACCES; | 701 | err = -EACCES; |
656 | if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO)) | 702 | if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO)) |
657 | goto out_unlock; | 703 | goto out_unlock; |
@@ -795,7 +841,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, | |||
795 | for (un = sma->undo; un; un = un->id_next) | 841 | for (un = sma->undo; un; un = un->id_next) |
796 | un->semadj[semnum] = 0; | 842 | un->semadj[semnum] = 0; |
797 | curr->semval = val; | 843 | curr->semval = val; |
798 | curr->sempid = current->tgid; | 844 | curr->sempid = task_tgid_vnr(current); |
799 | sma->sem_ctime = get_seconds(); | 845 | sma->sem_ctime = get_seconds(); |
800 | /* maybe some queued-up processes were waiting for this */ | 846 | /* maybe some queued-up processes were waiting for this */ |
801 | update_queue(sma); | 847 | update_queue(sma); |
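Besides the locking changes, this hunk also swaps current->tgid for task_tgid_vnr(current) when recording sempid; the same substitution appears in the sys_semop() and exit_sem() hunks further down. A short sketch of the distinction:

pid_t global_tgid = current->tgid;              /* the init pid namespace's view */
pid_t local_tgid  = task_tgid_vnr(current);     /* view from current's own pid namespace */

/*
 * sempid is reported back to userspace (semctl() GETPID), so it must be
 * the namespace-local number; the two values only differ for tasks
 * running inside a child pid namespace.
 */
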
@@ -863,14 +909,10 @@ static int semctl_down(struct ipc_namespace *ns, int semid, int semnum, | |||
863 | if(copy_semid_from_user (&setbuf, arg.buf, version)) | 909 | if(copy_semid_from_user (&setbuf, arg.buf, version)) |
864 | return -EFAULT; | 910 | return -EFAULT; |
865 | } | 911 | } |
866 | sma = sem_lock(ns, semid); | 912 | sma = sem_lock_check_down(ns, semid); |
867 | if(sma==NULL) | 913 | if (IS_ERR(sma)) |
868 | return -EINVAL; | 914 | return PTR_ERR(sma); |
869 | 915 | ||
870 | if (sem_checkid(ns,sma,semid)) { | ||
871 | err=-EIDRM; | ||
872 | goto out_unlock; | ||
873 | } | ||
874 | ipcp = &sma->sem_perm; | 916 | ipcp = &sma->sem_perm; |
875 | 917 | ||
876 | err = audit_ipc_obj(ipcp); | 918 | err = audit_ipc_obj(ipcp); |
@@ -894,7 +936,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid, int semnum, | |||
894 | 936 | ||
895 | switch(cmd){ | 937 | switch(cmd){ |
896 | case IPC_RMID: | 938 | case IPC_RMID: |
897 | freeary(ns, sma, semid); | 939 | freeary(ns, sma); |
898 | err = 0; | 940 | err = 0; |
899 | break; | 941 | break; |
900 | case IPC_SET: | 942 | case IPC_SET: |
@@ -948,45 +990,15 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg) | |||
948 | return err; | 990 | return err; |
949 | case IPC_RMID: | 991 | case IPC_RMID: |
950 | case IPC_SET: | 992 | case IPC_SET: |
951 | mutex_lock(&sem_ids(ns).mutex); | 993 | down_write(&sem_ids(ns).rw_mutex); |
952 | err = semctl_down(ns,semid,semnum,cmd,version,arg); | 994 | err = semctl_down(ns,semid,semnum,cmd,version,arg); |
953 | mutex_unlock(&sem_ids(ns).mutex); | 995 | up_write(&sem_ids(ns).rw_mutex); |
954 | return err; | 996 | return err; |
955 | default: | 997 | default: |
956 | return -EINVAL; | 998 | return -EINVAL; |
957 | } | 999 | } |
958 | } | 1000 | } |
959 | 1001 | ||
960 | static inline void lock_semundo(void) | ||
961 | { | ||
962 | struct sem_undo_list *undo_list; | ||
963 | |||
964 | undo_list = current->sysvsem.undo_list; | ||
965 | if (undo_list) | ||
966 | spin_lock(&undo_list->lock); | ||
967 | } | ||
968 | |||
969 | /* This code has an interaction with copy_semundo(). | ||
970 | * Consider; two tasks are sharing the undo_list. task1 | ||
971 | * acquires the undo_list lock in lock_semundo(). If task2 now | ||
972 | * exits before task1 releases the lock (by calling | ||
973 | * unlock_semundo()), then task1 will never call spin_unlock(). | ||
974 | * This leave the sem_undo_list in a locked state. If task1 now creats task3 | ||
975 | * and once again shares the sem_undo_list, the sem_undo_list will still be | ||
976 | * locked, and future SEM_UNDO operations will deadlock. This case is | ||
977 | * dealt with in copy_semundo() by having it reinitialize the spin lock when | ||
978 | * the refcnt goes from 1 to 2. | ||
979 | */ | ||
980 | static inline void unlock_semundo(void) | ||
981 | { | ||
982 | struct sem_undo_list *undo_list; | ||
983 | |||
984 | undo_list = current->sysvsem.undo_list; | ||
985 | if (undo_list) | ||
986 | spin_unlock(&undo_list->lock); | ||
987 | } | ||
988 | |||
989 | |||
990 | /* If the task doesn't already have a undo_list, then allocate one | 1002 | /* If the task doesn't already have a undo_list, then allocate one |
991 | * here. We guarantee there is only one thread using this undo list, | 1003 | * here. We guarantee there is only one thread using this undo list, |
992 | * and current is THE ONE | 1004 | * and current is THE ONE |
@@ -1047,22 +1059,17 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) | |||
1047 | if (error) | 1059 | if (error) |
1048 | return ERR_PTR(error); | 1060 | return ERR_PTR(error); |
1049 | 1061 | ||
1050 | lock_semundo(); | 1062 | spin_lock(&ulp->lock); |
1051 | un = lookup_undo(ulp, semid); | 1063 | un = lookup_undo(ulp, semid); |
1052 | unlock_semundo(); | 1064 | spin_unlock(&ulp->lock); |
1053 | if (likely(un!=NULL)) | 1065 | if (likely(un!=NULL)) |
1054 | goto out; | 1066 | goto out; |
1055 | 1067 | ||
1056 | /* no undo structure around - allocate one. */ | 1068 | /* no undo structure around - allocate one. */ |
1057 | sma = sem_lock(ns, semid); | 1069 | sma = sem_lock_check(ns, semid); |
1058 | un = ERR_PTR(-EINVAL); | 1070 | if (IS_ERR(sma)) |
1059 | if(sma==NULL) | 1071 | return ERR_PTR(PTR_ERR(sma)); |
1060 | goto out; | 1072 | |
1061 | un = ERR_PTR(-EIDRM); | ||
1062 | if (sem_checkid(ns,sma,semid)) { | ||
1063 | sem_unlock(sma); | ||
1064 | goto out; | ||
1065 | } | ||
1066 | nsems = sma->sem_nsems; | 1073 | nsems = sma->sem_nsems; |
1067 | ipc_rcu_getref(sma); | 1074 | ipc_rcu_getref(sma); |
1068 | sem_unlock(sma); | 1075 | sem_unlock(sma); |
@@ -1077,10 +1084,10 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) | |||
1077 | new->semadj = (short *) &new[1]; | 1084 | new->semadj = (short *) &new[1]; |
1078 | new->semid = semid; | 1085 | new->semid = semid; |
1079 | 1086 | ||
1080 | lock_semundo(); | 1087 | spin_lock(&ulp->lock); |
1081 | un = lookup_undo(ulp, semid); | 1088 | un = lookup_undo(ulp, semid); |
1082 | if (un) { | 1089 | if (un) { |
1083 | unlock_semundo(); | 1090 | spin_unlock(&ulp->lock); |
1084 | kfree(new); | 1091 | kfree(new); |
1085 | ipc_lock_by_ptr(&sma->sem_perm); | 1092 | ipc_lock_by_ptr(&sma->sem_perm); |
1086 | ipc_rcu_putref(sma); | 1093 | ipc_rcu_putref(sma); |
@@ -1091,7 +1098,7 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) | |||
1091 | ipc_rcu_putref(sma); | 1098 | ipc_rcu_putref(sma); |
1092 | if (sma->sem_perm.deleted) { | 1099 | if (sma->sem_perm.deleted) { |
1093 | sem_unlock(sma); | 1100 | sem_unlock(sma); |
1094 | unlock_semundo(); | 1101 | spin_unlock(&ulp->lock); |
1095 | kfree(new); | 1102 | kfree(new); |
1096 | un = ERR_PTR(-EIDRM); | 1103 | un = ERR_PTR(-EIDRM); |
1097 | goto out; | 1104 | goto out; |
@@ -1102,7 +1109,7 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) | |||
1102 | sma->undo = new; | 1109 | sma->undo = new; |
1103 | sem_unlock(sma); | 1110 | sem_unlock(sma); |
1104 | un = new; | 1111 | un = new; |
1105 | unlock_semundo(); | 1112 | spin_unlock(&ulp->lock); |
1106 | out: | 1113 | out: |
1107 | return un; | 1114 | return un; |
1108 | } | 1115 | } |
@@ -1168,15 +1175,14 @@ retry_undos: | |||
1168 | } else | 1175 | } else |
1169 | un = NULL; | 1176 | un = NULL; |
1170 | 1177 | ||
1171 | sma = sem_lock(ns, semid); | 1178 | sma = sem_lock_check(ns, semid); |
1172 | error=-EINVAL; | 1179 | if (IS_ERR(sma)) { |
1173 | if(sma==NULL) | 1180 | error = PTR_ERR(sma); |
1174 | goto out_free; | 1181 | goto out_free; |
1175 | error = -EIDRM; | 1182 | } |
1176 | if (sem_checkid(ns,sma,semid)) | 1183 | |
1177 | goto out_unlock_free; | ||
1178 | /* | 1184 | /* |
1179 | * semid identifies are not unique - find_undo may have | 1185 | * semid identifiers are not unique - find_undo may have |
1180 | * allocated an undo structure, it was invalidated by an RMID | 1186 | * allocated an undo structure, it was invalidated by an RMID |
1181 | * and now a new array with received the same id. Check and retry. | 1187 | * and now a new array with received the same id. Check and retry. |
1182 | */ | 1188 | */ |
@@ -1196,7 +1202,7 @@ retry_undos: | |||
1196 | if (error) | 1202 | if (error) |
1197 | goto out_unlock_free; | 1203 | goto out_unlock_free; |
1198 | 1204 | ||
1199 | error = try_atomic_semop (sma, sops, nsops, un, current->tgid); | 1205 | error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current)); |
1200 | if (error <= 0) { | 1206 | if (error <= 0) { |
1201 | if (alter && error == 0) | 1207 | if (alter && error == 0) |
1202 | update_queue (sma); | 1208 | update_queue (sma); |
@@ -1211,7 +1217,7 @@ retry_undos: | |||
1211 | queue.sops = sops; | 1217 | queue.sops = sops; |
1212 | queue.nsops = nsops; | 1218 | queue.nsops = nsops; |
1213 | queue.undo = un; | 1219 | queue.undo = un; |
1214 | queue.pid = current->tgid; | 1220 | queue.pid = task_tgid_vnr(current); |
1215 | queue.id = semid; | 1221 | queue.id = semid; |
1216 | queue.alter = alter; | 1222 | queue.alter = alter; |
1217 | if (alter) | 1223 | if (alter) |
@@ -1242,7 +1248,7 @@ retry_undos: | |||
1242 | } | 1248 | } |
1243 | 1249 | ||
1244 | sma = sem_lock(ns, semid); | 1250 | sma = sem_lock(ns, semid); |
1245 | if(sma==NULL) { | 1251 | if (IS_ERR(sma)) { |
1246 | BUG_ON(queue.prev != NULL); | 1252 | BUG_ON(queue.prev != NULL); |
1247 | error = -EIDRM; | 1253 | error = -EIDRM; |
1248 | goto out_free; | 1254 | goto out_free; |
@@ -1279,10 +1285,6 @@ asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsop | |||
1279 | 1285 | ||
1280 | /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between | 1286 | /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between |
1281 | * parent and child tasks. | 1287 | * parent and child tasks. |
1282 | * | ||
1283 | * See the notes above unlock_semundo() regarding the spin_lock_init() | ||
1284 | * in this code. Initialize the undo_list->lock here instead of get_undo_list() | ||
1285 | * because of the reasoning in the comment above unlock_semundo. | ||
1286 | */ | 1288 | */ |
1287 | 1289 | ||
1288 | int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) | 1290 | int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) |
@@ -1342,13 +1344,13 @@ void exit_sem(struct task_struct *tsk) | |||
1342 | if(semid == -1) | 1344 | if(semid == -1) |
1343 | continue; | 1345 | continue; |
1344 | sma = sem_lock(ns, semid); | 1346 | sma = sem_lock(ns, semid); |
1345 | if (sma == NULL) | 1347 | if (IS_ERR(sma)) |
1346 | continue; | 1348 | continue; |
1347 | 1349 | ||
1348 | if (u->semid == -1) | 1350 | if (u->semid == -1) |
1349 | goto next_entry; | 1351 | goto next_entry; |
1350 | 1352 | ||
1351 | BUG_ON(sem_checkid(ns,sma,u->semid)); | 1353 | BUG_ON(sem_checkid(sma, u->semid)); |
1352 | 1354 | ||
1353 | /* remove u from the sma->undo list */ | 1355 | /* remove u from the sma->undo list */ |
1354 | for (unp = &sma->undo; (un = *unp); unp = &un->id_next) { | 1356 | for (unp = &sma->undo; (un = *unp); unp = &un->id_next) { |
@@ -1382,7 +1384,7 @@ found: | |||
1382 | semaphore->semval = 0; | 1384 | semaphore->semval = 0; |
1383 | if (semaphore->semval > SEMVMX) | 1385 | if (semaphore->semval > SEMVMX) |
1384 | semaphore->semval = SEMVMX; | 1386 | semaphore->semval = SEMVMX; |
1385 | semaphore->sempid = current->tgid; | 1387 | semaphore->sempid = task_tgid_vnr(current); |
1386 | } | 1388 | } |
1387 | } | 1389 | } |
1388 | sma->sem_otime = get_seconds(); | 1390 | sma->sem_otime = get_seconds(); |
@@ -1402,7 +1404,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it) | |||
1402 | return seq_printf(s, | 1404 | return seq_printf(s, |
1403 | "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n", | 1405 | "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n", |
1404 | sma->sem_perm.key, | 1406 | sma->sem_perm.key, |
1405 | sma->sem_id, | 1407 | sma->sem_perm.id, |
1406 | sma->sem_perm.mode, | 1408 | sma->sem_perm.mode, |
1407 | sma->sem_nsems, | 1409 | sma->sem_nsems, |
1408 | sma->sem_perm.uid, | 1410 | sma->sem_perm.uid, |