author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /ipc/sem.c
tag		v2.6.12-rc2 (Linux-2.6.12-rc2)

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'ipc/sem.c')

 -rw-r--r--	ipc/sem.c	1384
 1 files changed, 1384 insertions, 0 deletions

diff --git a/ipc/sem.c b/ipc/sem.c
new file mode 100644
index 000000000000..5ad7ac0ed60d
--- /dev/null
+++ b/ipc/sem.c
@@ -0,0 +1,1384 @@
/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
 * This code underwent a massive rewrite in order to solve some problems
 * with the original code. In particular the original code failed to
 * wake up processes that were waiting for semval to go to 0 if the
 * value went to 0 and was then incremented rapidly enough. In solving
 * this problem I have also modified the implementation so that it
 * processes pending operations in a FIFO manner, thus giving a guarantee
 * that processes waiting for a lock on the semaphore won't starve
 * unless another locking process fails to unlock.
 * In addition the following two changes in behavior have been introduced:
 * - The original implementation of semop returned the value of the
 *   last semaphore element examined on success. This does not
 *   match the manual page specification, and effectively
 *   allows the user to read the semaphore even if they do not
 *   have read permission. The implementation now returns 0
 *   on success as stated in the manual page.
 * - There is some confusion over whether the set of undo adjustments
 *   to be performed at exit should be done in an atomic manner.
 *   That is, if we are attempting to decrement the semval, should we queue
 *   up and wait until we can do so legally?
 *   The original implementation attempted to do this.
 *   The current implementation does not do so. This is because I don't
 *   think it is the right thing (TM) to do, and because I couldn't
 *   see a clean way to get the old behavior with the new design.
 *   The POSIX standard and SVID should be consulted to determine
 *   what behavior is mandated.
 *
 * Further notes on refinement (Christoph Rohland, December 1998):
 * - The POSIX standard says that the undo adjustments should simply
 *   be redone. So the current implementation is OK.
 * - The previous code had two flaws:
 *   1) It actively gave the semaphore to the next waiting process
 *      sleeping on the semaphore. Since this process did not have the
 *      cpu, this led to many unnecessary context switches and bad
 *      performance. Now we only check which process should be able to
 *      get the semaphore, and if this process wants to reduce some
 *      semaphore value we simply wake it up without doing the
 *      operation. So it has to try to get it later. Thus e.g. the
 *      running process may reacquire the semaphore during the current
 *      time slice. If it only waits for zero or increases the semaphore,
 *      we do the operation in advance and wake it up.
 *   2) It did not wake up all zero-waiting processes. We try to do
 *      better but only get the semops right which only wait for zero or
 *      increase. If there are decrement operations in the operations
 *      array we do the same as before.
 *
 * With the arrival of the O(1) scheduler, it became unnecessary to perform
 * the check/retry algorithm for waking up blocked processes, as the new
 * scheduler is better at handling thread switches than the old one.
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc <alan@redhat.com>
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/smp_lock.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <asm/uaccess.h>
#include "util.h"

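/* Thin wrappers binding the generic ipc_* helpers from "util.h" to the
 * semaphore namespace (sem_ids): sem_lock() looks an id up and returns
 * the array with its spinlock held, sem_unlock() drops that lock,
 * sem_rmid() detaches the array from the id table, and sem_checkid()/
 * sem_buildid() validate and construct the sequence-stamped ids that
 * user space sees.
 */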
#define sem_lock(id)	((struct sem_array*)ipc_lock(&sem_ids,id))
#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_rmid(id)	((struct sem_array*)ipc_rmid(&sem_ids,id))
#define sem_checkid(sma, semid)	\
	ipc_checkid(&sem_ids,&sma->sem_perm,semid)
#define sem_buildid(id, seq) \
	ipc_buildid(&sem_ids, id, seq)
static struct ipc_ids sem_ids;

static int newary (key_t, int, int);
static void freeary (struct sem_array *sma, int id);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * linked list protection:
 *	sem_undo.id_next,
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 */

int sem_ctls[4] = {SEMMSL, SEMMNS, SEMOPM, SEMMNI};
#define sc_semmsl	(sem_ctls[0])
#define sc_semmns	(sem_ctls[1])
#define sc_semopm	(sem_ctls[2])
#define sc_semmni	(sem_ctls[3])

static int used_sems;

void __init sem_init (void)
{
	used_sems = 0;
	ipc_init_ids(&sem_ids,sc_semmni);

#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/sem", 0, NULL, sysvipc_sem_read_proc, NULL);
#endif
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from sma->sem_pending
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* call wake_up_process
 *	* set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the semaphore array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 */
#define IN_WAKEUP	1
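/* The reader side of this handshake is the spin in sys_semtimedop()
 * below: after being woken, the blocked task re-reads queue.status with
 * cpu_relax() until the value leaves IN_WAKEUP, at which point the
 * final result is stable and the waker no longer touches the
 * (stack-allocated) queue entry.
 */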

static int newary (key_t key, int nsems, int semflg)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;

	if (!nsems)
		return -EINVAL;
	if (used_sems + nsems > sc_semmns)
		return -ENOSPC;

	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma) {
		return -ENOMEM;
	}
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni);
	if(id == -1) {
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return -ENOSPC;
	}
	used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];
	/* sma->sem_pending = NULL; */
	sma->sem_pending_last = &sma->sem_pending;
	/* sma->undo = NULL; */
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sem_buildid(id, sma->sem_perm.seq);
}

asmlinkage long sys_semget (key_t key, int nsems, int semflg)
{
	int id, err = -EINVAL;
	struct sem_array *sma;

	if (nsems < 0 || nsems > sc_semmsl)
		return -EINVAL;
	down(&sem_ids.sem);

	if (key == IPC_PRIVATE) {
		err = newary(key, nsems, semflg);
	} else if ((id = ipc_findkey(&sem_ids, key)) == -1) { /* key not used */
		if (!(semflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newary(key, nsems, semflg);
	} else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
		err = -EEXIST;
	} else {
		sma = sem_lock(id);
		if(sma==NULL)
			BUG();
		if (nsems > sma->sem_nsems)
			err = -EINVAL;
		else if (ipcperms(&sma->sem_perm, semflg))
			err = -EACCES;
		else {
			int semid = sem_buildid(id, sma->sem_perm.seq);
			err = security_sem_associate(sma, semflg);
			if (!err)
				err = semid;
		}
		sem_unlock(sma);
	}

	up(&sem_ids.sem);
	return err;
}

/* Manage the doubly linked list sma->sem_pending as a FIFO:
 * insert new queue elements at the tail sma->sem_pending_last.
 */
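/* Note on the representation: q->prev does not point at the previous
 * queue element but at the link (&sma->sem_pending or &prev->next) that
 * points at q. That is why remove_from_queue() below can unlink with a
 * single "*(q->prev) = q->next" and no head-of-list special case.
 */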
static inline void append_to_queue (struct sem_array * sma,
				    struct sem_queue * q)
{
	*(q->prev = sma->sem_pending_last) = q;
	*(sma->sem_pending_last = &q->next) = NULL;
}

static inline void prepend_to_queue (struct sem_array * sma,
				     struct sem_queue * q)
{
	q->next = sma->sem_pending;
	*(q->prev = &sma->sem_pending) = q;
	if (q->next)
		q->next->prev = &q->next;
	else /* sma->sem_pending_last == &sma->sem_pending */
		sma->sem_pending_last = &q->next;
}

static inline void remove_from_queue (struct sem_array * sma,
				      struct sem_queue * q)
{
	*(q->prev) = q->next;
	if (q->next)
		q->next->prev = q->prev;
	else /* sma->sem_pending_last == &q->next */
		sma->sem_pending_last = q->prev;
	q->prev = NULL; /* mark as removed */
}

/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if the caller needs to sleep, else a
 * negative error code.
 */

static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	sma->sem_otime = get_seconds();
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}

/* Go through the pending queue for the indicated semaphore
 * looking for tasks that can be completed.
 */
static void update_queue (struct sem_array * sma)
{
	int error;
	struct sem_queue * q;

	q = sma->sem_pending;
	while(q) {
		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error <= 0) {
			struct sem_queue *n;
			remove_from_queue(sma,q);
			q->status = IN_WAKEUP;
			/*
			 * Continue scanning. The next operation
			 * that must be checked depends on the type of the
			 * completed operation:
			 * - if the operation modified the array, then
			 *   restart from the head of the queue and
			 *   check for threads that might be waiting
			 *   for semaphore values to become 0.
			 * - if the operation didn't modify the array,
			 *   then just continue.
			 */
			if (q->alter)
				n = sma->sem_pending;
			else
				n = q->next;
			wake_up_process(q->sleeper);
			/* hands-off: q will disappear immediately after
			 * writing q->status.
			 */
			q->status = error;
			q = n;
		} else {
			q = q->next;
		}
	}
}

/* The following counts are associated to each semaphore:
 *	semncnt		number of tasks waiting on semval being nonzero
 *	semzcnt		number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	for (q = sma->sem_pending; q; q = q->next) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}

/* Free a semaphore set. freeary() is called with sem_ids.sem down and
 * the spinlock for this semaphore set held. sem_ids.sem remains locked
 * on exit.
 */
static void freeary (struct sem_array *sma, int id)
{
	struct sem_undo *un;
	struct sem_queue *q;
	int size;

	/* Invalidate the existing undo structures for this semaphore set.
	 * (They will be freed without any further action in exit_sem()
	 * or during the next semop.)
	 */
	for (un = sma->undo; un; un = un->id_next)
		un->semid = -1;

	/* Wake up all pending processes and let them fail with EIDRM. */
	q = sma->sem_pending;
	while(q) {
		struct sem_queue *n;
		/* lazy remove_from_queue: we are killing the whole queue */
		q->prev = NULL;
		n = q->next;
		q->status = IN_WAKEUP;
		wake_up_process(q->sleeper); /* doesn't sleep */
		q->status = -EIDRM;	/* hands-off q */
		q = n;
	}

	/* Remove the semaphore set from the ID array */
	sma = sem_rmid(id);
	sem_unlock(sma);

	used_sems -= sma->sem_nsems;
	size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
	security_sem_free(sma);
	ipc_rcu_putref(sma);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_nolock(int semid, int semnum, int cmd, int version, union semun arg)
{
	int err = -EINVAL;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = sc_semmni;
		seminfo.semmns = sc_semmns;
		seminfo.semmsl = sc_semmsl;
		seminfo.semopm = sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down(&sem_ids.sem);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids.in_use;
			seminfo.semaem = used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = sem_ids.max_id;
		up(&sem_ids.sem);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if(semid >= sem_ids.entries->size)
			return -EINVAL;

		memset(&tbuf,0,sizeof(tbuf));

		sma = sem_lock(semid);
		if(sma == NULL)
			return -EINVAL;

		err = -EACCES;
		if (ipcperms (&sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		id = sem_buildid(semid, sma->sem_perm.seq);

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime	= sma->sem_otime;
		tbuf.sem_ctime	= sma->sem_ctime;
		tbuf.sem_nsems	= sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
	return err;
out_unlock:
	sem_unlock(sma);
	return err;
}

static int semctl_main(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem* curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort* sem_io = fast_sem_io;
	int nsems;

	sma = sem_lock(semid);
	if(sma==NULL)
		return -EINVAL;

	nsems = sma->sem_nsems;

	err=-EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock;

	err = -EACCES;
	if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			ipc_rcu_getref(sma);
			sem_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				return -ENOMEM;
			}

			ipc_lock_by_ptr(&sma->sem_perm);
			ipc_rcu_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		ipc_rcu_getref(sma);
		sem_unlock(sma);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			ipc_lock_by_ptr(&sma->sem_perm);
			ipc_rcu_putref(sma);
			sem_unlock(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_lock_by_ptr(&sma->sem_perm);
				ipc_rcu_putref(sma);
				sem_unlock(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];
		for (un = sma->undo; un; un = un->id_next)
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	case IPC_STAT:
	{
		struct semid64_ds tbuf;
		memset(&tbuf,0,sizeof(tbuf));
		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime	= sma->sem_otime;
		tbuf.sem_ctime	= sma->sem_ctime;
		tbuf.sem_nsems	= sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return 0;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;
		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		for (un = sma->undo; un; un = un->id_next)
			un->semadj[semnum] = 0;
		curr->semval = val;
		curr->sempid = current->tgid;
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}

struct sem_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct semid64_ds tbuf;

		if(copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.sem_perm.uid;
		out->gid	= tbuf.sem_perm.gid;
		out->mode	= tbuf.sem_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.sem_perm.uid;
		out->gid	= tbuf_old.sem_perm.gid;
		out->mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_down(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct sem_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if(copy_semid_from_user (&setbuf, arg.buf, version))
			return -EFAULT;
		if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode)))
			return err;
	}
	sma = sem_lock(semid);
	if(sma==NULL)
		return -EINVAL;

	if (sem_checkid(sma,semid)) {
		err=-EIDRM;
		goto out_unlock;
	}
	ipcp = &sma->sem_perm;

	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
		err=-EPERM;
		goto out_unlock;
	}

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch(cmd){
	case IPC_RMID:
		freeary(sma, semid);
		err = 0;
		break;
	case IPC_SET:
		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (setbuf.mode & S_IRWXUGO);
		sma->sem_ctime = get_seconds();
		sem_unlock(sma);
		err = 0;
		break;
	default:
		sem_unlock(sma);
		err = -EINVAL;
		break;
	}
	return err;

out_unlock:
	sem_unlock(sma);
	return err;
}

asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case SEM_STAT:
		err = semctl_nolock(semid,semnum,cmd,version,arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case IPC_STAT:
	case SETVAL:
	case SETALL:
		err = semctl_main(semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		down(&sem_ids.sem);
		err = semctl_down(semid,semnum,cmd,version,arg);
		up(&sem_ids.sem);
		return err;
	default:
		return -EINVAL;
	}
}

static inline void lock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
		spin_lock(&undo_list->lock);
}

/* This code has an interaction with copy_semundo().
 * Consider: two tasks are sharing the undo_list. task1
 * acquires the undo_list lock in lock_semundo(). If task2 now
 * exits before task1 releases the lock (by calling
 * unlock_semundo()), then task1 will never call spin_unlock().
 * This leaves the sem_undo_list in a locked state. If task1 now creates
 * task3 and once again shares the sem_undo_list, the sem_undo_list will
 * still be locked, and future SEM_UNDO operations will deadlock. This
 * case is dealt with in copy_semundo() by having it reinitialize the
 * spin lock when the refcnt goes from 1 to 2.
 */
static inline void unlock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
		spin_unlock(&undo_list->lock);
}


/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE.
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;
	int size;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		size = sizeof(struct sem_undo_list);
		undo_list = (struct sem_undo_list *) kmalloc(size, GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		memset(undo_list, 0, size);
		/* don't initialize undo_list->lock here. It's done
		 * in copy_semundo() instead.
		 */
		atomic_set(&undo_list->refcnt, 1);
		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo **last, *un;

	last = &ulp->proc_list;
	un = *last;
	while(un != NULL) {
		if(un->semid==semid)
			break;
		if(un->semid==-1) {
			*last=un->proc_next;
			kfree(un);
		} else {
			last=&un->proc_next;
		}
		un=*last;
	}
	return un;
}

static struct sem_undo *find_undo(int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	lock_semundo();
	un = lookup_undo(ulp, semid);
	unlock_semundo();
	if (likely(un!=NULL))
		goto out;

	/* no undo structure around - allocate one. */
	sma = sem_lock(semid);
	un = ERR_PTR(-EINVAL);
	if(sma==NULL)
		goto out;
	un = ERR_PTR(-EIDRM);
	if (sem_checkid(sma,semid)) {
		sem_unlock(sma);
		goto out;
	}
	nsems = sma->sem_nsems;
	ipc_rcu_getref(sma);
	sem_unlock(sma);

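	/* No lock is held across the blocking allocation below; the
	 * ipc_rcu_getref() above keeps *sma alive in the meantime.
	 * ipc_lock_by_ptr() then re-takes the lock, the reference is
	 * dropped, and sem_perm.deleted is re-checked before the new
	 * undo structure is published.
	 */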
	new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		sem_unlock(sma);
		return ERR_PTR(-ENOMEM);
	}
	memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems);
	new->semadj = (short *) &new[1];
	new->semid = semid;

	lock_semundo();
	un = lookup_undo(ulp, semid);
	if (un) {
		unlock_semundo();
		kfree(new);
		ipc_lock_by_ptr(&sma->sem_perm);
		ipc_rcu_putref(sma);
		sem_unlock(sma);
		goto out;
	}
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		unlock_semundo();
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	new->proc_next = ulp->proc_list;
	ulp->proc_list = new;
	new->id_next = sma->undo;
	sma->undo = new;
	sem_unlock(sma);
	un = new;
	unlock_semundo();
out:
	return un;
}

asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
			unsigned nsops, const struct timespec __user *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, decrease = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos++;
		if (sop->sem_op < 0)
			decrease = 1;
		if (sop->sem_op > 0)
			alter = 1;
	}
	alter |= decrease;

retry_undos:
	if (undos) {
		un = find_undo(semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	sma = sem_lock(semid);
	error=-EINVAL;
	if(sma==NULL)
		goto out_free;
	error = -EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock_free;
	/*
	 * semid identifiers are not unique - find_undo may have
	 * allocated an undo structure, it was invalidated by an RMID,
	 * and now a new array with the same id has been created.
	 * Check and retry.
	 */
	if (un && un->semid == -1) {
		sem_unlock(sma);
		goto retry_undos;
	}
	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop (sma, sops, nsops, un, current->tgid);
	if (error <= 0) {
		if (alter && error == 0)
			update_queue (sma);
		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sma = sma;
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = current->tgid;
	queue.id = semid;
	queue.alter = alter;
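	/* Altering operations queue FIFO at the tail; pure wait-for-zero
	 * operations are prepended so that update_queue() considers them
	 * first (see the 1998 notes at the top of this file).
	 */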
	if (alter)
		append_to_queue(sma, &queue);
	else
		prepend_to_queue(sma, &queue);

	queue.status = -EINTR;
	queue.sleeper = current;
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = queue.status;
	while(unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = queue.status;
	}

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources */
		goto out_free;
	}

	sma = sem_lock(semid);
	if(sma==NULL) {
		if(queue.prev != NULL)
			BUG();
		error = -EIDRM;
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we are woken up by another process
	 */
	error = queue.status;
	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;
	remove_from_queue(sma,&queue);
	goto out_unlock_free;

out_unlock_free:
	sem_unlock(sma);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}

asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
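/* User-space view of the two entry points above (an illustrative sketch,
 * not part of this file; the sembuf layout and semantics follow the
 * semop(2) manual page):
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *
 *	if (semop(semid, &op, 1) == -1)		(blocks until semval >= 1)
 *		perror("semop");
 *
 * The SEM_UNDO flag registers a +1 adjustment in the caller's undo list,
 * which exit_sem() below applies if the task dies without releasing.
 */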

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 *
 * See the notes above unlock_semundo() for why undo_list->lock is
 * initialized here, in copy_semundo(), rather than in get_undo_list().
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		if (atomic_read(&undo_list->refcnt) == 1)
			spin_lock_init(&undo_list->lock);
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}

/*
 * Add semadj values to semaphores, free undo structures.
 * Undo structures are not freed when semaphore arrays are destroyed,
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	struct sem_undo *u, **up;

	undo_list = tsk->sysvsem.undo_list;
	if (!undo_list)
		return;

	if (!atomic_dec_and_test(&undo_list->refcnt))
		return;

	/* There's no need to hold the semundo list lock, as current
	 * is the last task exiting for this undo list.
	 */
	for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
		struct sem_array *sma;
		int nsems, i;
		struct sem_undo *un, **unp;
		int semid;

		semid = u->semid;

		if(semid == -1)
			continue;
		sma = sem_lock(semid);
		if (sma == NULL)
			continue;

		if (u->semid == -1)
			goto next_entry;

		BUG_ON(sem_checkid(sma,u->semid));

		/* remove u from the sma->undo list */
		for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
			if (u == un)
				goto found;
		}
		printk ("exit_sem undo list error id=%d\n", u->semid);
		goto next_entry;
found:
		*unp = un->id_next;
		/* perform adjustments registered in u */
		nsems = sma->sem_nsems;
		for (i = 0; i < nsems; i++) {
			struct sem * sem = &sma->sem_base[i];
			if (u->semadj[i]) {
				sem->semval += u->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by SUS:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (sem->semval < 0)
					sem->semval = 0;
				if (sem->semval > SEMVMX)
					sem->semval = SEMVMX;
				sem->sempid = current->tgid;
			}
		}
		sma->sem_otime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
next_entry:
		sem_unlock(sma);
	}
	kfree(undo_list);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	len += sprintf(buffer, "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n");
	down(&sem_ids.sem);

	for(i = 0; i <= sem_ids.max_id; i++) {
		struct sem_array *sma;
		sma = sem_lock(i);
		if(sma) {
			len += sprintf(buffer + len, "%10d %10d  %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
				sma->sem_perm.key,
				sem_buildid(i,sma->sem_perm.seq),
				sma->sem_perm.mode,
				sma->sem_nsems,
				sma->sem_perm.uid,
				sma->sem_perm.gid,
				sma->sem_perm.cuid,
				sma->sem_perm.cgid,
				sma->sem_otime,
				sma->sem_ctime);
			sem_unlock(sma);

			pos += len;
			if(pos < offset) {
				len = 0;
				begin = pos;
			}
			if(pos > offset + length)
				goto done;
		}
	}
	*eof = 1;
done:
	up(&sem_ids.sem);
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	return len;
}
#endif