Diffstat (limited to 'ipc')
-rw-r--r--	ipc/msg.c |   1
-rw-r--r--	ipc/sem.c | 214
-rw-r--r--	ipc/shm.c |   1
3 files changed, 147 insertions, 69 deletions
diff --git a/ipc/msg.c b/ipc/msg.c
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -125,6 +125,7 @@ void msg_init_ns(struct ipc_namespace *ns)
 void msg_exit_ns(struct ipc_namespace *ns)
 {
 	free_ipcs(ns, &msg_ids(ns), freeque);
+	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
 }
 #endif
 
diff --git a/ipc/sem.c b/ipc/sem.c
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -129,6 +129,7 @@ void sem_init_ns(struct ipc_namespace *ns)
 void sem_exit_ns(struct ipc_namespace *ns)
 {
 	free_ipcs(ns, &sem_ids(ns), freeary);
+	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 }
 #endif
 
@@ -240,6 +241,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 	key_t key = params->key;
 	int nsems = params->u.nsems;
 	int semflg = params->flg;
+	int i;
 
 	if (!nsems)
 		return -EINVAL;
@@ -272,6 +274,11 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 	ns->used_sems += nsems;
 
 	sma->sem_base = (struct sem *) &sma[1];
+
+	for (i = 0; i < nsems; i++)
+		INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+
+	sma->complex_count = 0;
 	INIT_LIST_HEAD(&sma->sem_pending);
 	INIT_LIST_HEAD(&sma->list_id);
 	sma->sem_nsems = nsems;
@@ -397,63 +404,109 @@ undo:
 	return result;
 }
 
-/* Go through the pending queue for the indicated semaphore
- * looking for tasks that can be completed.
+/*
+ * Wake up a process waiting on the sem queue with a given error.
+ * The queue is invalid (may not be accessed) after the function returns.
  */
-static void update_queue (struct sem_array * sma)
+static void wake_up_sem_queue(struct sem_queue *q, int error)
 {
-	int error;
-	struct sem_queue * q;
+	/*
+	 * Hold preempt off so that we don't get preempted and have the
+	 * wakee busy-wait until we're scheduled back on. We're holding
+	 * locks here so it may not strictly be needed, however if the
+	 * locks become preemptible then this prevents such a problem.
+	 */
+	preempt_disable();
+	q->status = IN_WAKEUP;
+	wake_up_process(q->sleeper);
+	/* hands-off: q can disappear immediately after writing q->status. */
+	smp_wmb();
+	q->status = error;
+	preempt_enable();
+}
+
+static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
+{
+	list_del(&q->list);
+	if (q->nsops == 1)
+		list_del(&q->simple_list);
+	else
+		sma->complex_count--;
+}
+
+
+/**
+ * update_queue(sma, semnum): Look for tasks that can be completed.
+ * @sma: semaphore array.
+ * @semnum: semaphore that was modified.
+ *
+ * update_queue must be called after a semaphore in a semaphore array
+ * was modified. If multiple semaphore were modified, then @semnum
+ * must be set to -1.
+ */
+static void update_queue(struct sem_array *sma, int semnum)
+{
+	struct sem_queue *q;
+	struct list_head *walk;
+	struct list_head *pending_list;
+	int offset;
+
+	/* if there are complex operations around, then knowing the semaphore
+	 * that was modified doesn't help us. Assume that multiple semaphores
+	 * were modified.
+	 */
+	if (sma->complex_count)
+		semnum = -1;
+
+	if (semnum == -1) {
+		pending_list = &sma->sem_pending;
+		offset = offsetof(struct sem_queue, list);
+	} else {
+		pending_list = &sma->sem_base[semnum].sem_pending;
+		offset = offsetof(struct sem_queue, simple_list);
+	}
+
+again:
+	walk = pending_list->next;
+	while (walk != pending_list) {
+		int error, alter;
+
+		q = (struct sem_queue *)((char *)walk - offset);
+		walk = walk->next;
+
+		/* If we are scanning the single sop, per-semaphore list of
+		 * one semaphore and that semaphore is 0, then it is not
+		 * necessary to scan the "alter" entries: simple increments
+		 * that affect only one entry succeed immediately and cannot
+		 * be in the per semaphore pending queue, and decrements
+		 * cannot be successful if the value is already 0.
+		 */
+		if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
+				q->alter)
+			break;
 
-	q = list_entry(sma->sem_pending.next, struct sem_queue, list);
-	while (&q->list != &sma->sem_pending) {
 		error = try_atomic_semop(sma, q->sops, q->nsops,
 					 q->undo, q->pid);
 
 		/* Does q->sleeper still need to sleep? */
-		if (error <= 0) {
-			struct sem_queue *n;
-
-			/*
-			 * Continue scanning. The next operation
-			 * that must be checked depends on the type of the
-			 * completed operation:
-			 * - if the operation modified the array, then
-			 *   restart from the head of the queue and
-			 *   check for threads that might be waiting
-			 *   for semaphore values to become 0.
-			 * - if the operation didn't modify the array,
-			 *   then just continue.
-			 * The order of list_del() and reading ->next
-			 * is crucial: In the former case, the list_del()
-			 * must be done first [because we might be the
-			 * first entry in ->sem_pending], in the latter
-			 * case the list_del() must be done last
-			 * [because the list is invalid after the list_del()]
-			 */
-			if (q->alter) {
-				list_del(&q->list);
-				n = list_entry(sma->sem_pending.next,
-					       struct sem_queue, list);
-			} else {
-				n = list_entry(q->list.next, struct sem_queue,
-					       list);
-				list_del(&q->list);
-			}
-
-			/* wake up the waiting thread */
-			q->status = IN_WAKEUP;
+		if (error > 0)
+			continue;
 
-			wake_up_process(q->sleeper);
-			/* hands-off: q will disappear immediately after
-			 * writing q->status.
-			 */
-			smp_wmb();
-			q->status = error;
-			q = n;
-		} else {
-			q = list_entry(q->list.next, struct sem_queue, list);
-		}
+		unlink_queue(sma, q);
+
+		/*
+		 * The next operation that must be checked depends on the type
+		 * of the completed operation:
+		 * - if the operation modified the array, then restart from the
+		 *   head of the queue and check for threads that might be
+		 *   waiting for the new semaphore values.
+		 * - if the operation didn't modify the array, then just
+		 *   continue.
+		 */
+		alter = q->alter;
+		wake_up_sem_queue(q, error);
+		if (alter && !error)
+			goto again;
 	}
 }
 
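The new update_queue() walks either the global sem_pending list or one per-semaphore list with the same loop by carrying the offset of the relevant list_head member and converting each node back to its sem_queue with pointer arithmetic. The following is a minimal userspace sketch of that offset-based walk; the struct and function names are simplified stand-ins for illustration, not the real ipc/sem.c types.

/*
 * Illustrative sketch only (not kernel code): one walker traverses either
 * the "global" list or a "per-semaphore" list by being told the offset of
 * the list_head member inside each entry, as update_queue() does above.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

/* A pending operation can sit on either of two lists inside the same entry. */
struct pending_op {
	int id;
	struct list_head list;        /* analogue of sma->sem_pending linkage */
	struct list_head simple_list; /* analogue of the per-semaphore linkage */
};

/* Walk whichever list we were given, recovering each entry via the offset. */
static void walk_pending(struct list_head *pending_list, size_t offset)
{
	struct list_head *walk = pending_list->next;

	while (walk != pending_list) {
		struct pending_op *q = (struct pending_op *)((char *)walk - offset);
		walk = walk->next;
		printf("pending op %d\n", q->id);
	}
}

int main(void)
{
	struct list_head global, per_sem;
	struct pending_op a = { .id = 1 }, b = { .id = 2 };

	list_init(&global);
	list_init(&per_sem);
	list_add_tail(&a.list, &global);
	list_add_tail(&b.list, &global);
	list_add_tail(&a.simple_list, &per_sem);

	/* Same walker, different list head and different member offset. */
	walk_pending(&global, offsetof(struct pending_op, list));
	walk_pending(&per_sem, offsetof(struct pending_op, simple_list));
	return 0;
}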
@@ -533,12 +586,8 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 
 	/* Wake up all pending processes and let them fail with EIDRM. */
 	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
-		list_del(&q->list);
-
-		q->status = IN_WAKEUP;
-		wake_up_process(q->sleeper); /* doesn't sleep */
-		smp_wmb();
-		q->status = -EIDRM;	/* hands-off q */
+		unlink_queue(sma, q);
+		wake_up_sem_queue(q, -EIDRM);
 	}
 
 	/* Remove the semaphore set from the IDR */
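Both update_queue() and the freeary() path above now wake sleepers through wake_up_sem_queue(), which publishes a transient IN_WAKEUP status before the final result so the woken task cannot return (and reclaim its on-stack sem_queue) while the waker is still writing to it. Below is a rough userspace analogue of that handshake, with C11 atomics standing in for wake_up_process()/smp_wmb(); it is a sketch under those assumptions, not kernel code, and the Q_* names are made up for the demo.

/* Sketch of the IN_WAKEUP "hands-off" handshake. Build with -pthread. */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define Q_PENDING   2	/* demo stand-in for "still sleeping" */
#define Q_IN_WAKEUP 1	/* demo stand-in for the kernel's IN_WAKEUP */

struct sem_queue_demo {
	atomic_int status;	/* Q_PENDING -> Q_IN_WAKEUP -> final result */
};

static struct sem_queue_demo q = { .status = Q_PENDING };

static void *waker(void *arg)
{
	(void)arg;
	/* Mark the entry as "being woken" before the real result is visible. */
	atomic_store_explicit(&q.status, Q_IN_WAKEUP, memory_order_relaxed);
	/* (the kernel calls wake_up_process(q->sleeper) at this point) */
	/* hands-off: publish the final status last (smp_wmb() analogue). */
	atomic_store_explicit(&q.status, 0, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int status;

	pthread_create(&t, NULL, waker, NULL);

	/* Sleeper side: spin through the transient Q_IN_WAKEUP window rather
	 * than tearing down the queue entry the moment it is woken. */
	do {
		status = atomic_load_explicit(&q.status, memory_order_acquire);
	} while (status == Q_PENDING || status == Q_IN_WAKEUP);

	printf("woken with status %d\n", status);	/* 0 == success */
	pthread_join(t, NULL);
	return 0;
}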
@@ -575,7 +624,7 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
 static int semctl_nolock(struct ipc_namespace *ns, int semid,
 			 int cmd, int version, union semun arg)
 {
-	int err = -EINVAL;
+	int err;
 	struct sem_array *sma;
 
 	switch(cmd) {
@@ -652,7 +701,6 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
 	default:
 		return -EINVAL;
 	}
-	return err;
 out_unlock:
 	sem_unlock(sma);
 	return err;
@@ -759,7 +807,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		}
 		sma->sem_ctime = get_seconds();
 		/* maybe some queued-up processes were waiting for this */
-		update_queue(sma);
+		update_queue(sma, -1);
 		err = 0;
 		goto out_unlock;
 	}
@@ -801,7 +849,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		curr->sempid = task_tgid_vnr(current);
 		sma->sem_ctime = get_seconds();
 		/* maybe some queued-up processes were waiting for this */
-		update_queue(sma);
+		update_queue(sma, semnum);
 		err = 0;
 		goto out_unlock;
 	}
@@ -961,17 +1009,31 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 	return 0;
 }
 
-static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
 {
-	struct sem_undo *walk;
+	struct sem_undo *un;
 
-	list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) {
-		if (walk->semid == semid)
-			return walk;
+	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+		if (un->semid == semid)
+			return un;
 	}
 	return NULL;
 }
 
+static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+{
+	struct sem_undo *un;
+
+	assert_spin_locked(&ulp->lock);
+
+	un = __lookup_undo(ulp, semid);
+	if (un) {
+		list_del_rcu(&un->list_proc);
+		list_add_rcu(&un->list_proc, &ulp->list_proc);
+	}
+	return un;
+}
+
 /**
  * find_alloc_undo - Lookup (and if not present create) undo array
  * @ns: namespace
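The split above keeps __lookup_undo() as the plain search while the new lookup_undo() additionally rotates a hit to the front of the per-process list, so a task that keeps operating on the same semaphore set finds its undo entry first. A small userspace move-to-front sketch follows; it uses an ordinary doubly-linked list with made-up names, not the kernel's RCU list API.

/* Sketch of the move-to-front idea behind the new lookup_undo(). */
#include <stdio.h>

struct undo {
	int semid;
	struct undo *prev, *next;	/* circular list with a dummy head */
};

static void unlink_node(struct undo *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void add_front(struct undo *head, struct undo *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

/* Find semid; on a hit, move the entry to the front (MRU position). */
static struct undo *lookup_undo_mtf(struct undo *head, int semid)
{
	for (struct undo *u = head->next; u != head; u = u->next) {
		if (u->semid == semid) {
			unlink_node(u);
			add_front(head, u);
			return u;
		}
	}
	return NULL;
}

int main(void)
{
	struct undo head = { .semid = -1 };
	struct undo a = { .semid = 10 }, b = { .semid = 20 };

	head.prev = head.next = &head;
	add_front(&head, &a);
	add_front(&head, &b);		/* list order: 20, 10 */

	lookup_undo_mtf(&head, 10);	/* hit on 10 rotates it forward */
	printf("front is semid %d\n", head.next->semid);	/* prints 10 */
	return 0;
}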
@@ -1163,7 +1225,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
 	if (error <= 0) {
 		if (alter && error == 0)
-			update_queue (sma);
+			update_queue(sma, (nsops == 1) ? sops[0].sem_num : -1);
+
 		goto out_unlock_free;
 	}
 
@@ -1181,6 +1244,19 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	else
 		list_add(&queue.list, &sma->sem_pending);
 
+	if (nsops == 1) {
+		struct sem *curr;
+		curr = &sma->sem_base[sops->sem_num];
+
+		if (alter)
+			list_add_tail(&queue.simple_list, &curr->sem_pending);
+		else
+			list_add(&queue.simple_list, &curr->sem_pending);
+	} else {
+		INIT_LIST_HEAD(&queue.simple_list);
+		sma->complex_count++;
+	}
+
 	queue.status = -EINTR;
 	queue.sleeper = current;
 	current->state = TASK_INTERRUPTIBLE;
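The queueing logic above is what separates "simple" operations (a single sembuf, queued on the per-semaphore pending list) from "complex" ones (multiple sembufs, counted in sma->complex_count and left only on the global list). From userspace the distinction is simply the nsops argument to semop(); the sketch below shows both cases. It is illustrative only: the comments about which kernel list a blocked caller would land on describe this patch, not anything visible through the semop() API.

/* Simple vs. complex SysV semaphore operations as seen from userspace. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	/* Private set with two semaphores; on Linux they start at 0. */
	int semid = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
	if (semid < 0) {
		perror("semget");
		return 1;
	}

	/* nsops == 1: a "simple" operation; with this patch a blocked caller
	 * would sit on the per-semaphore pending list. */
	struct sembuf up = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
	if (semop(semid, &up, 1) < 0)
		perror("semop(simple)");

	/* nsops > 1: a "complex" operation, counted in complex_count and
	 * kept only on the global sem_pending list. */
	struct sembuf pair[2] = {
		{ .sem_num = 0, .sem_op = -1, .sem_flg = IPC_NOWAIT },
		{ .sem_num = 1, .sem_op =  1, .sem_flg = 0 },
	};
	if (semop(semid, pair, 2) < 0)
		perror("semop(complex)");

	(void)semctl(semid, 0, IPC_RMID);
	return 0;
}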
@@ -1222,7 +1298,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	 */
 	if (timeout && jiffies_left == 0)
 		error = -EAGAIN;
-	list_del(&queue.list);
+	unlink_queue(sma, &queue);
 
 out_unlock_free:
 	sem_unlock(sma);
@@ -1307,7 +1383,7 @@ void exit_sem(struct task_struct *tsk)
 		if (IS_ERR(sma))
 			continue;
 
-		un = lookup_undo(ulp, semid);
+		un = __lookup_undo(ulp, semid);
 		if (un == NULL) {
 			/* exit_sem raced with IPC_RMID+semget() that created
 			 * exactly the same semid. Nothing to do.
@@ -1351,7 +1427,7 @@ void exit_sem(struct task_struct *tsk)
 		}
 		sma->sem_otime = get_seconds();
 		/* maybe some queued-up processes were waiting for this */
-		update_queue(sma);
+		update_queue(sma, -1);
 		sem_unlock(sma);
 
 		call_rcu(&un->rcu, free_un);
@@ -1365,7 +1441,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
 	struct sem_array *sma = it;
 
 	return seq_printf(s,
-			  "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
+			  "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
 			  sma->sem_perm.key,
 			  sma->sem_perm.id,
 			  sma->sem_perm.mode,
diff --git a/ipc/shm.c b/ipc/shm.c
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -100,6 +100,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 void shm_exit_ns(struct ipc_namespace *ns)
 {
 	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
+	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
 }
 #endif
 