Diffstat (limited to 'ipc')
-rw-r--r--      ipc/mqueue.c      2
-rw-r--r--      ipc/msg.c         3
-rw-r--r--      ipc/sem.c       214
-rw-r--r--      ipc/shm.c        43
4 files changed, 171 insertions, 91 deletions
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index ee9d69707c0a..c79bd57353e7 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -32,7 +32,6 @@
 #include <linux/nsproxy.h>
 #include <linux/pid.h>
 #include <linux/ipc_namespace.h>
-#include <linux/ima.h>
 
 #include <net/sock.h>
 #include "util.h"
@@ -734,7 +733,6 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
                 error = PTR_ERR(filp);
                 goto out_putfd;
         }
-        ima_counts_get(filp);
 
         fd_install(fd, filp);
         goto out_upsem;
diff --git a/ipc/msg.c b/ipc/msg.c
index 2ceab7f12fcb..af42ef8900a6 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -125,6 +125,7 @@ void msg_init_ns(struct ipc_namespace *ns)
 void msg_exit_ns(struct ipc_namespace *ns)
 {
         free_ipcs(ns, &msg_ids(ns), freeque);
+        idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
 }
 #endif
 
@@ -412,7 +413,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
                         struct msqid_ds __user *buf, int version)
 {
         struct kern_ipc_perm *ipcp;
-        struct msqid64_ds msqid64;
+        struct msqid64_ds uninitialized_var(msqid64);
         struct msg_queue *msq;
         int err;
 
diff --git a/ipc/sem.c b/ipc/sem.c
index 87c2b641fd7b..dbef95b15941 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -129,6 +129,7 @@ void sem_init_ns(struct ipc_namespace *ns)
 void sem_exit_ns(struct ipc_namespace *ns)
 {
         free_ipcs(ns, &sem_ids(ns), freeary);
+        idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 }
 #endif
 
@@ -240,6 +241,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
         key_t key = params->key;
         int nsems = params->u.nsems;
         int semflg = params->flg;
+        int i;
 
         if (!nsems)
                 return -EINVAL;
@@ -272,6 +274,11 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
         ns->used_sems += nsems;
 
         sma->sem_base = (struct sem *) &sma[1];
+
+        for (i = 0; i < nsems; i++)
+                INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+
+        sma->complex_count = 0;
         INIT_LIST_HEAD(&sma->sem_pending);
         INIT_LIST_HEAD(&sma->list_id);
         sma->sem_nsems = nsems;
@@ -397,63 +404,109 @@ undo:
         return result;
 }
 
-/* Go through the pending queue for the indicated semaphore
- * looking for tasks that can be completed.
+/*
+ * Wake up a process waiting on the sem queue with a given error.
+ * The queue is invalid (may not be accessed) after the function returns.
  */
-static void update_queue (struct sem_array * sma)
+static void wake_up_sem_queue(struct sem_queue *q, int error)
 {
-        int error;
-        struct sem_queue * q;
+        /*
+         * Hold preempt off so that we don't get preempted and have the
+         * wakee busy-wait until we're scheduled back on. We're holding
+         * locks here so it may not strictly be needed, however if the
+         * locks become preemptible then this prevents such a problem.
+         */
+        preempt_disable();
+        q->status = IN_WAKEUP;
+        wake_up_process(q->sleeper);
+        /* hands-off: q can disappear immediately after writing q->status. */
+        smp_wmb();
+        q->status = error;
+        preempt_enable();
+}
+
+static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
+{
+        list_del(&q->list);
+        if (q->nsops == 1)
+                list_del(&q->simple_list);
+        else
+                sma->complex_count--;
+}
+
+
+/**
+ * update_queue(sma, semnum): Look for tasks that can be completed.
+ * @sma: semaphore array.
+ * @semnum: semaphore that was modified.
+ *
+ * update_queue must be called after a semaphore in a semaphore array
+ * was modified. If multiple semaphore were modified, then @semnum
+ * must be set to -1.
+ */
+static void update_queue(struct sem_array *sma, int semnum)
+{
+        struct sem_queue *q;
+        struct list_head *walk;
+        struct list_head *pending_list;
+        int offset;
+
+        /* if there are complex operations around, then knowing the semaphore
+         * that was modified doesn't help us. Assume that multiple semaphores
+         * were modified.
+         */
+        if (sma->complex_count)
+                semnum = -1;
+
+        if (semnum == -1) {
+                pending_list = &sma->sem_pending;
+                offset = offsetof(struct sem_queue, list);
+        } else {
+                pending_list = &sma->sem_base[semnum].sem_pending;
+                offset = offsetof(struct sem_queue, simple_list);
+        }
+
+again:
+        walk = pending_list->next;
+        while (walk != pending_list) {
+                int error, alter;
+
+                q = (struct sem_queue *)((char *)walk - offset);
+                walk = walk->next;
+
+                /* If we are scanning the single sop, per-semaphore list of
+                 * one semaphore and that semaphore is 0, then it is not
+                 * necessary to scan the "alter" entries: simple increments
+                 * that affect only one entry succeed immediately and cannot
+                 * be in the per semaphore pending queue, and decrements
+                 * cannot be successful if the value is already 0.
+                 */
+                if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
+                                q->alter)
+                        break;
 
-        q = list_entry(sma->sem_pending.next, struct sem_queue, list);
-        while (&q->list != &sma->sem_pending) {
                 error = try_atomic_semop(sma, q->sops, q->nsops,
                                          q->undo, q->pid);
 
                 /* Does q->sleeper still need to sleep? */
-                if (error <= 0) {
-                        struct sem_queue *n;
-
-                        /*
-                         * Continue scanning. The next operation
-                         * that must be checked depends on the type of the
-                         * completed operation:
-                         * - if the operation modified the array, then
-                         *   restart from the head of the queue and
-                         *   check for threads that might be waiting
-                         *   for semaphore values to become 0.
-                         * - if the operation didn't modify the array,
-                         *   then just continue.
-                         * The order of list_del() and reading ->next
-                         * is crucial: In the former case, the list_del()
-                         * must be done first [because we might be the
-                         * first entry in ->sem_pending], in the latter
-                         * case the list_del() must be done last
-                         * [because the list is invalid after the list_del()]
-                         */
-                        if (q->alter) {
-                                list_del(&q->list);
-                                n = list_entry(sma->sem_pending.next,
-                                               struct sem_queue, list);
-                        } else {
-                                n = list_entry(q->list.next, struct sem_queue,
-                                               list);
-                                list_del(&q->list);
-                        }
-
-                        /* wake up the waiting thread */
-                        q->status = IN_WAKEUP;
+                if (error > 0)
+                        continue;
 
-                        wake_up_process(q->sleeper);
-                        /* hands-off: q will disappear immediately after
-                         * writing q->status.
-                         */
-                        smp_wmb();
-                        q->status = error;
-                        q = n;
-                } else {
-                        q = list_entry(q->list.next, struct sem_queue, list);
-                }
+                unlink_queue(sma, q);
+
+                /*
+                 * The next operation that must be checked depends on the type
+                 * of the completed operation:
+                 * - if the operation modified the array, then restart from the
+                 *   head of the queue and check for threads that might be
+                 *   waiting for the new semaphore values.
+                 * - if the operation didn't modify the array, then just
+                 *   continue.
+                 */
+                alter = q->alter;
+                wake_up_sem_queue(q, error);
+                if (alter && !error)
+                        goto again;
         }
 }
 
@@ -533,12 +586,8 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 
         /* Wake up all pending processes and let them fail with EIDRM. */
         list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
-                list_del(&q->list);
-
-                q->status = IN_WAKEUP;
-                wake_up_process(q->sleeper); /* doesn't sleep */
-                smp_wmb();
-                q->status = -EIDRM; /* hands-off q */
+                unlink_queue(sma, q);
+                wake_up_sem_queue(q, -EIDRM);
         }
 
         /* Remove the semaphore set from the IDR */
@@ -575,7 +624,7 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
 static int semctl_nolock(struct ipc_namespace *ns, int semid,
                          int cmd, int version, union semun arg)
 {
-        int err = -EINVAL;
+        int err;
         struct sem_array *sma;
 
         switch(cmd) {
@@ -652,7 +701,6 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
         default:
                 return -EINVAL;
         }
-        return err;
 out_unlock:
         sem_unlock(sma);
         return err;
@@ -759,7 +807,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                 }
                 sma->sem_ctime = get_seconds();
                 /* maybe some queued-up processes were waiting for this */
-                update_queue(sma);
+                update_queue(sma, -1);
                 err = 0;
                 goto out_unlock;
         }
@@ -801,7 +849,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                 curr->sempid = task_tgid_vnr(current);
                 sma->sem_ctime = get_seconds();
                 /* maybe some queued-up processes were waiting for this */
-                update_queue(sma);
+                update_queue(sma, semnum);
                 err = 0;
                 goto out_unlock;
         }
@@ -961,17 +1009,31 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
         return 0;
 }
 
-static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
 {
-        struct sem_undo *walk;
+        struct sem_undo *un;
 
-        list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) {
-                if (walk->semid == semid)
-                        return walk;
+        list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+                if (un->semid == semid)
+                        return un;
         }
         return NULL;
 }
 
+static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+{
+        struct sem_undo *un;
+
+        assert_spin_locked(&ulp->lock);
+
+        un = __lookup_undo(ulp, semid);
+        if (un) {
+                list_del_rcu(&un->list_proc);
+                list_add_rcu(&un->list_proc, &ulp->list_proc);
+        }
+        return un;
+}
+
 /**
  * find_alloc_undo - Lookup (and if not present create) undo array
  * @ns: namespace
@@ -1163,7 +1225,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
         error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
         if (error <= 0) {
                 if (alter && error == 0)
-                        update_queue (sma);
+                        update_queue(sma, (nsops == 1) ? sops[0].sem_num : -1);
+
                 goto out_unlock_free;
         }
 
@@ -1181,6 +1244,19 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
         else
                 list_add(&queue.list, &sma->sem_pending);
 
+        if (nsops == 1) {
+                struct sem *curr;
+                curr = &sma->sem_base[sops->sem_num];
+
+                if (alter)
+                        list_add_tail(&queue.simple_list, &curr->sem_pending);
+                else
+                        list_add(&queue.simple_list, &curr->sem_pending);
+        } else {
+                INIT_LIST_HEAD(&queue.simple_list);
+                sma->complex_count++;
+        }
+
         queue.status = -EINTR;
         queue.sleeper = current;
         current->state = TASK_INTERRUPTIBLE;
@@ -1222,7 +1298,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
          */
         if (timeout && jiffies_left == 0)
                 error = -EAGAIN;
-        list_del(&queue.list);
+        unlink_queue(sma, &queue);
 
 out_unlock_free:
         sem_unlock(sma);
@@ -1307,7 +1383,7 @@ void exit_sem(struct task_struct *tsk)
                 if (IS_ERR(sma))
                         continue;
 
-                un = lookup_undo(ulp, semid);
+                un = __lookup_undo(ulp, semid);
                 if (un == NULL) {
                         /* exit_sem raced with IPC_RMID+semget() that created
                          * exactly the same semid. Nothing to do.
@@ -1351,7 +1427,7 @@ void exit_sem(struct task_struct *tsk)
                 }
                 sma->sem_otime = get_seconds();
                 /* maybe some queued-up processes were waiting for this */
-                update_queue(sma);
+                update_queue(sma, -1);
                 sem_unlock(sma);
 
                 call_rcu(&un->rcu, free_un);
@@ -1365,7 +1441,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
         struct sem_array *sma = it;
 
         return seq_printf(s,
-                          "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
+                          "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
                           sma->sem_perm.key,
                           sma->sem_perm.id,
                           sma->sem_perm.mode,
diff --git a/ipc/shm.c b/ipc/shm.c
index 464694e0aa4a..23256b855819 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -39,7 +39,6 @@
 #include <linux/nsproxy.h>
 #include <linux/mount.h>
 #include <linux/ipc_namespace.h>
-#include <linux/ima.h>
 
 #include <asm/uaccess.h>
 
@@ -101,6 +100,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 void shm_exit_ns(struct ipc_namespace *ns)
 {
         free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
+        idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
 }
 #endif
 
@@ -290,28 +290,31 @@ static unsigned long shm_get_unmapped_area(struct file *file,
                                              unsigned long flags)
 {
         struct shm_file_data *sfd = shm_file_data(file);
-        return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
-}
-
-int is_file_shm_hugepages(struct file *file)
-{
-        int ret = 0;
-
-        if (file->f_op == &shm_file_operations) {
-                struct shm_file_data *sfd;
-                sfd = shm_file_data(file);
-                ret = is_file_hugepages(sfd->file);
-        }
-        return ret;
+        return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
+                                                  pgoff, flags);
 }
 
 static const struct file_operations shm_file_operations = {
         .mmap = shm_mmap,
         .fsync = shm_fsync,
         .release = shm_release,
+#ifndef CONFIG_MMU
+        .get_unmapped_area = shm_get_unmapped_area,
+#endif
+};
+
+static const struct file_operations shm_file_operations_huge = {
+        .mmap = shm_mmap,
+        .fsync = shm_fsync,
+        .release = shm_release,
         .get_unmapped_area = shm_get_unmapped_area,
 };
 
+int is_file_shm_hugepages(struct file *file)
+{
+        return file->f_op == &shm_file_operations_huge;
+}
+
 static const struct vm_operations_struct shm_vm_ops = {
         .open = shm_open,       /* callback for a new vm-area open */
         .close = shm_close,     /* callback for when the vm-area is released */
@@ -878,8 +881,8 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
         if (err)
                 goto out_unlock;
 
-        path.dentry = dget(shp->shm_file->f_path.dentry);
-        path.mnt = shp->shm_file->f_path.mnt;
+        path = shp->shm_file->f_path;
+        path_get(&path);
         shp->shm_nattch++;
         size = i_size_read(path.dentry->d_inode);
         shm_unlock(shp);
@@ -889,10 +892,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
         if (!sfd)
                 goto out_put_dentry;
 
-        file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
+        file = alloc_file(&path, f_mode,
+                          is_file_hugepages(shp->shm_file) ?
+                                &shm_file_operations_huge :
+                                &shm_file_operations);
         if (!file)
                 goto out_free;
-        ima_counts_get(file);
 
         file->private_data = sfd;
         file->f_mapping = shp->shm_file->f_mapping;
@@ -947,7 +952,7 @@ out_unlock:
 out_free:
         kfree(sfd);
 out_put_dentry:
-        dput(path.dentry);
+        path_put(&path);
         goto out_nattch;
 }
 