Diffstat (limited to 'ipc')
-rw-r--r--  ipc/mqueue.c |   7
-rw-r--r--  ipc/msg.c    | 253
-rw-r--r--  ipc/sem.c    | 308
-rw-r--r--  ipc/shm.c    | 316
-rw-r--r--  ipc/util.c   | 506
-rw-r--r--  ipc/util.h   | 168
6 files changed, 930 insertions(+), 628 deletions(-)
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 20f1fed8fa48..c0b26dc4617b 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -29,6 +29,8 @@
 #include <linux/audit.h>
 #include <linux/signal.h>
 #include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/pid.h>
 
 #include <net/sock.h>
 #include "util.h"
@@ -330,7 +332,8 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
                         (info->notify_owner &&
                          info->notify.sigev_notify == SIGEV_SIGNAL) ?
                                 info->notify.sigev_signo : 0,
-                        pid_nr(info->notify_owner));
+                        pid_nr_ns(info->notify_owner,
+                                current->nsproxy->pid_ns));
         spin_unlock(&info->lock);
         buffer[sizeof(buffer)-1] = '\0';
         slen = strlen(buffer)+1;
@@ -507,7 +510,7 @@ static void __do_notify(struct mqueue_inode_info *info)
                 sig_i.si_errno = 0;
                 sig_i.si_code = SI_MESGQ;
                 sig_i.si_value = info->notify.sigev_value;
-                sig_i.si_pid = current->tgid;
+                sig_i.si_pid = task_pid_vnr(current);
                 sig_i.si_uid = current->uid;
 
                 kill_pid_info(info->notify.sigev_signo,
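
A recurring theme in the msg.c, sem.c and shm.c conversions below is that the per-subsystem lock helpers stop returning NULL on failure and start returning ERR_PTR-encoded error codes, so callers can tell a bad id (-EINVAL) apart from a removed one (-EIDRM). A minimal userspace sketch of that error-pointer idiom follows; ERR_PTR/IS_ERR/PTR_ERR are re-implemented here for illustration (in the kernel they come from <linux/err.h>), and the lookup table is made up:

#include <errno.h>
#include <stdio.h>

/* Userspace stand-ins for the <linux/err.h> helpers: small negative errno
 * values live at the very top of the pointer range, so one return value
 * can carry either a valid object pointer or an error code. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error)     { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct msg_queue { int id; };

/* Toy lookup in the style of the new msg_lock_check(): a failed lookup
 * reports why it failed instead of an information-free NULL. */
static struct msg_queue *msg_lock_check(int id)
{
        static struct msg_queue table[4] = { {0}, {1}, {2}, {3} };

        if (id < 0 || id >= 4)
                return ERR_PTR(-EINVAL);        /* id was never valid */
        return &table[id];
}

int main(void)
{
        struct msg_queue *msq = msg_lock_check(42);

        if (IS_ERR(msq))
                printf("lookup failed: %ld\n", PTR_ERR(msq));   /* -22 */
        else
                printf("got queue %d\n", msq->id);
        return 0;
}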
diff --git a/ipc/msg.c b/ipc/msg.c
index a03fcb522fff..fdf3db5731ce 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -34,7 +34,7 @@
 #include <linux/syscalls.h>
 #include <linux/audit.h>
 #include <linux/seq_file.h>
-#include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/nsproxy.h>
 
 #include <asm/current.h>
@@ -66,23 +66,15 @@ struct msg_sender {
 #define SEARCH_NOTEQUAL 3
 #define SEARCH_LESSEQUAL 4
 
-static atomic_t msg_bytes = ATOMIC_INIT(0);
-static atomic_t msg_hdrs = ATOMIC_INIT(0);
-
 static struct ipc_ids init_msg_ids;
 
 #define msg_ids(ns) (*((ns)->ids[IPC_MSG_IDS]))
 
-#define msg_lock(ns, id) ((struct msg_queue*)ipc_lock(&msg_ids(ns), id))
 #define msg_unlock(msq) ipc_unlock(&(msq)->q_perm)
-#define msg_rmid(ns, id) ((struct msg_queue*)ipc_rmid(&msg_ids(ns), id))
-#define msg_checkid(ns, msq, msgid) \
-        ipc_checkid(&msg_ids(ns), &msq->q_perm, msgid)
-#define msg_buildid(ns, id, seq) \
-        ipc_buildid(&msg_ids(ns), id, seq)
-
-static void freeque (struct ipc_namespace *ns, struct msg_queue *msq, int id);
-static int newque (struct ipc_namespace *ns, key_t key, int msgflg);
+#define msg_buildid(id, seq) ipc_buildid(id, seq)
+
+static void freeque(struct ipc_namespace *, struct msg_queue *);
+static int newque(struct ipc_namespace *, struct ipc_params *);
 #ifdef CONFIG_PROC_FS
 static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
 #endif
@@ -93,7 +85,9 @@ static void __msg_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
         ns->msg_ctlmax = MSGMAX;
         ns->msg_ctlmnb = MSGMNB;
         ns->msg_ctlmni = MSGMNI;
-        ipc_init_ids(ids, ns->msg_ctlmni);
+        atomic_set(&ns->msg_bytes, 0);
+        atomic_set(&ns->msg_hdrs, 0);
+        ipc_init_ids(ids);
 }
 
 int msg_init_ns(struct ipc_namespace *ns)
@@ -110,20 +104,25 @@ int msg_init_ns(struct ipc_namespace *ns)
 
 void msg_exit_ns(struct ipc_namespace *ns)
 {
-        int i;
         struct msg_queue *msq;
+        int next_id;
+        int total, in_use;
+
+        down_write(&msg_ids(ns).rw_mutex);
+
+        in_use = msg_ids(ns).in_use;
 
-        mutex_lock(&msg_ids(ns).mutex);
-        for (i = 0; i <= msg_ids(ns).max_id; i++) {
-                msq = msg_lock(ns, i);
+        for (total = 0, next_id = 0; total < in_use; next_id++) {
+                msq = idr_find(&msg_ids(ns).ipcs_idr, next_id);
                 if (msq == NULL)
                         continue;
-
-                freeque(ns, msq, i);
+                ipc_lock_by_ptr(&msq->q_perm);
+                freeque(ns, msq);
+                total++;
         }
-        mutex_unlock(&msg_ids(ns).mutex);
 
-        ipc_fini_ids(ns->ids[IPC_MSG_IDS]);
+        up_write(&msg_ids(ns).rw_mutex);
+
         kfree(ns->ids[IPC_MSG_IDS]);
         ns->ids[IPC_MSG_IDS] = NULL;
 }
@@ -136,10 +135,55 @@ void __init msg_init(void)
                                 IPC_MSG_IDS, sysvipc_msg_proc_show);
 }
 
-static int newque (struct ipc_namespace *ns, key_t key, int msgflg)
+/*
+ * This routine is called in the paths where the rw_mutex is held to protect
+ * access to the idr tree.
+ */
+static inline struct msg_queue *msg_lock_check_down(struct ipc_namespace *ns,
+                                                int id)
+{
+        struct kern_ipc_perm *ipcp = ipc_lock_check_down(&msg_ids(ns), id);
+
+        return container_of(ipcp, struct msg_queue, q_perm);
+}
+
+/*
+ * msg_lock_(check_) routines are called in the paths where the rw_mutex
+ * is not held.
+ */
+static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
+{
+        struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);
+
+        return container_of(ipcp, struct msg_queue, q_perm);
+}
+
+static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
+                                                int id)
+{
+        struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);
+
+        return container_of(ipcp, struct msg_queue, q_perm);
+}
+
+static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
+{
+        ipc_rmid(&msg_ids(ns), &s->q_perm);
+}
+
+/**
+ * newque - Create a new msg queue
+ * @ns: namespace
+ * @params: ptr to the structure that contains the key and msgflg
+ *
+ * Called with msg_ids.rw_mutex held (writer)
+ */
+static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 {
         struct msg_queue *msq;
         int id, retval;
+        key_t key = params->key;
+        int msgflg = params->flg;
 
         msq = ipc_rcu_alloc(sizeof(*msq));
         if (!msq)
@@ -155,14 +199,17 @@ static int newque (struct ipc_namespace *ns, key_t key, int msgflg)
                 return retval;
         }
 
+        /*
+         * ipc_addid() locks msq
+         */
         id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
-        if (id == -1) {
+        if (id < 0) {
                 security_msg_queue_free(msq);
                 ipc_rcu_putref(msq);
-                return -ENOSPC;
+                return id;
         }
 
-        msq->q_id = msg_buildid(ns, id, msq->q_perm.seq);
+        msq->q_perm.id = msg_buildid(id, msq->q_perm.seq);
         msq->q_stime = msq->q_rtime = 0;
         msq->q_ctime = get_seconds();
         msq->q_cbytes = msq->q_qnum = 0;
@@ -171,9 +218,10 @@ static int newque (struct ipc_namespace *ns, key_t key, int msgflg)
         INIT_LIST_HEAD(&msq->q_messages);
         INIT_LIST_HEAD(&msq->q_receivers);
         INIT_LIST_HEAD(&msq->q_senders);
+
         msg_unlock(msq);
 
-        return msq->q_id;
+        return msq->q_perm.id;
 }
 
 static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
@@ -224,19 +272,19 @@ static void expunge_all(struct msg_queue *msq, int res)
 
 /*
  * freeque() wakes up waiters on the sender and receiver waiting queue,
- * removes the message queue from message queue ID
- * array, and cleans up all the messages associated with this queue.
+ * removes the message queue from message queue ID IDR, and cleans up all the
+ * messages associated with this queue.
  *
- * msg_ids.mutex and the spinlock for this message queue is hold
- * before freeque() is called. msg_ids.mutex remains locked on exit.
+ * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
+ * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
  */
-static void freeque(struct ipc_namespace *ns, struct msg_queue *msq, int id)
+static void freeque(struct ipc_namespace *ns, struct msg_queue *msq)
 {
         struct list_head *tmp;
 
         expunge_all(msq, -EIDRM);
         ss_wakeup(&msq->q_senders, 1);
-        msq = msg_rmid(ns, id);
+        msg_rmid(ns, msq);
         msg_unlock(msq);
 
         tmp = msq->q_messages.next;
@@ -244,49 +292,40 @@ static void freeque(struct ipc_namespace *ns, struct msg_queue *msq, int id)
                 struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);
 
                 tmp = tmp->next;
-                atomic_dec(&msg_hdrs);
+                atomic_dec(&ns->msg_hdrs);
                 free_msg(msg);
         }
-        atomic_sub(msq->q_cbytes, &msg_bytes);
+        atomic_sub(msq->q_cbytes, &ns->msg_bytes);
         security_msg_queue_free(msq);
         ipc_rcu_putref(msq);
 }
 
+/*
+ * Called with msg_ids.rw_mutex and ipcp locked.
+ */
+static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
+{
+        struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
+
+        return security_msg_queue_associate(msq, msgflg);
+}
+
 asmlinkage long sys_msgget(key_t key, int msgflg)
 {
-        struct msg_queue *msq;
-        int id, ret = -EPERM;
         struct ipc_namespace *ns;
+        struct ipc_ops msg_ops;
+        struct ipc_params msg_params;
 
         ns = current->nsproxy->ipc_ns;
-
-        mutex_lock(&msg_ids(ns).mutex);
-        if (key == IPC_PRIVATE)
-                ret = newque(ns, key, msgflg);
-        else if ((id = ipc_findkey(&msg_ids(ns), key)) == -1) { /* key not used */
-                if (!(msgflg & IPC_CREAT))
-                        ret = -ENOENT;
-                else
-                        ret = newque(ns, key, msgflg);
-        } else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
-                ret = -EEXIST;
-        } else {
-                msq = msg_lock(ns, id);
-                BUG_ON(msq == NULL);
-                if (ipcperms(&msq->q_perm, msgflg))
-                        ret = -EACCES;
-                else {
-                        int qid = msg_buildid(ns, id, msq->q_perm.seq);
-
-                        ret = security_msg_queue_associate(msq, msgflg);
-                        if (!ret)
-                                ret = qid;
-                }
-                msg_unlock(msq);
-        }
-        mutex_unlock(&msg_ids(ns).mutex);
 
-        return ret;
+        msg_ops.getnew = newque;
+        msg_ops.associate = msg_security;
+        msg_ops.more_checks = NULL;
+
+        msg_params.key = key;
+        msg_params.flg = msgflg;
+
+        return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
 }
 
 static inline unsigned long
@@ -420,23 +459,23 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
                 msginfo.msgmnb = ns->msg_ctlmnb;
                 msginfo.msgssz = MSGSSZ;
                 msginfo.msgseg = MSGSEG;
-                mutex_lock(&msg_ids(ns).mutex);
+                down_read(&msg_ids(ns).rw_mutex);
                 if (cmd == MSG_INFO) {
                         msginfo.msgpool = msg_ids(ns).in_use;
-                        msginfo.msgmap = atomic_read(&msg_hdrs);
-                        msginfo.msgtql = atomic_read(&msg_bytes);
+                        msginfo.msgmap = atomic_read(&ns->msg_hdrs);
+                        msginfo.msgtql = atomic_read(&ns->msg_bytes);
                 } else {
                         msginfo.msgmap = MSGMAP;
                         msginfo.msgpool = MSGPOOL;
                         msginfo.msgtql = MSGTQL;
                 }
-                max_id = msg_ids(ns).max_id;
-                mutex_unlock(&msg_ids(ns).mutex);
+                max_id = ipc_get_maxid(&msg_ids(ns));
+                up_read(&msg_ids(ns).rw_mutex);
                 if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
                         return -EFAULT;
                 return (max_id < 0) ? 0 : max_id;
         }
-        case MSG_STAT:
+        case MSG_STAT:  /* msqid is an index rather than a msg queue id */
         case IPC_STAT:
         {
                 struct msqid64_ds tbuf;
@@ -444,21 +483,16 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
 
                 if (!buf)
                         return -EFAULT;
-                if (cmd == MSG_STAT && msqid >= msg_ids(ns).entries->size)
-                        return -EINVAL;
-
-                memset(&tbuf, 0, sizeof(tbuf));
-
-                msq = msg_lock(ns, msqid);
-                if (msq == NULL)
-                        return -EINVAL;
 
                 if (cmd == MSG_STAT) {
-                        success_return = msg_buildid(ns, msqid, msq->q_perm.seq);
+                        msq = msg_lock(ns, msqid);
+                        if (IS_ERR(msq))
+                                return PTR_ERR(msq);
+                        success_return = msq->q_perm.id;
                 } else {
-                        err = -EIDRM;
-                        if (msg_checkid(ns, msq, msqid))
-                                goto out_unlock;
+                        msq = msg_lock_check(ns, msqid);
+                        if (IS_ERR(msq))
+                                return PTR_ERR(msq);
                         success_return = 0;
                 }
                 err = -EACCES;
@@ -469,6 +503,8 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
                 if (err)
                         goto out_unlock;
 
+                memset(&tbuf, 0, sizeof(tbuf));
+
                 kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
                 tbuf.msg_stime = msq->q_stime;
                 tbuf.msg_rtime = msq->q_rtime;
@@ -495,15 +531,13 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
                 return -EINVAL;
         }
 
-        mutex_lock(&msg_ids(ns).mutex);
-        msq = msg_lock(ns, msqid);
-        err = -EINVAL;
-        if (msq == NULL)
+        down_write(&msg_ids(ns).rw_mutex);
+        msq = msg_lock_check_down(ns, msqid);
+        if (IS_ERR(msq)) {
+                err = PTR_ERR(msq);
                 goto out_up;
+        }
 
-        err = -EIDRM;
-        if (msg_checkid(ns, msq, msqid))
-                goto out_unlock_up;
         ipcp = &msq->q_perm;
 
         err = audit_ipc_obj(ipcp);
@@ -552,12 +586,12 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
                 break;
         }
         case IPC_RMID:
-                freeque(ns, msq, msqid);
+                freeque(ns, msq);
                 break;
         }
         err = 0;
 out_up:
-        mutex_unlock(&msg_ids(ns).mutex);
+        up_write(&msg_ids(ns).rw_mutex);
         return err;
 out_unlock_up:
         msg_unlock(msq);
@@ -611,7 +645,7 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
                         msr->r_msg = ERR_PTR(-E2BIG);
                 } else {
                         msr->r_msg = NULL;
-                        msq->q_lrpid = msr->r_tsk->pid;
+                        msq->q_lrpid = task_pid_vnr(msr->r_tsk);
                         msq->q_rtime = get_seconds();
                         wake_up_process(msr->r_tsk);
                         smp_mb();
@@ -646,14 +680,11 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
         msg->m_type = mtype;
         msg->m_ts = msgsz;
 
-        msq = msg_lock(ns, msqid);
-        err = -EINVAL;
-        if (msq == NULL)
+        msq = msg_lock_check(ns, msqid);
+        if (IS_ERR(msq)) {
+                err = PTR_ERR(msq);
                 goto out_free;
-
-        err= -EIDRM;
-        if (msg_checkid(ns, msq, msqid))
-                goto out_unlock_free;
+        }
 
         for (;;) {
                 struct msg_sender s;
@@ -695,7 +726,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                 }
         }
 
-        msq->q_lspid = current->tgid;
+        msq->q_lspid = task_tgid_vnr(current);
         msq->q_stime = get_seconds();
 
         if (!pipelined_send(msq, msg)) {
@@ -703,8 +734,8 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                 list_add_tail(&msg->m_list, &msq->q_messages);
                 msq->q_cbytes += msgsz;
                 msq->q_qnum++;
-                atomic_add(msgsz, &msg_bytes);
-                atomic_inc(&msg_hdrs);
+                atomic_add(msgsz, &ns->msg_bytes);
+                atomic_inc(&ns->msg_hdrs);
         }
 
         err = 0;
@@ -760,13 +791,9 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
         mode = convert_mode(&msgtyp, msgflg);
         ns = current->nsproxy->ipc_ns;
 
-        msq = msg_lock(ns, msqid);
-        if (msq == NULL)
-                return -EINVAL;
-
-        msg = ERR_PTR(-EIDRM);
-        if (msg_checkid(ns, msq, msqid))
-                goto out_unlock;
+        msq = msg_lock_check(ns, msqid);
+        if (IS_ERR(msq))
+                return PTR_ERR(msq);
 
         for (;;) {
                 struct msg_receiver msr_d;
@@ -810,10 +837,10 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
                 list_del(&msg->m_list);
                 msq->q_qnum--;
                 msq->q_rtime = get_seconds();
-                msq->q_lrpid = current->tgid;
+                msq->q_lrpid = task_tgid_vnr(current);
                 msq->q_cbytes -= msg->m_ts;
-                atomic_sub(msg->m_ts, &msg_bytes);
-                atomic_dec(&msg_hdrs);
+                atomic_sub(msg->m_ts, &ns->msg_bytes);
+                atomic_dec(&ns->msg_hdrs);
                 ss_wakeup(&msq->q_senders, 0);
                 msg_unlock(msq);
                 break;
@@ -926,7 +953,7 @@ static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
         return seq_printf(s,
                 "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
                 msq->q_perm.key,
-                msq->q_id,
+                msq->q_perm.id,
                 msq->q_perm.mode,
                 msq->q_cbytes,
                 msq->q_qnum,
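
Note how sys_msgget() above collapses into three callback assignments plus a single ipcget() call; sys_semget() and sys_shmget() below follow the identical pattern. A compressed userspace sketch of that dispatch shape follows — the ipcget() body here is a hypothetical simplification of the real one in ipc/util.c, and find_key() is a stub:

#include <errno.h>
#include <stdio.h>

#define IPC_PRIVATE 0
#define IPC_CREAT   01000
#define IPC_EXCL    02000

struct ipc_params { int key; int flg; };

/* The three callbacks each IPC type supplies, as in the patch: getnew()
 * creates a fresh object, associate() runs the permission/security check
 * against an existing one, more_checks() is optional extra validation. */
struct ipc_ops {
        int (*getnew)(struct ipc_params *);
        int (*associate)(int id, int flg);
        int (*more_checks)(int id, struct ipc_params *);
};

static int find_key(int key) { return -1; }     /* stub: no key exists yet */

/* Simplified control flow: the if/else ladders previously open-coded in
 * sys_msgget/sys_semget/sys_shmget reduce to this one helper. */
static int ipcget(struct ipc_ops *ops, struct ipc_params *params)
{
        int id, err;

        if (params->key == IPC_PRIVATE)
                return ops->getnew(params);

        id = find_key(params->key);
        if (id < 0)
                return (params->flg & IPC_CREAT) ? ops->getnew(params)
                                                 : -ENOENT;
        if ((params->flg & IPC_CREAT) && (params->flg & IPC_EXCL))
                return -EEXIST;
        if (ops->more_checks) {
                err = ops->more_checks(id, params);
                if (err)
                        return err;
        }
        err = ops->associate(id, params->flg);
        return err ? err : id;
}

static int newque(struct ipc_params *p)  { return 100; }  /* fake new id */
static int msg_security(int id, int flg) { return 0; }

int main(void)
{
        struct ipc_ops msg_ops = { newque, msg_security, NULL };
        struct ipc_params p = { IPC_PRIVATE, IPC_CREAT };

        printf("msgget -> %d\n", ipcget(&msg_ops, &p));   /* prints 100 */
        return 0;
}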
diff --git a/ipc/sem.c b/ipc/sem.c
index b676fef6d208..35952c0bae46 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -80,7 +80,7 @@
 #include <linux/audit.h>
 #include <linux/capability.h>
 #include <linux/seq_file.h>
-#include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/nsproxy.h>
 
 #include <asm/uaccess.h>
@@ -88,18 +88,14 @@
 
 #define sem_ids(ns) (*((ns)->ids[IPC_SEM_IDS]))
 
-#define sem_lock(ns, id) ((struct sem_array*)ipc_lock(&sem_ids(ns), id))
 #define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
-#define sem_rmid(ns, id) ((struct sem_array*)ipc_rmid(&sem_ids(ns), id))
-#define sem_checkid(ns, sma, semid) \
-        ipc_checkid(&sem_ids(ns),&sma->sem_perm,semid)
-#define sem_buildid(ns, id, seq) \
-        ipc_buildid(&sem_ids(ns), id, seq)
+#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
+#define sem_buildid(id, seq) ipc_buildid(id, seq)
 
 static struct ipc_ids init_sem_ids;
 
-static int newary(struct ipc_namespace *, key_t, int, int);
-static void freeary(struct ipc_namespace *ns, struct sem_array *sma, int id);
+static int newary(struct ipc_namespace *, struct ipc_params *);
+static void freeary(struct ipc_namespace *, struct sem_array *);
 #ifdef CONFIG_PROC_FS
 static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 #endif
@@ -129,7 +125,7 @@ static void __sem_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
         ns->sc_semopm = SEMOPM;
         ns->sc_semmni = SEMMNI;
         ns->used_sems = 0;
-        ipc_init_ids(ids, ns->sc_semmni);
+        ipc_init_ids(ids);
 }
 
 int sem_init_ns(struct ipc_namespace *ns)
@@ -146,20 +142,24 @@ int sem_init_ns(struct ipc_namespace *ns)
 
 void sem_exit_ns(struct ipc_namespace *ns)
 {
-        int i;
         struct sem_array *sma;
+        int next_id;
+        int total, in_use;
 
-        mutex_lock(&sem_ids(ns).mutex);
-        for (i = 0; i <= sem_ids(ns).max_id; i++) {
-                sma = sem_lock(ns, i);
+        down_write(&sem_ids(ns).rw_mutex);
+
+        in_use = sem_ids(ns).in_use;
+
+        for (total = 0, next_id = 0; total < in_use; next_id++) {
+                sma = idr_find(&sem_ids(ns).ipcs_idr, next_id);
                 if (sma == NULL)
                         continue;
-
-                freeary(ns, sma, i);
+                ipc_lock_by_ptr(&sma->sem_perm);
+                freeary(ns, sma);
+                total++;
         }
-        mutex_unlock(&sem_ids(ns).mutex);
+        up_write(&sem_ids(ns).rw_mutex);
 
-        ipc_fini_ids(ns->ids[IPC_SEM_IDS]);
         kfree(ns->ids[IPC_SEM_IDS]);
         ns->ids[IPC_SEM_IDS] = NULL;
 }
@@ -173,6 +173,42 @@ void __init sem_init (void)
 }
 
 /*
+ * This routine is called in the paths where the rw_mutex is held to protect
+ * access to the idr tree.
+ */
+static inline struct sem_array *sem_lock_check_down(struct ipc_namespace *ns,
+                                                int id)
+{
+        struct kern_ipc_perm *ipcp = ipc_lock_check_down(&sem_ids(ns), id);
+
+        return container_of(ipcp, struct sem_array, sem_perm);
+}
+
+/*
+ * sem_lock_(check_) routines are called in the paths where the rw_mutex
+ * is not held.
+ */
+static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
+{
+        struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);
+
+        return container_of(ipcp, struct sem_array, sem_perm);
+}
+
+static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
+                                                int id)
+{
+        struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
+
+        return container_of(ipcp, struct sem_array, sem_perm);
+}
+
+static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
+{
+        ipc_rmid(&sem_ids(ns), &s->sem_perm);
+}
+
+/*
  * Lockless wakeup algorithm:
  * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
@@ -206,12 +242,23 @@
  */
 #define IN_WAKEUP 1
 
-static int newary (struct ipc_namespace *ns, key_t key, int nsems, int semflg)
+/**
+ * newary - Create a new semaphore set
+ * @ns: namespace
+ * @params: ptr to the structure that contains key, semflg and nsems
+ *
+ * Called with sem_ids.rw_mutex held (as a writer)
+ */
+
+static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 {
         int id;
         int retval;
         struct sem_array *sma;
         int size;
+        key_t key = params->key;
+        int nsems = params->u.nsems;
+        int semflg = params->flg;
 
         if (!nsems)
                 return -EINVAL;
@@ -236,14 +283,14 @@ static int newary (struct ipc_namespace *ns, key_t key, int nsems, int semflg)
         }
 
         id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
-        if(id == -1) {
+        if (id < 0) {
                 security_sem_free(sma);
                 ipc_rcu_putref(sma);
-                return -ENOSPC;
+                return id;
         }
         ns->used_sems += nsems;
 
-        sma->sem_id = sem_buildid(ns, id, sma->sem_perm.seq);
+        sma->sem_perm.id = sem_buildid(id, sma->sem_perm.seq);
         sma->sem_base = (struct sem *) &sma[1];
         /* sma->sem_pending = NULL; */
         sma->sem_pending_last = &sma->sem_pending;
@@ -252,48 +299,56 @@ static int newary (struct ipc_namespace *ns, key_t key, int nsems, int semflg)
         sma->sem_ctime = get_seconds();
         sem_unlock(sma);
 
-        return sma->sem_id;
+        return sma->sem_perm.id;
+}
+
+
+/*
+ * Called with sem_ids.rw_mutex and ipcp locked.
+ */
+static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
+{
+        struct sem_array *sma;
+
+        sma = container_of(ipcp, struct sem_array, sem_perm);
+        return security_sem_associate(sma, semflg);
 }
 
-asmlinkage long sys_semget (key_t key, int nsems, int semflg)
+/*
+ * Called with sem_ids.rw_mutex and ipcp locked.
+ */
+static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
+                                struct ipc_params *params)
 {
-        int id, err = -EINVAL;
         struct sem_array *sma;
+
+        sma = container_of(ipcp, struct sem_array, sem_perm);
+        if (params->u.nsems > sma->sem_nsems)
+                return -EINVAL;
+
+        return 0;
+}
+
+asmlinkage long sys_semget(key_t key, int nsems, int semflg)
+{
         struct ipc_namespace *ns;
+        struct ipc_ops sem_ops;
+        struct ipc_params sem_params;
 
         ns = current->nsproxy->ipc_ns;
 
         if (nsems < 0 || nsems > ns->sc_semmsl)
                 return -EINVAL;
-        mutex_lock(&sem_ids(ns).mutex);
-
-        if (key == IPC_PRIVATE) {
-                err = newary(ns, key, nsems, semflg);
-        } else if ((id = ipc_findkey(&sem_ids(ns), key)) == -1) {  /* key not used */
-                if (!(semflg & IPC_CREAT))
-                        err = -ENOENT;
-                else
-                        err = newary(ns, key, nsems, semflg);
-        } else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
-                err = -EEXIST;
-        } else {
-                sma = sem_lock(ns, id);
-                BUG_ON(sma==NULL);
-                if (nsems > sma->sem_nsems)
-                        err = -EINVAL;
-                else if (ipcperms(&sma->sem_perm, semflg))
-                        err = -EACCES;
-                else {
-                        int semid = sem_buildid(ns, id, sma->sem_perm.seq);
-                        err = security_sem_associate(sma, semflg);
-                        if (!err)
-                                err = semid;
-                }
-                sem_unlock(sma);
-        }
 
-        mutex_unlock(&sem_ids(ns).mutex);
-        return err;
+        sem_ops.getnew = newary;
+        sem_ops.associate = sem_security;
+        sem_ops.more_checks = sem_more_checks;
+
+        sem_params.key = key;
+        sem_params.flg = semflg;
+        sem_params.u.nsems = nsems;
+
+        return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 }
 
 /* Manage the doubly linked list sma->sem_pending as a FIFO:
@@ -487,15 +542,14 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
         return semzcnt;
 }
 
-/* Free a semaphore set. freeary() is called with sem_ids.mutex locked and
- * the spinlock for this semaphore set hold. sem_ids.mutex remains locked
- * on exit.
+/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
+ * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
+ * remains locked on exit.
  */
-static void freeary (struct ipc_namespace *ns, struct sem_array *sma, int id)
+static void freeary(struct ipc_namespace *ns, struct sem_array *sma)
 {
         struct sem_undo *un;
         struct sem_queue *q;
-        int size;
 
         /* Invalidate the existing undo structures for this semaphore set.
          * (They will be freed without any further action in exit_sem()
@@ -518,12 +572,11 @@ static void freeary (struct ipc_namespace *ns, struct sem_array *sma, int id)
                 q = n;
         }
 
-        /* Remove the semaphore set from the ID array*/
-        sma = sem_rmid(ns, id);
+        /* Remove the semaphore set from the IDR */
+        sem_rmid(ns, sma);
         sem_unlock(sma);
 
         ns->used_sems -= sma->sem_nsems;
-        size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
         security_sem_free(sma);
         ipc_rcu_putref(sma);
 }
@@ -576,7 +629,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum,
                 seminfo.semmnu = SEMMNU;
                 seminfo.semmap = SEMMAP;
                 seminfo.semume = SEMUME;
-                mutex_lock(&sem_ids(ns).mutex);
+                down_read(&sem_ids(ns).rw_mutex);
                 if (cmd == SEM_INFO) {
                         seminfo.semusz = sem_ids(ns).in_use;
                         seminfo.semaem = ns->used_sems;
@@ -584,8 +637,8 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum,
                         seminfo.semusz = SEMUSZ;
                         seminfo.semaem = SEMAEM;
                 }
-                max_id = sem_ids(ns).max_id;
-                mutex_unlock(&sem_ids(ns).mutex);
+                max_id = ipc_get_maxid(&sem_ids(ns));
+                up_read(&sem_ids(ns).rw_mutex);
                 if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
                         return -EFAULT;
                 return (max_id < 0) ? 0: max_id;
@@ -595,14 +648,9 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum,
                 struct semid64_ds tbuf;
                 int id;
 
-                if(semid >= sem_ids(ns).entries->size)
-                        return -EINVAL;
-
-                memset(&tbuf,0,sizeof(tbuf));
-
                 sma = sem_lock(ns, semid);
-                if(sma == NULL)
-                        return -EINVAL;
+                if (IS_ERR(sma))
+                        return PTR_ERR(sma);
 
                 err = -EACCES;
                 if (ipcperms (&sma->sem_perm, S_IRUGO))
@@ -612,7 +660,9 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum,
                 if (err)
                         goto out_unlock;
 
-                id = sem_buildid(ns, semid, sma->sem_perm.seq);
+                id = sma->sem_perm.id;
+
+                memset(&tbuf, 0, sizeof(tbuf));
 
                 kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
                 tbuf.sem_otime = sma->sem_otime;
@@ -642,16 +692,12 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
         ushort* sem_io = fast_sem_io;
         int nsems;
 
-        sma = sem_lock(ns, semid);
-        if(sma==NULL)
-                return -EINVAL;
+        sma = sem_lock_check(ns, semid);
+        if (IS_ERR(sma))
+                return PTR_ERR(sma);
 
         nsems = sma->sem_nsems;
 
-        err=-EIDRM;
-        if (sem_checkid(ns,sma,semid))
-                goto out_unlock;
-
         err = -EACCES;
         if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
                 goto out_unlock;
@@ -795,7 +841,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                 for (un = sma->undo; un; un = un->id_next)
                         un->semadj[semnum] = 0;
                 curr->semval = val;
-                curr->sempid = current->tgid;
+                curr->sempid = task_tgid_vnr(current);
                 sma->sem_ctime = get_seconds();
                 /* maybe some queued-up processes were waiting for this */
                 update_queue(sma);
@@ -863,14 +909,10 @@ static int semctl_down(struct ipc_namespace *ns, int semid, int semnum,
                 if(copy_semid_from_user (&setbuf, arg.buf, version))
                         return -EFAULT;
         }
-        sma = sem_lock(ns, semid);
-        if(sma==NULL)
-                return -EINVAL;
+        sma = sem_lock_check_down(ns, semid);
+        if (IS_ERR(sma))
+                return PTR_ERR(sma);
 
-        if (sem_checkid(ns,sma,semid)) {
-                err=-EIDRM;
-                goto out_unlock;
-        }
         ipcp = &sma->sem_perm;
 
         err = audit_ipc_obj(ipcp);
@@ -894,7 +936,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid, int semnum,
 
         switch(cmd){
         case IPC_RMID:
-                freeary(ns, sma, semid);
+                freeary(ns, sma);
                 err = 0;
                 break;
         case IPC_SET:
@@ -948,45 +990,15 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
                 return err;
         case IPC_RMID:
         case IPC_SET:
-                mutex_lock(&sem_ids(ns).mutex);
+                down_write(&sem_ids(ns).rw_mutex);
                 err = semctl_down(ns,semid,semnum,cmd,version,arg);
-                mutex_unlock(&sem_ids(ns).mutex);
+                up_write(&sem_ids(ns).rw_mutex);
                 return err;
         default:
                 return -EINVAL;
         }
 }
 
-static inline void lock_semundo(void)
-{
-        struct sem_undo_list *undo_list;
-
-        undo_list = current->sysvsem.undo_list;
-        if (undo_list)
-                spin_lock(&undo_list->lock);
-}
-
-/* This code has an interaction with copy_semundo().
- * Consider; two tasks are sharing the undo_list. task1
- * acquires the undo_list lock in lock_semundo(). If task2 now
- * exits before task1 releases the lock (by calling
- * unlock_semundo()), then task1 will never call spin_unlock().
- * This leave the sem_undo_list in a locked state. If task1 now creats task3
- * and once again shares the sem_undo_list, the sem_undo_list will still be
- * locked, and future SEM_UNDO operations will deadlock. This case is
- * dealt with in copy_semundo() by having it reinitialize the spin lock when
- * the refcnt goes from 1 to 2.
- */
-static inline void unlock_semundo(void)
-{
-        struct sem_undo_list *undo_list;
-
-        undo_list = current->sysvsem.undo_list;
-        if (undo_list)
-                spin_unlock(&undo_list->lock);
-}
-
-
 /* If the task doesn't already have a undo_list, then allocate one
  * here. We guarantee there is only one thread using this undo list,
  * and current is THE ONE
@@ -1047,22 +1059,17 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
         if (error)
                 return ERR_PTR(error);
 
-        lock_semundo();
+        spin_lock(&ulp->lock);
         un = lookup_undo(ulp, semid);
-        unlock_semundo();
+        spin_unlock(&ulp->lock);
         if (likely(un!=NULL))
                 goto out;
 
         /* no undo structure around - allocate one. */
-        sma = sem_lock(ns, semid);
-        un = ERR_PTR(-EINVAL);
-        if(sma==NULL)
-                goto out;
-        un = ERR_PTR(-EIDRM);
-        if (sem_checkid(ns,sma,semid)) {
-                sem_unlock(sma);
-                goto out;
-        }
+        sma = sem_lock_check(ns, semid);
+        if (IS_ERR(sma))
+                return ERR_PTR(PTR_ERR(sma));
+
         nsems = sma->sem_nsems;
         ipc_rcu_getref(sma);
         sem_unlock(sma);
@@ -1077,10 +1084,10 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
         new->semadj = (short *) &new[1];
         new->semid = semid;
 
-        lock_semundo();
+        spin_lock(&ulp->lock);
         un = lookup_undo(ulp, semid);
         if (un) {
-                unlock_semundo();
+                spin_unlock(&ulp->lock);
                 kfree(new);
                 ipc_lock_by_ptr(&sma->sem_perm);
                 ipc_rcu_putref(sma);
@@ -1091,7 +1098,7 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
         ipc_rcu_putref(sma);
         if (sma->sem_perm.deleted) {
                 sem_unlock(sma);
-                unlock_semundo();
+                spin_unlock(&ulp->lock);
                 kfree(new);
                 un = ERR_PTR(-EIDRM);
                 goto out;
@@ -1102,7 +1109,7 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
         sma->undo = new;
         sem_unlock(sma);
         un = new;
-        unlock_semundo();
+        spin_unlock(&ulp->lock);
 out:
         return un;
 }
@@ -1168,15 +1175,14 @@ retry_undos:
         } else
                 un = NULL;
 
-        sma = sem_lock(ns, semid);
-        error=-EINVAL;
-        if(sma==NULL)
+        sma = sem_lock_check(ns, semid);
+        if (IS_ERR(sma)) {
+                error = PTR_ERR(sma);
                 goto out_free;
-        error = -EIDRM;
-        if (sem_checkid(ns,sma,semid))
-                goto out_unlock_free;
+        }
+
         /*
-         * semid identifies are not unique - find_undo may have
+         * semid identifiers are not unique - find_undo may have
          * allocated an undo structure, it was invalidated by an RMID
          * and now a new array with received the same id. Check and retry.
          */
@@ -1196,7 +1202,7 @@ retry_undos:
         if (error)
                 goto out_unlock_free;
 
-        error = try_atomic_semop (sma, sops, nsops, un, current->tgid);
+        error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
         if (error <= 0) {
                 if (alter && error == 0)
                         update_queue (sma);
@@ -1211,7 +1217,7 @@ retry_undos:
         queue.sops = sops;
         queue.nsops = nsops;
         queue.undo = un;
-        queue.pid = current->tgid;
+        queue.pid = task_tgid_vnr(current);
         queue.id = semid;
         queue.alter = alter;
         if (alter)
@@ -1242,7 +1248,7 @@ retry_undos:
         }
 
         sma = sem_lock(ns, semid);
-        if(sma==NULL) {
+        if (IS_ERR(sma)) {
                 BUG_ON(queue.prev != NULL);
                 error = -EIDRM;
                 goto out_free;
@@ -1279,10 +1285,6 @@ asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsop
 
 /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
  * parent and child tasks.
- *
- * See the notes above unlock_semundo() regarding the spin_lock_init()
- * in this code. Initialize the undo_list->lock here instead of get_undo_list()
- * because of the reasoning in the comment above unlock_semundo.
  */
 
 int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
@@ -1342,13 +1344,13 @@ void exit_sem(struct task_struct *tsk)
                 if(semid == -1)
                         continue;
                 sma = sem_lock(ns, semid);
-                if (sma == NULL)
+                if (IS_ERR(sma))
                         continue;
 
                 if (u->semid == -1)
                         goto next_entry;
 
-                BUG_ON(sem_checkid(ns,sma,u->semid));
+                BUG_ON(sem_checkid(sma, u->semid));
 
                 /* remove u from the sma->undo list */
                 for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
@@ -1382,7 +1384,7 @@ found:
                                         semaphore->semval = 0;
                                 if (semaphore->semval > SEMVMX)
                                         semaphore->semval = SEMVMX;
-                                semaphore->sempid = current->tgid;
+                                semaphore->sempid = task_tgid_vnr(current);
                         }
                 }
                 sma->sem_otime = get_seconds();
@@ -1402,7 +1404,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
         return seq_printf(s,
                 "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
                 sma->sem_perm.key,
-                sma->sem_id,
+                sma->sem_perm.id,
                 sma->sem_perm.mode,
                 sma->sem_nsems,
                 sma->sem_perm.uid,
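
The exit_ns() and shm_get_stat() loops in all three files now walk the IDR until in_use live entries have been visited, rather than scanning every index up to max_id. A small userspace sketch of that traversal over a sparse table — idr_find() is stood in for by a plain array lookup, and the rw_mutex that keeps in_use accurate in the kernel is assumed (the extra bound here is belt-and-braces for the sketch only):

#include <stdio.h>

#define MAX_SLOTS 16

/* A sparse id table standing in for the kernel's IDR: most slots empty. */
static const char *slots[MAX_SLOTS] = {
        [1] = "seg-1", [4] = "seg-4", [11] = "seg-11",
};
static const int in_use = 3;    /* tracked as ipc_ids.in_use in the kernel */

int main(void)
{
        int next_id, total;

        /* Same shape as the converted loops: stop once 'in_use' live
         * entries have been visited, so the cost tracks the number of
         * existing objects rather than the highest id ever allocated. */
        for (total = 0, next_id = 0;
             total < in_use && next_id < MAX_SLOTS; next_id++) {
                if (slots[next_id] == NULL)
                        continue;
                printf("id %d -> %s\n", next_id, slots[next_id]);
                total++;
        }
        return 0;
}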
diff --git a/ipc/shm.c b/ipc/shm.c
index 5fc5cf50cf1b..3818fae625c5 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -35,7 +35,7 @@
35#include <linux/capability.h> 35#include <linux/capability.h>
36#include <linux/ptrace.h> 36#include <linux/ptrace.h>
37#include <linux/seq_file.h> 37#include <linux/seq_file.h>
38#include <linux/mutex.h> 38#include <linux/rwsem.h>
39#include <linux/nsproxy.h> 39#include <linux/nsproxy.h>
40#include <linux/mount.h> 40#include <linux/mount.h>
41 41
@@ -59,17 +59,11 @@ static struct ipc_ids init_shm_ids;
59 59
60#define shm_ids(ns) (*((ns)->ids[IPC_SHM_IDS])) 60#define shm_ids(ns) (*((ns)->ids[IPC_SHM_IDS]))
61 61
62#define shm_lock(ns, id) \
63 ((struct shmid_kernel*)ipc_lock(&shm_ids(ns),id))
64#define shm_unlock(shp) \ 62#define shm_unlock(shp) \
65 ipc_unlock(&(shp)->shm_perm) 63 ipc_unlock(&(shp)->shm_perm)
66#define shm_get(ns, id) \ 64#define shm_buildid(id, seq) ipc_buildid(id, seq)
67 ((struct shmid_kernel*)ipc_get(&shm_ids(ns),id))
68#define shm_buildid(ns, id, seq) \
69 ipc_buildid(&shm_ids(ns), id, seq)
70 65
71static int newseg (struct ipc_namespace *ns, key_t key, 66static int newseg(struct ipc_namespace *, struct ipc_params *);
72 int shmflg, size_t size);
73static void shm_open(struct vm_area_struct *vma); 67static void shm_open(struct vm_area_struct *vma);
74static void shm_close(struct vm_area_struct *vma); 68static void shm_close(struct vm_area_struct *vma);
75static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); 69static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
@@ -84,9 +78,13 @@ static void __shm_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
84 ns->shm_ctlall = SHMALL; 78 ns->shm_ctlall = SHMALL;
85 ns->shm_ctlmni = SHMMNI; 79 ns->shm_ctlmni = SHMMNI;
86 ns->shm_tot = 0; 80 ns->shm_tot = 0;
87 ipc_init_ids(ids, 1); 81 ipc_init_ids(ids);
88} 82}
89 83
84/*
85 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
86 * Only shm_ids.rw_mutex remains locked on exit.
87 */
90static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp) 88static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp)
91{ 89{
92 if (shp->shm_nattch){ 90 if (shp->shm_nattch){
@@ -112,20 +110,24 @@ int shm_init_ns(struct ipc_namespace *ns)
112 110
113void shm_exit_ns(struct ipc_namespace *ns) 111void shm_exit_ns(struct ipc_namespace *ns)
114{ 112{
115 int i;
116 struct shmid_kernel *shp; 113 struct shmid_kernel *shp;
114 int next_id;
115 int total, in_use;
116
117 down_write(&shm_ids(ns).rw_mutex);
117 118
118 mutex_lock(&shm_ids(ns).mutex); 119 in_use = shm_ids(ns).in_use;
119 for (i = 0; i <= shm_ids(ns).max_id; i++) { 120
120 shp = shm_lock(ns, i); 121 for (total = 0, next_id = 0; total < in_use; next_id++) {
122 shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
121 if (shp == NULL) 123 if (shp == NULL)
122 continue; 124 continue;
123 125 ipc_lock_by_ptr(&shp->shm_perm);
124 do_shm_rmid(ns, shp); 126 do_shm_rmid(ns, shp);
127 total++;
125 } 128 }
126 mutex_unlock(&shm_ids(ns).mutex); 129 up_write(&shm_ids(ns).rw_mutex);
127 130
128 ipc_fini_ids(ns->ids[IPC_SHM_IDS]);
129 kfree(ns->ids[IPC_SHM_IDS]); 131 kfree(ns->ids[IPC_SHM_IDS]);
130 ns->ids[IPC_SHM_IDS] = NULL; 132 ns->ids[IPC_SHM_IDS] = NULL;
131} 133}
@@ -138,17 +140,49 @@ void __init shm_init (void)
138 IPC_SHM_IDS, sysvipc_shm_proc_show); 140 IPC_SHM_IDS, sysvipc_shm_proc_show);
139} 141}
140 142
141static inline int shm_checkid(struct ipc_namespace *ns, 143/*
142 struct shmid_kernel *s, int id) 144 * shm_lock_(check_)down routines are called in the paths where the rw_mutex
145 * is held to protect access to the idr tree.
146 */
147static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
148 int id)
143{ 149{
144 if (ipc_checkid(&shm_ids(ns), &s->shm_perm, id)) 150 struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id);
145 return -EIDRM; 151
146 return 0; 152 return container_of(ipcp, struct shmid_kernel, shm_perm);
153}
154
155static inline struct shmid_kernel *shm_lock_check_down(
156 struct ipc_namespace *ns,
157 int id)
158{
159 struct kern_ipc_perm *ipcp = ipc_lock_check_down(&shm_ids(ns), id);
160
161 return container_of(ipcp, struct shmid_kernel, shm_perm);
162}
163
164/*
165 * shm_lock_(check_) routines are called in the paths where the rw_mutex
166 * is not held.
167 */
168static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
169{
170 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
171
172 return container_of(ipcp, struct shmid_kernel, shm_perm);
173}
174
175static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
176 int id)
177{
178 struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
179
180 return container_of(ipcp, struct shmid_kernel, shm_perm);
147} 181}
148 182
149static inline struct shmid_kernel *shm_rmid(struct ipc_namespace *ns, int id) 183static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
150{ 184{
151 return (struct shmid_kernel *)ipc_rmid(&shm_ids(ns), id); 185 ipc_rmid(&shm_ids(ns), &s->shm_perm);
152} 186}
153 187
154static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp) 188static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
@@ -166,9 +200,9 @@ static void shm_open(struct vm_area_struct *vma)
166 struct shmid_kernel *shp; 200 struct shmid_kernel *shp;
167 201
168 shp = shm_lock(sfd->ns, sfd->id); 202 shp = shm_lock(sfd->ns, sfd->id);
169 BUG_ON(!shp); 203 BUG_ON(IS_ERR(shp));
170 shp->shm_atim = get_seconds(); 204 shp->shm_atim = get_seconds();
171 shp->shm_lprid = current->tgid; 205 shp->shm_lprid = task_tgid_vnr(current);
172 shp->shm_nattch++; 206 shp->shm_nattch++;
173 shm_unlock(shp); 207 shm_unlock(shp);
174} 208}
@@ -176,15 +210,16 @@ static void shm_open(struct vm_area_struct *vma)
176/* 210/*
177 * shm_destroy - free the struct shmid_kernel 211 * shm_destroy - free the struct shmid_kernel
178 * 212 *
213 * @ns: namespace
179 * @shp: struct to free 214 * @shp: struct to free
180 * 215 *
181 * It has to be called with shp and shm_ids.mutex locked, 216 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
182 * but returns with shp unlocked and freed. 217 * but returns with shp unlocked and freed.
183 */ 218 */
184static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) 219static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
185{ 220{
186 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; 221 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
187 shm_rmid(ns, shp->id); 222 shm_rmid(ns, shp);
188 shm_unlock(shp); 223 shm_unlock(shp);
189 if (!is_file_hugepages(shp->shm_file)) 224 if (!is_file_hugepages(shp->shm_file))
190 shmem_lock(shp->shm_file, 0, shp->mlock_user); 225 shmem_lock(shp->shm_file, 0, shp->mlock_user);
@@ -209,11 +244,11 @@ static void shm_close(struct vm_area_struct *vma)
209 struct shmid_kernel *shp; 244 struct shmid_kernel *shp;
210 struct ipc_namespace *ns = sfd->ns; 245 struct ipc_namespace *ns = sfd->ns;
211 246
212 mutex_lock(&shm_ids(ns).mutex); 247 down_write(&shm_ids(ns).rw_mutex);
213 /* remove from the list of attaches of the shm segment */ 248 /* remove from the list of attaches of the shm segment */
214 shp = shm_lock(ns, sfd->id); 249 shp = shm_lock_down(ns, sfd->id);
215 BUG_ON(!shp); 250 BUG_ON(IS_ERR(shp));
216 shp->shm_lprid = current->tgid; 251 shp->shm_lprid = task_tgid_vnr(current);
217 shp->shm_dtim = get_seconds(); 252 shp->shm_dtim = get_seconds();
218 shp->shm_nattch--; 253 shp->shm_nattch--;
219 if(shp->shm_nattch == 0 && 254 if(shp->shm_nattch == 0 &&
@@ -221,7 +256,7 @@ static void shm_close(struct vm_area_struct *vma)
221 shm_destroy(ns, shp); 256 shm_destroy(ns, shp);
222 else 257 else
223 shm_unlock(shp); 258 shm_unlock(shp);
224 mutex_unlock(&shm_ids(ns).mutex); 259 up_write(&shm_ids(ns).rw_mutex);
225} 260}
226 261
227static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 262static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -337,8 +372,19 @@ static struct vm_operations_struct shm_vm_ops = {
337#endif 372#endif
338}; 373};
339 374
340static int newseg (struct ipc_namespace *ns, key_t key, int shmflg, size_t size) 375/**
376 * newseg - Create a new shared memory segment
377 * @ns: namespace
378 * @params: ptr to the structure that contains key, size and shmflg
379 *
380 * Called with shm_ids.rw_mutex held as a writer.
381 */
382
383static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
341{ 384{
385 key_t key = params->key;
386 int shmflg = params->flg;
387 size_t size = params->u.size;
342 int error; 388 int error;
343 struct shmid_kernel *shp; 389 struct shmid_kernel *shp;
344 int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT; 390 int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
@@ -387,28 +433,30 @@ static int newseg (struct ipc_namespace *ns, key_t key, int shmflg, size_t size)
387 if (IS_ERR(file)) 433 if (IS_ERR(file))
388 goto no_file; 434 goto no_file;
389 435
390 error = -ENOSPC;
391 id = shm_addid(ns, shp); 436 id = shm_addid(ns, shp);
392 if(id == -1) 437 if (id < 0) {
438 error = id;
393 goto no_id; 439 goto no_id;
440 }
394 441
395 shp->shm_cprid = current->tgid; 442 shp->shm_cprid = task_tgid_vnr(current);
396 shp->shm_lprid = 0; 443 shp->shm_lprid = 0;
397 shp->shm_atim = shp->shm_dtim = 0; 444 shp->shm_atim = shp->shm_dtim = 0;
398 shp->shm_ctim = get_seconds(); 445 shp->shm_ctim = get_seconds();
399 shp->shm_segsz = size; 446 shp->shm_segsz = size;
400 shp->shm_nattch = 0; 447 shp->shm_nattch = 0;
401 shp->id = shm_buildid(ns, id, shp->shm_perm.seq); 448 shp->shm_perm.id = shm_buildid(id, shp->shm_perm.seq);
402 shp->shm_file = file; 449 shp->shm_file = file;
403 /* 450 /*
404 * shmid gets reported as "inode#" in /proc/pid/maps. 451 * shmid gets reported as "inode#" in /proc/pid/maps.
405 * proc-ps tools use this. Changing this will break them. 452 * proc-ps tools use this. Changing this will break them.
406 */ 453 */
407 file->f_dentry->d_inode->i_ino = shp->id; 454 file->f_dentry->d_inode->i_ino = shp->shm_perm.id;
408 455
409 ns->shm_tot += numpages; 456 ns->shm_tot += numpages;
457 error = shp->shm_perm.id;
410 shm_unlock(shp); 458 shm_unlock(shp);
411 return shp->id; 459 return error;
412 460
413no_id: 461no_id:
414 fput(file); 462 fput(file);
@@ -418,42 +466,49 @@ no_file:
418 return error; 466 return error;
419} 467}
420 468
421asmlinkage long sys_shmget (key_t key, size_t size, int shmflg) 469/*
470 * Called with shm_ids.rw_mutex and ipcp locked.
471 */
472static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
422{ 473{
423 struct shmid_kernel *shp; 474 struct shmid_kernel *shp;
424 int err, id = 0; 475
476 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
477 return security_shm_associate(shp, shmflg);
478}
479
480/*
481 * Called with shm_ids.rw_mutex and ipcp locked.
482 */
483static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
484 struct ipc_params *params)
485{
486 struct shmid_kernel *shp;
487
488 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
489 if (shp->shm_segsz < params->u.size)
490 return -EINVAL;
491
492 return 0;
493}
494
495asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
496{
425 struct ipc_namespace *ns; 497 struct ipc_namespace *ns;
498 struct ipc_ops shm_ops;
499 struct ipc_params shm_params;
426 500
427 ns = current->nsproxy->ipc_ns; 501 ns = current->nsproxy->ipc_ns;
428 502
429 mutex_lock(&shm_ids(ns).mutex); 503 shm_ops.getnew = newseg;
430 if (key == IPC_PRIVATE) { 504 shm_ops.associate = shm_security;
431 err = newseg(ns, key, shmflg, size); 505 shm_ops.more_checks = shm_more_checks;
432 } else if ((id = ipc_findkey(&shm_ids(ns), key)) == -1) {
433 if (!(shmflg & IPC_CREAT))
434 err = -ENOENT;
435 else
436 err = newseg(ns, key, shmflg, size);
437 } else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
438 err = -EEXIST;
439 } else {
440 shp = shm_lock(ns, id);
441 BUG_ON(shp==NULL);
442 if (shp->shm_segsz < size)
443 err = -EINVAL;
444 else if (ipcperms(&shp->shm_perm, shmflg))
445 err = -EACCES;
446 else {
447 int shmid = shm_buildid(ns, id, shp->shm_perm.seq);
448 err = security_shm_associate(shp, shmflg);
449 if (!err)
450 err = shmid;
451 }
452 shm_unlock(shp);
453 }
454 mutex_unlock(&shm_ids(ns).mutex);
455 506
456 return err; 507 shm_params.key = key;
508 shm_params.flg = shmflg;
509 shm_params.u.size = size;
510
511 return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
457} 512}
458 513
459static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) 514static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
@@ -547,20 +602,26 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf
547 } 602 }
548} 603}
549 604
605/*
606 * Called with shm_ids.rw_mutex held as a reader
607 */
550static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, 608static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
551 unsigned long *swp) 609 unsigned long *swp)
552{ 610{
553 int i; 611 int next_id;
612 int total, in_use;
554 613
555 *rss = 0; 614 *rss = 0;
556 *swp = 0; 615 *swp = 0;
557 616
558 for (i = 0; i <= shm_ids(ns).max_id; i++) { 617 in_use = shm_ids(ns).in_use;
618
619 for (total = 0, next_id = 0; total < in_use; next_id++) {
559 struct shmid_kernel *shp; 620 struct shmid_kernel *shp;
560 struct inode *inode; 621 struct inode *inode;
561 622
562 shp = shm_get(ns, i); 623 shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
563 if(!shp) 624 if (shp == NULL)
564 continue; 625 continue;
565 626
566 inode = shp->shm_file->f_path.dentry->d_inode; 627 inode = shp->shm_file->f_path.dentry->d_inode;
@@ -575,6 +636,8 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
575 *swp += info->swapped; 636 *swp += info->swapped;
576 spin_unlock(&info->lock); 637 spin_unlock(&info->lock);
577 } 638 }
639
640 total++;
578 } 641 }
579} 642}
580 643
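Since the idr can be sparse, shm_get_stat() can no longer iterate 0..max_id; it probes successive slots and stops once it has visited in_use live entries. A self-contained model of that probe-until-count loop over an array with holes (visit_all() and the array are invented for the demo; the kernel holds rw_mutex here, so in_use cannot change mid-scan):

#include <stddef.h>
#include <stdio.h>

/* Visit every live entry, stopping after in_use hits instead of
 * sweeping the whole table; holes are simply skipped. Safe only
 * while in_use is accurate (the kernel holds rw_mutex for this). */
static void visit_all(void *slots[], int in_use)
{
        int total, next_id;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                if (slots[next_id] == NULL)
                        continue;       /* hole left by a removed id */
                printf("slot %d -> %p\n", next_id, slots[next_id]);
                total++;
        }
}

int main(void)
{
        int a, b;
        void *slots[16] = { NULL };

        slots[1] = &a;                  /* sparse ids: 1 and 9 in use */
        slots[9] = &b;
        visit_all(slots, 2);
        return 0;
}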
@@ -610,8 +673,11 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
610 shminfo.shmmin = SHMMIN; 673 shminfo.shmmin = SHMMIN;
611 if(copy_shminfo_to_user (buf, &shminfo, version)) 674 if(copy_shminfo_to_user (buf, &shminfo, version))
612 return -EFAULT; 675 return -EFAULT;
613 /* reading a integer is always atomic */ 676
614 err= shm_ids(ns).max_id; 677 down_read(&shm_ids(ns).rw_mutex);
678 err = ipc_get_maxid(&shm_ids(ns));
679 up_read(&shm_ids(ns).rw_mutex);
680
615 if(err<0) 681 if(err<0)
616 err = 0; 682 err = 0;
617 goto out; 683 goto out;
@@ -625,14 +691,14 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
625 return err; 691 return err;
626 692
627 memset(&shm_info,0,sizeof(shm_info)); 693 memset(&shm_info,0,sizeof(shm_info));
628 mutex_lock(&shm_ids(ns).mutex); 694 down_read(&shm_ids(ns).rw_mutex);
629 shm_info.used_ids = shm_ids(ns).in_use; 695 shm_info.used_ids = shm_ids(ns).in_use;
630 shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp); 696 shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
631 shm_info.shm_tot = ns->shm_tot; 697 shm_info.shm_tot = ns->shm_tot;
632 shm_info.swap_attempts = 0; 698 shm_info.swap_attempts = 0;
633 shm_info.swap_successes = 0; 699 shm_info.swap_successes = 0;
634 err = shm_ids(ns).max_id; 700 err = ipc_get_maxid(&shm_ids(ns));
635 mutex_unlock(&shm_ids(ns).mutex); 701 up_read(&shm_ids(ns).rw_mutex);
636 if(copy_to_user (buf, &shm_info, sizeof(shm_info))) { 702 if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
637 err = -EFAULT; 703 err = -EFAULT;
638 goto out; 704 goto out;
@@ -646,20 +712,25 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
646 { 712 {
647 struct shmid64_ds tbuf; 713 struct shmid64_ds tbuf;
648 int result; 714 int result;
649 memset(&tbuf, 0, sizeof(tbuf)); 715
650 shp = shm_lock(ns, shmid); 716 if (!buf) {
651 if(shp==NULL) { 717 err = -EFAULT;
652 err = -EINVAL;
653 goto out; 718 goto out;
654 } else if(cmd==SHM_STAT) { 719 }
655 err = -EINVAL; 720
656 if (shmid > shm_ids(ns).max_id) 721 if (cmd == SHM_STAT) {
657 goto out_unlock; 722 shp = shm_lock(ns, shmid);
658 result = shm_buildid(ns, shmid, shp->shm_perm.seq); 723 if (IS_ERR(shp)) {
724 err = PTR_ERR(shp);
725 goto out;
726 }
727 result = shp->shm_perm.id;
659 } else { 728 } else {
660 err = shm_checkid(ns, shp,shmid); 729 shp = shm_lock_check(ns, shmid);
661 if(err) 730 if (IS_ERR(shp)) {
662 goto out_unlock; 731 err = PTR_ERR(shp);
732 goto out;
733 }
663 result = 0; 734 result = 0;
664 } 735 }
665 err=-EACCES; 736 err=-EACCES;
@@ -668,6 +739,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
668 err = security_shm_shmctl(shp, cmd); 739 err = security_shm_shmctl(shp, cmd);
669 if (err) 740 if (err)
670 goto out_unlock; 741 goto out_unlock;
742 memset(&tbuf, 0, sizeof(tbuf));
671 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm); 743 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
672 tbuf.shm_segsz = shp->shm_segsz; 744 tbuf.shm_segsz = shp->shm_segsz;
673 tbuf.shm_atime = shp->shm_atim; 745 tbuf.shm_atime = shp->shm_atim;
@@ -686,14 +758,11 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
686 case SHM_LOCK: 758 case SHM_LOCK:
687 case SHM_UNLOCK: 759 case SHM_UNLOCK:
688 { 760 {
689 shp = shm_lock(ns, shmid); 761 shp = shm_lock_check(ns, shmid);
690 if(shp==NULL) { 762 if (IS_ERR(shp)) {
691 err = -EINVAL; 763 err = PTR_ERR(shp);
692 goto out; 764 goto out;
693 } 765 }
694 err = shm_checkid(ns, shp,shmid);
695 if(err)
696 goto out_unlock;
697 766
698 err = audit_ipc_obj(&(shp->shm_perm)); 767 err = audit_ipc_obj(&(shp->shm_perm));
699 if (err) 768 if (err)
@@ -742,14 +811,12 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
742 * Instead we set a destroyed flag, and then blow 811 * Instead we set a destroyed flag, and then blow
743 * the name away when the usage hits zero. 812 * the name away when the usage hits zero.
744 */ 813 */
745 mutex_lock(&shm_ids(ns).mutex); 814 down_write(&shm_ids(ns).rw_mutex);
746 shp = shm_lock(ns, shmid); 815 shp = shm_lock_check_down(ns, shmid);
747 err = -EINVAL; 816 if (IS_ERR(shp)) {
748 if (shp == NULL) 817 err = PTR_ERR(shp);
749 goto out_up; 818 goto out_up;
750 err = shm_checkid(ns, shp, shmid); 819 }
751 if(err)
752 goto out_unlock_up;
753 820
754 err = audit_ipc_obj(&(shp->shm_perm)); 821 err = audit_ipc_obj(&(shp->shm_perm));
755 if (err) 822 if (err)
@@ -767,24 +834,27 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
767 goto out_unlock_up; 834 goto out_unlock_up;
768 835
769 do_shm_rmid(ns, shp); 836 do_shm_rmid(ns, shp);
770 mutex_unlock(&shm_ids(ns).mutex); 837 up_write(&shm_ids(ns).rw_mutex);
771 goto out; 838 goto out;
772 } 839 }
773 840
774 case IPC_SET: 841 case IPC_SET:
775 { 842 {
843 if (!buf) {
844 err = -EFAULT;
845 goto out;
846 }
847
776 if (copy_shmid_from_user (&setbuf, buf, version)) { 848 if (copy_shmid_from_user (&setbuf, buf, version)) {
777 err = -EFAULT; 849 err = -EFAULT;
778 goto out; 850 goto out;
779 } 851 }
780 mutex_lock(&shm_ids(ns).mutex); 852 down_write(&shm_ids(ns).rw_mutex);
781 shp = shm_lock(ns, shmid); 853 shp = shm_lock_check_down(ns, shmid);
782 err=-EINVAL; 854 if (IS_ERR(shp)) {
783 if(shp==NULL) 855 err = PTR_ERR(shp);
784 goto out_up; 856 goto out_up;
785 err = shm_checkid(ns, shp,shmid); 857 }
786 if(err)
787 goto out_unlock_up;
788 err = audit_ipc_obj(&(shp->shm_perm)); 858 err = audit_ipc_obj(&(shp->shm_perm));
789 if (err) 859 if (err)
790 goto out_unlock_up; 860 goto out_unlock_up;
@@ -819,7 +889,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
819out_unlock_up: 889out_unlock_up:
820 shm_unlock(shp); 890 shm_unlock(shp);
821out_up: 891out_up:
822 mutex_unlock(&shm_ids(ns).mutex); 892 up_write(&shm_ids(ns).rw_mutex);
823 goto out; 893 goto out;
824out_unlock: 894out_unlock:
825 shm_unlock(shp); 895 shm_unlock(shp);
@@ -890,13 +960,11 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
890 * additional creator id... 960 * additional creator id...
891 */ 961 */
892 ns = current->nsproxy->ipc_ns; 962 ns = current->nsproxy->ipc_ns;
893 shp = shm_lock(ns, shmid); 963 shp = shm_lock_check(ns, shmid);
894 if(shp == NULL) 964 if (IS_ERR(shp)) {
965 err = PTR_ERR(shp);
895 goto out; 966 goto out;
896 967 }
897 err = shm_checkid(ns, shp,shmid);
898 if (err)
899 goto out_unlock;
900 968
901 err = -EACCES; 969 err = -EACCES;
902 if (ipcperms(&shp->shm_perm, acc_mode)) 970 if (ipcperms(&shp->shm_perm, acc_mode))
@@ -925,7 +993,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
925 993
926 file->private_data = sfd; 994 file->private_data = sfd;
927 file->f_mapping = shp->shm_file->f_mapping; 995 file->f_mapping = shp->shm_file->f_mapping;
928 sfd->id = shp->id; 996 sfd->id = shp->shm_perm.id;
929 sfd->ns = get_ipc_ns(ns); 997 sfd->ns = get_ipc_ns(ns);
930 sfd->file = shp->shm_file; 998 sfd->file = shp->shm_file;
931 sfd->vm_ops = NULL; 999 sfd->vm_ops = NULL;
@@ -955,16 +1023,16 @@ invalid:
955 fput(file); 1023 fput(file);
956 1024
957out_nattch: 1025out_nattch:
958 mutex_lock(&shm_ids(ns).mutex); 1026 down_write(&shm_ids(ns).rw_mutex);
959 shp = shm_lock(ns, shmid); 1027 shp = shm_lock_down(ns, shmid);
960 BUG_ON(!shp); 1028 BUG_ON(IS_ERR(shp));
961 shp->shm_nattch--; 1029 shp->shm_nattch--;
962 if(shp->shm_nattch == 0 && 1030 if(shp->shm_nattch == 0 &&
963 shp->shm_perm.mode & SHM_DEST) 1031 shp->shm_perm.mode & SHM_DEST)
964 shm_destroy(ns, shp); 1032 shm_destroy(ns, shp);
965 else 1033 else
966 shm_unlock(shp); 1034 shm_unlock(shp);
967 mutex_unlock(&shm_ids(ns).mutex); 1035 up_write(&shm_ids(ns).rw_mutex);
968 1036
969out: 1037out:
970 return err; 1038 return err;
@@ -1094,7 +1162,7 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1094 format = BIG_STRING; 1162 format = BIG_STRING;
1095 return seq_printf(s, format, 1163 return seq_printf(s, format,
1096 shp->shm_perm.key, 1164 shp->shm_perm.key,
1097 shp->id, 1165 shp->shm_perm.id,
1098 shp->shm_perm.mode, 1166 shp->shm_perm.mode,
1099 shp->shm_segsz, 1167 shp->shm_segsz,
1100 shp->shm_cprid, 1168 shp->shm_cprid,
diff --git a/ipc/util.c b/ipc/util.c
index 44e5135aee47..1aa0ebf71bac 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -32,6 +32,7 @@
32#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
33#include <linux/audit.h> 33#include <linux/audit.h>
34#include <linux/nsproxy.h> 34#include <linux/nsproxy.h>
35#include <linux/rwsem.h>
35 36
36#include <asm/unistd.h> 37#include <asm/unistd.h>
37 38
@@ -129,23 +130,16 @@ __initcall(ipc_init);
129/** 130/**
130 * ipc_init_ids - initialise IPC identifiers 131 * ipc_init_ids - initialise IPC identifiers
131 * @ids: Identifier set 132 * @ids: Identifier set
132 * @size: Number of identifiers
133 * 133 *
134 * Given a size for the ipc identifier range (limited below IPCMNI) 134 * Set up the sequence range to use for the ipc identifier range (limited
135 * set up the sequence range to use then allocate and initialise the 135 * below IPCMNI) then initialise the ids idr.
136 * array itself.
137 */ 136 */
138 137
139void ipc_init_ids(struct ipc_ids* ids, int size) 138void ipc_init_ids(struct ipc_ids *ids)
140{ 139{
141 int i; 140 init_rwsem(&ids->rw_mutex);
142 141
143 mutex_init(&ids->mutex);
144
145 if(size > IPCMNI)
146 size = IPCMNI;
147 ids->in_use = 0; 142 ids->in_use = 0;
148 ids->max_id = -1;
149 ids->seq = 0; 143 ids->seq = 0;
150 { 144 {
151 int seq_limit = INT_MAX/SEQ_MULTIPLIER; 145 int seq_limit = INT_MAX/SEQ_MULTIPLIER;
@@ -155,17 +149,7 @@ void ipc_init_ids(struct ipc_ids* ids, int size)
155 ids->seq_max = seq_limit; 149 ids->seq_max = seq_limit;
156 } 150 }
157 151
158 ids->entries = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*size + 152 idr_init(&ids->ipcs_idr);
159 sizeof(struct ipc_id_ary));
160
161 if(ids->entries == NULL) {
162 printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
163 size = 0;
164 ids->entries = &ids->nullentry;
165 }
166 ids->entries->size = size;
167 for(i=0;i<size;i++)
168 ids->entries->p[i] = NULL;
169} 153}
170 154
171#ifdef CONFIG_PROC_FS 155#ifdef CONFIG_PROC_FS
@@ -208,99 +192,96 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
208 * @ids: Identifier set 192 * @ids: Identifier set
209 * @key: The key to find 193 * @key: The key to find
210 * 194 *
211 * Requires ipc_ids.mutex locked. 195 * Requires ipc_ids.rw_mutex locked.
212 * Returns the identifier if found or -1 if not. 196 * Returns the LOCKED pointer to the ipc structure if found or NULL
197 * if not.
198 * If the key is found, the returned pointer is to the owning ipc structure.
213 */ 199 */
214 200
215int ipc_findkey(struct ipc_ids* ids, key_t key) 201static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
216{ 202{
217 int id; 203 struct kern_ipc_perm *ipc;
218 struct kern_ipc_perm* p; 204 int next_id;
219 int max_id = ids->max_id; 205 int total;
220 206
221 /* 207 for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
222 * rcu_dereference() is not needed here 208 ipc = idr_find(&ids->ipcs_idr, next_id);
223 * since ipc_ids.mutex is held 209
224 */ 210 if (ipc == NULL)
225 for (id = 0; id <= max_id; id++) { 211 continue;
226 p = ids->entries->p[id]; 212
227 if(p==NULL) 213 if (ipc->key != key) {
214 total++;
228 continue; 215 continue;
229 if (key == p->key) 216 }
230 return id; 217
218 ipc_lock_by_ptr(ipc);
219 return ipc;
231 } 220 }
232 return -1; 221
222 return NULL;
233} 223}
234 224
235/* 225/**
236 * Requires ipc_ids.mutex locked 226 * ipc_get_maxid - get the last assigned id
227 * @ids: IPC identifier set
228 *
229 * Called with ipc_ids.rw_mutex held.
237 */ 230 */
238static int grow_ary(struct ipc_ids* ids, int newsize)
239{
240 struct ipc_id_ary* new;
241 struct ipc_id_ary* old;
242 int i;
243 int size = ids->entries->size;
244
245 if(newsize > IPCMNI)
246 newsize = IPCMNI;
247 if(newsize <= size)
248 return newsize;
249
250 new = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*newsize +
251 sizeof(struct ipc_id_ary));
252 if(new == NULL)
253 return size;
254 new->size = newsize;
255 memcpy(new->p, ids->entries->p, sizeof(struct kern_ipc_perm *)*size);
256 for(i=size;i<newsize;i++) {
257 new->p[i] = NULL;
258 }
259 old = ids->entries;
260 231
261 /* 232int ipc_get_maxid(struct ipc_ids *ids)
262 * Use rcu_assign_pointer() to make sure the memcpyed contents 233{
263 * of the new array are visible before the new array becomes visible. 234 struct kern_ipc_perm *ipc;
264 */ 235 int max_id = -1;
265 rcu_assign_pointer(ids->entries, new); 236 int total, id;
237
238 if (ids->in_use == 0)
239 return -1;
266 240
267 __ipc_fini_ids(ids, old); 241 if (ids->in_use == IPCMNI)
268 return newsize; 242 return IPCMNI - 1;
243
244 /* Look for the last assigned id */
245 total = 0;
246 for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
247 ipc = idr_find(&ids->ipcs_idr, id);
248 if (ipc != NULL) {
249 max_id = id;
250 total++;
251 }
252 }
253 return max_id;
269} 254}
270 255
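ipc_get_maxid() replaces the old cached max_id field with a scan, short-circuiting the two easy cases: an empty set (return -1) and a completely full set (the last slot must be live). A compilable model of the same logic, with IPCMNI shrunk and get_maxid() invented for the demo:

#include <stddef.h>
#include <stdio.h>

#define IPCMNI 8                        /* shrunk for the demo */

static int get_maxid(void *slots[], int in_use)
{
        int id, total, max_id = -1;

        if (in_use == 0)
                return -1;              /* nothing allocated at all */
        if (in_use == IPCMNI)
                return IPCMNI - 1;      /* full table: last slot is live */

        for (id = 0, total = 0; id < IPCMNI && total < in_use; id++) {
                if (slots[id] != NULL) {
                        max_id = id;
                        total++;
                }
        }
        return max_id;
}

int main(void)
{
        int x;
        void *slots[IPCMNI] = { NULL };

        slots[2] = &x;
        slots[5] = &x;
        printf("max id = %d\n", get_maxid(slots, 2)); /* prints 5 */
        return 0;
}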
271/** 256/**
272 * ipc_addid - add an IPC identifier 257 * ipc_addid - add an IPC identifier
273 * @ids: IPC identifier set 258 * @ids: IPC identifier set
274 * @new: new IPC permission set 259 * @new: new IPC permission set
275 * @size: new size limit for the id array 260 * @size: limit for the number of used ids
276 * 261 *
277 * Add an entry 'new' to the IPC arrays. The permissions object is 262 * Add an entry 'new' to the IPC ids idr. The permissions object is
278 * initialised and the first free entry is set up and the id assigned 263 * initialised and the first free entry is set up and the id assigned
279 * is returned. The list is returned in a locked state on success. 264 * is returned. The 'new' entry is returned in a locked state on success.
280 * On failure the list is not locked and -1 is returned. 265 * On failure the entry is not locked and a negative err-code is returned.
281 * 266 *
282 * Called with ipc_ids.mutex held. 267 * Called with ipc_ids.rw_mutex held as a writer.
283 */ 268 */
284 269
285int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) 270int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
286{ 271{
287 int id; 272 int id, err;
288 273
289 size = grow_ary(ids,size); 274 if (size > IPCMNI)
275 size = IPCMNI;
276
277 if (ids->in_use >= size)
278 return -ENOSPC;
279
280 err = idr_get_new(&ids->ipcs_idr, new, &id);
281 if (err)
282 return err;
290 283
291 /*
292 * rcu_dereference()() is not needed here since
293 * ipc_ids.mutex is held
294 */
295 for (id = 0; id < size; id++) {
296 if(ids->entries->p[id] == NULL)
297 goto found;
298 }
299 return -1;
300found:
301 ids->in_use++; 284 ids->in_use++;
302 if (id > ids->max_id)
303 ids->max_id = id;
304 285
305 new->cuid = new->uid = current->euid; 286 new->cuid = new->uid = current->euid;
306 new->gid = new->cgid = current->egid; 287 new->gid = new->cgid = current->egid;
@@ -313,48 +294,153 @@ found:
313 new->deleted = 0; 294 new->deleted = 0;
314 rcu_read_lock(); 295 rcu_read_lock();
315 spin_lock(&new->lock); 296 spin_lock(&new->lock);
316 ids->entries->p[id] = new;
317 return id; 297 return id;
318} 298}
319 299
320/** 300/**
301 * ipcget_new - create a new ipc object
302 * @ns: namespace
303 * @ids: IPC identifier set
304 * @ops: the actual creation routine to call
305 * @params: its parameters
306 *
307 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
308 * when the key is IPC_PRIVATE.
309 */
310int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
311 struct ipc_ops *ops, struct ipc_params *params)
312{
313 int err;
314retry:
315 err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
316
317 if (!err)
318 return -ENOMEM;
319
320 down_write(&ids->rw_mutex);
321 err = ops->getnew(ns, params);
322 up_write(&ids->rw_mutex);
323
324 if (err == -EAGAIN)
325 goto retry;
326
327 return err;
328}
329
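The retry loop exists because of the two-step idr protocol: idr_pre_get() pre-allocates tree layers (returning 0 on allocation failure, nonzero on success) and may sleep, so it runs before the write lock is taken; the later idr_get_new() inside the getnew() path can still return -EAGAIN if a racing allocation consumed the preallocation. A kernel-style sketch of the bare idiom (alloc_id() is hypothetical and not standalone-runnable):

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDR(my_idr);

static int alloc_id(void *payload)
{
        int id, err;

retry:
        /* Step 1: reserve idr layers; may sleep, so no locks held yet. */
        if (!idr_pre_get(&my_idr, GFP_KERNEL))
                return -ENOMEM;

        /* ... take the subsystem lock (rw_mutex as a writer) ... */
        err = idr_get_new(&my_idr, payload, &id);
        /* ... drop the lock ... */

        if (err == -EAGAIN)             /* preallocation raced away */
                goto retry;

        return err ? err : id;
}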
330/**
331 * ipc_check_perms - check security and permissions for an IPC
332 * @ipcp: ipc permission set
333 * @ops: the actual security routine to call
334 * @params: its parameters
335 *
336 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
337 * when the key is not IPC_PRIVATE and that key already exists in the
338 * ids IDR.
339 *
340 * On success, the IPC id is returned.
341 *
342 * It is called with ipc_ids.rw_mutex and ipcp->lock held.
343 */
344static int ipc_check_perms(struct kern_ipc_perm *ipcp, struct ipc_ops *ops,
345 struct ipc_params *params)
346{
347 int err;
348
349 if (ipcperms(ipcp, params->flg))
350 err = -EACCES;
351 else {
352 err = ops->associate(ipcp, params->flg);
353 if (!err)
354 err = ipcp->id;
355 }
356
357 return err;
358}
359
360/**
361 * ipcget_public - get an ipc object or create a new one
362 * @ns: namespace
363 * @ids: IPC identifier set
364 * @ops: the actual creation routine to call
365 * @params: its parameters
366 *
367 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
368 * when the key is not IPC_PRIVATE.
369 * It adds a new entry if the key is not found and does some permission
370 * / security checks if the key is found.
371 *
372 * On success, the ipc id is returned.
373 */
374int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
375 struct ipc_ops *ops, struct ipc_params *params)
376{
377 struct kern_ipc_perm *ipcp;
378 int flg = params->flg;
379 int err;
380retry:
381 err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
382
383 /*
384 * Take the lock as a writer since we are potentially going to add
385 * a new entry + read locks are not "upgradable"
386 */
387 down_write(&ids->rw_mutex);
388 ipcp = ipc_findkey(ids, params->key);
389 if (ipcp == NULL) {
390 /* key not used */
391 if (!(flg & IPC_CREAT))
392 err = -ENOENT;
393 else if (!err)
394 err = -ENOMEM;
395 else
396 err = ops->getnew(ns, params);
397 } else {
398 /* ipc object has been locked by ipc_findkey() */
399
400 if (flg & IPC_CREAT && flg & IPC_EXCL)
401 err = -EEXIST;
402 else {
403 err = 0;
404 if (ops->more_checks)
405 err = ops->more_checks(ipcp, params);
406 if (!err)
407 /*
408 * ipc_check_perms returns the IPC id on
409 * success
410 */
411 err = ipc_check_perms(ipcp, ops, params);
412 }
413 ipc_unlock(ipcp);
414 }
415 up_write(&ids->rw_mutex);
416
417 if (err == -EAGAIN)
418 goto retry;
419
420 return err;
421}
422
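The comment in ipcget_public() about read locks not being "upgradable" is the crux of taking rw_mutex as a writer even on the pure-lookup path: a reader that misses the key would have to drop the lock to re-take it for writing, and the key could be created in that window. A compilable pthread sketch of the racy upgrade versus the write-up-front approach used here (function names invented):

#include <pthread.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/* WRONG: "upgrading" by unlock + wrlock opens a window in which
 * another thread may create the very key we failed to find. */
static void find_or_create_racy(void)
{
        pthread_rwlock_rdlock(&lock);
        /* ... look up key: not found ... */
        pthread_rwlock_unlock(&lock);   /* <-- race window opens */
        pthread_rwlock_wrlock(&lock);
        /* ... must re-check for the key before inserting ... */
        pthread_rwlock_unlock(&lock);
}

/* What ipcget_public() does: write-lock up front, so lookup and
 * insertion form a single critical section. */
static void find_or_create_safe(void)
{
        pthread_rwlock_wrlock(&lock);
        /* ... look up key; if absent, insert ... */
        pthread_rwlock_unlock(&lock);
}

int main(void)
{
        find_or_create_racy();
        find_or_create_safe();
        return 0;
}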
423
424/**
321 * ipc_rmid - remove an IPC identifier 425 * ipc_rmid - remove an IPC identifier
322 * @ids: identifier set 426 * @ids: IPC identifier set
323 * @id: Identifier to remove 427 * @ipcp: ipc perm structure containing the identifier to remove
324 * 428 *
325 * The identifier must be valid, and in use. The kernel will panic if 429 * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
326 * fed an invalid identifier. The entry is removed and internal 430 * before this function is called, and remain locked on the exit.
327 * variables recomputed. The object associated with the identifier
328 * is returned.
329 * ipc_ids.mutex and the spinlock for this ID is hold before this function
330 * is called, and remain locked on the exit.
331 */ 431 */
332 432
333struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id) 433void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
334{ 434{
335 struct kern_ipc_perm* p; 435 int lid = ipcid_to_idx(ipcp->id);
336 int lid = id % SEQ_MULTIPLIER; 436
337 BUG_ON(lid >= ids->entries->size); 437 idr_remove(&ids->ipcs_idr, lid);
338 438
339 /*
340 * do not need a rcu_dereference()() here to force ordering
341 * on Alpha, since the ipc_ids.mutex is held.
342 */
343 p = ids->entries->p[lid];
344 ids->entries->p[lid] = NULL;
345 BUG_ON(p==NULL);
346 ids->in_use--; 439 ids->in_use--;
347 440
348 if (lid == ids->max_id) { 441 ipcp->deleted = 1;
349 do { 442
350 lid--; 443 return;
351 if(lid == -1)
352 break;
353 } while (ids->entries->p[lid] == NULL);
354 ids->max_id = lid;
355 }
356 p->deleted = 1;
357 return p;
358} 444}
359 445
360/** 446/**
@@ -491,10 +577,12 @@ static void ipc_do_vfree(struct work_struct *work)
491 */ 577 */
492static void ipc_schedule_free(struct rcu_head *head) 578static void ipc_schedule_free(struct rcu_head *head)
493{ 579{
494 struct ipc_rcu_grace *grace = 580 struct ipc_rcu_grace *grace;
495 container_of(head, struct ipc_rcu_grace, rcu); 581 struct ipc_rcu_sched *sched;
496 struct ipc_rcu_sched *sched = 582
497 container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]); 583 grace = container_of(head, struct ipc_rcu_grace, rcu);
584 sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
585 data[0]);
498 586
499 INIT_WORK(&sched->work, ipc_do_vfree); 587 INIT_WORK(&sched->work, ipc_do_vfree);
500 schedule_work(&sched->work); 588 schedule_work(&sched->work);
@@ -583,7 +671,7 @@ void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
583} 671}
584 672
585/** 673/**
586 * ipc64_perm_to_ipc_perm - convert old ipc permissions to new 674 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
587 * @in: new style IPC permissions 675 * @in: new style IPC permissions
588 * @out: old style IPC permissions 676 * @out: old style IPC permissions
589 * 677 *
@@ -602,44 +690,37 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
602 out->seq = in->seq; 690 out->seq = in->seq;
603} 691}
604 692
605/* 693/**
606 * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get() 694 * ipc_lock - Lock an ipc structure without rw_mutex held
607 * is called with shm_ids.mutex locked. Since grow_ary() is also called with 695 * @ids: IPC identifier set
608 * shm_ids.mutex down(for Shared Memory), there is no need to add read 696 * @id: ipc id to look for
609 * barriers here to gurantee the writes in grow_ary() are seen in order 697 *
610 * here (for Alpha). 698 * Look for an id in the ipc ids idr and lock the associated ipc object.
611 * 699 *
612 * However ipc_get() itself does not necessary require ipc_ids.mutex down. So 700 * The ipc object is locked on exit.
613 * if in the future ipc_get() is used by other places without ipc_ids.mutex 701 *
614 * down, then ipc_get() needs read memery barriers as ipc_lock() does. 702 * This is the routine that should be called when the rw_mutex is not already
703 * held, i.e. idr tree not protected: it protects the idr tree in read mode
704 * during the idr_find().
615 */ 705 */
616struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
617{
618 struct kern_ipc_perm* out;
619 int lid = id % SEQ_MULTIPLIER;
620 if(lid >= ids->entries->size)
621 return NULL;
622 out = ids->entries->p[lid];
623 return out;
624}
625 706
626struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id) 707struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
627{ 708{
628 struct kern_ipc_perm* out; 709 struct kern_ipc_perm *out;
629 int lid = id % SEQ_MULTIPLIER; 710 int lid = ipcid_to_idx(id);
630 struct ipc_id_ary* entries; 711
712 down_read(&ids->rw_mutex);
631 713
632 rcu_read_lock(); 714 rcu_read_lock();
633 entries = rcu_dereference(ids->entries); 715 out = idr_find(&ids->ipcs_idr, lid);
634 if(lid >= entries->size) { 716 if (out == NULL) {
635 rcu_read_unlock();
636 return NULL;
637 }
638 out = entries->p[lid];
639 if(out == NULL) {
640 rcu_read_unlock(); 717 rcu_read_unlock();
641 return NULL; 718 up_read(&ids->rw_mutex);
719 return ERR_PTR(-EINVAL);
642 } 720 }
721
722 up_read(&ids->rw_mutex);
723
643 spin_lock(&out->lock); 724 spin_lock(&out->lock);
644 725
645 /* ipc_rmid() may have already freed the ID while ipc_lock 726 /* ipc_rmid() may have already freed the ID while ipc_lock
@@ -648,33 +729,44 @@ struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
648 if (out->deleted) { 729 if (out->deleted) {
649 spin_unlock(&out->lock); 730 spin_unlock(&out->lock);
650 rcu_read_unlock(); 731 rcu_read_unlock();
651 return NULL; 732 return ERR_PTR(-EINVAL);
652 } 733 }
734
653 return out; 735 return out;
654} 736}
655 737
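Callers of ipc_lock() used to get back NULL and had to assume -EINVAL; the rewrite encodes the errno in the returned pointer via ERR_PTR(), relying on the fact that error values occupy the top, never-mapped page of the address space. A self-contained userspace re-creation of the <linux/err.h> helpers and the resulting calling convention (lookup() is a stand-in for ipc_lock()):

#include <errno.h>
#include <stdio.h>

/* Userspace re-creation of the <linux/err.h> helpers: errno values
 * are < 4096, so (void *)(-errno) lands in the top page of the
 * address space and can never collide with a real object pointer. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for ipc_lock(): the error code travels in the pointer. */
static void *lookup(int id)
{
        static int object = 99;

        if (id != 1)
                return ERR_PTR(-EINVAL);        /* no such id */
        return &object;
}

int main(void)
{
        void *p = lookup(42);

        if (IS_ERR(p))
                printf("lookup failed: %ld\n", PTR_ERR(p)); /* -22 */
        else
                printf("found %d\n", *(int *)p);
        return 0;
}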
656void ipc_lock_by_ptr(struct kern_ipc_perm *perm) 738/**
657{ 739 * ipc_lock_down - Lock an ipc structure with rw_sem held
658 rcu_read_lock(); 740 * @ids: IPC identifier set
659 spin_lock(&perm->lock); 741 * @id: ipc id to look for
660} 742 *
743 * Look for an id in the ipc ids idr and lock the associated ipc object.
744 *
745 * The ipc object is locked on exit.
746 *
747 * This is the routine that should be called when the rw_mutex is already
748 * held, i.e. idr tree protected.
749 */
661 750
662void ipc_unlock(struct kern_ipc_perm* perm) 751struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *ids, int id)
663{ 752{
664 spin_unlock(&perm->lock); 753 struct kern_ipc_perm *out;
665 rcu_read_unlock(); 754 int lid = ipcid_to_idx(id);
666}
667 755
668int ipc_buildid(struct ipc_ids* ids, int id, int seq) 756 rcu_read_lock();
669{ 757 out = idr_find(&ids->ipcs_idr, lid);
670 return SEQ_MULTIPLIER*seq + id; 758 if (out == NULL) {
671} 759 rcu_read_unlock();
760 return ERR_PTR(-EINVAL);
761 }
672 762
673int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid) 763 spin_lock(&out->lock);
674{ 764
675 if(uid/SEQ_MULTIPLIER != ipcp->seq) 765 /*
676 return 1; 766 * No need to verify that the structure is still valid since the
677 return 0; 767 * rw_mutex is held.
768 */
769 return out;
678} 770}
679 771
680#ifdef __ARCH_WANT_IPC_PARSE_VERSION 772#ifdef __ARCH_WANT_IPC_PARSE_VERSION
@@ -707,27 +799,30 @@ struct ipc_proc_iter {
707 struct ipc_proc_iface *iface; 799 struct ipc_proc_iface *iface;
708}; 800};
709 801
710static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos) 802/*
803 * This routine locks the ipc structure found at least at position pos.
804 */
805struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
806 loff_t *new_pos)
711{ 807{
712 struct ipc_proc_iter *iter = s->private; 808 struct kern_ipc_perm *ipc;
713 struct ipc_proc_iface *iface = iter->iface; 809 int total, id;
714 struct kern_ipc_perm *ipc = it;
715 loff_t p;
716 struct ipc_ids *ids;
717 810
718 ids = iter->ns->ids[iface->ids]; 811 total = 0;
812 for (id = 0; id < pos && total < ids->in_use; id++) {
813 ipc = idr_find(&ids->ipcs_idr, id);
814 if (ipc != NULL)
815 total++;
816 }
719 817
720 /* If we had an ipc id locked before, unlock it */ 818 if (total >= ids->in_use)
721 if (ipc && ipc != SEQ_START_TOKEN) 819 return NULL;
722 ipc_unlock(ipc);
723 820
724 /* 821 for ( ; pos < IPCMNI; pos++) {
725 * p = *pos - 1 (because id 0 starts at position 1) 822 ipc = idr_find(&ids->ipcs_idr, pos);
726 * + 1 (because we increment the position by one) 823 if (ipc != NULL) {
727 */ 824 *new_pos = pos + 1;
728 for (p = *pos; p <= ids->max_id; p++) { 825 ipc_lock_by_ptr(ipc);
729 if ((ipc = ipc_lock(ids, p)) != NULL) {
730 *pos = p + 1;
731 return ipc; 826 return ipc;
732 } 827 }
733 } 828 }
@@ -736,16 +831,27 @@ static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
736 return NULL; 831 return NULL;
737} 832}
738 833
834static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
835{
836 struct ipc_proc_iter *iter = s->private;
837 struct ipc_proc_iface *iface = iter->iface;
838 struct kern_ipc_perm *ipc = it;
839
840 /* If we had an ipc id locked before, unlock it */
841 if (ipc && ipc != SEQ_START_TOKEN)
842 ipc_unlock(ipc);
843
844 return sysvipc_find_ipc(iter->ns->ids[iface->ids], *pos, pos);
845}
846
739/* 847/*
740 * File positions: pos 0 -> header, pos n -> ipc id + 1. 848 * File positions: pos 0 -> header, pos n -> ipc id = n - 1.
741 * SeqFile iterator: iterator value locked shp or SEQ_TOKEN_START. 849 * SeqFile iterator: iterator value locked ipc pointer or SEQ_START_TOKEN.
742 */ 850 */
743static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) 851static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
744{ 852{
745 struct ipc_proc_iter *iter = s->private; 853 struct ipc_proc_iter *iter = s->private;
746 struct ipc_proc_iface *iface = iter->iface; 854 struct ipc_proc_iface *iface = iter->iface;
747 struct kern_ipc_perm *ipc;
748 loff_t p;
749 struct ipc_ids *ids; 855 struct ipc_ids *ids;
750 856
751 ids = iter->ns->ids[iface->ids]; 857 ids = iter->ns->ids[iface->ids];
@@ -754,7 +860,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
754 * Take the lock - this will be released by the corresponding 860 * Take the lock - this will be released by the corresponding
755 * call to stop(). 861 * call to stop().
756 */ 862 */
757 mutex_lock(&ids->mutex); 863 down_read(&ids->rw_mutex);
758 864
759 /* pos < 0 is invalid */ 865 /* pos < 0 is invalid */
760 if (*pos < 0) 866 if (*pos < 0)
@@ -765,13 +871,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
765 return SEQ_START_TOKEN; 871 return SEQ_START_TOKEN;
766 872
767 /* Find the (pos-1)th ipc */ 873 /* Find the (pos-1)th ipc */
768 for (p = *pos - 1; p <= ids->max_id; p++) { 874 return sysvipc_find_ipc(ids, *pos - 1, pos);
769 if ((ipc = ipc_lock(ids, p)) != NULL) {
770 *pos = p + 1;
771 return ipc;
772 }
773 }
774 return NULL;
775} 875}
776 876
777static void sysvipc_proc_stop(struct seq_file *s, void *it) 877static void sysvipc_proc_stop(struct seq_file *s, void *it)
@@ -781,13 +881,13 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
781 struct ipc_proc_iface *iface = iter->iface; 881 struct ipc_proc_iface *iface = iter->iface;
782 struct ipc_ids *ids; 882 struct ipc_ids *ids;
783 883
784 /* If we had a locked segment, release it */ 884 /* If we had a locked structure, release it */
785 if (ipc && ipc != SEQ_START_TOKEN) 885 if (ipc && ipc != SEQ_START_TOKEN)
786 ipc_unlock(ipc); 886 ipc_unlock(ipc);
787 887
788 ids = iter->ns->ids[iface->ids]; 888 ids = iter->ns->ids[iface->ids];
789 /* Release the lock we took in start() */ 889 /* Release the lock we took in start() */
790 mutex_unlock(&ids->mutex); 890 up_read(&ids->rw_mutex);
791} 891}
792 892
793static int sysvipc_proc_show(struct seq_file *s, void *it) 893static int sysvipc_proc_show(struct seq_file *s, void *it)
diff --git a/ipc/util.h b/ipc/util.h
index 333e891bcaca..9ffea40457ce 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -10,6 +10,9 @@
10#ifndef _IPC_UTIL_H 10#ifndef _IPC_UTIL_H
11#define _IPC_UTIL_H 11#define _IPC_UTIL_H
12 12
13#include <linux/idr.h>
14#include <linux/err.h>
15
13#define USHRT_MAX 0xffff 16#define USHRT_MAX 0xffff
14#define SEQ_MULTIPLIER (IPCMNI) 17#define SEQ_MULTIPLIER (IPCMNI)
15 18
@@ -25,24 +28,46 @@ void sem_exit_ns(struct ipc_namespace *ns);
25void msg_exit_ns(struct ipc_namespace *ns); 28void msg_exit_ns(struct ipc_namespace *ns);
26void shm_exit_ns(struct ipc_namespace *ns); 29void shm_exit_ns(struct ipc_namespace *ns);
27 30
28struct ipc_id_ary {
29 int size;
30 struct kern_ipc_perm *p[0];
31};
32
33struct ipc_ids { 31struct ipc_ids {
34 int in_use; 32 int in_use;
35 int max_id;
36 unsigned short seq; 33 unsigned short seq;
37 unsigned short seq_max; 34 unsigned short seq_max;
38 struct mutex mutex; 35 struct rw_semaphore rw_mutex;
39 struct ipc_id_ary nullentry; 36 struct idr ipcs_idr;
40 struct ipc_id_ary* entries; 37};
38
39/*
40 * Structure that holds the parameters needed by the ipc operations
41 * (see ipc_ops below)
42 */
43struct ipc_params {
44 key_t key;
45 int flg;
46 union {
47 size_t size; /* for shared memories */
48 int nsems; /* for semaphores */
49 } u; /* holds the getnew() specific param */
50};
51
52/*
53 * Structure that holds some ipc operations. This structure is used to unify
54 * the calls to sys_msgget(), sys_semget(), sys_shmget()
55 * . routine to call to create a new ipc object. Can be one of newque,
56 * newary, newseg
57 * . routine to call to check permissions for a new ipc object.
58 * Can be one of security_msg_associate, security_sem_associate,
59 * security_shm_associate
60 * . routine to call for an extra check if needed
61 */
62struct ipc_ops {
63 int (*getnew) (struct ipc_namespace *, struct ipc_params *);
64 int (*associate) (struct kern_ipc_perm *, int);
65 int (*more_checks) (struct kern_ipc_perm *, struct ipc_params *);
41}; 66};
42 67
43struct seq_file; 68struct seq_file;
44 69
45void ipc_init_ids(struct ipc_ids *ids, int size); 70void ipc_init_ids(struct ipc_ids *);
46#ifdef CONFIG_PROC_FS 71#ifdef CONFIG_PROC_FS
47void __init ipc_init_proc_interface(const char *path, const char *header, 72void __init ipc_init_proc_interface(const char *path, const char *header,
48 int ids, int (*show)(struct seq_file *, void *)); 73 int ids, int (*show)(struct seq_file *, void *));
@@ -54,14 +79,19 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
54#define IPC_MSG_IDS 1 79#define IPC_MSG_IDS 1
55#define IPC_SHM_IDS 2 80#define IPC_SHM_IDS 2
56 81
57/* must be called with ids->mutex acquired.*/ 82#define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
58int ipc_findkey(struct ipc_ids* ids, key_t key); 83
59int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size); 84/* must be called with ids->rw_mutex acquired for writing */
85int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
86
87/* must be called with ids->rw_mutex acquired for reading */
88int ipc_get_maxid(struct ipc_ids *);
60 89
61/* must be called with both locks acquired. */ 90/* must be called with both locks acquired. */
62struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id); 91void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *);
63 92
64int ipcperms (struct kern_ipc_perm *ipcp, short flg); 93/* must be called with ipcp locked */
94int ipcperms(struct kern_ipc_perm *ipcp, short flg);
65 95
66/* for rare, potentially huge allocations. 96/* for rare, potentially huge allocations.
67 * both function can sleep 97 * both function can sleep
@@ -79,24 +109,12 @@ void* ipc_rcu_alloc(int size);
79void ipc_rcu_getref(void *ptr); 109void ipc_rcu_getref(void *ptr);
80void ipc_rcu_putref(void *ptr); 110void ipc_rcu_putref(void *ptr);
81 111
82static inline void __ipc_fini_ids(struct ipc_ids *ids, 112/*
83 struct ipc_id_ary *entries) 113 * ipc_lock_down: called with rw_mutex held
84{ 114 * ipc_lock: called without that lock held
85 if (entries != &ids->nullentry) 115 */
86 ipc_rcu_putref(entries); 116struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *, int);
87} 117struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
88
89static inline void ipc_fini_ids(struct ipc_ids *ids)
90{
91 __ipc_fini_ids(ids, ids->entries);
92}
93
94struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id);
95struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id);
96void ipc_lock_by_ptr(struct kern_ipc_perm *ipcp);
97void ipc_unlock(struct kern_ipc_perm* perm);
98int ipc_buildid(struct ipc_ids* ids, int id, int seq);
99int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid);
100 118
101void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); 119void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
102void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); 120void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
@@ -111,5 +129,89 @@ int ipc_parse_version (int *cmd);
111extern void free_msg(struct msg_msg *msg); 129extern void free_msg(struct msg_msg *msg);
112extern struct msg_msg *load_msg(const void __user *src, int len); 130extern struct msg_msg *load_msg(const void __user *src, int len);
113extern int store_msg(void __user *dest, struct msg_msg *msg, int len); 131extern int store_msg(void __user *dest, struct msg_msg *msg, int len);
132extern int ipcget_new(struct ipc_namespace *, struct ipc_ids *,
133 struct ipc_ops *, struct ipc_params *);
134extern int ipcget_public(struct ipc_namespace *, struct ipc_ids *,
135 struct ipc_ops *, struct ipc_params *);
136
137static inline int ipc_buildid(int id, int seq)
138{
139 return SEQ_MULTIPLIER * seq + id;
140}
141
142/*
143 * Must be called with ipcp locked
144 */
145static inline int ipc_checkid(struct kern_ipc_perm *ipcp, int uid)
146{
147 if (uid / SEQ_MULTIPLIER != ipcp->seq)
148 return 1;
149 return 0;
150}
151
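The user-visible id still packs a generation count and a slot index exactly as before; only the helpers moved and lost their ids argument: ipc_buildid() computes seq * SEQ_MULTIPLIER + idx, ipcid_to_idx() recovers the slot, and ipc_checkid() rejects a stale id whose slot was recycled under a newer seq. A self-contained round-trip check (SEQ_MULTIPLIER is IPCMNI, 32768 in this era):

#include <assert.h>
#include <stdio.h>

#define SEQ_MULTIPLIER 32768            /* == IPCMNI */

static int ipc_buildid(int id, int seq) { return SEQ_MULTIPLIER * seq + id; }
static int ipcid_to_idx(int id) { return id % SEQ_MULTIPLIER; }

/* Nonzero means the id is stale: its slot was recycled under a
 * newer sequence number. */
static int ipc_checkid(int cur_seq, int uid)
{
        return uid / SEQ_MULTIPLIER != cur_seq;
}

int main(void)
{
        int id = ipc_buildid(42, 3);    /* slot 42, 3rd generation */

        assert(ipcid_to_idx(id) == 42);
        assert(ipc_checkid(3, id) == 0);        /* still valid    */
        assert(ipc_checkid(4, id) == 1);        /* slot recycled  */
        printf("id %d decodes to slot %d\n", id, ipcid_to_idx(id));
        return 0;
}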
152static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
153{
154 rcu_read_lock();
155 spin_lock(&perm->lock);
156}
157
158static inline void ipc_unlock(struct kern_ipc_perm *perm)
159{
160 spin_unlock(&perm->lock);
161 rcu_read_unlock();
162}
163
164static inline struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids,
165 int id)
166{
167 struct kern_ipc_perm *out;
168
169 out = ipc_lock_down(ids, id);
170 if (IS_ERR(out))
171 return out;
172
173 if (ipc_checkid(out, id)) {
174 ipc_unlock(out);
175 return ERR_PTR(-EIDRM);
176 }
177
178 return out;
179}
180
181static inline struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids,
182 int id)
183{
184 struct kern_ipc_perm *out;
185
186 out = ipc_lock(ids, id);
187 if (IS_ERR(out))
188 return out;
189
190 if (ipc_checkid(out, id)) {
191 ipc_unlock(out);
192 return ERR_PTR(-EIDRM);
193 }
194
195 return out;
196}
197
198/**
199 * ipcget - Common sys_*get() code
200 * @ns : namespace
201 * @ids : IPC identifier set
202 * @ops : operations to be called on ipc object creation, permission checks
203 * and further checks
204 * @params : the parameters needed by the previous operations.
205 *
206 * Common routine called by sys_msgget(), sys_semget() and sys_shmget().
207 */
208static inline int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
209 struct ipc_ops *ops, struct ipc_params *params)
210{
211 if (params->key == IPC_PRIVATE)
212 return ipcget_new(ns, ids, ops, params);
213 else
214 return ipcget_public(ns, ids, ops, params);
215}
114 216
115#endif 217#endif