Diffstat (limited to 'ipc')
-rw-r--r--  ipc/msg.c   | 115
-rw-r--r--  ipc/sem.c   | 111
-rw-r--r--  ipc/shm.c   | 116
-rw-r--r--  ipc/util.c  | 266
-rw-r--r--  ipc/util.h  |  32
5 files changed, 330 insertions, 310 deletions
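
The hunks below convert the three SysV IPC subsystems from the old fixed-size array of kern_ipc_perm pointers to an IDR per ipc_ids set: lookups go through idr_find(), and iteration now walks candidate ids until ids->in_use live entries have been visited instead of scanning up to a cached max_id. As a reading aid, here is a minimal self-contained sketch of that iteration pattern in userspace C; the lookup table and all names in it are stand-ins invented for illustration, not kernel APIs.

#include <stdio.h>

#define IPCMNI 32768	/* same ceiling the kernel code applies to ipc ids */

struct kern_ipc_perm { int id; };

/* Stand-in for idr_find(): return the object stored under 'id', or NULL. */
static struct kern_ipc_perm *table_find(struct kern_ipc_perm **slots, int id)
{
	return (id >= 0 && id < IPCMNI) ? slots[id] : NULL;
}

/*
 * The walk introduced by this patch (see msg_exit_ns(), sem_exit_ns(),
 * shm_exit_ns() and ipc_findkey() below): advance next_id and count live
 * entries until in_use of them have been visited, skipping holes left by
 * removed ids.  Correctness relies on in_use matching the number of live
 * entries, which the kernel guarantees by holding ipc_ids.mutex.
 */
static void walk_ipcs(struct kern_ipc_perm **slots, int in_use)
{
	int next_id, total;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc = table_find(slots, next_id);

		if (ipc == NULL)
			continue;	/* hole: this id was freed earlier */

		printf("visiting ipc id %d\n", ipc->id);
		total++;
	}
}

int main(void)
{
	static struct kern_ipc_perm *slots[IPCMNI];
	struct kern_ipc_perm a = { .id = 0 }, b = { .id = 5 };

	slots[0] = &a;
	slots[5] = &b;	/* sparse, like an IDR after some removals */
	walk_ipcs(slots, 2);
	return 0;
}

The same total/in_use bound is what lets the new ipc_get_maxid() and sysvipc_find_ipc() helpers stop early without keeping a max_id field.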
diff --git a/ipc/msg.c b/ipc/msg.c
@@ -75,13 +75,12 @@ static struct ipc_ids init_msg_ids;
 
 #define msg_lock(ns, id) ((struct msg_queue*)ipc_lock(&msg_ids(ns), id))
 #define msg_unlock(msq) ipc_unlock(&(msq)->q_perm)
-#define msg_rmid(ns, id) ((struct msg_queue*)ipc_rmid(&msg_ids(ns), id))
 #define msg_checkid(ns, msq, msgid) \
 	ipc_checkid(&msg_ids(ns), &msq->q_perm, msgid)
 #define msg_buildid(ns, id, seq) \
 	ipc_buildid(&msg_ids(ns), id, seq)
 
-static void freeque (struct ipc_namespace *ns, struct msg_queue *msq, int id);
+static void freeque(struct ipc_namespace *, struct msg_queue *);
 static int newque (struct ipc_namespace *ns, key_t key, int msgflg);
 #ifdef CONFIG_PROC_FS
 static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
@@ -93,7 +92,7 @@ static void __msg_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
 	ns->msg_ctlmax = MSGMAX;
 	ns->msg_ctlmnb = MSGMNB;
 	ns->msg_ctlmni = MSGMNI;
-	ipc_init_ids(ids, ns->msg_ctlmni);
+	ipc_init_ids(ids);
 }
 
 int msg_init_ns(struct ipc_namespace *ns)
@@ -110,20 +109,24 @@ int msg_init_ns(struct ipc_namespace *ns)
 
 void msg_exit_ns(struct ipc_namespace *ns)
 {
-	int i;
 	struct msg_queue *msq;
+	int next_id;
+	int total, in_use;
 
 	mutex_lock(&msg_ids(ns).mutex);
-	for (i = 0; i <= msg_ids(ns).max_id; i++) {
-		msq = msg_lock(ns, i);
+
+	in_use = msg_ids(ns).in_use;
+
+	for (total = 0, next_id = 0; total < in_use; next_id++) {
+		msq = idr_find(&msg_ids(ns).ipcs_idr, next_id);
 		if (msq == NULL)
 			continue;
-
-		freeque(ns, msq, i);
+		ipc_lock_by_ptr(&msq->q_perm);
+		freeque(ns, msq);
+		total++;
 	}
 	mutex_unlock(&msg_ids(ns).mutex);
 
-	ipc_fini_ids(ns->ids[IPC_MSG_IDS]);
 	kfree(ns->ids[IPC_MSG_IDS]);
 	ns->ids[IPC_MSG_IDS] = NULL;
 }
@@ -136,6 +139,11 @@ void __init msg_init(void)
 				IPC_MSG_IDS, sysvipc_msg_proc_show);
 }
 
+static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
+{
+	ipc_rmid(&msg_ids(ns), &s->q_perm);
+}
+
 static int newque (struct ipc_namespace *ns, key_t key, int msgflg)
 {
 	struct msg_queue *msq;
@@ -155,6 +163,9 @@ static int newque (struct ipc_namespace *ns, key_t key, int msgflg)
 		return retval;
 	}
 
+	/*
+	 * ipc_addid() locks msq
+	 */
 	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
 	if (id == -1) {
 		security_msg_queue_free(msq);
@@ -162,7 +173,7 @@ static int newque (struct ipc_namespace *ns, key_t key, int msgflg)
 		return -ENOSPC;
 	}
 
-	msq->q_id = msg_buildid(ns, id, msq->q_perm.seq);
+	msq->q_perm.id = msg_buildid(ns, id, msq->q_perm.seq);
 	msq->q_stime = msq->q_rtime = 0;
 	msq->q_ctime = get_seconds();
 	msq->q_cbytes = msq->q_qnum = 0;
@@ -171,9 +182,10 @@ static int newque (struct ipc_namespace *ns, key_t key, int msgflg)
 	INIT_LIST_HEAD(&msq->q_messages);
 	INIT_LIST_HEAD(&msq->q_receivers);
 	INIT_LIST_HEAD(&msq->q_senders);
+
 	msg_unlock(msq);
 
-	return msq->q_id;
+	return msq->q_perm.id;
 }
 
 static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
@@ -225,18 +237,18 @@ static void expunge_all(struct msg_queue *msq, int res)
 /*
  * freeque() wakes up waiters on the sender and receiver waiting queue,
  * removes the message queue from message queue ID
- * array, and cleans up all the messages associated with this queue.
+ * IDR, and cleans up all the messages associated with this queue.
  *
- * msg_ids.mutex and the spinlock for this message queue is hold
+ * msg_ids.mutex and the spinlock for this message queue are held
  * before freeque() is called. msg_ids.mutex remains locked on exit.
  */
-static void freeque(struct ipc_namespace *ns, struct msg_queue *msq, int id)
+static void freeque(struct ipc_namespace *ns, struct msg_queue *msq)
 {
 	struct list_head *tmp;
 
 	expunge_all(msq, -EIDRM);
 	ss_wakeup(&msq->q_senders, 1);
-	msq = msg_rmid(ns, id);
+	msg_rmid(ns, msq);
 	msg_unlock(msq);
 
 	tmp = msq->q_messages.next;
@@ -255,36 +267,51 @@ static void freeque(struct ipc_namespace *ns, struct msg_queue *msq, int id)
 asmlinkage long sys_msgget(key_t key, int msgflg)
 {
 	struct msg_queue *msq;
-	int id, ret = -EPERM;
+	int ret;
 	struct ipc_namespace *ns;
 
 	ns = current->nsproxy->ipc_ns;
 
-	mutex_lock(&msg_ids(ns).mutex);
-	if (key == IPC_PRIVATE)
-		ret = newque(ns, key, msgflg);
-	else if ((id = ipc_findkey(&msg_ids(ns), key)) == -1) { /* key not used */
-		if (!(msgflg & IPC_CREAT))
-			ret = -ENOENT;
-		else
+	ret = idr_pre_get(&msg_ids(ns).ipcs_idr, GFP_KERNEL);
+
+	if (key == IPC_PRIVATE) {
+		if (!ret)
+			ret = -ENOMEM;
+		else {
+			mutex_lock(&msg_ids(ns).mutex);
 			ret = newque(ns, key, msgflg);
-	} else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
-		ret = -EEXIST;
+			mutex_unlock(&msg_ids(ns).mutex);
+		}
 	} else {
-		msq = msg_lock(ns, id);
-		BUG_ON(msq == NULL);
-		if (ipcperms(&msq->q_perm, msgflg))
-			ret = -EACCES;
-		else {
-			int qid = msg_buildid(ns, id, msq->q_perm.seq);
-
-			ret = security_msg_queue_associate(msq, msgflg);
-			if (!ret)
-				ret = qid;
+		mutex_lock(&msg_ids(ns).mutex);
+		msq = (struct msg_queue *) ipc_findkey(&msg_ids(ns), key);
+		if (msq == NULL) {
+			/* key not used */
+			if (!(msgflg & IPC_CREAT))
+				ret = -ENOENT;
+			else if (!ret)
+				ret = -ENOMEM;
+			else
+				ret = newque(ns, key, msgflg);
+		} else {
+			/* msq has been locked by ipc_findkey() */
+
+			if (msgflg & IPC_CREAT && msgflg & IPC_EXCL)
+				ret = -EEXIST;
+			else {
+				if (ipcperms(&msq->q_perm, msgflg))
+					ret = -EACCES;
+				else {
+					ret = security_msg_queue_associate(
+								msq, msgflg);
+					if (!ret)
+						ret = msq->q_perm.id;
+				}
+			}
+			msg_unlock(msq);
 		}
-		msg_unlock(msq);
+		mutex_unlock(&msg_ids(ns).mutex);
 	}
-	mutex_unlock(&msg_ids(ns).mutex);
 
 	return ret;
 }
@@ -430,13 +457,13 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
 			msginfo.msgpool = MSGPOOL;
 			msginfo.msgtql = MSGTQL;
 		}
-		max_id = msg_ids(ns).max_id;
+		max_id = ipc_get_maxid(&msg_ids(ns));
 		mutex_unlock(&msg_ids(ns).mutex);
 		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
 			return -EFAULT;
 		return (max_id < 0) ? 0 : max_id;
 	}
-	case MSG_STAT:
+	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
 	case IPC_STAT:
 	{
 		struct msqid64_ds tbuf;
@@ -444,8 +471,6 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
 
 		if (!buf)
 			return -EFAULT;
-		if (cmd == MSG_STAT && msqid >= msg_ids(ns).entries->size)
-			return -EINVAL;
 
 		memset(&tbuf, 0, sizeof(tbuf));
 
@@ -454,7 +479,7 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
 			return -EINVAL;
 
 		if (cmd == MSG_STAT) {
-			success_return = msg_buildid(ns, msqid, msq->q_perm.seq);
+			success_return = msq->q_perm.id;
 		} else {
 			err = -EIDRM;
 			if (msg_checkid(ns, msq, msqid))
@@ -552,7 +577,7 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
 		break;
 	}
 	case IPC_RMID:
-		freeque(ns, msq, msqid);
+		freeque(ns, msq);
 		break;
 	}
 	err = 0;
@@ -926,7 +951,7 @@ static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
 	return seq_printf(s,
 			"%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
 			msq->q_perm.key,
-			msq->q_id,
+			msq->q_perm.id,
 			msq->q_perm.mode,
 			msq->q_cbytes,
 			msq->q_qnum,
diff --git a/ipc/sem.c b/ipc/sem.c
@@ -90,7 +90,6 @@
 
 #define sem_lock(ns, id) ((struct sem_array*)ipc_lock(&sem_ids(ns), id))
 #define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
-#define sem_rmid(ns, id) ((struct sem_array*)ipc_rmid(&sem_ids(ns), id))
 #define sem_checkid(ns, sma, semid) \
 	ipc_checkid(&sem_ids(ns),&sma->sem_perm,semid)
 #define sem_buildid(ns, id, seq) \
@@ -99,7 +98,7 @@
 static struct ipc_ids init_sem_ids;
 
 static int newary(struct ipc_namespace *, key_t, int, int);
-static void freeary(struct ipc_namespace *ns, struct sem_array *sma, int id);
+static void freeary(struct ipc_namespace *, struct sem_array *);
 #ifdef CONFIG_PROC_FS
 static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 #endif
@@ -129,7 +128,7 @@ static void __sem_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
 	ns->sc_semopm = SEMOPM;
 	ns->sc_semmni = SEMMNI;
 	ns->used_sems = 0;
-	ipc_init_ids(ids, ns->sc_semmni);
+	ipc_init_ids(ids);
 }
 
 int sem_init_ns(struct ipc_namespace *ns)
@@ -146,20 +145,24 @@ int sem_init_ns(struct ipc_namespace *ns)
 
 void sem_exit_ns(struct ipc_namespace *ns)
 {
-	int i;
 	struct sem_array *sma;
+	int next_id;
+	int total, in_use;
 
 	mutex_lock(&sem_ids(ns).mutex);
-	for (i = 0; i <= sem_ids(ns).max_id; i++) {
-		sma = sem_lock(ns, i);
+
+	in_use = sem_ids(ns).in_use;
+
+	for (total = 0, next_id = 0; total < in_use; next_id++) {
+		sma = idr_find(&sem_ids(ns).ipcs_idr, next_id);
 		if (sma == NULL)
 			continue;
-
-		freeary(ns, sma, i);
+		ipc_lock_by_ptr(&sma->sem_perm);
+		freeary(ns, sma);
+		total++;
 	}
 	mutex_unlock(&sem_ids(ns).mutex);
 
-	ipc_fini_ids(ns->ids[IPC_SEM_IDS]);
 	kfree(ns->ids[IPC_SEM_IDS]);
 	ns->ids[IPC_SEM_IDS] = NULL;
 }
@@ -172,6 +175,11 @@ void __init sem_init (void)
 				IPC_SEM_IDS, sysvipc_sem_proc_show);
 }
 
+static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
+{
+	ipc_rmid(&sem_ids(ns), &s->sem_perm);
+}
+
 /*
  * Lockless wakeup algorithm:
  * Without the check/retry algorithm a lockless wakeup is possible:
@@ -243,7 +251,7 @@ static int newary (struct ipc_namespace *ns, key_t key, int nsems, int semflg)
 	}
 	ns->used_sems += nsems;
 
-	sma->sem_id = sem_buildid(ns, id, sma->sem_perm.seq);
+	sma->sem_perm.id = sem_buildid(ns, id, sma->sem_perm.seq);
 	sma->sem_base = (struct sem *) &sma[1];
 	/* sma->sem_pending = NULL; */
 	sma->sem_pending_last = &sma->sem_pending;
@@ -252,12 +260,12 @@ static int newary (struct ipc_namespace *ns, key_t key, int nsems, int semflg)
 	sma->sem_ctime = get_seconds();
 	sem_unlock(sma);
 
-	return sma->sem_id;
+	return sma->sem_perm.id;
 }
 
 asmlinkage long sys_semget (key_t key, int nsems, int semflg)
 {
-	int id, err = -EINVAL;
+	int err;
 	struct sem_array *sma;
 	struct ipc_namespace *ns;
 
@@ -265,34 +273,50 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
 
 	if (nsems < 0 || nsems > ns->sc_semmsl)
 		return -EINVAL;
-	mutex_lock(&sem_ids(ns).mutex);
-
+
+	err = idr_pre_get(&sem_ids(ns).ipcs_idr, GFP_KERNEL);
+
 	if (key == IPC_PRIVATE) {
-		err = newary(ns, key, nsems, semflg);
-	} else if ((id = ipc_findkey(&sem_ids(ns), key)) == -1) { /* key not used */
-		if (!(semflg & IPC_CREAT))
-			err = -ENOENT;
-		else
+		if (!err)
+			err = -ENOMEM;
+		else {
+			mutex_lock(&sem_ids(ns).mutex);
 			err = newary(ns, key, nsems, semflg);
-	} else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
-		err = -EEXIST;
+			mutex_unlock(&sem_ids(ns).mutex);
+		}
 	} else {
-		sma = sem_lock(ns, id);
-		BUG_ON(sma==NULL);
-		if (nsems > sma->sem_nsems)
-			err = -EINVAL;
-		else if (ipcperms(&sma->sem_perm, semflg))
-			err = -EACCES;
-		else {
-			int semid = sem_buildid(ns, id, sma->sem_perm.seq);
-			err = security_sem_associate(sma, semflg);
-			if (!err)
-				err = semid;
+		mutex_lock(&sem_ids(ns).mutex);
+		sma = (struct sem_array *) ipc_findkey(&sem_ids(ns), key);
+		if (sma == NULL) {
+			/* key not used */
+			if (!(semflg & IPC_CREAT))
+				err = -ENOENT;
+			else if (!err)
+				err = -ENOMEM;
+			else
+				err = newary(ns, key, nsems, semflg);
+		} else {
+			/* sma has been locked by ipc_findkey() */
+
+			if (semflg & IPC_CREAT && semflg & IPC_EXCL)
+				err = -EEXIST;
+			else {
+				if (nsems > sma->sem_nsems)
+					err = -EINVAL;
+				else if (ipcperms(&sma->sem_perm, semflg))
+					err = -EACCES;
+				else {
+					err = security_sem_associate(sma,
+							semflg);
+					if (!err)
+						err = sma->sem_perm.id;
+				}
+			}
+			sem_unlock(sma);
 		}
-		sem_unlock(sma);
+		mutex_unlock(&sem_ids(ns).mutex);
 	}
 
-	mutex_unlock(&sem_ids(ns).mutex);
 	return err;
 }
 
@@ -491,11 +515,10 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
  * the spinlock for this semaphore set hold. sem_ids.mutex remains locked
  * on exit.
  */
-static void freeary (struct ipc_namespace *ns, struct sem_array *sma, int id)
+static void freeary(struct ipc_namespace *ns, struct sem_array *sma)
 {
 	struct sem_undo *un;
 	struct sem_queue *q;
-	int size;
 
 	/* Invalidate the existing undo structures for this semaphore set.
 	 * (They will be freed without any further action in exit_sem()
@@ -518,12 +541,11 @@ static void freeary (struct ipc_namespace *ns, struct sem_array *sma, int id)
 		q = n;
 	}
 
-	/* Remove the semaphore set from the ID array*/
-	sma = sem_rmid(ns, id);
+	/* Remove the semaphore set from the IDR */
+	sem_rmid(ns, sma);
 	sem_unlock(sma);
 
 	ns->used_sems -= sma->sem_nsems;
-	size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
 	security_sem_free(sma);
 	ipc_rcu_putref(sma);
 }
@@ -584,7 +606,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum,
 		seminfo.semusz = SEMUSZ;
 		seminfo.semaem = SEMAEM;
 	}
-	max_id = sem_ids(ns).max_id;
+	max_id = ipc_get_maxid(&sem_ids(ns));
 	mutex_unlock(&sem_ids(ns).mutex);
 	if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
 		return -EFAULT;
@@ -595,9 +617,6 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum,
 		struct semid64_ds tbuf;
 		int id;
 
-		if(semid >= sem_ids(ns).entries->size)
-			return -EINVAL;
-
 		memset(&tbuf,0,sizeof(tbuf));
 
 		sma = sem_lock(ns, semid);
@@ -612,7 +631,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, int semnum,
 		if (err)
 			goto out_unlock;
 
-		id = sem_buildid(ns, semid, sma->sem_perm.seq);
+		id = sma->sem_perm.id;
 
 		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
 		tbuf.sem_otime = sma->sem_otime;
@@ -894,7 +913,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid, int semnum,
 
 	switch(cmd){
 	case IPC_RMID:
-		freeary(ns, sma, semid);
+		freeary(ns, sma);
 		err = 0;
 		break;
 	case IPC_SET:
@@ -1402,7 +1421,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
 	return seq_printf(s,
 			"%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
 			sma->sem_perm.key,
-			sma->sem_id,
+			sma->sem_perm.id,
 			sma->sem_perm.mode,
 			sma->sem_nsems,
 			sma->sem_perm.uid,
diff --git a/ipc/shm.c b/ipc/shm.c
@@ -84,7 +84,7 @@ static void __shm_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
 	ns->shm_ctlall = SHMALL;
 	ns->shm_ctlmni = SHMMNI;
 	ns->shm_tot = 0;
-	ipc_init_ids(ids, 1);
+	ipc_init_ids(ids);
 }
 
 static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp)
@@ -112,20 +112,24 @@ int shm_init_ns(struct ipc_namespace *ns)
 
 void shm_exit_ns(struct ipc_namespace *ns)
 {
-	int i;
 	struct shmid_kernel *shp;
+	int next_id;
+	int total, in_use;
 
 	mutex_lock(&shm_ids(ns).mutex);
-	for (i = 0; i <= shm_ids(ns).max_id; i++) {
-		shp = shm_lock(ns, i);
+
+	in_use = shm_ids(ns).in_use;
+
+	for (total = 0, next_id = 0; total < in_use; next_id++) {
+		shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
 		if (shp == NULL)
 			continue;
-
+		ipc_lock_by_ptr(&shp->shm_perm);
 		do_shm_rmid(ns, shp);
+		total++;
 	}
 	mutex_unlock(&shm_ids(ns).mutex);
 
-	ipc_fini_ids(ns->ids[IPC_SHM_IDS]);
 	kfree(ns->ids[IPC_SHM_IDS]);
 	ns->ids[IPC_SHM_IDS] = NULL;
 }
@@ -146,9 +150,9 @@ static inline int shm_checkid(struct ipc_namespace *ns,
 	return 0;
 }
 
-static inline struct shmid_kernel *shm_rmid(struct ipc_namespace *ns, int id)
+static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 {
-	return (struct shmid_kernel *)ipc_rmid(&shm_ids(ns), id);
+	ipc_rmid(&shm_ids(ns), &s->shm_perm);
 }
 
 static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
@@ -184,7 +188,7 @@ static void shm_open(struct vm_area_struct *vma)
 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 {
 	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	shm_rmid(ns, shp->id);
+	shm_rmid(ns, shp);
 	shm_unlock(shp);
 	if (!is_file_hugepages(shp->shm_file))
 		shmem_lock(shp->shm_file, 0, shp->mlock_user);
@@ -398,17 +402,18 @@ static int newseg (struct ipc_namespace *ns, key_t key, int shmflg, size_t size)
 	shp->shm_ctim = get_seconds();
 	shp->shm_segsz = size;
 	shp->shm_nattch = 0;
-	shp->id = shm_buildid(ns, id, shp->shm_perm.seq);
+	shp->shm_perm.id = shm_buildid(ns, id, shp->shm_perm.seq);
 	shp->shm_file = file;
 	/*
 	 * shmid gets reported as "inode#" in /proc/pid/maps.
 	 * proc-ps tools use this. Changing this will break them.
 	 */
-	file->f_dentry->d_inode->i_ino = shp->id;
+	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;
 
 	ns->shm_tot += numpages;
+	error = shp->shm_perm.id;
 	shm_unlock(shp);
-	return shp->id;
+	return error;
 
 no_id:
 	fput(file);
@@ -421,37 +426,52 @@ no_file:
 asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
 {
 	struct shmid_kernel *shp;
-	int err, id = 0;
+	int err;
 	struct ipc_namespace *ns;
 
 	ns = current->nsproxy->ipc_ns;
 
-	mutex_lock(&shm_ids(ns).mutex);
+	err = idr_pre_get(&shm_ids(ns).ipcs_idr, GFP_KERNEL);
+
 	if (key == IPC_PRIVATE) {
-		err = newseg(ns, key, shmflg, size);
-	} else if ((id = ipc_findkey(&shm_ids(ns), key)) == -1) {
-		if (!(shmflg & IPC_CREAT))
-			err = -ENOENT;
-		else
+		if (!err)
+			err = -ENOMEM;
+		else {
+			mutex_lock(&shm_ids(ns).mutex);
 			err = newseg(ns, key, shmflg, size);
-	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
-		err = -EEXIST;
+			mutex_unlock(&shm_ids(ns).mutex);
+		}
 	} else {
-		shp = shm_lock(ns, id);
-		BUG_ON(shp==NULL);
-		if (shp->shm_segsz < size)
-			err = -EINVAL;
-		else if (ipcperms(&shp->shm_perm, shmflg))
-			err = -EACCES;
-		else {
-			int shmid = shm_buildid(ns, id, shp->shm_perm.seq);
-			err = security_shm_associate(shp, shmflg);
-			if (!err)
-				err = shmid;
+		mutex_lock(&shm_ids(ns).mutex);
+		shp = (struct shmid_kernel *) ipc_findkey(&shm_ids(ns), key);
+		if (shp == NULL) {
+			if (!(shmflg & IPC_CREAT))
+				err = -ENOENT;
+			else if (!err)
+				err = -ENOMEM;
+			else
+				err = newseg(ns, key, shmflg, size);
+		} else {
+			/* shp has been locked by ipc_findkey() */
+
+			if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
+				err = -EEXIST;
+			else {
+				if (shp->shm_segsz < size)
+					err = -EINVAL;
+				else if (ipcperms(&shp->shm_perm, shmflg))
+					err = -EACCES;
+				else {
+					err = security_shm_associate(shp,
							shmflg);
+					if (!err)
+						err = shp->shm_perm.id;
+				}
+			}
+			shm_unlock(shp);
 		}
-		shm_unlock(shp);
+		mutex_unlock(&shm_ids(ns).mutex);
 	}
-	mutex_unlock(&shm_ids(ns).mutex);
 
 	return err;
 }
@@ -550,17 +570,20 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf
 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 		unsigned long *swp)
 {
-	int i;
+	int next_id;
+	int total, in_use;
 
 	*rss = 0;
 	*swp = 0;
 
-	for (i = 0; i <= shm_ids(ns).max_id; i++) {
+	in_use = shm_ids(ns).in_use;
+
+	for (total = 0, next_id = 0; total < in_use; next_id++) {
 		struct shmid_kernel *shp;
 		struct inode *inode;
 
-		shp = shm_get(ns, i);
-		if(!shp)
+		shp = shm_get(ns, next_id);
+		if (shp == NULL)
 			continue;
 
 		inode = shp->shm_file->f_path.dentry->d_inode;
@@ -575,6 +598,8 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
 			*swp += info->swapped;
 			spin_unlock(&info->lock);
 		}
+
+		total++;
 	}
 }
 
@@ -611,7 +636,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
 		if(copy_shminfo_to_user (buf, &shminfo, version))
 			return -EFAULT;
 		/* reading a integer is always atomic */
-		err= shm_ids(ns).max_id;
+		err = ipc_get_maxid(&shm_ids(ns));
 		if(err<0)
 			err = 0;
 		goto out;
@@ -631,7 +656,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
 		shm_info.shm_tot = ns->shm_tot;
 		shm_info.swap_attempts = 0;
 		shm_info.swap_successes = 0;
-		err = shm_ids(ns).max_id;
+		err = ipc_get_maxid(&shm_ids(ns));
 		mutex_unlock(&shm_ids(ns).mutex);
 		if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
 			err = -EFAULT;
@@ -651,11 +676,8 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
 		if(shp==NULL) {
 			err = -EINVAL;
 			goto out;
-		} else if(cmd==SHM_STAT) {
-			err = -EINVAL;
-			if (shmid > shm_ids(ns).max_id)
-				goto out_unlock;
-			result = shm_buildid(ns, shmid, shp->shm_perm.seq);
+		} else if (cmd == SHM_STAT) {
+			result = shp->shm_perm.id;
 		} else {
 			err = shm_checkid(ns, shp,shmid);
 			if(err)
@@ -925,7 +947,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
 
 	file->private_data = sfd;
 	file->f_mapping = shp->shm_file->f_mapping;
-	sfd->id = shp->id;
+	sfd->id = shp->shm_perm.id;
 	sfd->ns = get_ipc_ns(ns);
 	sfd->file = shp->shm_file;
 	sfd->vm_ops = NULL;
@@ -1094,7 +1116,7 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
 		format = BIG_STRING;
 	return seq_printf(s, format,
 			  shp->shm_perm.key,
-			  shp->id,
+			  shp->shm_perm.id,
 			  shp->shm_perm.mode,
 			  shp->shm_segsz,
 			  shp->shm_cprid,
diff --git a/ipc/util.c b/ipc/util.c
index 44e5135aee47..2a205875d277 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -129,23 +129,16 @@ __initcall(ipc_init);
 /**
  * ipc_init_ids - initialise IPC identifiers
  * @ids: Identifier set
- * @size: Number of identifiers
  *
- * Given a size for the ipc identifier range (limited below IPCMNI)
- * set up the sequence range to use then allocate and initialise the
- * array itself.
+ * Set up the sequence range to use for the ipc identifier range (limited
+ * below IPCMNI) then initialise the ids idr.
  */
 
-void ipc_init_ids(struct ipc_ids* ids, int size)
+void ipc_init_ids(struct ipc_ids *ids)
 {
-	int i;
-
 	mutex_init(&ids->mutex);
 
-	if(size > IPCMNI)
-		size = IPCMNI;
 	ids->in_use = 0;
-	ids->max_id = -1;
 	ids->seq = 0;
 	{
 		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
@@ -155,17 +148,7 @@ void ipc_init_ids(struct ipc_ids* ids, int size)
 		ids->seq_max = seq_limit;
 	}
 
-	ids->entries = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*size +
-				sizeof(struct ipc_id_ary));
-
-	if(ids->entries == NULL) {
-		printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
-		size = 0;
-		ids->entries = &ids->nullentry;
-	}
-	ids->entries->size = size;
-	for(i=0;i<size;i++)
-		ids->entries->p[i] = NULL;
+	idr_init(&ids->ipcs_idr);
 }
 
 #ifdef CONFIG_PROC_FS
@@ -209,72 +192,73 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
  * @key: The key to find
  *
  * Requires ipc_ids.mutex locked.
- * Returns the identifier if found or -1 if not.
+ * Returns the LOCKED pointer to the ipc structure if found or NULL
+ * if not.
+ * If key is found ipc contains its ipc structure
  */
 
-int ipc_findkey(struct ipc_ids* ids, key_t key)
+struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
 {
-	int id;
-	struct kern_ipc_perm* p;
-	int max_id = ids->max_id;
+	struct kern_ipc_perm *ipc;
+	int next_id;
+	int total;
 
-	/*
-	 * rcu_dereference() is not needed here
-	 * since ipc_ids.mutex is held
-	 */
-	for (id = 0; id <= max_id; id++) {
-		p = ids->entries->p[id];
-		if(p==NULL)
+	for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
+		ipc = idr_find(&ids->ipcs_idr, next_id);
+
+		if (ipc == NULL)
 			continue;
-		if (key == p->key)
-			return id;
+
+		if (ipc->key != key) {
+			total++;
+			continue;
+		}
+
+		ipc_lock_by_ptr(ipc);
+		return ipc;
 	}
-	return -1;
+
+	return NULL;
 }
 
-/*
- * Requires ipc_ids.mutex locked
+/**
+ * ipc_get_maxid - get the last assigned id
+ * @ids: IPC identifier set
+ *
+ * Called with ipc_ids.mutex held.
  */
-static int grow_ary(struct ipc_ids* ids, int newsize)
-{
-	struct ipc_id_ary* new;
-	struct ipc_id_ary* old;
-	int i;
-	int size = ids->entries->size;
-
-	if(newsize > IPCMNI)
-		newsize = IPCMNI;
-	if(newsize <= size)
-		return newsize;
-
-	new = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*newsize +
-			    sizeof(struct ipc_id_ary));
-	if(new == NULL)
-		return size;
-	new->size = newsize;
-	memcpy(new->p, ids->entries->p, sizeof(struct kern_ipc_perm *)*size);
-	for(i=size;i<newsize;i++) {
-		new->p[i] = NULL;
-	}
-	old = ids->entries;
 
-	/*
-	 * Use rcu_assign_pointer() to make sure the memcpyed contents
-	 * of the new array are visible before the new array becomes visible.
-	 */
-	rcu_assign_pointer(ids->entries, new);
+int ipc_get_maxid(struct ipc_ids *ids)
+{
+	struct kern_ipc_perm *ipc;
+	int max_id = -1;
+	int total, id;
+
+	if (ids->in_use == 0)
+		return -1;
 
-	__ipc_fini_ids(ids, old);
-	return newsize;
+	if (ids->in_use == IPCMNI)
+		return IPCMNI - 1;
+
+	/* Look for the last assigned id */
+	total = 0;
+	for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
+		ipc = idr_find(&ids->ipcs_idr, id);
+		if (ipc != NULL) {
+			max_id = id;
+			total++;
+		}
+	}
+	return max_id;
 }
 
 /**
  * ipc_addid - add an IPC identifier
  * @ids: IPC identifier set
  * @new: new IPC permission set
- * @size: new size limit for the id array
+ * @size: limit for the number of used ids
 *
- * Add an entry 'new' to the IPC arrays. The permissions object is
+ * Add an entry 'new' to the IPC idr. The permissions object is
 * initialised and the first free entry is set up and the id assigned
 * is returned. The list is returned in a locked state on success.
 * On failure the list is not locked and -1 is returned.
@@ -284,23 +268,23 @@ static int grow_ary(struct ipc_ids* ids, int newsize)
 
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 {
-	int id;
-
-	size = grow_ary(ids,size);
+	int id, err;
 
 	/*
 	 * rcu_dereference()() is not needed here since
 	 * ipc_ids.mutex is held
 	 */
-	for (id = 0; id < size; id++) {
-		if(ids->entries->p[id] == NULL)
-			goto found;
-	}
-	return -1;
-found:
+	if (size > IPCMNI)
+		size = IPCMNI;
+
+	if (ids->in_use >= size)
+		return -1;
+
+	err = idr_get_new(&ids->ipcs_idr, new, &id);
+	if (err)
+		return -1;
+
 	ids->in_use++;
-	if (id > ids->max_id)
-		ids->max_id = id;
 
 	new->cuid = new->uid = current->euid;
 	new->gid = new->cgid = current->egid;
@@ -313,48 +297,32 @@ found:
 	new->deleted = 0;
 	rcu_read_lock();
 	spin_lock(&new->lock);
-	ids->entries->p[id] = new;
 	return id;
 }
 
 /**
 * ipc_rmid - remove an IPC identifier
 * @ids: identifier set
- * @id: Identifier to remove
+ * @id: ipc perm structure containing the identifier to remove
 *
 * The identifier must be valid, and in use. The kernel will panic if
 * fed an invalid identifier. The entry is removed and internal
- * variables recomputed. The object associated with the identifier
- * is returned.
- * ipc_ids.mutex and the spinlock for this ID is hold before this function
- * is called, and remain locked on the exit.
+ * variables recomputed.
+ * ipc_ids.mutex and the spinlock for this ID are held before this
+ * function is called, and remain locked on the exit.
 */
 
-struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
+void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
 {
-	struct kern_ipc_perm* p;
-	int lid = id % SEQ_MULTIPLIER;
-	BUG_ON(lid >= ids->entries->size);
+	int lid = ipcp->id % SEQ_MULTIPLIER;
+
+	idr_remove(&ids->ipcs_idr, lid);
 
-	/*
-	 * do not need a rcu_dereference()() here to force ordering
-	 * on Alpha, since the ipc_ids.mutex is held.
-	 */
-	p = ids->entries->p[lid];
-	ids->entries->p[lid] = NULL;
-	BUG_ON(p==NULL);
 	ids->in_use--;
 
-	if (lid == ids->max_id) {
-		do {
-			lid--;
-			if(lid == -1)
-				break;
-		} while (ids->entries->p[lid] == NULL);
-		ids->max_id = lid;
-	}
-	p->deleted = 1;
-	return p;
+	ipcp->deleted = 1;
+
+	return;
 }
 
 /**
@@ -613,33 +581,26 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
 * if in the future ipc_get() is used by other places without ipc_ids.mutex
 * down, then ipc_get() needs read memery barriers as ipc_lock() does.
 */
-struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
+struct kern_ipc_perm *ipc_get(struct ipc_ids *ids, int id)
 {
-	struct kern_ipc_perm* out;
+	struct kern_ipc_perm *out;
 	int lid = id % SEQ_MULTIPLIER;
-	if(lid >= ids->entries->size)
-		return NULL;
-	out = ids->entries->p[lid];
+	out = idr_find(&ids->ipcs_idr, lid);
 	return out;
 }
 
-struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
+struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
 {
-	struct kern_ipc_perm* out;
+	struct kern_ipc_perm *out;
 	int lid = id % SEQ_MULTIPLIER;
-	struct ipc_id_ary* entries;
 
 	rcu_read_lock();
-	entries = rcu_dereference(ids->entries);
-	if(lid >= entries->size) {
-		rcu_read_unlock();
-		return NULL;
-	}
-	out = entries->p[lid];
-	if(out == NULL) {
+	out = idr_find(&ids->ipcs_idr, lid);
+	if (out == NULL) {
 		rcu_read_unlock();
 		return NULL;
 	}
+
 	spin_lock(&out->lock);
 
 	/* ipc_rmid() may have already freed the ID while ipc_lock
@@ -650,6 +611,7 @@ struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
 		rcu_read_unlock();
 		return NULL;
 	}
+
 	return out;
 }
 
@@ -707,27 +669,30 @@ struct ipc_proc_iter {
 	struct ipc_proc_iface *iface;
 };
 
-static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
+/*
+ * This routine locks the ipc structure found at least at position pos.
+ */
+struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
+					loff_t *new_pos)
 {
-	struct ipc_proc_iter *iter = s->private;
-	struct ipc_proc_iface *iface = iter->iface;
-	struct kern_ipc_perm *ipc = it;
-	loff_t p;
-	struct ipc_ids *ids;
+	struct kern_ipc_perm *ipc;
+	int total, id;
 
-	ids = iter->ns->ids[iface->ids];
+	total = 0;
+	for (id = 0; id < pos && total < ids->in_use; id++) {
+		ipc = idr_find(&ids->ipcs_idr, id);
+		if (ipc != NULL)
+			total++;
+	}
 
-	/* If we had an ipc id locked before, unlock it */
-	if (ipc && ipc != SEQ_START_TOKEN)
-		ipc_unlock(ipc);
+	if (total >= ids->in_use)
+		return NULL;
 
-	/*
-	 * p = *pos - 1 (because id 0 starts at position 1)
-	 *       + 1 (because we increment the position by one)
-	 */
-	for (p = *pos; p <= ids->max_id; p++) {
-		if ((ipc = ipc_lock(ids, p)) != NULL) {
-			*pos = p + 1;
+	for ( ; pos < IPCMNI; pos++) {
+		ipc = idr_find(&ids->ipcs_idr, pos);
+		if (ipc != NULL) {
+			*new_pos = pos + 1;
+			ipc_lock_by_ptr(ipc);
 			return ipc;
 		}
 	}
@@ -736,6 +701,19 @@ static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
 	return NULL;
 }
 
+static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
+{
+	struct ipc_proc_iter *iter = s->private;
+	struct ipc_proc_iface *iface = iter->iface;
+	struct kern_ipc_perm *ipc = it;
+
+	/* If we had an ipc id locked before, unlock it */
+	if (ipc && ipc != SEQ_START_TOKEN)
+		ipc_unlock(ipc);
+
+	return sysvipc_find_ipc(iter->ns->ids[iface->ids], *pos, pos);
+}
+
 /*
 * File positions: pos 0 -> header, pos n -> ipc id + 1.
 * SeqFile iterator: iterator value locked shp or SEQ_TOKEN_START.
@@ -744,8 +722,6 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
 {
 	struct ipc_proc_iter *iter = s->private;
 	struct ipc_proc_iface *iface = iter->iface;
-	struct kern_ipc_perm *ipc;
-	loff_t p;
 	struct ipc_ids *ids;
 
 	ids = iter->ns->ids[iface->ids];
@@ -765,13 +741,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
 		return SEQ_START_TOKEN;
 
 	/* Find the (pos-1)th ipc */
-	for (p = *pos - 1; p <= ids->max_id; p++) {
-		if ((ipc = ipc_lock(ids, p)) != NULL) {
-			*pos = p + 1;
-			return ipc;
-		}
-	}
-	return NULL;
+	return sysvipc_find_ipc(ids, *pos - 1, pos);
 }
 
 static void sysvipc_proc_stop(struct seq_file *s, void *it)
diff --git a/ipc/util.h b/ipc/util.h
index 333e891bcaca..c9063267d4f8 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -10,6 +10,8 @@
 #ifndef _IPC_UTIL_H
 #define _IPC_UTIL_H
 
+#include <linux/idr.h>
+
 #define USHRT_MAX 0xffff
 #define SEQ_MULTIPLIER (IPCMNI)
 
@@ -25,24 +27,17 @@ void sem_exit_ns(struct ipc_namespace *ns);
 void msg_exit_ns(struct ipc_namespace *ns);
 void shm_exit_ns(struct ipc_namespace *ns);
 
-struct ipc_id_ary {
-	int size;
-	struct kern_ipc_perm *p[0];
-};
-
 struct ipc_ids {
 	int in_use;
-	int max_id;
 	unsigned short seq;
 	unsigned short seq_max;
 	struct mutex mutex;
-	struct ipc_id_ary nullentry;
-	struct ipc_id_ary* entries;
+	struct idr ipcs_idr;
 };
 
 struct seq_file;
 
-void ipc_init_ids(struct ipc_ids *ids, int size);
+void ipc_init_ids(struct ipc_ids *);
 #ifdef CONFIG_PROC_FS
 void __init ipc_init_proc_interface(const char *path, const char *header,
 		int ids, int (*show)(struct seq_file *, void *));
@@ -55,11 +50,12 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
 #define IPC_SHM_IDS 2
 
 /* must be called with ids->mutex acquired.*/
-int ipc_findkey(struct ipc_ids* ids, key_t key);
-int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size);
+struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key);
+int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
+int ipc_get_maxid(struct ipc_ids *);
 
 /* must be called with both locks acquired. */
-struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id);
+void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *);
 
 int ipcperms (struct kern_ipc_perm *ipcp, short flg);
 
@@ -79,18 +75,6 @@ void* ipc_rcu_alloc(int size);
 void ipc_rcu_getref(void *ptr);
 void ipc_rcu_putref(void *ptr);
 
-static inline void __ipc_fini_ids(struct ipc_ids *ids,
-		struct ipc_id_ary *entries)
-{
-	if (entries != &ids->nullentry)
-		ipc_rcu_putref(entries);
-}
-
-static inline void ipc_fini_ids(struct ipc_ids *ids)
-{
-	__ipc_fini_ids(ids, ids->entries);
-}
-
 struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id);
 struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id);
 void ipc_lock_by_ptr(struct kern_ipc_perm *ipcp);
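
A closing note on the id arithmetic that ipc_rmid() and ipc_lock() above rely on: the value stored in kern_ipc_perm.id (and handed back to userspace by msgget/semget/shmget) folds together the IDR slot and a sequence counter, so the slot is recovered with id % SEQ_MULTIPLIER. The sketch below assumes the classic ipc_buildid()-style formula seq * SEQ_MULTIPLIER + slot; that formula is not shown in this diff and is stated here only as an illustration.

#include <assert.h>
#include <stdio.h>

#define IPCMNI		32768
#define SEQ_MULTIPLIER	IPCMNI	/* as in ipc/util.h */

/* Assumed ipc_buildid()-style encoding: sequence number times the slot range. */
static int build_ipc_id(int slot, unsigned short seq)
{
	return seq * SEQ_MULTIPLIER + slot;
}

/* What ipc_rmid()/ipc_lock() recover from a user-visible id. */
static int ipc_slot(int id)
{
	return id % SEQ_MULTIPLIER;
}

/* What ipc_checkid()-style checks compare against kern_ipc_perm.seq. */
static int ipc_seq(int id)
{
	return id / SEQ_MULTIPLIER;
}

int main(void)
{
	int id = build_ipc_id(42, 3);

	assert(ipc_slot(id) == 42);	/* index into the IDR */
	assert(ipc_seq(id) == 3);	/* guards against reuse of a freed slot */
	printf("id=%d slot=%d seq=%d\n", id, ipc_slot(id), ipc_seq(id));
	return 0;
}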