aboutsummaryrefslogtreecommitdiffstats
path: root/ipc/util.c
diff options
context:
space:
mode:
Diffstat (limited to 'ipc/util.c')
-rw-r--r--ipc/util.c506
1 files changed, 303 insertions, 203 deletions
diff --git a/ipc/util.c b/ipc/util.c
index 44e5135aee47..1aa0ebf71bac 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -32,6 +32,7 @@
32#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
33#include <linux/audit.h> 33#include <linux/audit.h>
34#include <linux/nsproxy.h> 34#include <linux/nsproxy.h>
35#include <linux/rwsem.h>
35 36
36#include <asm/unistd.h> 37#include <asm/unistd.h>
37 38
@@ -129,23 +130,16 @@ __initcall(ipc_init);
129/** 130/**
130 * ipc_init_ids - initialise IPC identifiers 131 * ipc_init_ids - initialise IPC identifiers
131 * @ids: Identifier set 132 * @ids: Identifier set
132 * @size: Number of identifiers
133 * 133 *
134 * Given a size for the ipc identifier range (limited below IPCMNI) 134 * Set up the sequence range to use for the ipc identifier range (limited
135 * set up the sequence range to use then allocate and initialise the 135 * below IPCMNI) then initialise the ids idr.
136 * array itself.
137 */ 136 */
138 137
139void ipc_init_ids(struct ipc_ids* ids, int size) 138void ipc_init_ids(struct ipc_ids *ids)
140{ 139{
141 int i; 140 init_rwsem(&ids->rw_mutex);
142 141
143 mutex_init(&ids->mutex);
144
145 if(size > IPCMNI)
146 size = IPCMNI;
147 ids->in_use = 0; 142 ids->in_use = 0;
148 ids->max_id = -1;
149 ids->seq = 0; 143 ids->seq = 0;
150 { 144 {
151 int seq_limit = INT_MAX/SEQ_MULTIPLIER; 145 int seq_limit = INT_MAX/SEQ_MULTIPLIER;
@@ -155,17 +149,7 @@ void ipc_init_ids(struct ipc_ids* ids, int size)
155 ids->seq_max = seq_limit; 149 ids->seq_max = seq_limit;
156 } 150 }
157 151
158 ids->entries = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*size + 152 idr_init(&ids->ipcs_idr);
159 sizeof(struct ipc_id_ary));
160
161 if(ids->entries == NULL) {
162 printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
163 size = 0;
164 ids->entries = &ids->nullentry;
165 }
166 ids->entries->size = size;
167 for(i=0;i<size;i++)
168 ids->entries->p[i] = NULL;
169} 153}
170 154
171#ifdef CONFIG_PROC_FS 155#ifdef CONFIG_PROC_FS
@@ -208,99 +192,96 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
208 * @ids: Identifier set 192 * @ids: Identifier set
209 * @key: The key to find 193 * @key: The key to find
210 * 194 *
211 * Requires ipc_ids.mutex locked. 195 * Requires ipc_ids.rw_mutex locked.
212 * Returns the identifier if found or -1 if not. 196 * Returns the LOCKED pointer to the ipc structure if found or NULL
197 * if not.
198 * If key is found ipc points to the owning ipc structure
213 */ 199 */
214 200
215int ipc_findkey(struct ipc_ids* ids, key_t key) 201static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
216{ 202{
217 int id; 203 struct kern_ipc_perm *ipc;
218 struct kern_ipc_perm* p; 204 int next_id;
219 int max_id = ids->max_id; 205 int total;
220 206
221 /* 207 for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
222 * rcu_dereference() is not needed here 208 ipc = idr_find(&ids->ipcs_idr, next_id);
223 * since ipc_ids.mutex is held 209
224 */ 210 if (ipc == NULL)
225 for (id = 0; id <= max_id; id++) { 211 continue;
226 p = ids->entries->p[id]; 212
227 if(p==NULL) 213 if (ipc->key != key) {
214 total++;
228 continue; 215 continue;
229 if (key == p->key) 216 }
230 return id; 217
218 ipc_lock_by_ptr(ipc);
219 return ipc;
231 } 220 }
232 return -1; 221
222 return NULL;
233} 223}
234 224
235/* 225/**
236 * Requires ipc_ids.mutex locked 226 * ipc_get_maxid - get the last assigned id
227 * @ids: IPC identifier set
228 *
229 * Called with ipc_ids.rw_mutex held.
237 */ 230 */
238static int grow_ary(struct ipc_ids* ids, int newsize)
239{
240 struct ipc_id_ary* new;
241 struct ipc_id_ary* old;
242 int i;
243 int size = ids->entries->size;
244
245 if(newsize > IPCMNI)
246 newsize = IPCMNI;
247 if(newsize <= size)
248 return newsize;
249
250 new = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*newsize +
251 sizeof(struct ipc_id_ary));
252 if(new == NULL)
253 return size;
254 new->size = newsize;
255 memcpy(new->p, ids->entries->p, sizeof(struct kern_ipc_perm *)*size);
256 for(i=size;i<newsize;i++) {
257 new->p[i] = NULL;
258 }
259 old = ids->entries;
260 231
261 /* 232int ipc_get_maxid(struct ipc_ids *ids)
262 * Use rcu_assign_pointer() to make sure the memcpyed contents 233{
263 * of the new array are visible before the new array becomes visible. 234 struct kern_ipc_perm *ipc;
264 */ 235 int max_id = -1;
265 rcu_assign_pointer(ids->entries, new); 236 int total, id;
237
238 if (ids->in_use == 0)
239 return -1;
266 240
267 __ipc_fini_ids(ids, old); 241 if (ids->in_use == IPCMNI)
268 return newsize; 242 return IPCMNI - 1;
243
244 /* Look for the last assigned id */
245 total = 0;
246 for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
247 ipc = idr_find(&ids->ipcs_idr, id);
248 if (ipc != NULL) {
249 max_id = id;
250 total++;
251 }
252 }
253 return max_id;
269} 254}
270 255
271/** 256/**
272 * ipc_addid - add an IPC identifier 257 * ipc_addid - add an IPC identifier
273 * @ids: IPC identifier set 258 * @ids: IPC identifier set
274 * @new: new IPC permission set 259 * @new: new IPC permission set
275 * @size: new size limit for the id array 260 * @size: limit for the number of used ids
276 * 261 *
277 * Add an entry 'new' to the IPC arrays. The permissions object is 262 * Add an entry 'new' to the IPC ids idr. The permissions object is
278 * initialised and the first free entry is set up and the id assigned 263 * initialised and the first free entry is set up and the id assigned
279 * is returned. The list is returned in a locked state on success. 264 * is returned. The 'new' entry is returned in a locked state on success.
280 * On failure the list is not locked and -1 is returned. 265 * On failure the entry is not locked and a negative err-code is returned.
281 * 266 *
282 * Called with ipc_ids.mutex held. 267 * Called with ipc_ids.rw_mutex held as a writer.
283 */ 268 */
284 269
285int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) 270int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
286{ 271{
287 int id; 272 int id, err;
288 273
289 size = grow_ary(ids,size); 274 if (size > IPCMNI)
275 size = IPCMNI;
276
277 if (ids->in_use >= size)
278 return -ENOSPC;
279
280 err = idr_get_new(&ids->ipcs_idr, new, &id);
281 if (err)
282 return err;
290 283
291 /*
292 * rcu_dereference()() is not needed here since
293 * ipc_ids.mutex is held
294 */
295 for (id = 0; id < size; id++) {
296 if(ids->entries->p[id] == NULL)
297 goto found;
298 }
299 return -1;
300found:
301 ids->in_use++; 284 ids->in_use++;
302 if (id > ids->max_id)
303 ids->max_id = id;
304 285
305 new->cuid = new->uid = current->euid; 286 new->cuid = new->uid = current->euid;
306 new->gid = new->cgid = current->egid; 287 new->gid = new->cgid = current->egid;
@@ -313,48 +294,153 @@ found:
313 new->deleted = 0; 294 new->deleted = 0;
314 rcu_read_lock(); 295 rcu_read_lock();
315 spin_lock(&new->lock); 296 spin_lock(&new->lock);
316 ids->entries->p[id] = new;
317 return id; 297 return id;
318} 298}
319 299
320/** 300/**
301 * ipcget_new - create a new ipc object
302 * @ns: namespace
302 * @ids: IPC identifier set
304 * @ops: the actual creation routine to call
305 * @params: its parameters
306 *
307 * This routine is called by sys_msgget, sys_semget() and sys_shmget()
308 * when the key is IPC_PRIVATE.
309 */
310int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
311 struct ipc_ops *ops, struct ipc_params *params)
312{
313 int err;
314retry:
315 err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
316
317 if (!err)
318 return -ENOMEM;
319
320 down_write(&ids->rw_mutex);
321 err = ops->getnew(ns, params);
322 up_write(&ids->rw_mutex);
323
324 if (err == -EAGAIN)
325 goto retry;
326
327 return err;
328}
329
330/**
331 * ipc_check_perms - check security and permissions for an IPC
332 * @ipcp: ipc permission set
333 * @ops: the actual security routine to call
334 * @params: its parameters
335 *
336 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
337 * when the key is not IPC_PRIVATE and that key already exists in the
338 * ids IDR.
339 *
340 * On success, the IPC id is returned.
341 *
342 * It is called with ipc_ids.rw_mutex and ipcp->lock held.
343 */
344static int ipc_check_perms(struct kern_ipc_perm *ipcp, struct ipc_ops *ops,
345 struct ipc_params *params)
346{
347 int err;
348
349 if (ipcperms(ipcp, params->flg))
350 err = -EACCES;
351 else {
352 err = ops->associate(ipcp, params->flg);
353 if (!err)
354 err = ipcp->id;
355 }
356
357 return err;
358}
359
360/**
361 * ipcget_public - get an ipc object or create a new one
362 * @ns: namespace
363 * @ids: IPC identifier set
364 * @ops: the actual creation routine to call
365 * @params: its parameters
366 *
367 * This routine is called by sys_msgget, sys_semget() and sys_shmget()
368 * when the key is not IPC_PRIVATE.
369 * It adds a new entry if the key is not found and does some permission
370 * / security checks if the key is found.
371 *
372 * On success, the ipc id is returned.
373 */
374int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
375 struct ipc_ops *ops, struct ipc_params *params)
376{
377 struct kern_ipc_perm *ipcp;
378 int flg = params->flg;
379 int err;
380retry:
381 err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
382
383 /*
384 * Take the lock as a writer since we are potentially going to add
385 * a new entry + read locks are not "upgradable"
386 */
387 down_write(&ids->rw_mutex);
388 ipcp = ipc_findkey(ids, params->key);
389 if (ipcp == NULL) {
390 /* key not used */
391 if (!(flg & IPC_CREAT))
392 err = -ENOENT;
393 else if (!err)
394 err = -ENOMEM;
395 else
396 err = ops->getnew(ns, params);
397 } else {
398 /* ipc object has been locked by ipc_findkey() */
399
400 if (flg & IPC_CREAT && flg & IPC_EXCL)
401 err = -EEXIST;
402 else {
403 err = 0;
404 if (ops->more_checks)
405 err = ops->more_checks(ipcp, params);
406 if (!err)
407 /*
408 * ipc_check_perms returns the IPC id on
409 * success
410 */
411 err = ipc_check_perms(ipcp, ops, params);
412 }
413 ipc_unlock(ipcp);
414 }
415 up_write(&ids->rw_mutex);
416
417 if (err == -EAGAIN)
418 goto retry;
419
420 return err;
421}
422
423
424/**
321 * ipc_rmid - remove an IPC identifier 425 * ipc_rmid - remove an IPC identifier
322 * @ids: identifier set 426 * @ids: IPC identifier set
323 * @id: Identifier to remove 427 * @ipcp: ipc perm structure containing the identifier to remove
324 * 428 *
325 * The identifier must be valid, and in use. The kernel will panic if 429 * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
326 * fed an invalid identifier. The entry is removed and internal 430 * before this function is called, and remain locked on the exit.
327 * variables recomputed. The object associated with the identifier
328 * is returned.
329 * ipc_ids.mutex and the spinlock for this ID is held before this function
330 * is called, and remain locked on the exit.
331 */ 431 */
332 432
333struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id) 433void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
334{ 434{
335 struct kern_ipc_perm* p; 435 int lid = ipcid_to_idx(ipcp->id);
336 int lid = id % SEQ_MULTIPLIER; 436
337 BUG_ON(lid >= ids->entries->size); 437 idr_remove(&ids->ipcs_idr, lid);
338 438
339 /*
340 * do not need a rcu_dereference()() here to force ordering
341 * on Alpha, since the ipc_ids.mutex is held.
342 */
343 p = ids->entries->p[lid];
344 ids->entries->p[lid] = NULL;
345 BUG_ON(p==NULL);
346 ids->in_use--; 439 ids->in_use--;
347 440
348 if (lid == ids->max_id) { 441 ipcp->deleted = 1;
349 do { 442
350 lid--; 443 return;
351 if(lid == -1)
352 break;
353 } while (ids->entries->p[lid] == NULL);
354 ids->max_id = lid;
355 }
356 p->deleted = 1;
357 return p;
358} 444}
359 445
360/** 446/**
@@ -491,10 +577,12 @@ static void ipc_do_vfree(struct work_struct *work)
491 */ 577 */
492static void ipc_schedule_free(struct rcu_head *head) 578static void ipc_schedule_free(struct rcu_head *head)
493{ 579{
494 struct ipc_rcu_grace *grace = 580 struct ipc_rcu_grace *grace;
495 container_of(head, struct ipc_rcu_grace, rcu); 581 struct ipc_rcu_sched *sched;
496 struct ipc_rcu_sched *sched = 582
497 container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]); 583 grace = container_of(head, struct ipc_rcu_grace, rcu);
584 sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
585 data[0]);
498 586
499 INIT_WORK(&sched->work, ipc_do_vfree); 587 INIT_WORK(&sched->work, ipc_do_vfree);
500 schedule_work(&sched->work); 588 schedule_work(&sched->work);
@@ -583,7 +671,7 @@ void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
583} 671}
584 672
585/** 673/**
586 * ipc64_perm_to_ipc_perm - convert old ipc permissions to new 674 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
587 * @in: new style IPC permissions 675 * @in: new style IPC permissions
588 * @out: old style IPC permissions 676 * @out: old style IPC permissions
589 * 677 *
@@ -602,44 +690,37 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
602 out->seq = in->seq; 690 out->seq = in->seq;
603} 691}
604 692
605/* 693/**
606 * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get() 694 * ipc_lock - Lock an ipc structure without rw_mutex held
607 * is called with shm_ids.mutex locked. Since grow_ary() is also called with 695 * @ids: IPC identifier set
608 * shm_ids.mutex down(for Shared Memory), there is no need to add read 696 * @id: ipc id to look for
609 * barriers here to guarantee the writes in grow_ary() are seen in order 697 *
610 * here (for Alpha). 698 * Look for an id in the ipc ids idr and lock the associated ipc object.
611 * 699 *
612 * However ipc_get() itself does not necessary require ipc_ids.mutex down. So 700 * The ipc object is locked on exit.
613 * if in the future ipc_get() is used by other places without ipc_ids.mutex 701 *
614 * down, then ipc_get() needs read memory barriers as ipc_lock() does. 702 * This is the routine that should be called when the rw_mutex is not already
703 * held, i.e. idr tree not protected: it protects the idr tree in read mode
704 * during the idr_find().
615 */ 705 */
616struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
617{
618 struct kern_ipc_perm* out;
619 int lid = id % SEQ_MULTIPLIER;
620 if(lid >= ids->entries->size)
621 return NULL;
622 out = ids->entries->p[lid];
623 return out;
624}
625 706
626struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id) 707struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
627{ 708{
628 struct kern_ipc_perm* out; 709 struct kern_ipc_perm *out;
629 int lid = id % SEQ_MULTIPLIER; 710 int lid = ipcid_to_idx(id);
630 struct ipc_id_ary* entries; 711
712 down_read(&ids->rw_mutex);
631 713
632 rcu_read_lock(); 714 rcu_read_lock();
633 entries = rcu_dereference(ids->entries); 715 out = idr_find(&ids->ipcs_idr, lid);
634 if(lid >= entries->size) { 716 if (out == NULL) {
635 rcu_read_unlock();
636 return NULL;
637 }
638 out = entries->p[lid];
639 if(out == NULL) {
640 rcu_read_unlock(); 717 rcu_read_unlock();
641 return NULL; 718 up_read(&ids->rw_mutex);
719 return ERR_PTR(-EINVAL);
642 } 720 }
721
722 up_read(&ids->rw_mutex);
723
643 spin_lock(&out->lock); 724 spin_lock(&out->lock);
644 725
645 /* ipc_rmid() may have already freed the ID while ipc_lock 726 /* ipc_rmid() may have already freed the ID while ipc_lock
@@ -648,33 +729,44 @@ struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
648 if (out->deleted) { 729 if (out->deleted) {
649 spin_unlock(&out->lock); 730 spin_unlock(&out->lock);
650 rcu_read_unlock(); 731 rcu_read_unlock();
651 return NULL; 732 return ERR_PTR(-EINVAL);
652 } 733 }
734
653 return out; 735 return out;
654} 736}
655 737
656void ipc_lock_by_ptr(struct kern_ipc_perm *perm) 738/**
657{ 739 * ipc_lock_down - Lock an ipc structure with rw_sem held
658 rcu_read_lock(); 740 * @ids: IPC identifier set
659 spin_lock(&perm->lock); 741 * @id: ipc id to look for
660} 742 *
743 * Look for an id in the ipc ids idr and lock the associated ipc object.
744 *
745 * The ipc object is locked on exit.
746 *
747 * This is the routine that should be called when the rw_mutex is already
748 * held, i.e. idr tree protected.
749 */
661 750
662void ipc_unlock(struct kern_ipc_perm* perm) 751struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *ids, int id)
663{ 752{
664 spin_unlock(&perm->lock); 753 struct kern_ipc_perm *out;
665 rcu_read_unlock(); 754 int lid = ipcid_to_idx(id);
666}
667 755
668int ipc_buildid(struct ipc_ids* ids, int id, int seq) 756 rcu_read_lock();
669{ 757 out = idr_find(&ids->ipcs_idr, lid);
670 return SEQ_MULTIPLIER*seq + id; 758 if (out == NULL) {
671} 759 rcu_read_unlock();
760 return ERR_PTR(-EINVAL);
761 }
672 762
673int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid) 763 spin_lock(&out->lock);
674{ 764
675 if(uid/SEQ_MULTIPLIER != ipcp->seq) 765 /*
676 return 1; 766 * No need to verify that the structure is still valid since the
677 return 0; 767 * rw_mutex is held.
768 */
769 return out;
678} 770}
679 771
680#ifdef __ARCH_WANT_IPC_PARSE_VERSION 772#ifdef __ARCH_WANT_IPC_PARSE_VERSION
@@ -707,27 +799,30 @@ struct ipc_proc_iter {
707 struct ipc_proc_iface *iface; 799 struct ipc_proc_iface *iface;
708}; 800};
709 801
710static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos) 802/*
803 * This routine locks the ipc structure found at least at position pos.
804 */
805struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
806 loff_t *new_pos)
711{ 807{
712 struct ipc_proc_iter *iter = s->private; 808 struct kern_ipc_perm *ipc;
713 struct ipc_proc_iface *iface = iter->iface; 809 int total, id;
714 struct kern_ipc_perm *ipc = it;
715 loff_t p;
716 struct ipc_ids *ids;
717 810
718 ids = iter->ns->ids[iface->ids]; 811 total = 0;
812 for (id = 0; id < pos && total < ids->in_use; id++) {
813 ipc = idr_find(&ids->ipcs_idr, id);
814 if (ipc != NULL)
815 total++;
816 }
719 817
720 /* If we had an ipc id locked before, unlock it */ 818 if (total >= ids->in_use)
721 if (ipc && ipc != SEQ_START_TOKEN) 819 return NULL;
722 ipc_unlock(ipc);
723 820
724 /* 821 for ( ; pos < IPCMNI; pos++) {
725 * p = *pos - 1 (because id 0 starts at position 1) 822 ipc = idr_find(&ids->ipcs_idr, pos);
726 * + 1 (because we increment the position by one) 823 if (ipc != NULL) {
727 */ 824 *new_pos = pos + 1;
728 for (p = *pos; p <= ids->max_id; p++) { 825 ipc_lock_by_ptr(ipc);
729 if ((ipc = ipc_lock(ids, p)) != NULL) {
730 *pos = p + 1;
731 return ipc; 826 return ipc;
732 } 827 }
733 } 828 }
@@ -736,16 +831,27 @@ static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
736 return NULL; 831 return NULL;
737} 832}
738 833
834static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
835{
836 struct ipc_proc_iter *iter = s->private;
837 struct ipc_proc_iface *iface = iter->iface;
838 struct kern_ipc_perm *ipc = it;
839
840 /* If we had an ipc id locked before, unlock it */
841 if (ipc && ipc != SEQ_START_TOKEN)
842 ipc_unlock(ipc);
843
844 return sysvipc_find_ipc(iter->ns->ids[iface->ids], *pos, pos);
845}
846
739/* 847/*
740 * File positions: pos 0 -> header, pos n -> ipc id + 1. 848 * File positions: pos 0 -> header, pos n -> ipc id = n - 1.
741 * SeqFile iterator: iterator value locked shp or SEQ_TOKEN_START. 849 * SeqFile iterator: iterator value locked ipc pointer or SEQ_TOKEN_START.
742 */ 850 */
743static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) 851static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
744{ 852{
745 struct ipc_proc_iter *iter = s->private; 853 struct ipc_proc_iter *iter = s->private;
746 struct ipc_proc_iface *iface = iter->iface; 854 struct ipc_proc_iface *iface = iter->iface;
747 struct kern_ipc_perm *ipc;
748 loff_t p;
749 struct ipc_ids *ids; 855 struct ipc_ids *ids;
750 856
751 ids = iter->ns->ids[iface->ids]; 857 ids = iter->ns->ids[iface->ids];
@@ -754,7 +860,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
754 * Take the lock - this will be released by the corresponding 860 * Take the lock - this will be released by the corresponding
755 * call to stop(). 861 * call to stop().
756 */ 862 */
757 mutex_lock(&ids->mutex); 863 down_read(&ids->rw_mutex);
758 864
759 /* pos < 0 is invalid */ 865 /* pos < 0 is invalid */
760 if (*pos < 0) 866 if (*pos < 0)
@@ -765,13 +871,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
765 return SEQ_START_TOKEN; 871 return SEQ_START_TOKEN;
766 872
767 /* Find the (pos-1)th ipc */ 873 /* Find the (pos-1)th ipc */
768 for (p = *pos - 1; p <= ids->max_id; p++) { 874 return sysvipc_find_ipc(ids, *pos - 1, pos);
769 if ((ipc = ipc_lock(ids, p)) != NULL) {
770 *pos = p + 1;
771 return ipc;
772 }
773 }
774 return NULL;
775} 875}
776 876
777static void sysvipc_proc_stop(struct seq_file *s, void *it) 877static void sysvipc_proc_stop(struct seq_file *s, void *it)
@@ -781,13 +881,13 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
781 struct ipc_proc_iface *iface = iter->iface; 881 struct ipc_proc_iface *iface = iter->iface;
782 struct ipc_ids *ids; 882 struct ipc_ids *ids;
783 883
784 /* If we had a locked segment, release it */ 884 /* If we had a locked structure, release it */
785 if (ipc && ipc != SEQ_START_TOKEN) 885 if (ipc && ipc != SEQ_START_TOKEN)
786 ipc_unlock(ipc); 886 ipc_unlock(ipc);
787 887
788 ids = iter->ns->ids[iface->ids]; 888 ids = iter->ns->ids[iface->ids];
789 /* Release the lock we took in start() */ 889 /* Release the lock we took in start() */
790 mutex_unlock(&ids->mutex); 890 up_read(&ids->rw_mutex);
791} 891}
792 892
793static int sysvipc_proc_show(struct seq_file *s, void *it) 893static int sysvipc_proc_show(struct seq_file *s, void *it)