 ipc/compat.c |  2
 ipc/mqueue.c |  4
 ipc/msg.c    | 18
 ipc/sem.c    | 34
 ipc/shm.c    | 30
 ipc/util.c   | 29
 ipc/util.h   |  4
 7 files changed, 65 insertions(+), 56 deletions(-)
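Every change below follows one mechanical pattern: the struct semaphore embedded in struct ipc_ids, which was only ever used as a sleeping lock (initialised to 1 and used in strict down()/up() pairs), becomes a struct mutex, and each down()/up() call becomes mutex_lock()/mutex_unlock(). A minimal before/after sketch of that conversion, using hypothetical names rather than lines taken from the patch:

#include <linux/mutex.h>
#include <asm/semaphore.h>

/* Before: a counting semaphore initialised to 1, used purely as a lock. */
static DECLARE_MUTEX(ids_sem);          /* despite the name, this declares a semaphore */

static void walk_ids_old(void)
{
        down(&ids_sem);
        /* ... look up or modify the ID array ... */
        up(&ids_sem);
}

/* After: a real mutex, which states the intent directly and gets the
 * stricter debugging checks of the mutex code. */
static DEFINE_MUTEX(ids_mutex);

static void walk_ids_new(void)
{
        mutex_lock(&ids_mutex);
        /* ... look up or modify the ID array ... */
        mutex_unlock(&ids_mutex);
}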
diff --git a/ipc/compat.c b/ipc/compat.c
index 1fe95f6659dd..a544dfbb082a 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -30,7 +30,7 @@
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
 
 #include "util.h"
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 85c52fd26bff..a3bb0c8201c7 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -25,6 +25,8 @@
 #include <linux/netlink.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
+#include <linux/mutex.h>
+
 #include <net/sock.h>
 #include "util.h"
 
@@ -760,7 +762,7 @@ out_unlock:
  * The receiver accepts the message and returns without grabbing the queue
  * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
  * are necessary. The same algorithm is used for sysv semaphores, see
- * ipc/sem.c fore more details.
+ * ipc/mutex.c fore more details.
  *
  * The same algorithm is used for senders.
  */
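The comment in the hunk above refers to the wakeup hand-off that mqueue shares with the SysV semaphore code in ipc/sem.c: the waker publishes the result, wakes the sleeper while the hand-off is still marked pending, and only then marks it ready, so the sleeper must spin across that short window instead of trusting the result immediately. A simplified sketch of the pattern, with hypothetical types and helpers (not the functions in mqueue.c):

#include <linux/sched.h>

#define STATE_PENDING   1
#define STATE_READY     2

struct handoff {
        void *payload;
        int state;
};

/* Waker side: runs under the queue spinlock. */
static void hand_over(struct handoff *h, void *msg, struct task_struct *t)
{
        h->payload = msg;
        h->state = STATE_PENDING;       /* sleeper may become runnable now */
        wake_up_process(t);
        smp_wmb();                      /* publish payload before READY */
        h->state = STATE_READY;
}

/* Sleeper side: runs after waking up, without taking the spinlock. */
static void *collect(struct handoff *h)
{
        while (h->state == STATE_PENDING)
                cpu_relax();            /* waker is between wake-up and READY */
        smp_rmb();                      /* pairs with the smp_wmb() above */
        return h->payload;
}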
diff --git a/ipc/msg.c b/ipc/msg.c
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -28,6 +28,8 @@
 #include <linux/syscalls.h>
 #include <linux/audit.h>
 #include <linux/seq_file.h>
+#include <linux/mutex.h>
+
 #include <asm/current.h>
 #include <asm/uaccess.h>
 #include "util.h"
@@ -179,8 +181,8 @@ static void expunge_all(struct msg_queue* msq, int res)
  * removes the message queue from message queue ID
  * array, and cleans up all the messages associated with this queue.
  *
- * msg_ids.sem and the spinlock for this message queue is hold
- * before freeque() is called. msg_ids.sem remains locked on exit.
+ * msg_ids.mutex and the spinlock for this message queue is hold
+ * before freeque() is called. msg_ids.mutex remains locked on exit.
  */
 static void freeque (struct msg_queue *msq, int id)
 {
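The updated comment states the calling convention: the caller owns both msg_ids.mutex and the per-queue spinlock, freeque() consumes the spinlock (the queue no longer exists afterwards), and the mutex is still held on return. Schematically, an IPC_RMID-style caller looks like this (sketch only, checks and error handling trimmed):

        mutex_lock(&msg_ids.mutex);
        msq = msg_lock(msqid);          /* takes the per-queue spinlock */
        if (msq == NULL)
                goto out_up;
        /* ... id and permission checks ... */
        freeque(msq, msqid);            /* unlocks and frees the queue */
out_up:
        mutex_unlock(&msg_ids.mutex);   /* the mutex is still ours to release */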
@@ -208,7 +210,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg)
         int id, ret = -EPERM;
         struct msg_queue *msq;
 
-        down(&msg_ids.sem);
+        mutex_lock(&msg_ids.mutex);
         if (key == IPC_PRIVATE)
                 ret = newque(key, msgflg);
         else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
@@ -231,7 +233,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg)
                 }
                 msg_unlock(msq);
         }
-        up(&msg_ids.sem);
+        mutex_unlock(&msg_ids.mutex);
         return ret;
 }
 
@@ -361,7 +363,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
                 msginfo.msgmnb = msg_ctlmnb;
                 msginfo.msgssz = MSGSSZ;
                 msginfo.msgseg = MSGSEG;
-                down(&msg_ids.sem);
+                mutex_lock(&msg_ids.mutex);
                 if (cmd == MSG_INFO) {
                         msginfo.msgpool = msg_ids.in_use;
                         msginfo.msgmap = atomic_read(&msg_hdrs);
@@ -372,7 +374,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
                         msginfo.msgtql = MSGTQL;
                 }
                 max_id = msg_ids.max_id;
-                up(&msg_ids.sem);
+                mutex_unlock(&msg_ids.mutex);
                 if (copy_to_user (buf, &msginfo, sizeof(struct msginfo)))
                         return -EFAULT;
                 return (max_id < 0) ? 0: max_id;
@@ -435,7 +437,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
                 return -EINVAL;
         }
 
-        down(&msg_ids.sem);
+        mutex_lock(&msg_ids.mutex);
         msq = msg_lock(msqid);
         err=-EINVAL;
         if (msq == NULL)
@@ -489,7 +491,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
         }
         err = 0;
 out_up:
-        up(&msg_ids.sem);
+        mutex_unlock(&msg_ids.mutex);
         return err;
 out_unlock_up:
         msg_unlock(msq);
diff --git a/ipc/sem.c b/ipc/sem.c
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -75,6 +75,8 @@
 #include <linux/audit.h>
 #include <linux/capability.h>
 #include <linux/seq_file.h>
+#include <linux/mutex.h>
+
 #include <asm/uaccess.h>
 #include "util.h"
 
@@ -139,7 +141,7 @@ void __init sem_init (void)
  *   * if it's IN_WAKEUP, then it must wait until the value changes
  *   * if it's not -EINTR, then the operation was completed by
  *     update_queue. semtimedop can return queue.status without
- *     performing any operation on the semaphore array.
+ *     performing any operation on the sem array.
  *   * otherwise it must acquire the spinlock and check what's up.
  *
  * The two-stage algorithm is necessary to protect against the following
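The list in the comment above is the sleeper's side of the two-stage wakeup: queue.status is first set to IN_WAKEUP by the waker and only afterwards overwritten with the final result, so the sleeper spins across the IN_WAKEUP window, returns any completed status directly, and falls back to the spinlock only for the -EINTR case. Roughly (hypothetical helper, simplified from what sys_semtimedop() does):

static int fetch_wakeup_status(struct sem_queue *q)
{
        int error = q->status;

        /* Stage 1: the waker has marked us but not yet written the result. */
        while (error == IN_WAKEUP) {
                cpu_relax();
                error = q->status;
        }

        /* Stage 2: anything but -EINTR means update_queue() already
         * performed the operation on our behalf. */
        if (error != -EINTR)
                return error;

        /* Otherwise the caller must take the semaphore array's spinlock
         * and decide between signal, timeout and a racing wakeup. */
        return -EINTR;
}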
@@ -214,7 +216,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
 
         if (nsems < 0 || nsems > sc_semmsl)
                 return -EINVAL;
-        down(&sem_ids.sem);
+        mutex_lock(&sem_ids.mutex);
 
         if (key == IPC_PRIVATE) {
                 err = newary(key, nsems, semflg);
@@ -242,7 +244,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
                 sem_unlock(sma);
         }
 
-        up(&sem_ids.sem);
+        mutex_unlock(&sem_ids.mutex);
         return err;
 }
 
@@ -437,8 +439,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
         return semzcnt;
 }
 
-/* Free a semaphore set. freeary() is called with sem_ids.sem down and
- * the spinlock for this semaphore set hold. sem_ids.sem remains locked
+/* Free a semaphore set. freeary() is called with sem_ids.mutex locked and
+ * the spinlock for this semaphore set hold. sem_ids.mutex remains locked
  * on exit.
  */
 static void freeary (struct sem_array *sma, int id)
@@ -525,7 +527,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semun arg)
                 seminfo.semmnu = SEMMNU;
                 seminfo.semmap = SEMMAP;
                 seminfo.semume = SEMUME;
-                down(&sem_ids.sem);
+                mutex_lock(&sem_ids.mutex);
                 if (cmd == SEM_INFO) {
                         seminfo.semusz = sem_ids.in_use;
                         seminfo.semaem = used_sems;
@@ -534,7 +536,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semun arg)
                         seminfo.semaem = SEMAEM;
                 }
                 max_id = sem_ids.max_id;
-                up(&sem_ids.sem);
+                mutex_unlock(&sem_ids.mutex);
                 if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
                         return -EFAULT;
                 return (max_id < 0) ? 0: max_id;
@@ -885,9 +887,9 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
                 return err;
         case IPC_RMID:
         case IPC_SET:
-                down(&sem_ids.sem);
+                mutex_lock(&sem_ids.mutex);
                 err = semctl_down(semid,semnum,cmd,version,arg);
-                up(&sem_ids.sem);
+                mutex_unlock(&sem_ids.mutex);
                 return err;
         default:
                 return -EINVAL;
@@ -1299,9 +1301,9 @@ found:
         /* perform adjustments registered in u */
         nsems = sma->sem_nsems;
         for (i = 0; i < nsems; i++) {
-                struct sem * sem = &sma->sem_base[i];
+                struct sem * semaphore = &sma->sem_base[i];
                 if (u->semadj[i]) {
-                        sem->semval += u->semadj[i];
+                        semaphore->semval += u->semadj[i];
                         /*
                          * Range checks of the new semaphore value,
                          * not defined by sus:
@@ -1315,11 +1317,11 @@ found:
                          *
                          *      Manfred <manfred@colorfullife.com>
                          */
-                        if (sem->semval < 0)
-                                sem->semval = 0;
-                        if (sem->semval > SEMVMX)
-                                sem->semval = SEMVMX;
-                        sem->sempid = current->tgid;
+                        if (semaphore->semval < 0)
+                                semaphore->semval = 0;
+                        if (semaphore->semval > SEMVMX)
+                                semaphore->semval = SEMVMX;
+                        semaphore->sempid = current->tgid;
                 }
         }
         sma->sem_otime = get_seconds();
diff --git a/ipc/shm.c b/ipc/shm.c
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -30,6 +30,7 @@
 #include <linux/capability.h>
 #include <linux/ptrace.h>
 #include <linux/seq_file.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
 
@@ -109,7 +110,7 @@ static void shm_open (struct vm_area_struct *shmd)
  *
  * @shp: struct to free
  *
- * It has to be called with shp and shm_ids.sem locked,
+ * It has to be called with shp and shm_ids.mutex locked,
  * but returns with shp unlocked and freed.
  */
 static void shm_destroy (struct shmid_kernel *shp)
@@ -139,7 +140,7 @@ static void shm_close (struct vm_area_struct *shmd)
         int id = file->f_dentry->d_inode->i_ino;
         struct shmid_kernel *shp;
 
-        down (&shm_ids.sem);
+        mutex_lock(&shm_ids.mutex);
         /* remove from the list of attaches of the shm segment */
         if(!(shp = shm_lock(id)))
                 BUG();
@@ -151,7 +152,7 @@ static void shm_close (struct vm_area_struct *shmd)
                 shm_destroy (shp);
         else
                 shm_unlock(shp);
-        up (&shm_ids.sem);
+        mutex_unlock(&shm_ids.mutex);
 }
 
 static int shm_mmap(struct file * file, struct vm_area_struct * vma)
@@ -270,7 +271,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
         struct shmid_kernel *shp;
         int err, id = 0;
 
-        down(&shm_ids.sem);
+        mutex_lock(&shm_ids.mutex);
         if (key == IPC_PRIVATE) {
                 err = newseg(key, shmflg, size);
         } else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
@@ -296,7 +297,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
                 }
                 shm_unlock(shp);
         }
-        up(&shm_ids.sem);
+        mutex_unlock(&shm_ids.mutex);
 
         return err;
 }
@@ -467,14 +468,14 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
                         return err;
 
                 memset(&shm_info,0,sizeof(shm_info));
-                down(&shm_ids.sem);
+                mutex_lock(&shm_ids.mutex);
                 shm_info.used_ids = shm_ids.in_use;
                 shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
                 shm_info.shm_tot = shm_tot;
                 shm_info.swap_attempts = 0;
                 shm_info.swap_successes = 0;
                 err = shm_ids.max_id;
-                up(&shm_ids.sem);
+                mutex_unlock(&shm_ids.mutex);
                 if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
                         err = -EFAULT;
                         goto out;
@@ -583,7 +584,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
                  * Instead we set a destroyed flag, and then blow
                  * the name away when the usage hits zero.
                  */
-                down(&shm_ids.sem);
+                mutex_lock(&shm_ids.mutex);
                 shp = shm_lock(shmid);
                 err = -EINVAL;
                 if (shp == NULL)
@@ -610,7 +611,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
                         shm_unlock(shp);
                 } else
                         shm_destroy (shp);
-                up(&shm_ids.sem);
+                mutex_unlock(&shm_ids.mutex);
                 goto out;
         }
 
@@ -620,12 +621,13 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
                         err = -EFAULT;
                         goto out;
                 }
-                down(&shm_ids.sem);
+                mutex_lock(&shm_ids.mutex);
                 shp = shm_lock(shmid);
                 err=-EINVAL;
                 if(shp==NULL)
                         goto out_up;
-                if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode, &(shp->shm_perm))))
+                if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid,
+                                        setbuf.mode, &(shp->shm_perm))))
                         goto out_unlock_up;
                 err = shm_checkid(shp,shmid);
                 if(err)
@@ -658,7 +660,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
 out_unlock_up:
         shm_unlock(shp);
 out_up:
-        up(&shm_ids.sem);
+        mutex_unlock(&shm_ids.mutex);
         goto out;
 out_unlock:
         shm_unlock(shp);
@@ -771,7 +773,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
 invalid:
         up_write(&current->mm->mmap_sem);
 
-        down (&shm_ids.sem);
+        mutex_lock(&shm_ids.mutex);
         if(!(shp = shm_lock(shmid)))
                 BUG();
         shp->shm_nattch--;
@@ -780,7 +782,7 @@ invalid:
                 shm_destroy (shp);
         else
                 shm_unlock(shp);
-        up (&shm_ids.sem);
+        mutex_unlock(&shm_ids.mutex);
 
         *raddr = (unsigned long) user_addr;
         err = 0;
diff --git a/ipc/util.c b/ipc/util.c
index 862621980b01..23151ef32590 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -68,7 +68,8 @@ __initcall(ipc_init);
 void __init ipc_init_ids(struct ipc_ids* ids, int size)
 {
         int i;
-        sema_init(&ids->sem,1);
+
+        mutex_init(&ids->mutex);
 
         if(size > IPCMNI)
                 size = IPCMNI;
@@ -138,7 +139,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
  * @ids: Identifier set
  * @key: The key to find
  *
- *      Requires ipc_ids.sem locked.
+ *      Requires ipc_ids.mutex locked.
  *      Returns the identifier if found or -1 if not.
  */
 
@@ -150,7 +151,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key)
 
         /*
          * rcu_dereference() is not needed here
-         * since ipc_ids.sem is held
+         * since ipc_ids.mutex is held
          */
         for (id = 0; id <= max_id; id++) {
                 p = ids->entries->p[id];
@@ -163,7 +164,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key)
 }
 
 /*
- * Requires ipc_ids.sem locked
+ * Requires ipc_ids.mutex locked
  */
 static int grow_ary(struct ipc_ids* ids, int newsize)
 {
@@ -210,7 +211,7 @@ static int grow_ary(struct ipc_ids* ids, int newsize)
  * is returned. The list is returned in a locked state on success.
  * On failure the list is not locked and -1 is returned.
  *
- * Called with ipc_ids.sem held.
+ * Called with ipc_ids.mutex held.
  */
 
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
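The contract spelled out above is what the subsystems' creation paths rely on: the caller already holds the subsystem mutex, and on success ipc_addid() additionally leaves the new object's spinlock held. A schematic caller, loosely following the shape of newque() in ipc/msg.c (sketch only, cleanup on failure trimmed):

        /* the caller, e.g. sys_msgget(), already holds msg_ids.mutex */
        id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni);
        if (id == -1)
                return -ENOSPC;         /* failure: nothing extra is locked */

        /* success: the entry is visible and msq's spinlock is held */
        /* ... finish initialising the queue ... */
        msg_unlock(msq);                /* drop the spinlock; the caller drops the mutex */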
@@ -221,7 +222,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 
         /*
          * rcu_dereference()() is not needed here since
-         * ipc_ids.sem is held
+         * ipc_ids.mutex is held
          */
         for (id = 0; id < size; id++) {
                 if(ids->entries->p[id] == NULL)
@@ -257,7 +258,7 @@ found:
  * fed an invalid identifier. The entry is removed and internal
  * variables recomputed. The object associated with the identifier
  * is returned.
- * ipc_ids.sem and the spinlock for this ID is hold before this function
+ * ipc_ids.mutex and the spinlock for this ID is hold before this function
  * is called, and remain locked on the exit.
  */
 
@@ -270,7 +271,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
 
         /*
          * do not need a rcu_dereference()() here to force ordering
-         * on Alpha, since the ipc_ids.sem is held.
+         * on Alpha, since the ipc_ids.mutex is held.
          */
         p = ids->entries->p[lid];
         ids->entries->p[lid] = NULL;
@@ -530,13 +531,13 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
 
 /*
  * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
- * is called with shm_ids.sem locked. Since grow_ary() is also called with
- * shm_ids.sem down(for Shared Memory), there is no need to add read
+ * is called with shm_ids.mutex locked. Since grow_ary() is also called with
+ * shm_ids.mutex down(for Shared Memory), there is no need to add read
  * barriers here to gurantee the writes in grow_ary() are seen in order
  * here (for Alpha).
  *
- * However ipc_get() itself does not necessary require ipc_ids.sem down. So
- * if in the future ipc_get() is used by other places without ipc_ids.sem
+ * However ipc_get() itself does not necessary require ipc_ids.mutex down. So
+ * if in the future ipc_get() is used by other places without ipc_ids.mutex
  * down, then ipc_get() needs read memery barriers as ipc_lock() does.
  */
 struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
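The last paragraph of that comment is a forward-looking note: a caller of ipc_get() that did not hold ipc_ids.mutex would need the same ordering ipc_lock() uses, i.e. read the entries pointer with a dependency/read barrier so that, on Alpha, the array contents seen are at least as new as the pointer. A hypothetical barrier-safe variant (not part of the patch) might look like:

struct kern_ipc_perm *ipc_get_nolock(struct ipc_ids *ids, int id)
{
        struct ipc_id_ary *entries;
        struct kern_ipc_perm *out = NULL;
        int lid = id % SEQ_MULTIPLIER;

        rcu_read_lock();
        /* rcu_dereference() orders the array reads after the pointer read */
        entries = rcu_dereference(ids->entries);
        if (lid < entries->size)
                out = entries->p[lid];
        rcu_read_unlock();

        return out;             /* caller still needs the per-object lock */
}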
@@ -667,7 +668,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
          * Take the lock - this will be released by the corresponding
          * call to stop().
          */
-        down(&iface->ids->sem);
+        mutex_lock(&iface->ids->mutex);
 
         /* pos < 0 is invalid */
         if (*pos < 0)
@@ -697,7 +698,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
                 ipc_unlock(ipc);
 
         /* Release the lock we took in start() */
-        up(&iface->ids->sem);
+        mutex_unlock(&iface->ids->mutex);
 }
 
 static int sysvipc_proc_show(struct seq_file *s, void *it)
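Holding the ids mutex from ->start() to ->stop() is safe because the seq_file core pairs every successful start() with exactly one stop() on the same iteration, even when show() or next() bail out early. The pairing is wired up through a seq_operations table roughly like the following (sketch; the real table is registered for each /proc/sysvipc file by ipc_init_proc_interface()):

static struct seq_operations sysvipc_proc_seqops = {
        .start = sysvipc_proc_start,    /* mutex_lock(&iface->ids->mutex)   */
        .next  = sysvipc_proc_next,
        .show  = sysvipc_proc_show,
        .stop  = sysvipc_proc_stop,     /* mutex_unlock(&iface->ids->mutex) */
};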
diff --git a/ipc/util.h b/ipc/util.h
index efaff3ee7de7..0181553d31d8 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -25,7 +25,7 @@ struct ipc_ids {
         int max_id;
         unsigned short seq;
         unsigned short seq_max;
-        struct semaphore sem;
+        struct mutex mutex;
         struct ipc_id_ary nullentry;
         struct ipc_id_ary* entries;
 };
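struct ipc_ids is instantiated once per SysV IPC subsystem (msg_ids, sem_ids and shm_ids), and the new mutex member is what all of the mutex_lock()/mutex_unlock() calls in the .c files above operate on; it is set up by ipc_init_ids(). A minimal sketch of a subsystem bringing one up, with hypothetical names:

static struct ipc_ids example_ids;

static void __init example_ipc_init(void)
{
        /* allocates the entries array and does mutex_init(&example_ids.mutex) */
        ipc_init_ids(&example_ids, 128);
}

static void example_op(void)
{
        mutex_lock(&example_ids.mutex);
        /* ... create, look up or delete IDs ... */
        mutex_unlock(&example_ids.mutex);
}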
@@ -40,7 +40,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
 #define ipc_init_proc_interface(path, header, ids, show) do {} while (0)
 #endif
 
-/* must be called with ids->sem acquired.*/
+/* must be called with ids->mutex acquired.*/
 int ipc_findkey(struct ipc_ids* ids, key_t key);
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size);
 