author		Ingo Molnar <mingo@elte.hu>	2005-08-05 17:05:27 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-08-05 18:56:41 -0400
commit		00a5dfdb93f74e4d95fb0d83c890728e331f8810
tree		f1d1043d7d02ad6bde3b158807b28bcfdafa21f6 /ipc/sem.c
parent		ba02508248e90a9d696aebd18b48a3290235b53c
[PATCH] Fix semundo lock leakage
semundo->lock can leak if semundo->refcount goes from 2 to 1 while
another thread has it locked. This causes major problems for PREEMPT
kernels.

The simplest fix for now is to undo the single-thread optimization.

This bug was found via relentless testing by Dominik Karall.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'ipc/sem.c')
 ipc/sem.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)
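The race is easy to reproduce in miniature. Below is a minimal userspace model of the pre-fix logic, not kernel code: lock_semundo_model() and unlock_semundo_model() are hypothetical stand-ins for the ipc/sem.c helpers, and the racing steps are serialized in main() for determinism. The lock is taken while refcnt == 2, the count then drops to 1 (as when the other task sharing the undo_list exits), and the unlock path re-evaluates the optimization and skips the unlock:

/* Userspace model of the pre-fix leak (hypothetical, for illustration).
 * Build with: gcc -pthread -o semundo-model semundo-model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t undo_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int refcnt = 2;	/* undo_list currently shared by two tasks */

static void lock_semundo_model(void)
{
	if (atomic_load(&refcnt) != 1)	/* pre-fix single-thread optimization */
		pthread_mutex_lock(&undo_lock);
}

static void unlock_semundo_model(void)
{
	if (atomic_load(&refcnt) != 1)	/* re-checks a value that may have changed */
		pthread_mutex_unlock(&undo_lock);
}

int main(void)
{
	lock_semundo_model();		/* refcnt == 2: lock is taken */
	atomic_fetch_sub(&refcnt, 1);	/* other task exits: 2 -> 1   */
	unlock_semundo_model();		/* refcnt == 1: unlock skipped */

	/* The mutex is still held, mirroring the kernel's leaked spinlock;
	 * on a PREEMPT kernel a leaked spin_lock() also leaves preemption
	 * disabled. */
	if (pthread_mutex_trylock(&undo_lock) != 0)
		printf("lock leaked\n");
	return 0;
}

The fix removes the refcnt check from both helpers, which is exactly what the hunks below do.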
diff --git a/ipc/sem.c b/ipc/sem.c
index 7e8a25c82ef3..70975ce0784a 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -895,7 +895,7 @@ static inline void lock_semundo(void)
 	struct sem_undo_list *undo_list;
 
 	undo_list = current->sysvsem.undo_list;
-	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+	if (undo_list)
 		spin_lock(&undo_list->lock);
 }
 
@@ -915,7 +915,7 @@ static inline void unlock_semundo(void)
 	struct sem_undo_list *undo_list;
 
 	undo_list = current->sysvsem.undo_list;
-	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+	if (undo_list)
 		spin_unlock(&undo_list->lock);
 }
 
@@ -943,9 +943,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 		if (undo_list == NULL)
 			return -ENOMEM;
 		memset(undo_list, 0, size);
-		/* don't initialize unodhd->lock here.  It's done
-		 * in copy_semundo() instead.
-		 */
+		spin_lock_init(&undo_list->lock);
 		atomic_set(&undo_list->refcnt, 1);
 		current->sysvsem.undo_list = undo_list;
 	}
@@ -1231,8 +1229,6 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
 		error = get_undo_list(&undo_list);
 		if (error)
 			return error;
-		if (atomic_read(&undo_list->refcnt) == 1)
-			spin_lock_init(&undo_list->lock);
 		atomic_inc(&undo_list->refcnt);
 		tsk->sysvsem.undo_list = undo_list;
 	} else
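For completeness: the refcnt > 1 case only arises when tasks share an undo list, which userspace sets up with clone() and CLONE_SYSVSEM. A hypothetical stress sketch along these lines (not the actual test that found the bug, which the commit does not include) exercises the path the fix covers: the child touches the shared undo_list via semop() with SEM_UNDO and then exits, dropping the refcount from 2 to 1 while the parent is still passing through lock_semundo()/unlock_semundo().

/* Hypothetical stress sketch, for illustration only. */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <unistd.h>

static int semid;

static int child_fn(void *arg)
{
	struct sembuf up = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };

	(void)arg;
	semop(semid, &up, 1);	/* touch the shared undo_list, then exit; */
	return 0;		/* task exit drops undo_list->refcnt to 1  */
}

int main(void)
{
	static char stack[64 * 1024];
	struct sembuf up   = { .sem_num = 0, .sem_op =  1, .sem_flg = SEM_UNDO };
	struct sembuf down = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
	pid_t pid;

	semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	if (semid < 0) { perror("semget"); return 1; }

	/* CLONE_SYSVSEM makes parent and child share one sem_undo_list. */
	pid = clone(child_fn, stack + sizeof(stack), CLONE_SYSVSEM | SIGCHLD, NULL);
	if (pid < 0) { perror("clone"); return 1; }

	/* Hammer the undo_list while the child exits; on a pre-fix PREEMPT
	 * kernel this window is where the leak could trigger. */
	for (int i = 0; i < 100000; i++) {
		semop(semid, &up, 1);
		semop(semid, &down, 1);
	}

	waitpid(pid, NULL, 0);
	semctl(semid, 0, IPC_RMID);
	return 0;
}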