author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2007-10-01 04:20:13 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-01 10:52:23 -0400
commit	9f96cb1e8bca179a92afa40dfc3c49990f1cfc71 (patch)
tree	7d1f921f488aa570083420dc3846856b17a7b2b6 /kernel
parent	8792f961ba8057d9f27987def3600253a3ba060f (diff)
robust futex thread exit race
Calling handle_futex_death in exit_robust_list for the different robust mutexes of a thread basically frees the mutex. Another thread might grab the lock immediately, which updates the next pointer of the mutex. fetch_robust_entry over the next pointer might therefore branch into the robust mutex list of a different thread. This can cause two problems: 1) some mutexes held by the dead thread are not freed, and 2) some mutexes held by a different thread are freed.

The next pointer needs to be read before calling handle_futex_death.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
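The race and the fix are easiest to see in isolation. Below is a minimal, self-contained C sketch of the same pattern; it is not kernel code: the circular list type and release_lock() are hypothetical stand-ins for the robust list and handle_futex_death(). The point is that releasing a node hands it to a waiter, who may immediately reuse it and rewrite its next pointer, so the walker must snapshot next before releasing.

struct node {
	struct node *next;	/* circular: the last node points back at head */
};

/* Stand-in for handle_futex_death(): wakes a waiter, who may
 * immediately reuse the node and overwrite n->next. */
static void release_lock(struct node *n)
{
	(void)n;
}

/* Racy walk: advances through entry->next after the node has been
 * handed off, so it may follow a pointer the new owner has already
 * redirected into another thread's list. */
void walk_racy(struct node *head)
{
	struct node *entry = head->next;

	while (entry != head) {
		release_lock(entry);
		entry = entry->next;	/* BUG: node may be live again */
	}
}

/* Fixed walk, mirroring the patch: fetch the next pointer first,
 * then release the node, then advance via the snapshot. */
void walk_fixed(struct node *head)
{
	struct node *entry = head->next;
	struct node *next_entry;

	while (entry != head) {
		next_entry = entry->next;	/* read before freeing */
		release_lock(entry);
		entry = next_entry;
	}
}

Note that the patch also moves the list_op_pending handling from before the loop to after it, presumably for the same reason: freeing the pending lock up front would let another thread start mutating the list before the walk begins.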
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/futex.c	26
-rw-r--r--	kernel/futex_compat.c	28
2 files changed, 34 insertions(+), 20 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index e8935b195e88..fcc94e7b4086 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1943,9 +1943,10 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
 void exit_robust_list(struct task_struct *curr)
 {
 	struct robust_list_head __user *head = curr->robust_list;
-	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+	struct robust_list __user *entry, *next_entry, *pending;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
 	unsigned long futex_offset;
+	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
@@ -1965,12 +1966,14 @@ void exit_robust_list(struct task_struct *curr)
 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
 		return;
 
-	if (pending)
-		handle_futex_death((void __user *)pending + futex_offset,
-				   curr, pip);
-
+	next_entry = NULL;	/* avoid warning with gcc */
 	while (entry != &head->list) {
 		/*
+		 * Fetch the next entry in the list before calling
+		 * handle_futex_death:
+		 */
+		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
+		/*
 		 * A pending lock might already be on the list, so
 		 * don't process it twice:
 		 */
@@ -1978,11 +1981,10 @@ void exit_robust_list(struct task_struct *curr)
 			if (handle_futex_death((void __user *)entry + futex_offset,
 						curr, pi))
 				return;
-		/*
-		 * Fetch the next entry in the list:
-		 */
-		if (fetch_robust_entry(&entry, &entry->next, &pi))
+		if (rc)
 			return;
+		entry = next_entry;
+		pi = next_pi;
 		/*
 		 * Avoid excessively long or circular lists:
 		 */
@@ -1991,6 +1993,10 @@ void exit_robust_list(struct task_struct *curr)
 
 		cond_resched();
 	}
+
+	if (pending)
+		handle_futex_death((void __user *)pending + futex_offset,
+				   curr, pip);
 }
 
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 7e52eb051f22..2c2e2954b713 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -38,10 +38,11 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
 void compat_exit_robust_list(struct task_struct *curr)
 {
 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
-	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
-	compat_uptr_t uentry, upending;
+	struct robust_list __user *entry, *next_entry, *pending;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+	compat_uptr_t uentry, next_uentry, upending;
 	compat_long_t futex_offset;
+	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
@@ -61,11 +62,16 @@ void compat_exit_robust_list(struct task_struct *curr)
 	if (fetch_robust_entry(&upending, &pending,
 			       &head->list_op_pending, &pip))
 		return;
-	if (pending)
-		handle_futex_death((void __user *)pending + futex_offset, curr, pip);
 
+	next_entry = NULL;	/* avoid warning with gcc */
 	while (entry != (struct robust_list __user *) &head->list) {
 		/*
+		 * Fetch the next entry in the list before calling
+		 * handle_futex_death:
+		 */
+		rc = fetch_robust_entry(&next_uentry, &next_entry,
+			(compat_uptr_t __user *)&entry->next, &next_pi);
+		/*
 		 * A pending lock might already be on the list, so
 		 * dont process it twice:
 		 */
@@ -74,12 +80,11 @@ void compat_exit_robust_list(struct task_struct *curr)
 			curr, pi))
 			return;
 
-		/*
-		 * Fetch the next entry in the list:
-		 */
-		if (fetch_robust_entry(&uentry, &entry,
-			(compat_uptr_t __user *)&entry->next, &pi))
+		if (rc)
 			return;
+		uentry = next_uentry;
+		entry = next_entry;
+		pi = next_pi;
 		/*
 		 * Avoid excessively long or circular lists:
 		 */
@@ -88,6 +93,9 @@ void compat_exit_robust_list(struct task_struct *curr)
 
 		cond_resched();
 	}
+	if (pending)
+		handle_futex_death((void __user *)pending + futex_offset,
+				   curr, pip);
 }
 
 asmlinkage long