author    | Martin Schwidefsky <schwidefsky@de.ibm.com>          | 2007-10-01 04:20:13 -0400
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-01 10:52:23 -0400
commit    | 9f96cb1e8bca179a92afa40dfc3c49990f1cfc71 (patch)
tree      | 7d1f921f488aa570083420dc3846856b17a7b2b6 /kernel/futex_compat.c
parent    | 8792f961ba8057d9f27987def3600253a3ba060f (diff)
robust futex thread exit race
Calling handle_futex_death in exit_robust_list for the different robust
mutexes of a thread basically frees the mutex. Another thread might grab
the lock immediately which updates the next pointer of the mutex.
fetch_robust_entry over the next pointer might therefore branch into the
robust mutex list of a different thread. This can cause two problems: 1)
some mutexes held by the dead thread are never freed and 2) some mutexes
held by a different thread are freed.

The next pointer therefore needs to be read before calling handle_futex_death.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
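
The race described above is the classic unsafe list walk: once handle_futex_death has run for an entry, the dying thread no longer owns that mutex, so another thread may acquire it and rewrite its next pointer before the walker gets around to reading it. A minimal sketch of the pattern the patch switches to, with hypothetical node/release_node names standing in for the robust_list entry and handle_futex_death (illustration only, not the kernel code):

```c
/*
 * Illustration only -- not the kernel code. "node" and "release_node"
 * are hypothetical stand-ins for a robust_list entry and
 * handle_futex_death(): once release_node() returns, another thread may
 * own the node again and rewrite node->next at any time.
 */
struct node {
	struct node *next;
};

void release_node(struct node *n);	/* hands the node back to other threads */

void walk_and_release(struct node *head)
{
	struct node *entry = head->next;
	struct node *next_entry;

	while (entry != head) {
		/* snapshot the link while the entry still belongs to us */
		next_entry = entry->next;

		/* after this call entry->next can no longer be trusted */
		release_node(entry);

		entry = next_entry;
	}
}
```

The diff below applies the same idea with fetch_robust_entry()/handle_futex_death(), and also defers the list_op_pending entry until after the walk, since that entry may itself still be linked into the list.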
Diffstat (limited to 'kernel/futex_compat.c')
-rw-r--r-- | kernel/futex_compat.c | 28
1 file changed, 18 insertions, 10 deletions
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 7e52eb051f22..2c2e2954b713 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -38,10 +38,11 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
 void compat_exit_robust_list(struct task_struct *curr)
 {
 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
-	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
-	compat_uptr_t uentry, upending;
+	struct robust_list __user *entry, *next_entry, *pending;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+	compat_uptr_t uentry, next_uentry, upending;
 	compat_long_t futex_offset;
+	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
@@ -61,11 +62,16 @@ void compat_exit_robust_list(struct task_struct *curr)
 	if (fetch_robust_entry(&upending, &pending,
 			       &head->list_op_pending, &pip))
 		return;
-	if (pending)
-		handle_futex_death((void __user *)pending + futex_offset, curr, pip);
 
+	next_entry = NULL;	/* avoid warning with gcc */
 	while (entry != (struct robust_list __user *) &head->list) {
 		/*
+		 * Fetch the next entry in the list before calling
+		 * handle_futex_death:
+		 */
+		rc = fetch_robust_entry(&next_uentry, &next_entry,
+			(compat_uptr_t __user *)&entry->next, &next_pi);
+		/*
 		 * A pending lock might already be on the list, so
 		 * dont process it twice:
 		 */
@@ -74,12 +80,11 @@ void compat_exit_robust_list(struct task_struct *curr)
 					curr, pi))
 				return;
 
-		/*
-		 * Fetch the next entry in the list:
-		 */
-		if (fetch_robust_entry(&uentry, &entry,
-				       (compat_uptr_t __user *)&entry->next, &pi))
-			return;
+		if (rc)
+			return;
+		uentry = next_uentry;
+		entry = next_entry;
+		pi = next_pi;
 		/*
 		 * Avoid excessively long or circular lists:
 		 */
@@ -88,6 +93,9 @@ void compat_exit_robust_list(struct task_struct *curr)
 
 		cond_resched();
 	}
+	if (pending)
+		handle_futex_death((void __user *)pending + futex_offset,
+				   curr, pip);
 }
 
 asmlinkage long
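
For context on what compat_exit_robust_list() is walking: each thread registers a robust_list_head with the kernel via set_robust_list(2), and the kernel traverses that user-space list at thread exit to mark held robust futexes as owner-dead. A minimal sketch of the registration side, using the uapi structures from <linux/futex.h> (glibc normally does this itself for robust pthread mutexes; there is no dedicated wrapper, so the raw syscall is shown):

```c
/*
 * Minimal sketch of registering the per-thread robust list that
 * exit_robust_list()/compat_exit_robust_list() walk on thread exit.
 * glibc registers such a list automatically for PTHREAD_MUTEX_ROBUST
 * mutexes, so this is for illustration only.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static struct robust_list_head head = {
	/* an empty list points back at its own head; the kernel's walk
	 * stops when entry == &head->list */
	.list            = { .next = &head.list },
	.futex_offset    = 0,	/* offset from a list entry to its futex word */
	.list_op_pending = NULL,
};

long register_robust_list(void)
{
	/* no glibc wrapper exists for set_robust_list */
	return syscall(SYS_set_robust_list, &head, sizeof(head));
}
```

For 32-bit tasks on a 64-bit kernel the head is recorded in curr->compat_robust_list instead, which is why the same fix was applied to exit_robust_list() in kernel/futex.c as well; the diffstat above is limited to the compat side.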