aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/android/binder_alloc.c
diff options
context:
space:
mode:
authorTodd Kjos <tkjos@android.com>2017-06-29 15:01:51 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2017-07-17 08:47:29 -0400
commit53d311cfa19ad35beba74d706effee02c86d198f (patch)
tree669eb2b29fbf3b3c0dd24e8eb16212017eb0f24d /drivers/android/binder_alloc.c
parente4cffcf4bf8b540e150c311e70559d735cc95358 (diff)
binder: protect against two threads freeing buffer
Adds protection against malicious user code freeing the same buffer at the same time, which could cause a crash. Cannot happen under normal use. Signed-off-by: Todd Kjos <tkjos@google.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--drivers/android/binder_alloc.c22
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index a0af1419cc79..2a2e41b13de5 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -116,7 +116,7 @@ static void binder_insert_allocated_buffer_locked(
116 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); 116 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
117} 117}
118 118
119static struct binder_buffer *binder_alloc_buffer_lookup_locked( 119static struct binder_buffer *binder_alloc_prepare_to_free_locked(
120 struct binder_alloc *alloc, 120 struct binder_alloc *alloc,
121 uintptr_t user_ptr) 121 uintptr_t user_ptr)
122{ 122{
@@ -135,8 +135,19 @@ static struct binder_buffer *binder_alloc_buffer_lookup_locked(
135 n = n->rb_left; 135 n = n->rb_left;
136 else if (kern_ptr > buffer) 136 else if (kern_ptr > buffer)
137 n = n->rb_right; 137 n = n->rb_right;
138 else 138 else {
139 /*
140 * Guard against user threads attempting to
141 * free the buffer twice
142 */
143 if (buffer->free_in_progress) {
144 pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
145 alloc->pid, current->pid, (u64)user_ptr);
146 return NULL;
147 }
148 buffer->free_in_progress = 1;
139 return buffer; 149 return buffer;
150 }
140 } 151 }
141 return NULL; 152 return NULL;
142} 153}
@@ -152,13 +163,13 @@ static struct binder_buffer *binder_alloc_buffer_lookup_locked(
152 * 163 *
153 * Return: Pointer to buffer or NULL 164 * Return: Pointer to buffer or NULL
154 */ 165 */
155struct binder_buffer *binder_alloc_buffer_lookup(struct binder_alloc *alloc, 166struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
156 uintptr_t user_ptr) 167 uintptr_t user_ptr)
157{ 168{
158 struct binder_buffer *buffer; 169 struct binder_buffer *buffer;
159 170
160 mutex_lock(&alloc->mutex); 171 mutex_lock(&alloc->mutex);
161 buffer = binder_alloc_buffer_lookup_locked(alloc, user_ptr); 172 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
162 mutex_unlock(&alloc->mutex); 173 mutex_unlock(&alloc->mutex);
163 return buffer; 174 return buffer;
164} 175}
@@ -358,6 +369,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
358 369
359 rb_erase(best_fit, &alloc->free_buffers); 370 rb_erase(best_fit, &alloc->free_buffers);
360 buffer->free = 0; 371 buffer->free = 0;
372 buffer->free_in_progress = 0;
361 binder_insert_allocated_buffer_locked(alloc, buffer); 373 binder_insert_allocated_buffer_locked(alloc, buffer);
362 if (buffer_size != size) { 374 if (buffer_size != size) {
363 struct binder_buffer *new_buffer = (void *)buffer->data + size; 375 struct binder_buffer *new_buffer = (void *)buffer->data + size;