author		Todd Kjos <tkjos@android.com>	2019-02-08 13:35:15 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-02-12 04:43:57 -0500
commit		8ced0c6231ead26eca8cb416dcb7cc1c2cdd41d8 (patch)
tree		367bf43c0df5e7bee2ca62a7939062d581f465ff /drivers/android/binder_alloc.c
parent		1a7c3d9bb7a926e88d5f57643e75ad1abfc55013 (diff)
binder: add functions to copy to/from binder buffers
Avoid using the vm_area when copying to or from binder buffers.
Instead, add new copy functions that copy between kernel space and
binder buffer space. These use kmap_atomic() and kunmap_atomic() to
create temporary mappings, then memcpy() within the mapped page.

Also, kmap_atomic() / kunmap_atomic() perform the appropriate cache
flushing to support VIVT cache architectures, so allow binder to
build when CPU_CACHE_VIVT is defined.

Several uses of the new functions are added here. More follow in
subsequent patches.
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
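[Editor's note] For readers unfamiliar with the pattern, the per-page copy
loop described above can be sketched in isolation. This is a minimal
illustration under stated assumptions, not binder code: get_page_at() is a
hypothetical stand-in for the allocator's page lookup, and only the
copy-to-buffer direction is shown.

#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic() */
#include <linux/kernel.h>	/* min_t() */
#include <linux/mm.h>		/* struct page, PAGE_SIZE, pgoff_t */
#include <linux/string.h>	/* memcpy() */

/* Hypothetical: resolve a buffer offset to its backing page. */
struct page *get_page_at(unsigned long offset, pgoff_t *pgoff);

static void copy_to_paged_region(unsigned long offset,
				 const char *src, size_t bytes)
{
	while (bytes) {
		pgoff_t pgoff;
		struct page *page = get_page_at(offset, &pgoff);
		/* Never cross a page boundary in a single memcpy(). */
		size_t size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		char *base = kmap_atomic(page);	/* temporary mapping */

		memcpy(base + pgoff, src, size);
		/* Unmapping also flushes the cache on VIVT arches. */
		kunmap_atomic(base);

		bytes -= size;
		src += size;
		offset += size;
	}
}

The mapping is short-lived and nothing sleeps while it is held, so the
cheaper kmap_atomic() suffices here, and its unmap path provides the VIVT
cache maintenance the message mentions.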
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--	drivers/android/binder_alloc.c	59
1 file changed, 59 insertions, 0 deletions
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 94c0d85c4e75..2eebff4be83e 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1166,3 +1166,62 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
 	}
 	return 0;
 }
+
+static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
+					bool to_buffer,
+					struct binder_buffer *buffer,
+					binder_size_t buffer_offset,
+					void *ptr,
+					size_t bytes)
+{
+	/* All copies must be 32-bit aligned and 32-bit size */
+	BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));
+
+	while (bytes) {
+		unsigned long size;
+		struct page *page;
+		pgoff_t pgoff;
+		void *tmpptr;
+		void *base_ptr;
+
+		page = binder_alloc_get_page(alloc, buffer,
+					     buffer_offset, &pgoff);
+		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+		base_ptr = kmap_atomic(page);
+		tmpptr = base_ptr + pgoff;
+		if (to_buffer)
+			memcpy(tmpptr, ptr, size);
+		else
+			memcpy(ptr, tmpptr, size);
+		/*
+		 * kunmap_atomic() takes care of flushing the cache
+		 * if this device has VIVT cache arch
+		 */
+		kunmap_atomic(base_ptr);
+		bytes -= size;
+		pgoff = 0;
+		ptr = ptr + size;
+		buffer_offset += size;
+	}
+}
+
+void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+				 struct binder_buffer *buffer,
+				 binder_size_t buffer_offset,
+				 void *src,
+				 size_t bytes)
+{
+	binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
+				    src, bytes);
+}
+
+void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+				   void *dest,
+				   struct binder_buffer *buffer,
+				   binder_size_t buffer_offset,
+				   size_t bytes)
+{
+	binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
+				    dest, bytes);
+}
+
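[Editor's note] The intended call pattern for the new helpers is copy-out,
operate on the kernel-space copy, copy-back. The sketch below is
illustrative only and not taken from this patch: fda, object_offset, and
the surrounding alloc/buffer context are assumed.

	struct binder_fd_array_object fda;

	/* Copy the object out of the binder buffer into a kernel copy. */
	binder_alloc_copy_from_buffer(alloc, &fda, buffer,
				      object_offset, sizeof(fda));

	/* ... validate or translate the kernel-space copy here ... */

	/* Write the (possibly updated) object back into the buffer. */
	binder_alloc_copy_to_buffer(alloc, buffer, object_offset,
				    &fda, sizeof(fda));

Note that in this patch the helpers return void: bounds are enforced up
front by the check_buffer() / BUG_ON() in binder_alloc_do_buffer_copy()
rather than through an error return to the caller.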