aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/android/binder_alloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--drivers/android/binder_alloc.c113
1 file changed, 113 insertions, 0 deletions
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 022cd80e80cc..94c0d85c4e75 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -29,6 +29,8 @@
29#include <linux/list_lru.h> 29#include <linux/list_lru.h>
30#include <linux/ratelimit.h> 30#include <linux/ratelimit.h>
31#include <asm/cacheflush.h> 31#include <asm/cacheflush.h>
32#include <linux/uaccess.h>
33#include <linux/highmem.h>
32#include "binder_alloc.h" 34#include "binder_alloc.h"
33#include "binder_trace.h" 35#include "binder_trace.h"
34 36
@@ -1053,3 +1055,114 @@ int binder_alloc_shrinker_init(void)
1053 } 1055 }
1054 return ret; 1056 return ret;
1055} 1057}
1058
1059/**
1060 * check_buffer() - verify that buffer/offset is safe to access
1061 * @alloc: binder_alloc for this proc
1062 * @buffer: binder buffer to be accessed
1063 * @offset: offset into @buffer data
1064 * @bytes: bytes to access from offset
1065 *
1066 * Check that the @offset/@bytes are within the size of the given
1067 * @buffer and that the buffer is currently active and not freeable.
1068 * Offsets must also be multiples of sizeof(u32). The kernel is
1069 * allowed to touch the buffer in two cases:
1070 *
1071 * 1) when the buffer is being created:
1072 * (buffer->free == 0 && buffer->allow_user_free == 0)
1073 * 2) when the buffer is being torn down:
1074 * (buffer->free == 0 && buffer->transaction == NULL).
1075 *
1076 * Return: true if the buffer is safe to access
1077 */
1078static inline bool check_buffer(struct binder_alloc *alloc,
1079 struct binder_buffer *buffer,
1080 binder_size_t offset, size_t bytes)
1081{
1082 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
1083
1084 return buffer_size >= bytes &&
1085 offset <= buffer_size - bytes &&
1086 IS_ALIGNED(offset, sizeof(u32)) &&
1087 !buffer->free &&
1088 (!buffer->allow_user_free || !buffer->transaction);
1089}
1090
1091/**
1092 * binder_alloc_get_page() - get kernel pointer for given buffer offset
1093 * @alloc: binder_alloc for this proc
1094 * @buffer: binder buffer to be accessed
1095 * @buffer_offset: offset into @buffer data
1096 * @pgoffp: address to copy final page offset to
1097 *
1098 * Lookup the struct page corresponding to the address
1099 * at @buffer_offset into @buffer->data. If @pgoffp is not
1100 * NULL, the byte-offset into the page is written there.
1101 *
1102 * The caller is responsible to ensure that the offset points
1103 * to a valid address within the @buffer and that @buffer is
1104 * not freeable by the user. Since it can't be freed, we are
1105 * guaranteed that the corresponding elements of @alloc->pages[]
1106 * cannot change.
1107 *
1108 * Return: struct page
1109 */
1110static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
1111 struct binder_buffer *buffer,
1112 binder_size_t buffer_offset,
1113 pgoff_t *pgoffp)
1114{
1115 binder_size_t buffer_space_offset = buffer_offset +
1116 (buffer->data - alloc->buffer);
1117 pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
1118 size_t index = buffer_space_offset >> PAGE_SHIFT;
1119 struct binder_lru_page *lru_page;
1120
1121 lru_page = &alloc->pages[index];
1122 *pgoffp = pgoff;
1123 return lru_page->page_ptr;
1124}
1125
1126/**
1127 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
1128 * @alloc: binder_alloc for this proc
1129 * @buffer: binder buffer to be accessed
1130 * @buffer_offset: offset into @buffer data
1131 * @from: userspace pointer to source buffer
1132 * @bytes: bytes to copy
1133 *
1134 * Copy bytes from source userspace to target buffer.
1135 *
1136 * Return: bytes remaining to be copied
1137 */
1138unsigned long
1139binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
1140 struct binder_buffer *buffer,
1141 binder_size_t buffer_offset,
1142 const void __user *from,
1143 size_t bytes)
1144{
1145 if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1146 return bytes;
1147
1148 while (bytes) {
1149 unsigned long size;
1150 unsigned long ret;
1151 struct page *page;
1152 pgoff_t pgoff;
1153 void *kptr;
1154
1155 page = binder_alloc_get_page(alloc, buffer,
1156 buffer_offset, &pgoff);
1157 size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1158 kptr = kmap(page) + pgoff;
1159 ret = copy_from_user(kptr, from, size);
1160 kunmap(page);
1161 if (ret)
1162 return bytes - size + ret;
1163 bytes -= size;
1164 from += size;
1165 buffer_offset += size;
1166 }
1167 return 0;
1168}