author | Sherry Yang <sherryy@android.com> | 2017-08-23 11:46:40 -0400
---|---|---
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-08-28 10:47:17 -0400
commit | 4175e2b46fd4b9021ef81f18f1be9474b2f45d4a (patch) |
tree | f71d84f71001cecc96563950400c15f609375619 /drivers/android/binder_alloc_selftest.c |
parent | e21762196118c272321532fe64c512efaa088b7e (diff) |
android: binder: Add allocator selftest
binder_alloc_selftest tests that alloc_new_buf handles page allocation and
deallocation properly when buffers are allocated and freed. The test allocates
5 buffers of various sizes to cover all possible page alignment cases, then
frees them in every possible order (the counting sketch after the diffstat
below shows how many combinations that is).
Test: boot the device with ANDROID_BINDER_IPC_SELFTEST config option
enabled. Allocator selftest passes.
Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/android/binder_alloc_selftest.c')
-rw-r--r-- | drivers/android/binder_alloc_selftest.c | 271
1 file changed, 271 insertions, 0 deletions
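Before the diff itself, it is worth spelling out how large the exhaustive search is. Each of the 5 buffer ends can take any of the 5 alignment types enumerated below, every resulting set of end offsets is exercised with both front-loaded and back-loaded sizes, and every size set is freed in all 5! orders, i.e. 5^5 x 2 x 120 = 750,000 alloc/free rounds. The userspace sketch below is illustrative only (its names are made up, it is not part of the patch); it merely mirrors the recursion structure of the selftest to make that count concrete.

/* count_rounds.c - illustrative sketch, mirrors the selftest's recursion shape */
#include <stdio.h>

#define BUFFER_NUM	5
#define ALIGN_TYPES	5	/* SAME_PAGE_UNALIGNED ... NEXT_NEXT_UNALIGNED */

static long rounds;

/* Mirrors binder_selftest_free_seq(): enumerate all BUFFER_NUM! free orders. */
static void count_free_orders(int *seq, int index)
{
        int i, j;

        if (index == BUFFER_NUM) {
                rounds++;		/* one alloc/free of all five buffers */
                return;
        }
        for (i = 0; i < BUFFER_NUM; i++) {
                for (j = 0; j < index; j++)
                        if (seq[j] == i)
                                break;
                if (j < index)		/* already used, like is_dup() */
                        continue;
                seq[index] = i;
                count_free_orders(seq, index + 1);
        }
}

/* Mirrors binder_selftest_alloc_offset(): ALIGN_TYPES choices per buffer end. */
static void count_offsets(int index)
{
        int seq[BUFFER_NUM] = {0};
        int align;

        if (index == BUFFER_NUM) {
                count_free_orders(seq, 0);	/* front_sizes */
                count_free_orders(seq, 0);	/* back_sizes */
                return;
        }
        for (align = 0; align < ALIGN_TYPES; align++)
                count_offsets(index + 1);
}

int main(void)
{
        count_offsets(0);
        printf("alloc/free rounds: %ld\n", rounds);	/* prints 750000 */
        return 0;
}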
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
new file mode 100644
index 000000000000..cc00ab6ee29d
--- /dev/null
+++ b/drivers/android/binder_alloc_selftest.c
@@ -0,0 +1,271 @@
/* binder_alloc_selftest.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm_types.h>
#include <linux/err.h>
#include "binder_alloc.h"

#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

static bool binder_selftest_run = true;
static int binder_selftest_failures;
static DEFINE_MUTEX(binder_selftest_lock);

/**
 * enum buf_end_align_type - Page alignment of a buffer
 * end with regard to the end of the previous buffer.
 *
 * In the pictures below, buf2 refers to the buffer we
 * are aligning. buf1 refers to previous buffer by addr.
 * Symbol [ means the start of a buffer, ] means the end
 * of a buffer, and | means page boundaries.
 */
enum buf_end_align_type {
        /**
         * @SAME_PAGE_UNALIGNED: The end of this buffer is on
         * the same page as the end of the previous buffer and
         * is not page aligned. Examples:
         * buf1 ][ buf2 ][ ...
         * buf1 ]|[ buf2 ][ ...
         */
        SAME_PAGE_UNALIGNED = 0,
        /**
         * @SAME_PAGE_ALIGNED: When the end of the previous buffer
         * is not page aligned, the end of this buffer is on the
         * same page as the end of the previous buffer and is page
         * aligned. When the previous buffer is page aligned, the
         * end of this buffer is aligned to the next page boundary.
         * Examples:
         * buf1 ][ buf2 ]| ...
         * buf1 ]|[ buf2 ]| ...
         */
        SAME_PAGE_ALIGNED,
        /**
         * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
         * the page next to the end of the previous buffer and
         * is not page aligned. Examples:
         * buf1 ][ buf2 | buf2 ][ ...
         * buf1 ]|[ buf2 | buf2 ][ ...
         */
        NEXT_PAGE_UNALIGNED,
        /**
         * @NEXT_PAGE_ALIGNED: The end of this buffer is on
         * the page next to the end of the previous buffer and
         * is page aligned. Examples:
         * buf1 ][ buf2 | buf2 ]| ...
         * buf1 ]|[ buf2 | buf2 ]| ...
         */
        NEXT_PAGE_ALIGNED,
        /**
         * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
         * the page that follows the page after the end of the
         * previous buffer and is not page aligned. Examples:
         * buf1 ][ buf2 | buf2 | buf2 ][ ...
         * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
         */
        NEXT_NEXT_UNALIGNED,
        LOOP_END,
};

static void pr_err_size_seq(size_t *sizes, int *seq)
{
        int i;

        pr_err("alloc sizes: ");
        for (i = 0; i < BUFFER_NUM; i++)
                pr_cont("[%zu]", sizes[i]);
        pr_cont("\n");
        pr_err("free seq: ");
        for (i = 0; i < BUFFER_NUM; i++)
                pr_cont("[%d]", seq[i]);
        pr_cont("\n");
}

static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
                                         struct binder_buffer *buffer,
                                         size_t size)
{
        void *page_addr, *end;
        int page_index;

        end = (void *)PAGE_ALIGN((uintptr_t)buffer + size);
        for (page_addr = buffer; page_addr < end; page_addr += PAGE_SIZE) {
                page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
                if (!alloc->pages[page_index]) {
                        pr_err("incorrect alloc state at page index %d\n",
                               page_index);
                        return false;
                }
        }
        return true;
}

static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
                                      struct binder_buffer *buffers[],
                                      size_t *sizes, int *seq)
{
        int i;

        for (i = 0; i < BUFFER_NUM; i++) {
                buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
                if (IS_ERR(buffers[i]) ||
                    !check_buffer_pages_allocated(alloc, buffers[i],
                                                  sizes[i])) {
                        pr_err_size_seq(sizes, seq);
                        binder_selftest_failures++;
                }
        }
}

static void binder_selftest_free_buf(struct binder_alloc *alloc,
                                     struct binder_buffer *buffers[],
                                     size_t *sizes, int *seq)
{
        int i;

        for (i = 0; i < BUFFER_NUM; i++)
                binder_alloc_free_buf(alloc, buffers[seq[i]]);

        for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
                if ((!alloc->pages[i]) == (i == 0)) {
                        pr_err("incorrect free state at page index %d\n", i);
                        binder_selftest_failures++;
                }
        }
}

static void binder_selftest_alloc_free(struct binder_alloc *alloc,
                                       size_t *sizes, int *seq)
{
        struct binder_buffer *buffers[BUFFER_NUM];

        binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
        binder_selftest_free_buf(alloc, buffers, sizes, seq);
}

static bool is_dup(int *seq, int index, int val)
{
        int i;

        for (i = 0; i < index; i++) {
                if (seq[i] == val)
                        return true;
        }
        return false;
}

/* Generate BUFFER_NUM factorial free orders. */
static void binder_selftest_free_seq(struct binder_alloc *alloc,
                                     size_t *sizes, int *seq, int index)
{
        int i;

        if (index == BUFFER_NUM) {
                binder_selftest_alloc_free(alloc, sizes, seq);
                return;
        }
        for (i = 0; i < BUFFER_NUM; i++) {
                if (is_dup(seq, index, i))
                        continue;
                seq[index] = i;
                binder_selftest_free_seq(alloc, sizes, seq, index + 1);
        }
}

static void binder_selftest_alloc_size(struct binder_alloc *alloc,
                                       size_t *end_offset)
{
        int i;
        int seq[BUFFER_NUM] = {0};
        size_t front_sizes[BUFFER_NUM];
        size_t back_sizes[BUFFER_NUM];
        size_t last_offset, offset = 0;

        for (i = 0; i < BUFFER_NUM; i++) {
                last_offset = offset;
                offset = end_offset[i];
                front_sizes[i] = offset - last_offset;
                back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
        }
        /*
         * Buffers share the first or last few pages.
         * Only BUFFER_NUM - 1 buffer sizes are adjustable since
         * we need one giant buffer before getting to the last page.
         */
        back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1]
                - sizeof(struct binder_buffer) * BUFFER_NUM;
        binder_selftest_free_seq(alloc, front_sizes, seq, 0);
        binder_selftest_free_seq(alloc, back_sizes, seq, 0);
}

static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
                                         size_t *end_offset, int index)
{
        int align;
        size_t end, prev;

        if (index == BUFFER_NUM) {
                binder_selftest_alloc_size(alloc, end_offset);
                return;
        }
        prev = index == 0 ? 0 : end_offset[index - 1];
        end = prev;

        BUILD_BUG_ON((BUFFER_MIN_SIZE + sizeof(struct binder_buffer))
                     * BUFFER_NUM >= PAGE_SIZE);

        for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
                if (align % 2)
                        end = ALIGN(end, PAGE_SIZE);
                else
                        end += BUFFER_MIN_SIZE;
                end_offset[index] = end;
                binder_selftest_alloc_offset(alloc, end_offset, index + 1);
        }
}

/**
 * binder_selftest_alloc() - Test alloc and free of buffer pages.
 * @alloc: Pointer to alloc struct.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in all orders possible. Check that pages are
 * allocated after buffer alloc and freed after freeing buffer.
 */
void binder_selftest_alloc(struct binder_alloc *alloc)
{
        size_t end_offset[BUFFER_NUM];

        if (!binder_selftest_run)
                return;
        mutex_lock(&binder_selftest_lock);
        if (!binder_selftest_run || !alloc->vma)
                goto done;
        pr_info("STARTED\n");
        binder_selftest_alloc_offset(alloc, end_offset, 0);
        binder_selftest_run = false;
        if (binder_selftest_failures > 0)
                pr_info("%d tests FAILED\n", binder_selftest_failures);
        else
                pr_info("PASSED\n");

done:
        mutex_unlock(&binder_selftest_lock);
}
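The diffstat above is filtered to the new file, so the Kconfig/Makefile entries for ANDROID_BINDER_IPC_SELFTEST and the call site inside the binder driver are not visible in this view. A minimal sketch of the expected wiring follows; the stub-when-disabled declaration and the example call site are assumptions about the accompanying (filtered-out) changes, not something this diff shows.

/* Sketch of assumed wiring; not part of the diff shown above. */
#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
void binder_selftest_alloc(struct binder_alloc *alloc);
#else
static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
#endif

/*
 * binder_selftest_alloc() returns immediately unless alloc->vma is set and
 * clears binder_selftest_run after its first full pass, so the driver can
 * call it from a frequently hit path once a process has mmap'ed its buffer
 * area, for example (hypothetical call site):
 *
 *	binder_selftest_alloc(&proc->alloc);
 *
 * Results land in the kernel log via pr_info() ("STARTED", "PASSED", or
 * "<n> tests FAILED"), prefixed by pr_fmt() with KBUILD_MODNAME.
 */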