Diffstat (limited to 'drivers/video/tegra/nvmap/nvmap_heap.c')
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_heap.c  1113
1 file changed, 1113 insertions, 0 deletions

diff --git a/drivers/video/tegra/nvmap/nvmap_heap.c b/drivers/video/tegra/nvmap/nvmap_heap.c
new file mode 100644
index 00000000000..7474f31534f
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_heap.c
@@ -0,0 +1,1113 @@
/*
 * drivers/video/tegra/nvmap/nvmap_heap.c
 *
 * GPU heap allocator.
 *
 * Copyright (c) 2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/err.h>

#include <mach/nvmap.h>
#include "nvmap.h"
#include "nvmap_heap.h"
#include "nvmap_common.h"

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * "carveouts" are platform-defined regions of physically contiguous memory
 * which are not managed by the OS. a platform may specify multiple carveouts,
 * for either small special-purpose memory regions (like IRAM on Tegra SoCs)
 * or reserved regions of main system memory.
 *
 * the carveout allocator returns allocations which are physically contiguous.
 * to reduce external fragmentation, the allocation algorithm implemented in
 * this file employs 3 strategies for keeping allocations of similar size
 * grouped together inside the larger heap: the "small", "normal" and "huge"
 * strategies. the size thresholds (in bytes) for determining which strategy
 * to employ should be provided by the platform for each heap. it is possible
 * for a platform to define a heap where only the "normal" strategy is used.
 *
 * o "normal" allocations use an address-order first-fit allocator (called
 *   BOTTOM_UP in the code below). each allocation is rounded up to be
 *   an integer multiple of the "small" allocation size.
 *
 * o "huge" allocations use an address-order last-fit allocator (called
 *   TOP_DOWN in the code below). like "normal" allocations, each allocation
 *   is rounded up to be an integer multiple of the "small" allocation size.
 *
 * o "small" allocations are treated differently: the heap manager maintains
 *   a pool of "small"-sized blocks internally from which allocations less
 *   than 1/2 of the "small" size are buddy-allocated. if a "small" allocation
 *   is requested and none of the buddy sub-heaps is able to service it,
 *   the heap manager will try to allocate a new buddy-heap.
 *
 * this allocator is intended to keep "splinters" colocated in the carveout,
 * and to ensure that the minimum free block size in the carveout (i.e., the
 * "small" threshold) is still a meaningful size.
 *
 */
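/*
 * worked example (hypothetical sizes): for a heap created with a 128K
 * buddy_heap_size and a 256K "small" threshold, a 40K request is
 * buddy-allocated inside a 128K sub-heap, a 200K request is rounded up
 * and placed bottom-up (first-fit), and a 1M request is placed top-down
 * (last-fit) at the end of the carveout.
 */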

#define MAX_BUDDY_NR	128	/* maximum buddies in a buddy allocator */

enum direction {
	TOP_DOWN,
	BOTTOM_UP
};

enum block_type {
	BLOCK_FIRST_FIT,	/* block was allocated directly from the heap */
	BLOCK_BUDDY,		/* block was allocated from a buddy sub-heap */
	BLOCK_EMPTY,
};

struct heap_stat {
	size_t free;		/* total free size */
	size_t free_largest;	/* largest free block */
	size_t free_count;	/* number of free blocks */
	size_t total;		/* total size */
	size_t largest;		/* largest unique block */
	size_t count;		/* total number of blocks */
	/* fast compaction attempt counter */
	unsigned int compaction_count_fast;
	/* full compaction attempt counter */
	unsigned int compaction_count_full;
};

struct buddy_heap;

struct buddy_block {
	struct nvmap_heap_block block;
	struct buddy_heap *heap;
};

struct list_block {
	struct nvmap_heap_block block;
	struct list_head all_list;
	unsigned int mem_prot;
	unsigned long orig_addr;
	size_t size;
	size_t align;
	struct nvmap_heap *heap;
	struct list_head free_list;
};

struct combo_block {
	union {
		struct list_block lb;
		struct buddy_block bb;
	};
};

struct buddy_bits {
	unsigned int alloc:1;
	unsigned int order:7;	/* log2(MAX_BUDDY_NR); */
};

struct buddy_heap {
	struct list_block *heap_base;
	unsigned int nr_buddies;
	struct list_head buddy_list;
	struct buddy_bits bitmap[MAX_BUDDY_NR];
};

struct nvmap_heap {
	struct list_head all_list;
	struct list_head free_list;
	struct mutex lock;
	struct list_head buddy_list;
	unsigned int min_buddy_shift;
	unsigned int buddy_heap_size;
	unsigned int small_alloc;
	const char *name;
	void *arg;
	struct device dev;
};

static struct kmem_cache *buddy_heap_cache;
static struct kmem_cache *block_cache;

static inline struct nvmap_heap *parent_of(struct buddy_heap *heap)
{
	return heap->heap_base->heap;
}

static inline unsigned int order_of(size_t len, size_t min_shift)
{
	len = 2 * DIV_ROUND_UP(len, (1 << min_shift)) - 1;
	return fls(len) - 1;
}
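/*
 * example (illustrative numbers): with min_shift = 16 (64K minimum
 * buddies), a 200K request gives DIV_ROUND_UP(200K, 64K) = 4 units,
 * 2 * 4 - 1 = 7, and fls(7) - 1 = 2, i.e. order 2 (a 256K block):
 * sizes round up to the next power-of-two multiple of the minimum
 * buddy size.
 */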

/* accumulates the size statistics of the buddy heap's blocks into *stat;
 * must be called while holding the parent heap's lock. */
static void buddy_stat(struct buddy_heap *heap, struct heap_stat *stat)
{
	unsigned int index;
	unsigned int shift = parent_of(heap)->min_buddy_shift;

	for (index = 0; index < heap->nr_buddies;
	     index += (1 << heap->bitmap[index].order)) {
		size_t curr = 1 << (heap->bitmap[index].order + shift);

		stat->largest = max(stat->largest, curr);
		stat->total += curr;
		stat->count++;

		if (!heap->bitmap[index].alloc) {
			stat->free += curr;
			stat->free_largest = max(stat->free_largest, curr);
			stat->free_count++;
		}
	}
}

/* fills *stat with the heap's size statistics (including any free blocks
 * in any buddy-heap suballocators) and returns the heap's base address;
 * takes the heap's lock internally. */
static unsigned long heap_stat(struct nvmap_heap *heap, struct heap_stat *stat)
{
	struct buddy_heap *bh;
	struct list_block *l = NULL;
	unsigned long base = -1ul;

	memset(stat, 0, sizeof(*stat));
	mutex_lock(&heap->lock);
	list_for_each_entry(l, &heap->all_list, all_list) {
		stat->total += l->size;
		stat->largest = max(l->size, stat->largest);
		stat->count++;
		base = min(base, l->orig_addr);
	}

	list_for_each_entry(bh, &heap->buddy_list, buddy_list) {
		buddy_stat(bh, stat);
		/* the total counts are double-counted for buddy heaps
		 * since the blocks allocated for buddy heaps exist in the
		 * all_list; subtract out the doubly-added stats */
		stat->total -= bh->heap_base->size;
		stat->count--;
	}

	list_for_each_entry(l, &heap->free_list, free_list) {
		stat->free += l->size;
		stat->free_count++;
		stat->free_largest = max(l->size, stat->free_largest);
	}
	mutex_unlock(&heap->lock);

	return base;
}

static ssize_t heap_name_show(struct device *dev,
			      struct device_attribute *attr, char *buf);

static ssize_t heap_stat_show(struct device *dev,
			      struct device_attribute *attr, char *buf);

static struct device_attribute heap_stat_total_max =
	__ATTR(total_max, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_total_count =
	__ATTR(total_count, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_total_size =
	__ATTR(total_size, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_free_max =
	__ATTR(free_max, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_free_count =
	__ATTR(free_count, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_free_size =
	__ATTR(free_size, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_stat_base =
	__ATTR(base, S_IRUGO, heap_stat_show, NULL);

static struct device_attribute heap_attr_name =
	__ATTR(name, S_IRUGO, heap_name_show, NULL);

static struct attribute *heap_stat_attrs[] = {
	&heap_stat_total_max.attr,
	&heap_stat_total_count.attr,
	&heap_stat_total_size.attr,
	&heap_stat_free_max.attr,
	&heap_stat_free_count.attr,
	&heap_stat_free_size.attr,
	&heap_stat_base.attr,
	&heap_attr_name.attr,
	NULL,
};

static struct attribute_group heap_stat_attr_group = {
	.attrs	= heap_stat_attrs,
};

static ssize_t heap_name_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
	return sprintf(buf, "%s\n", heap->name);
}

static ssize_t heap_stat_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
	struct heap_stat stat;
	unsigned long base;

	base = heap_stat(heap, &stat);

	/* the heap_stat fields are size_t, so print them with %zu */
	if (attr == &heap_stat_total_max)
		return sprintf(buf, "%zu\n", stat.largest);
	else if (attr == &heap_stat_total_count)
		return sprintf(buf, "%zu\n", stat.count);
	else if (attr == &heap_stat_total_size)
		return sprintf(buf, "%zu\n", stat.total);
	else if (attr == &heap_stat_free_max)
		return sprintf(buf, "%zu\n", stat.free_largest);
	else if (attr == &heap_stat_free_count)
		return sprintf(buf, "%zu\n", stat.free_count);
	else if (attr == &heap_stat_free_size)
		return sprintf(buf, "%zu\n", stat.free);
	else if (attr == &heap_stat_base)
		return sprintf(buf, "%08lx\n", base);
	else
		return -EINVAL;
}
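/*
 * each heap registers a device named "heap-<name>", so these statistics
 * are readable from userspace via sysfs, e.g. (path is illustrative):
 *
 *	$ cat /sys/devices/.../heap-generic-0/free_size
 */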
#ifndef CONFIG_NVMAP_CARVEOUT_COMPACTOR
static struct nvmap_heap_block *buddy_alloc(struct buddy_heap *heap,
					    size_t size, size_t align,
					    unsigned int mem_prot)
{
	unsigned int index = 0;
	unsigned int min_shift = parent_of(heap)->min_buddy_shift;
	unsigned int order = order_of(size, min_shift);
	unsigned int align_mask;
	unsigned int best = heap->nr_buddies;
	struct buddy_block *b;

	if (heap->heap_base->mem_prot != mem_prot)
		return NULL;

	align = max(align, (size_t)(1 << min_shift));
	align_mask = (align >> min_shift) - 1;

	/* best-fit scan: remember the smallest suitably-aligned free
	 * block of at least the requested order */
	for (index = 0; index < heap->nr_buddies;
	     index += (1 << heap->bitmap[index].order)) {

		if (heap->bitmap[index].alloc || (index & align_mask) ||
		    (heap->bitmap[index].order < order))
			continue;

		if (best == heap->nr_buddies ||
		    heap->bitmap[index].order < heap->bitmap[best].order)
			best = index;

		if (heap->bitmap[best].order == order)
			break;
	}

	if (best == heap->nr_buddies)
		return NULL;

	b = kmem_cache_zalloc(block_cache, GFP_KERNEL);
	if (!b)
		return NULL;

	/* split the chosen block down to the requested order; each split
	 * leaves the upper half behind as a free buddy */
	while (heap->bitmap[best].order != order) {
		unsigned int buddy;
		heap->bitmap[best].order--;
		buddy = best ^ (1 << heap->bitmap[best].order);
		heap->bitmap[buddy].order = heap->bitmap[best].order;
		heap->bitmap[buddy].alloc = 0;
	}
	heap->bitmap[best].alloc = 1;
	b->block.base = heap->heap_base->block.base + (best << min_shift);
	b->heap = heap;
	b->block.type = BLOCK_BUDDY;
	return &b->block;
}
#endif

static struct buddy_heap *do_buddy_free(struct nvmap_heap_block *block)
{
	struct buddy_block *b = container_of(block, struct buddy_block, block);
	struct buddy_heap *h = b->heap;
	unsigned int min_shift = parent_of(h)->min_buddy_shift;
	unsigned int index;

	index = (block->base - h->heap_base->block.base) >> min_shift;
	h->bitmap[index].alloc = 0;

	/* keep merging with the buddy block (found by flipping the bit at
	 * the current order) while it is free and of the same order */
	for (;;) {
		unsigned int buddy = index ^ (1 << h->bitmap[index].order);
		if (buddy >= h->nr_buddies || h->bitmap[buddy].alloc ||
		    h->bitmap[buddy].order != h->bitmap[index].order)
			break;

		h->bitmap[buddy].order++;
		h->bitmap[index].order++;
		index = min(buddy, index);
	}

	kmem_cache_free(block_cache, b);
	/* if the whole sub-heap coalesced back into one block, report it
	 * so the caller can release the buddy heap itself */
	if ((1 << h->bitmap[0].order) == h->nr_buddies)
		return h;

	return NULL;
}
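/*
 * illustration: freeing the order-0 block at index 5 checks its buddy at
 * index 5 ^ 1 = 4; if that block is also free at order 0, the two become
 * one order-1 block at index 4, whose buddy is then 4 ^ 2 = 6, and so on
 * until the buddy is allocated, of a different order, or out of range.
 */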


/*
 * base_max limits position of allocated chunk in memory.
 * if base_max is 0 then there is no such limitation.
 */
static struct nvmap_heap_block *do_heap_alloc(struct nvmap_heap *heap,
					      size_t len, size_t align,
					      unsigned int mem_prot,
					      unsigned long base_max)
{
	struct list_block *b = NULL;
	struct list_block *i = NULL;
	struct list_block *rem = NULL;
	unsigned long fix_base;
	enum direction dir;

	/* since pages are only mappable with one cache attribute,
	 * and most allocations from carveout heaps are DMA coherent
	 * (i.e., non-cacheable), round cacheable allocations up to
	 * a page boundary to ensure that the physical pages will
	 * only be mapped one way. */
	if (mem_prot == NVMAP_HANDLE_CACHEABLE ||
	    mem_prot == NVMAP_HANDLE_INNER_CACHEABLE) {
		align = max_t(size_t, align, PAGE_SIZE);
		len = PAGE_ALIGN(len);
	}

#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR
	dir = BOTTOM_UP;
#else
	dir = (len <= heap->small_alloc) ? BOTTOM_UP : TOP_DOWN;
#endif

	if (dir == BOTTOM_UP) {
		list_for_each_entry(i, &heap->free_list, free_list) {
			size_t fix_size;
			fix_base = ALIGN(i->block.base, align);
			fix_size = i->size - (fix_base - i->block.base);

			/* needed for compaction: a relocated chunk
			 * must never move to a higher address */
			if (base_max && fix_base > base_max)
				break;

			if (fix_size >= len) {
				b = i;
				break;
			}
		}
	} else {
		list_for_each_entry_reverse(i, &heap->free_list, free_list) {
			if (i->size >= len) {
				fix_base = i->block.base + i->size - len;
				fix_base &= ~(align - 1);
				if (fix_base >= i->block.base) {
					b = i;
					break;
				}
			}
		}
	}

	if (!b)
		return NULL;

	if (dir == BOTTOM_UP)
		b->block.type = BLOCK_FIRST_FIT;

	/* split free block */
	if (b->block.base != fix_base) {
		/* insert a new free block before allocated */
		rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
		if (!rem) {
			b->orig_addr = b->block.base;
			b->block.base = fix_base;
			b->size -= (b->block.base - b->orig_addr);
			goto out;
		}

		rem->block.type = BLOCK_EMPTY;
		rem->block.base = b->block.base;
		rem->orig_addr = rem->block.base;
		rem->size = fix_base - rem->block.base;
		b->block.base = fix_base;
		b->orig_addr = fix_base;
		b->size -= rem->size;
		list_add_tail(&rem->all_list, &b->all_list);
		list_add_tail(&rem->free_list, &b->free_list);
	}

	b->orig_addr = b->block.base;

	if (b->size > len) {
		/* insert a new free block after allocated */
		rem = kmem_cache_zalloc(block_cache, GFP_KERNEL);
		if (!rem)
			goto out;

		rem->block.type = BLOCK_EMPTY;
		rem->block.base = b->block.base + len;
		rem->size = b->size - len;
		BUG_ON(rem->size > b->size);
		rem->orig_addr = rem->block.base;
		b->size = len;
		list_add(&rem->all_list, &b->all_list);
		list_add(&rem->free_list, &b->free_list);
	}

out:
	list_del(&b->free_list);
	b->heap = heap;
	b->mem_prot = mem_prot;
	b->align = align;
	return &b->block;
}
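/*
 * illustration (hypothetical addresses): carving a 64K allocation with
 * 64K alignment out of the free block [0x18000..0x48000) first splits
 * off [0x18000..0x20000) as alignment padding, then returns the tail
 * [0x30000..0x48000) to the free list, leaving [0x20000..0x30000)
 * allocated.
 */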

#ifdef DEBUG_FREE_LIST
static void freelist_debug(struct nvmap_heap *heap, const char *title,
			   struct list_block *token)
{
	int i;
	struct list_block *n;

	dev_dbg(&heap->dev, "%s\n", title);
	i = 0;
	list_for_each_entry(n, &heap->free_list, free_list) {
		dev_dbg(&heap->dev, "\t%d [%p..%p]%s\n", i,
			(void *)n->orig_addr,
			(void *)(n->orig_addr + n->size),
			(n == token) ? "<--" : "");
		i++;
	}
}
#else
#define freelist_debug(_heap, _title, _token) do { } while (0)
#endif

static struct list_block *do_heap_free(struct nvmap_heap_block *block)
{
	struct list_block *b = container_of(block, struct list_block, block);
	struct list_block *n = NULL;
	struct nvmap_heap *heap = b->heap;

	BUG_ON(b->block.base > b->orig_addr);
	b->size += (b->block.base - b->orig_addr);
	b->block.base = b->orig_addr;

	freelist_debug(heap, "free list before", b);

	/* find the first free block to the right of the freed one */
	list_for_each_entry(n, &heap->free_list, free_list) {
		if (n->block.base > b->block.base)
			break;
	}

	/* add the freed block before the found free one */
	list_add_tail(&b->free_list, &n->free_list);
	BUG_ON(list_empty(&b->all_list));

	freelist_debug(heap, "free list pre-merge", b);

	/* merge the freed block with the next one if they are adjacent:
	 * the freed block grows and the next one is destroyed */
	if (!list_is_last(&b->free_list, &heap->free_list)) {
		n = list_first_entry(&b->free_list, struct list_block, free_list);
		if (n->block.base == b->block.base + b->size) {
			list_del(&n->all_list);
			list_del(&n->free_list);
			BUG_ON(b->orig_addr >= n->orig_addr);
			b->size += n->size;
			kmem_cache_free(block_cache, n);
		}
	}

	/* merge the freed block with the previous one if they are adjacent:
	 * the previous free block grows and the freed one is destroyed */
	if (b->free_list.prev != &heap->free_list) {
		n = list_entry(b->free_list.prev, struct list_block, free_list);
		if (n->block.base + n->size == b->block.base) {
			list_del(&b->all_list);
			list_del(&b->free_list);
			BUG_ON(n->orig_addr >= b->orig_addr);
			n->size += b->size;
			kmem_cache_free(block_cache, b);
			b = n;
		}
	}

	freelist_debug(heap, "free list after", b);
	b->block.type = BLOCK_EMPTY;
	return b;
}
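/*
 * illustration: freeing [0x20000..0x30000) when [0x30000..0x38000) is
 * already on the free list coalesces the two into a single free block
 * [0x20000..0x38000); if [0x18000..0x20000) is free as well, all three
 * become one block.
 */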

#ifndef CONFIG_NVMAP_CARVEOUT_COMPACTOR

static struct nvmap_heap_block *do_buddy_alloc(struct nvmap_heap *h,
					       size_t len, size_t align,
					       unsigned int mem_prot)
{
	struct buddy_heap *bh;
	struct nvmap_heap_block *b = NULL;

	list_for_each_entry(bh, &h->buddy_list, buddy_list) {
		b = buddy_alloc(bh, len, align, mem_prot);
		if (b)
			return b;
	}

	/* no buddy heaps could service this allocation: try to create a new
	 * buddy heap instead */
	bh = kmem_cache_zalloc(buddy_heap_cache, GFP_KERNEL);
	if (!bh)
		return NULL;

	b = do_heap_alloc(h, h->buddy_heap_size,
			  h->buddy_heap_size, mem_prot, 0);
	if (!b) {
		kmem_cache_free(buddy_heap_cache, bh);
		return NULL;
	}

	bh->heap_base = container_of(b, struct list_block, block);
	bh->nr_buddies = h->buddy_heap_size >> h->min_buddy_shift;
	bh->bitmap[0].alloc = 0;
	bh->bitmap[0].order = order_of(h->buddy_heap_size, h->min_buddy_shift);
	list_add_tail(&bh->buddy_list, &h->buddy_list);
	return buddy_alloc(bh, len, align, mem_prot);
}

#endif

#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR

static int do_heap_copy_listblock(struct nvmap_device *dev,
		unsigned long dst_base, unsigned long src_base, size_t len)
{
	pte_t **pte_src = NULL;
	pte_t **pte_dst = NULL;
	void *addr_src = NULL;
	void *addr_dst = NULL;
	unsigned long kaddr_src;
	unsigned long kaddr_dst;
	unsigned long phys_src = src_base;
	unsigned long phys_dst = dst_base;
	unsigned long pfn_src;
	unsigned long pfn_dst;
	int error = 0;

	pgprot_t prot = pgprot_writecombine(pgprot_kernel);

	int page;

	pte_src = nvmap_alloc_pte(dev, &addr_src);
	if (IS_ERR(pte_src)) {
		pr_err("Error while allocating pte_src\n");
		pte_src = NULL;
		error = -ENOMEM;
		goto fail;
	}

	pte_dst = nvmap_alloc_pte(dev, &addr_dst);
	if (IS_ERR(pte_dst)) {
		pr_err("Error while allocating pte_dst\n");
		pte_dst = NULL;
		error = -ENOMEM;
		goto fail;
	}

	kaddr_src = (unsigned long)addr_src;
	kaddr_dst = (unsigned long)addr_dst;

	BUG_ON(phys_dst > phys_src);
	BUG_ON((phys_src & PAGE_MASK) != phys_src);
	BUG_ON((phys_dst & PAGE_MASK) != phys_dst);
	BUG_ON((len & PAGE_MASK) != len);

	/* map each source/destination page pair through the two reserved
	 * kernel ptes and copy one page at a time */
	for (page = 0; page < (len >> PAGE_SHIFT); page++) {

		pfn_src = __phys_to_pfn(phys_src) + page;
		pfn_dst = __phys_to_pfn(phys_dst) + page;

		set_pte_at(&init_mm, kaddr_src, *pte_src,
			   pfn_pte(pfn_src, prot));
		flush_tlb_kernel_page(kaddr_src);

		set_pte_at(&init_mm, kaddr_dst, *pte_dst,
			   pfn_pte(pfn_dst, prot));
		flush_tlb_kernel_page(kaddr_dst);

		memcpy(addr_dst, addr_src, PAGE_SIZE);
	}

fail:
	if (pte_src)
		nvmap_free_pte(dev, pte_src);
	if (pte_dst)
		nvmap_free_pte(dev, pte_dst);
	return error;
}


static struct nvmap_heap_block *do_heap_relocate_listblock(
		struct list_block *block, bool fast)
{
	struct nvmap_heap_block *heap_block = &block->block;
	struct nvmap_heap_block *heap_block_new = NULL;
	struct nvmap_heap *heap = block->heap;
	struct nvmap_handle *handle = heap_block->handle;
	unsigned long src_base = heap_block->base;
	unsigned long dst_base;
	size_t src_size = block->size;
	size_t src_align = block->align;
	unsigned int src_prot = block->mem_prot;
	int error = 0;
	struct nvmap_share *share;

	if (!handle) {
		pr_err("INVALID HANDLE!\n");
		return NULL;
	}

	mutex_lock(&handle->lock);

	share = nvmap_get_share_from_dev(handle->dev);

	/* TODO: it is possible to use only the handle lock and no share
	 * pin_lock, but then we'd need to lock every handle during each
	 * pinning operation. Need to estimate the performance impact
	 * if we decide to simplify locking this way. */
	mutex_lock(&share->pin_lock);

	/* abort if block is pinned */
	if (atomic_read(&handle->pin))
		goto fail;
	/* abort if block is mapped */
	if (handle->usecount)
		goto fail;

	if (fast) {
		/* fast compaction path: allocate first, then free. */
		heap_block_new = do_heap_alloc(heap, src_size, src_align,
					       src_prot, src_base);
		if (heap_block_new)
			do_heap_free(heap_block);
		else
			goto fail;
	} else {
		/* full compaction path: free first, then allocate.
		 * It is slower but provides the best compaction results. */
		do_heap_free(heap_block);
		heap_block_new = do_heap_alloc(heap, src_size, src_align,
					       src_prot, src_base);
		/* the allocation should always succeed */
		BUG_ON(!heap_block_new);
	}

	/* update handle */
	handle->carveout = heap_block_new;
	heap_block_new->handle = handle;

	/* copy source data to new block location */
	dst_base = heap_block_new->base;

	/* a new allocation should always go to a lower address */
	BUG_ON(dst_base >= src_base);

	error = do_heap_copy_listblock(handle->dev,
				       dst_base, src_base, src_size);
	BUG_ON(error);

fail:
	mutex_unlock(&share->pin_lock);
	mutex_unlock(&handle->lock);
	return heap_block_new;
}

static void nvmap_heap_compact(struct nvmap_heap *heap,
			       size_t requested_size, bool fast)
{
	struct list_block *block_current = NULL;
	struct list_block *block_prev = NULL;
	struct list_block *block_next = NULL;

	struct list_head *ptr, *ptr_prev, *ptr_next;
	int relocation_count = 0;

	ptr = heap->all_list.next;

	/* walk through all blocks */
	while (ptr != &heap->all_list) {
		block_current = list_entry(ptr, struct list_block, all_list);

		ptr_prev = ptr->prev;
		ptr_next = ptr->next;

		if (block_current->block.type != BLOCK_EMPTY) {
			ptr = ptr_next;
			continue;
		}

		if (fast && block_current->size >= requested_size)
			break;

		/* relocate prev block */
		if (ptr_prev != &heap->all_list) {

			block_prev = list_entry(ptr_prev,
					struct list_block, all_list);

			BUG_ON(block_prev->block.type != BLOCK_FIRST_FIT);

			if (do_heap_relocate_listblock(block_prev, true)) {

				/* After relocation the current free block can
				 * be destroyed when it is merged with the
				 * previous free block. An updated pointer to
				 * the new free block can be obtained from the
				 * next block. */
				relocation_count++;
				ptr = ptr_next->prev;
				continue;
			}
		}

		if (ptr_next != &heap->all_list) {

			block_next = list_entry(ptr_next,
					struct list_block, all_list);

			BUG_ON(block_next->block.type != BLOCK_FIRST_FIT);

			if (do_heap_relocate_listblock(block_next, fast)) {
				ptr = ptr_prev->next;
				relocation_count++;
				continue;
			}
		}
		ptr = ptr_next;
	}
	pr_err("Relocated %d chunks\n", relocation_count);
}
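/*
 * sketch of the walk above: the allocated neighbors of each empty block
 * are relocated downward into it, so free space migrates toward the top
 * of the carveout. With fast == true the walk stops as soon as one free
 * block can satisfy requested_size; the full pass keeps relocating to
 * coalesce the largest possible free region.
 */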
#endif

void nvmap_usecount_inc(struct nvmap_handle *h)
{
	if (h->alloc && !h->heap_pgalloc) {
		mutex_lock(&h->lock);
		h->usecount++;
		mutex_unlock(&h->lock);
	} else {
		h->usecount++;
	}
}


void nvmap_usecount_dec(struct nvmap_handle *h)
{
	h->usecount--;
}

/* nvmap_heap_alloc: allocates a block to back the handle, using the
 * handle's size, alignment and memory-protection flags. */
struct nvmap_heap_block *nvmap_heap_alloc(struct nvmap_heap *h,
					  struct nvmap_handle *handle)
{
	struct nvmap_heap_block *b;
	size_t len = handle->size;
	size_t align = handle->align;
	unsigned int prot = handle->flags;

	mutex_lock(&h->lock);

#ifdef CONFIG_NVMAP_CARVEOUT_COMPACTOR
	/* align to page size */
	align = ALIGN(align, PAGE_SIZE);
	len = ALIGN(len, PAGE_SIZE);
	b = do_heap_alloc(h, len, align, prot, 0);
	if (!b) {
		pr_err("Compaction triggered!\n");
		nvmap_heap_compact(h, len, true);
		b = do_heap_alloc(h, len, align, prot, 0);
		if (!b) {
			pr_err("Full compaction triggered!\n");
			nvmap_heap_compact(h, len, false);
			b = do_heap_alloc(h, len, align, prot, 0);
		}
	}
#else
	if (len <= h->buddy_heap_size / 2) {
		b = do_buddy_alloc(h, len, align, prot);
	} else {
		if (h->buddy_heap_size)
			len = ALIGN(len, h->buddy_heap_size);
		align = max(align, (size_t)L1_CACHE_BYTES);
		b = do_heap_alloc(h, len, align, prot, 0);
	}
#endif

	if (b) {
		b->handle = handle;
		handle->carveout = b;
	}
	mutex_unlock(&h->lock);
	return b;
}
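/*
 * typical use (sketch; the handle fields follow nvmap.h and the flag
 * value is illustrative):
 *
 *	handle->size = 256 << 10;
 *	handle->align = PAGE_SIZE;
 *	handle->flags = NVMAP_HANDLE_WRITE_COMBINE;
 *	block = nvmap_heap_alloc(heap, handle);
 *	if (!block)
 *		return -ENOMEM;
 *	...
 *	nvmap_heap_free(block);
 */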

struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b)
{
	if (b->type == BLOCK_BUDDY) {
		struct buddy_block *bb;
		bb = container_of(b, struct buddy_block, block);
		return parent_of(bb->heap);
	} else {
		struct list_block *lb;
		lb = container_of(b, struct list_block, block);
		return lb->heap;
	}
}

/* nvmap_heap_free: frees block b */
void nvmap_heap_free(struct nvmap_heap_block *b)
{
	struct buddy_heap *bh = NULL;
	struct nvmap_heap *h = nvmap_block_to_heap(b);
	struct list_block *lb;

	mutex_lock(&h->lock);
	if (b->type == BLOCK_BUDDY)
		bh = do_buddy_free(b);
	else {
		lb = container_of(b, struct list_block, block);
		nvmap_flush_heap_block(NULL, b, lb->size, lb->mem_prot);
		do_heap_free(b);
	}

	/* do_buddy_free returns the buddy heap when its last block was
	 * freed; release the sub-heap's backing block as well */
	if (bh) {
		list_del(&bh->buddy_list);
		mutex_unlock(&h->lock);
		nvmap_heap_free(&bh->heap_base->block);
		kmem_cache_free(buddy_heap_cache, bh);
	} else
		mutex_unlock(&h->lock);
}


static void heap_release(struct device *heap)
{
	/* nothing to do: the heap is freed in nvmap_heap_destroy() */
}

/* nvmap_heap_create: create a heap object of len bytes, starting from
 * address base.
 *
 * if buddy_size is >= NVMAP_HEAP_MIN_BUDDY_SIZE, then allocations <= 1/2
 * of the buddy heap size will use a buddy sub-allocator, where each buddy
 * heap is buddy_size bytes (should be a power of 2). all other allocations
 * will be rounded up to be a multiple of buddy_size bytes.
 */
struct nvmap_heap *nvmap_heap_create(struct device *parent, const char *name,
				     phys_addr_t base, size_t len,
				     size_t buddy_size, void *arg)
{
	struct nvmap_heap *h = NULL;
	struct list_block *l = NULL;

	if (WARN_ON(buddy_size && buddy_size < NVMAP_HEAP_MIN_BUDDY_SIZE)) {
		dev_warn(parent, "%s: buddy_size %zu too small\n", __func__,
			 buddy_size);
		buddy_size = 0;
	} else if (WARN_ON(buddy_size >= len)) {
		dev_warn(parent, "%s: buddy_size %zu too large\n", __func__,
			 buddy_size);
		buddy_size = 0;
	} else if (WARN_ON(buddy_size & (buddy_size - 1))) {
		dev_warn(parent, "%s: buddy_size %zu not a power of 2\n",
			 __func__, buddy_size);
		buddy_size = 1 << (ilog2(buddy_size) + 1);
	}

	if (WARN_ON(buddy_size && (base & (buddy_size - 1)))) {
		unsigned long orig = base;
		dev_warn(parent, "%s: base address %p not aligned to "
			 "buddy_size %zu\n", __func__, (void *)base, buddy_size);
		base = ALIGN(base, buddy_size);
		len -= (base - orig);
	}

	if (WARN_ON(buddy_size && (len & (buddy_size - 1)))) {
		dev_warn(parent, "%s: length %zu not aligned to "
			 "buddy_size %zu\n", __func__, len, buddy_size);
		len &= ~(buddy_size - 1);
	}

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h) {
		dev_err(parent, "%s: out of memory\n", __func__);
		goto fail_alloc;
	}

	l = kmem_cache_zalloc(block_cache, GFP_KERNEL);
	if (!l) {
		dev_err(parent, "%s: out of memory\n", __func__);
		goto fail_alloc;
	}

	dev_set_name(&h->dev, "heap-%s", name);
	h->name = name;
	h->arg = arg;
	h->dev.parent = parent;
	h->dev.driver = NULL;
	h->dev.release = heap_release;
	if (device_register(&h->dev)) {
		dev_err(parent, "%s: failed to register %s\n", __func__,
			dev_name(&h->dev));
		goto fail_alloc;
	}
	if (sysfs_create_group(&h->dev.kobj, &heap_stat_attr_group)) {
		dev_err(&h->dev, "%s: failed to create attributes\n", __func__);
		goto fail_register;
	}
	h->small_alloc = max(2 * buddy_size, len / 256);
	h->buddy_heap_size = buddy_size;
	if (buddy_size)
		h->min_buddy_shift = ilog2(buddy_size / MAX_BUDDY_NR);
	INIT_LIST_HEAD(&h->free_list);
	INIT_LIST_HEAD(&h->buddy_list);
	INIT_LIST_HEAD(&h->all_list);
	mutex_init(&h->lock);
	l->block.base = base;
	l->block.type = BLOCK_EMPTY;
	l->size = len;
	l->orig_addr = base;
	list_add_tail(&l->free_list, &h->free_list);
	list_add_tail(&l->all_list, &h->all_list);

	inner_flush_cache_all();
	outer_flush_range(base, base + len);
	wmb();
	return h;

fail_register:
	device_unregister(&h->dev);
fail_alloc:
	if (l)
		kmem_cache_free(block_cache, l);
	kfree(h);
	return NULL;
}
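/*
 * example (sketch; the base/size/buddy values are illustrative): create
 * a 32M carveout at 0x1e000000 with 128K buddy sub-heaps, then tear it
 * down again:
 *
 *	struct nvmap_heap *h;
 *
 *	h = nvmap_heap_create(parent_dev, "generic-0", 0x1e000000,
 *			      32 << 20, 128 << 10, NULL);
 *	if (!h)
 *		return -ENOMEM;
 *	...
 *	nvmap_heap_destroy(h);
 */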

void *nvmap_heap_device_to_arg(struct device *dev)
{
	struct nvmap_heap *heap = container_of(dev, struct nvmap_heap, dev);
	return heap->arg;
}

void *nvmap_heap_to_arg(struct nvmap_heap *heap)
{
	return heap->arg;
}

/* nvmap_heap_destroy: frees all resources in heap */
void nvmap_heap_destroy(struct nvmap_heap *heap)
{
	WARN_ON(!list_empty(&heap->buddy_list));

	sysfs_remove_group(&heap->dev.kobj, &heap_stat_attr_group);
	device_unregister(&heap->dev);

	while (!list_empty(&heap->buddy_list)) {
		struct buddy_heap *b;
		b = list_first_entry(&heap->buddy_list, struct buddy_heap,
				     buddy_list);
		list_del(&b->buddy_list);
		nvmap_heap_free(&b->heap_base->block);
		kmem_cache_free(buddy_heap_cache, b);
	}

	WARN_ON(!list_is_singular(&heap->all_list));
	while (!list_empty(&heap->all_list)) {
		struct list_block *l;
		l = list_first_entry(&heap->all_list, struct list_block,
				     all_list);
		list_del(&l->all_list);
		kmem_cache_free(block_cache, l);
	}

	kfree(heap);
}

/* nvmap_heap_create_group: adds the attribute_group grp to the heap kobject */
int nvmap_heap_create_group(struct nvmap_heap *heap,
			    const struct attribute_group *grp)
{
	return sysfs_create_group(&heap->dev.kobj, grp);
}

/* nvmap_heap_remove_group: removes the attribute_group grp */
void nvmap_heap_remove_group(struct nvmap_heap *heap,
			     const struct attribute_group *grp)
{
	sysfs_remove_group(&heap->dev.kobj, grp);
}

int nvmap_heap_init(void)
{
	BUG_ON(buddy_heap_cache != NULL);
	buddy_heap_cache = KMEM_CACHE(buddy_heap, 0);
	if (!buddy_heap_cache) {
		pr_err("%s: unable to create buddy heap cache\n", __func__);
		return -ENOMEM;
	}

	block_cache = KMEM_CACHE(combo_block, 0);
	if (!block_cache) {
		kmem_cache_destroy(buddy_heap_cache);
		pr_err("%s: unable to create block cache\n", __func__);
		return -ENOMEM;
	}
	return 0;
}

void nvmap_heap_deinit(void)
{
	if (buddy_heap_cache)
		kmem_cache_destroy(buddy_heap_cache);
	if (block_cache)
		kmem_cache_destroy(block_cache);

	block_cache = NULL;
	buddy_heap_cache = NULL;
}