author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
commit    fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree      a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/video/tegra/nvmap/nvmap_handle.c
parent    8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)

    Added missing tegra files. (HEAD, master)

Diffstat (limited to 'drivers/video/tegra/nvmap/nvmap_handle.c'):
 -rw-r--r--  drivers/video/tegra/nvmap/nvmap_handle.c | 1020
 1 file changed, 1020 insertions(+), 0 deletions(-)
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
new file mode 100644
index 00000000000..539b7ce9801
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -0,0 +1,1020 @@
/*
 * drivers/video/tegra/nvmap/nvmap_handle.c
 *
 * Handle allocation and freeing routines for nvmap
 *
 * Copyright (c) 2009-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/pgtable.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include <linux/vmstat.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/moduleparam.h>

#include "nvmap.h"
#include "nvmap_mru.h"
#include "nvmap_common.h"

#define PRINT_CARVEOUT_CONVERSION 0
#if PRINT_CARVEOUT_CONVERSION
#define PR_INFO pr_info
#else
#define PR_INFO(...)
#endif

#define NVMAP_SECURE_HEAPS (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM | \
                            NVMAP_HEAP_CARVEOUT_VPR)
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define GFP_NVMAP (__GFP_HIGHMEM | __GFP_NOWARN)
#else
#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
#endif
/* handles may be arbitrarily large (16+MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. to
 * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
 * the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN (PAGE_SIZE * 2)
#define NVMAP_TEST_PAGE_POOL_SHRINKER 1
static bool enable_pp = 1;
static int pool_size[NVMAP_NUM_POOLS];

static char *s_memtype_str[] = {
        "uc",
        "wc",
        "iwb",
        "wb",
};

static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool)
{
        mutex_lock(&pool->lock);
}

static inline void nvmap_page_pool_unlock(struct nvmap_page_pool *pool)
{
        mutex_unlock(&pool->lock);
}

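/* Pop one page from the pool, or return NULL if the pool is empty.
 * The caller must hold pool->lock. */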
static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool)
{
        struct page *page = NULL;

        if (pool->npages > 0)
                page = pool->page_array[--pool->npages];
        return page;
}

static struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool)
{
        struct page *page = NULL;

        if (pool) {
                nvmap_page_pool_lock(pool);
                page = nvmap_page_pool_alloc_locked(pool);
                nvmap_page_pool_unlock(pool);
        }
        return page;
}

static bool nvmap_page_pool_release_locked(struct nvmap_page_pool *pool,
                                           struct page *page)
{
        int ret = false;

        if (enable_pp && pool->npages < pool->max_pages) {
                pool->page_array[pool->npages++] = page;
                ret = true;
        }
        return ret;
}

static bool nvmap_page_pool_release(struct nvmap_page_pool *pool,
                                    struct page *page)
{
        int ret = false;

        if (pool) {
                nvmap_page_pool_lock(pool);
                ret = nvmap_page_pool_release_locked(pool, page);
                nvmap_page_pool_unlock(pool);
        }
        return ret;
}

static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
{
        return pool->npages;
}

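/* Return up to nr_free pages from the pool to the system, restoring their
 * kernel mapping to write-back first; returns how many of the requested
 * pages could not be freed. */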
static int nvmap_page_pool_free(struct nvmap_page_pool *pool, int nr_free)
{
        int i = nr_free;
        int idx = 0;
        struct page *page;

        if (!nr_free)
                return nr_free;
        nvmap_page_pool_lock(pool);
        while (i) {
                page = nvmap_page_pool_alloc_locked(pool);
                if (!page)
                        break;
                pool->shrink_array[idx++] = page;
                i--;
        }

        if (idx)
                set_pages_array_wb(pool->shrink_array, idx);
        while (idx--)
                __free_page(pool->shrink_array[idx]);
        nvmap_page_pool_unlock(pool);
        return i;
}

static int nvmap_page_pool_get_unused_pages(void)
{
        unsigned int i;
        int total = 0;
        struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);

        for (i = 0; i < NVMAP_NUM_POOLS; i++)
                total += nvmap_page_pool_get_available_count(&share->pools[i]);

        return total;
}

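/* Grow or shrink a pool to hold at most 'size' pages, releasing any excess
 * pages and reallocating the page/shrink arrays to the new capacity. */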
static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size)
{
        int available_pages;
        int pages_to_release = 0;
        struct page **page_array = NULL;
        struct page **shrink_array = NULL;

        if (size == pool->max_pages)
                return;
repeat:
        nvmap_page_pool_free(pool, pages_to_release);
        nvmap_page_pool_lock(pool);
        available_pages = nvmap_page_pool_get_available_count(pool);
        if (available_pages > size) {
                nvmap_page_pool_unlock(pool);
                pages_to_release = available_pages - size;
                goto repeat;
        }

        if (size == 0) {
                vfree(pool->page_array);
                vfree(pool->shrink_array);
                pool->page_array = pool->shrink_array = NULL;
                goto out;
        }

        page_array = vmalloc(sizeof(struct page *) * size);
        shrink_array = vmalloc(sizeof(struct page *) * size);
        if (!page_array || !shrink_array)
                goto fail;

        memcpy(page_array, pool->page_array,
                pool->npages * sizeof(struct page *));
        vfree(pool->page_array);
        vfree(pool->shrink_array);
        pool->page_array = page_array;
        pool->shrink_array = shrink_array;
out:
        pr_debug("%s pool resized to %d from %d pages",
                 s_memtype_str[pool->flags], size, pool->max_pages);
        pool->max_pages = size;
        goto exit;
fail:
        vfree(page_array);
        vfree(shrink_array);
        pr_err("failed");
exit:
        nvmap_page_pool_unlock(pool);
}

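/* Memory shrinker callback: scan the pools round-robin, freeing up to
 * sc->nr_to_scan pages, and report how many pooled pages remain. */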
static int nvmap_page_pool_shrink(struct shrinker *shrinker,
                                  struct shrink_control *sc)
{
        unsigned int i;
        unsigned int pool_offset;
        struct nvmap_page_pool *pool;
        int shrink_pages = sc->nr_to_scan;
        static atomic_t start_pool = ATOMIC_INIT(-1);
        struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev);

        if (!shrink_pages)
                goto out;

        pr_debug("sh_pages=%d", shrink_pages);

        for (i = 0; i < NVMAP_NUM_POOLS && shrink_pages; i++) {
                pool_offset = atomic_add_return(1, &start_pool) %
                                NVMAP_NUM_POOLS;
                pool = &share->pools[pool_offset];
                shrink_pages = nvmap_page_pool_free(pool, shrink_pages);
        }
out:
        return nvmap_page_pool_get_unused_pages();
}

static struct shrinker nvmap_page_pool_shrinker = {
        .shrink = nvmap_page_pool_shrink,
        .seeks = 1,
};

static void shrink_page_pools(int *total_pages, int *available_pages)
{
        struct shrink_control sc;

        sc.gfp_mask = GFP_KERNEL;
        sc.nr_to_scan = 0;
        *total_pages = nvmap_page_pool_shrink(NULL, &sc);
        sc.nr_to_scan = *total_pages * 2;
        *available_pages = nvmap_page_pool_shrink(NULL, &sc);
}

#if NVMAP_TEST_PAGE_POOL_SHRINKER
static bool shrink_pp;
static int shrink_set(const char *arg, const struct kernel_param *kp)
{
        int cpu = smp_processor_id();
        unsigned long long t1, t2;
        int total_pages, available_pages;

        param_set_bool(arg, kp);

        if (shrink_pp) {
                t1 = cpu_clock(cpu);
                shrink_page_pools(&total_pages, &available_pages);
                t2 = cpu_clock(cpu);
                pr_info("shrink page pools: time=%lldns, "
                        "total_pages_released=%d, free_pages_available=%d",
                        t2-t1, total_pages, available_pages);
        }
        return 0;
}

static int shrink_get(char *buff, const struct kernel_param *kp)
{
        return param_get_bool(buff, kp);
}

static struct kernel_param_ops shrink_ops = {
        .get = shrink_get,
        .set = shrink_set,
};

module_param_cb(shrink_page_pools, &shrink_ops, &shrink_pp, 0644);
#endif

static int enable_pp_set(const char *arg, const struct kernel_param *kp)
{
        int total_pages, available_pages;

        param_set_bool(arg, kp);

        if (!enable_pp) {
                shrink_page_pools(&total_pages, &available_pages);
                pr_info("disabled page pools and released pages, "
                        "total_pages_released=%d, free_pages_available=%d",
                        total_pages, available_pages);
        }
        return 0;
}

static int enable_pp_get(char *buff, const struct kernel_param *kp)
{
        return param_get_int(buff, kp);
}

static struct kernel_param_ops enable_pp_ops = {
        .get = enable_pp_get,
        .set = enable_pp_set,
};

module_param_cb(enable_page_pools, &enable_pp_ops, &enable_pp, 0644);

#define POOL_SIZE_SET(m, i) \
static int pool_size_##m##_set(const char *arg, const struct kernel_param *kp) \
{ \
        struct nvmap_share *share = nvmap_get_share_from_dev(nvmap_dev); \
        param_set_int(arg, kp); \
        nvmap_page_pool_resize(&share->pools[i], pool_size[i]); \
        return 0; \
}

#define POOL_SIZE_GET(m) \
static int pool_size_##m##_get(char *buff, const struct kernel_param *kp) \
{ \
        return param_get_int(buff, kp); \
}

#define POOL_SIZE_OPS(m) \
static struct kernel_param_ops pool_size_##m##_ops = { \
        .get = pool_size_##m##_get, \
        .set = pool_size_##m##_set, \
};

#define POOL_SIZE_MODULE_PARAM_CB(m, i) \
module_param_cb(m##_pool_size, &pool_size_##m##_ops, &pool_size[i], 0644)

POOL_SIZE_SET(uc, NVMAP_HANDLE_UNCACHEABLE);
POOL_SIZE_GET(uc);
POOL_SIZE_OPS(uc);
POOL_SIZE_MODULE_PARAM_CB(uc, NVMAP_HANDLE_UNCACHEABLE);

POOL_SIZE_SET(wc, NVMAP_HANDLE_WRITE_COMBINE);
POOL_SIZE_GET(wc);
POOL_SIZE_OPS(wc);
POOL_SIZE_MODULE_PARAM_CB(wc, NVMAP_HANDLE_WRITE_COMBINE);

POOL_SIZE_SET(iwb, NVMAP_HANDLE_INNER_CACHEABLE);
POOL_SIZE_GET(iwb);
POOL_SIZE_OPS(iwb);
POOL_SIZE_MODULE_PARAM_CB(iwb, NVMAP_HANDLE_INNER_CACHEABLE);

POOL_SIZE_SET(wb, NVMAP_HANDLE_CACHEABLE);
POOL_SIZE_GET(wb);
POOL_SIZE_OPS(wb);
POOL_SIZE_MODULE_PARAM_CB(wb, NVMAP_HANDLE_CACHEABLE);

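/* Initialize a page pool for the given cache type: size it from the
 * pool_size module parameter (or a fraction of total RAM), prefill it with
 * pages from the system, and set their kernel mappings to the pool's cache
 * attribute. The shrinker is registered on the first call. */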
int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags)
{
        struct page *page;
        int i;
        static int reg = 1;
        struct sysinfo info;
        typedef int (*set_pages_array) (struct page **pages, int addrinarray);
        set_pages_array s_cpa[] = {
                set_pages_array_uc,
                set_pages_array_wc,
                set_pages_array_iwb,
                set_pages_array_wb
        };

        BUG_ON(flags >= NVMAP_NUM_POOLS);
        memset(pool, 0x0, sizeof(*pool));
        mutex_init(&pool->lock);
        pool->flags = flags;

        /* No default pool for cached memory. */
        if (flags == NVMAP_HANDLE_CACHEABLE)
                return 0;

        si_meminfo(&info);
        if (!pool_size[flags]) {
                /* Use 3/8th of total ram for page pools.
                 * 1/8th for uc, 1/8th for wc and 1/8th for iwb.
                 */
                pool->max_pages = info.totalram >> 3;
        }
        if (pool->max_pages <= 0 || pool->max_pages >= info.totalram)
                pool->max_pages = NVMAP_DEFAULT_PAGE_POOL_SIZE;
        pool_size[flags] = pool->max_pages;
        pr_info("nvmap %s page pool size=%d pages",
                s_memtype_str[flags], pool->max_pages);
        pool->page_array = vmalloc(sizeof(void *) * pool->max_pages);
        pool->shrink_array = vmalloc(sizeof(struct page *) * pool->max_pages);
        if (!pool->page_array || !pool->shrink_array)
                goto fail;

        if (reg) {
                reg = 0;
                register_shrinker(&nvmap_page_pool_shrinker);
        }

        nvmap_page_pool_lock(pool);
        for (i = 0; i < pool->max_pages; i++) {
                page = alloc_page(GFP_NVMAP);
                if (!page)
                        goto do_cpa;
                if (!nvmap_page_pool_release_locked(pool, page)) {
                        __free_page(page);
                        goto do_cpa;
                }
        }
do_cpa:
        (*s_cpa[flags])(pool->page_array, pool->npages);
        nvmap_page_pool_unlock(pool);
        return 0;
fail:
        pool->max_pages = 0;
        vfree(pool->shrink_array);
        vfree(pool->page_array);
        return -ENOMEM;
}

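/* Allocate/free a handle's page array: kmalloc for small arrays, vmalloc
 * once the array crosses PAGELIST_VMALLOC_MIN (see comment above). */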
static inline void *altalloc(size_t len)
{
        if (len >= PAGELIST_VMALLOC_MIN)
                return vmalloc(len);
        else
                return kmalloc(len, GFP_KERNEL);
}

static inline void altfree(void *ptr, size_t len)
{
        if (!ptr)
                return;

        if (len >= PAGELIST_VMALLOC_MIN)
                vfree(ptr);
        else
                kfree(ptr);
}

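/* Final release of a handle: return carveout memory to its heap, or give
 * system pages back to the page pools (falling back to __free_page), then
 * free the handle itself. */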
void _nvmap_handle_free(struct nvmap_handle *h)
{
        struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
        unsigned int i, nr_page, page_index = 0;
        struct nvmap_page_pool *pool = NULL;

        if (nvmap_handle_remove(h->dev, h) != 0)
                return;

        if (!h->alloc)
                goto out;

        if (!h->heap_pgalloc) {
                nvmap_usecount_inc(h);
                nvmap_heap_free(h->carveout);
                goto out;
        }

        nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);

        BUG_ON(h->size & ~PAGE_MASK);
        BUG_ON(!h->pgalloc.pages);

        nvmap_mru_remove(share, h);

        if (h->flags < NVMAP_NUM_POOLS)
                pool = &share->pools[h->flags];

        while (page_index < nr_page) {
                if (!nvmap_page_pool_release(pool,
                    h->pgalloc.pages[page_index]))
                        break;
                page_index++;
        }

        if (page_index == nr_page)
                goto skip_attr_restore;

        /* Restore page attributes. */
        if (h->flags == NVMAP_HANDLE_WRITE_COMBINE ||
            h->flags == NVMAP_HANDLE_UNCACHEABLE ||
            h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
                set_pages_array_wb(&h->pgalloc.pages[page_index],
                                nr_page - page_index);

skip_attr_restore:
        if (h->pgalloc.area)
                tegra_iovmm_free_vm(h->pgalloc.area);

        for (i = page_index; i < nr_page; i++)
                __free_page(h->pgalloc.pages[i]);

        altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
        kfree(h);
}

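/* Allocate an exact number of pages: allocate the covering power-of-two
 * order, split it into individual pages, and free the unused tail. */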
static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
        struct page *page, *p, *e;
        unsigned int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);
        page = alloc_pages(gfp, order);

        if (!page)
                return NULL;

        split_page(page, order);
        e = page + (1 << order);
        for (p = page + (size >> PAGE_SHIFT); p < e; p++)
                __free_page(p);

        return page;
}

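/* Back a handle with system pages, either as one physically contiguous
 * block or page-by-page (drawing from the page pools when possible), and
 * apply the kernel mapping attributes requested by h->flags. */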
static int handle_page_alloc(struct nvmap_client *client,
                             struct nvmap_handle *h, bool contiguous)
{
        size_t size = PAGE_ALIGN(h->size);
        struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
        unsigned int nr_page = size >> PAGE_SHIFT;
        pgprot_t prot;
        unsigned int i = 0, page_index = 0;
        struct page **pages;
        struct nvmap_page_pool *pool = NULL;

        pages = altalloc(nr_page * sizeof(*pages));
        if (!pages)
                return -ENOMEM;

        prot = nvmap_pgprot(h, pgprot_kernel);

        h->pgalloc.area = NULL;
        if (contiguous) {
                struct page *page;
                page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
                if (!page)
                        goto fail;

                for (i = 0; i < nr_page; i++)
                        pages[i] = nth_page(page, i);

        } else {
                if (h->flags < NVMAP_NUM_POOLS)
                        pool = &share->pools[h->flags];

                for (i = 0; i < nr_page; i++) {
                        /* Get pages from pool, if available. */
                        pages[i] = nvmap_page_pool_alloc(pool);
                        if (!pages[i])
                                break;
                        page_index++;
                }

                for (; i < nr_page; i++) {
                        pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP,
                                        PAGE_SIZE);
                        if (!pages[i])
                                goto fail;
                }

#ifndef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
                h->pgalloc.area = tegra_iovmm_create_vm(client->share->iovmm,
                                        NULL, size, h->align, prot,
                                        h->pgalloc.iovm_addr);
                if (!h->pgalloc.area)
                        goto fail;

                h->pgalloc.dirty = true;
#endif
        }

        if (nr_page == page_index)
                goto skip_attr_change;

        /* Update the pages mapping in kernel page table. */
        if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
                set_pages_array_wc(&pages[page_index],
                                nr_page - page_index);
        else if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
                set_pages_array_uc(&pages[page_index],
                                nr_page - page_index);
        else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
                set_pages_array_iwb(&pages[page_index],
                                nr_page - page_index);

skip_attr_change:
        h->size = size;
        h->pgalloc.pages = pages;
        h->pgalloc.contig = contiguous;
        INIT_LIST_HEAD(&h->pgalloc.mru_list);
        return 0;

fail:
        while (i--) {
                set_pages_array_wb(&pages[i], 1);
                __free_page(pages[i]);
        }
        altfree(pages, nr_page * sizeof(*pages));
        wmb();
        return -ENOMEM;
}

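/* Try to satisfy an allocation from a single heap type: carveout, IOVMM or
 * system memory. On success h->alloc is set and the appropriate commit
 * accounting is updated. */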
static void alloc_handle(struct nvmap_client *client,
                         struct nvmap_handle *h, unsigned int type)
{
        BUG_ON(type & (type - 1));

#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
#define __NVMAP_HEAP_CARVEOUT (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_CARVEOUT_VPR)
#define __NVMAP_HEAP_IOVMM (NVMAP_HEAP_IOVMM | NVMAP_HEAP_CARVEOUT_GENERIC)
        if (type & NVMAP_HEAP_CARVEOUT_GENERIC) {
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
                if (h->size <= PAGE_SIZE) {
                        PR_INFO("###CARVEOUT CONVERTED TO SYSMEM "
                                "0x%x bytes %s(%d)###\n",
                                h->size, current->comm, current->pid);
                        goto sysheap;
                }
#endif
                PR_INFO("###CARVEOUT CONVERTED TO IOVM "
                        "0x%x bytes %s(%d)###\n",
                        h->size, current->comm, current->pid);
        }
#else
#define __NVMAP_HEAP_CARVEOUT NVMAP_HEAP_CARVEOUT_MASK
#define __NVMAP_HEAP_IOVMM NVMAP_HEAP_IOVMM
#endif

        if (type & __NVMAP_HEAP_CARVEOUT) {
                struct nvmap_heap_block *b;
#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
                PR_INFO("###IRAM REQUEST RETAINED "
                        "0x%x bytes %s(%d)###\n",
                        h->size, current->comm, current->pid);
#endif
                /* Protect handle from relocation */
                nvmap_usecount_inc(h);

                b = nvmap_carveout_alloc(client, h, type);
                if (b) {
                        h->heap_pgalloc = false;
                        h->alloc = true;
                        nvmap_carveout_commit_add(client,
                                nvmap_heap_to_arg(nvmap_block_to_heap(b)),
                                h->size);
                }
                nvmap_usecount_dec(h);

        } else if (type & __NVMAP_HEAP_IOVMM) {
                size_t reserved = PAGE_ALIGN(h->size);
                int commit = 0;
                int ret;

                /* increment the committed IOVM space prior to allocation
                 * to avoid race conditions with other threads simultaneously
                 * allocating. */
                commit = atomic_add_return(reserved,
                                           &client->iovm_commit);

                if (commit < client->iovm_limit)
                        ret = handle_page_alloc(client, h, false);
                else
                        ret = -ENOMEM;

                if (!ret) {
                        h->heap_pgalloc = true;
                        h->alloc = true;
                } else {
                        atomic_sub(reserved, &client->iovm_commit);
                }

        } else if (type & NVMAP_HEAP_SYSMEM) {
#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM) && \
        defined(CONFIG_NVMAP_ALLOW_SYSMEM)
sysheap:
#endif
                if (handle_page_alloc(client, h, true) == 0) {
                        BUG_ON(!h->pgalloc.contig);
                        h->heap_pgalloc = true;
                        h->alloc = true;
                }
        }
}

/* small allocations will try to allocate from generic OS memory before
 * any of the limited heaps, to increase the effective memory for graphics
 * allocations, and to reduce fragmentation of the graphics heaps with
 * sub-page splinters */
static const unsigned int heap_policy_small[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        NVMAP_HEAP_SYSMEM,
#endif
        NVMAP_HEAP_CARVEOUT_MASK,
        NVMAP_HEAP_IOVMM,
        0,
};

static const unsigned int heap_policy_large[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_IOVMM,
        NVMAP_HEAP_CARVEOUT_MASK,
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        NVMAP_HEAP_SYSMEM,
#endif
        0,
};

/* Do not override single page policy if there is not much space to
 * avoid invoking system oom killer. */
#define NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD 50000000

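/* Allocate backing memory for an already created handle, walking the
 * small- or large-allocation heap policy until one heap succeeds. */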
int nvmap_alloc_handle_id(struct nvmap_client *client,
                          unsigned long id, unsigned int heap_mask,
                          size_t align, unsigned int flags)
{
        struct nvmap_handle *h = NULL;
        const unsigned int *alloc_policy;
        int nr_page;
        int err = -ENOMEM;

        h = nvmap_get_handle_id(client, id);

        if (!h)
                return -EINVAL;

        if (h->alloc)
                goto out;

        h->userflags = flags;
        nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        h->secure = !!(flags & NVMAP_HANDLE_SECURE);
        h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
        h->align = max_t(size_t, align, L1_CACHE_BYTES);

#ifndef CONFIG_TEGRA_IOVMM
        if (heap_mask & NVMAP_HEAP_IOVMM) {
                heap_mask &= NVMAP_HEAP_IOVMM;
                heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
        }
#endif
#ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
        /* Allow single-page allocations in system memory to save
         * carveout space and avoid extra iovm mappings */
        if (nr_page == 1) {
                if (heap_mask & NVMAP_HEAP_IOVMM)
                        heap_mask |= NVMAP_HEAP_SYSMEM;
                else if (heap_mask & NVMAP_HEAP_CARVEOUT_GENERIC) {
                        /* Calculate size of free physical pages
                         * managed by kernel */
                        unsigned long freeMem =
                                (global_page_state(NR_FREE_PAGES) +
                                global_page_state(NR_FILE_PAGES) -
                                total_swapcache_pages) << PAGE_SHIFT;

                        if (freeMem > NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD)
                                heap_mask |= NVMAP_HEAP_SYSMEM;
                }
        }
#endif

        /* This restriction is deprecated as alignments greater than
         * PAGE_SIZE are now correctly handled, but it is retained for
         * AP20 compatibility. */
        if (h->align > PAGE_SIZE)
                heap_mask &= NVMAP_HEAP_CARVEOUT_MASK;
#endif
        /* secure allocations can only be served from secure heaps */
        if (h->secure)
                heap_mask &= NVMAP_SECURE_HEAPS;

        if (!heap_mask) {
                err = -EINVAL;
                goto out;
        }

        alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

        while (!h->alloc && *alloc_policy) {
                unsigned int heap_type;

                heap_type = *alloc_policy++;
                heap_type &= heap_mask;

                if (!heap_type)
                        continue;

                heap_mask &= ~heap_type;

                while (heap_type && !h->alloc) {
                        unsigned int heap;

                        /* iterate possible heaps MSB-to-LSB, since higher-
                         * priority carveouts will have higher usage masks */
                        heap = 1 << __fls(heap_type);
                        alloc_handle(client, h, heap);
                        heap_type &= ~heap;
                }
        }

out:
        err = (h->alloc) ? 0 : err;
        nvmap_handle_put(h);
        return err;
}

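/* Drop a client's reference to a handle; when the last duplicate goes away,
 * unpin any leftover pins, update the commit accounting and release the
 * client's ref on the underlying handle. */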
void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h;
        int pins;

        nvmap_ref_lock(client);

        ref = _nvmap_validate_id_locked(client, id);
        if (!ref) {
                nvmap_ref_unlock(client);
                return;
        }

        BUG_ON(!ref->handle);
        h = ref->handle;

        if (atomic_dec_return(&ref->dupes)) {
                nvmap_ref_unlock(client);
                goto out;
        }

        smp_rmb();
        pins = atomic_read(&ref->pin);
        rb_erase(&ref->node, &client->handle_refs);

        if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
                atomic_sub(h->size, &client->iovm_commit);

        if (h->alloc && !h->heap_pgalloc) {
                mutex_lock(&h->lock);
                nvmap_carveout_commit_subtract(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);
                mutex_unlock(&h->lock);
        }

        nvmap_ref_unlock(client);

        if (pins)
                nvmap_err(client, "%s freeing pinned handle %p\n",
                          current->group_leader->comm, h);

        while (pins--)
                nvmap_unpin_handles(client, &ref->handle, 1);

        if (h->owner == client)
                h->owner = NULL;

        kfree(ref);

out:
        BUG_ON(!atomic_read(&h->ref));
        nvmap_handle_put(h);
}

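/* Insert a new handle reference into the client's red-black tree of refs,
 * keyed by handle pointer. */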
static void add_handle_ref(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref)
{
        struct rb_node **p, *parent = NULL;

        nvmap_ref_lock(client);
        p = &client->handle_refs.rb_node;
        while (*p) {
                struct nvmap_handle_ref *node;
                parent = *p;
                node = rb_entry(parent, struct nvmap_handle_ref, node);
                if (ref->handle > node->handle)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&ref->node, parent, p);
        rb_insert_color(&ref->node, &client->handle_refs);
        nvmap_ref_unlock(client);
}

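/* Create a new, unallocated handle of the requested size and register a
 * reference to it for the calling client. */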
struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size)
{
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref = NULL;

        if (!client)
                return ERR_PTR(-EINVAL);

        if (!size)
                return ERR_PTR(-EINVAL);

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return ERR_PTR(-ENOMEM);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                kfree(h);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&h->ref, 1);
        atomic_set(&h->pin, 0);
        h->owner = client;
        h->dev = client->dev;
        BUG_ON(!h->owner);
        h->size = h->orig_size = size;
        h->flags = NVMAP_HANDLE_WRITE_COMBINE;
        mutex_init(&h->lock);

        nvmap_handle_add(client->dev, h);

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        return ref;
}

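/* Duplicate an existing handle into this client: bump the dupe count if the
 * client already references it, otherwise create a new reference and charge
 * the client's IOVM or carveout commit. */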
struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
                                                   unsigned long id)
{
        struct nvmap_handle_ref *ref = NULL;
        struct nvmap_handle *h = NULL;

        BUG_ON(!client || client->dev != nvmap_dev);
        /* on success, the reference count for the handle should be
         * incremented, so the success paths will not call nvmap_handle_put */
        h = nvmap_validate_get(client, id);

        if (!h) {
                nvmap_debug(client, "%s duplicate handle failed\n",
                            current->group_leader->comm);
                return ERR_PTR(-EPERM);
        }

        if (!h->alloc) {
                nvmap_err(client, "%s duplicating unallocated handle\n",
                          current->group_leader->comm);
                nvmap_handle_put(h);
                return ERR_PTR(-EINVAL);
        }

        nvmap_ref_lock(client);
        ref = _nvmap_validate_id_locked(client, (unsigned long)h);

        if (ref) {
                /* handle already duplicated in client; just increment
                 * the reference count rather than re-duplicating it */
                atomic_inc(&ref->dupes);
                nvmap_ref_unlock(client);
                return ref;
        }

        nvmap_ref_unlock(client);

        /* verify that adding this handle to the process' access list
         * won't exceed the IOVM limit */
        if (h->heap_pgalloc && !h->pgalloc.contig) {
                int oc;
                oc = atomic_add_return(h->size, &client->iovm_commit);
                if (oc > client->iovm_limit && !client->super) {
                        atomic_sub(h->size, &client->iovm_commit);
                        nvmap_handle_put(h);
                        nvmap_err(client, "duplicating %p in %s over-commits"
                                  " IOVMM space\n", (void *)id,
                                  current->group_leader->comm);
                        return ERR_PTR(-ENOMEM);
                }
        }

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                nvmap_handle_put(h);
                return ERR_PTR(-ENOMEM);
        }

        if (!h->heap_pgalloc) {
                mutex_lock(&h->lock);
                nvmap_carveout_commit_add(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);
                mutex_unlock(&h->lock);
        }

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        return ref;
}