Diffstat (limited to 'drivers/video/tegra/nvmap/nvmap_dev.c')
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_dev.c | 1436
1 file changed, 1436 insertions, 0 deletions
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
new file mode 100644
index 00000000000..f84f38c93aa
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_dev.c
@@ -0,0 +1,1436 @@
1/*
2 * drivers/video/tegra/nvmap/nvmap_dev.c
3 *
4 * User-space interface to nvmap
5 *
6 * Copyright (c) 2011-2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
21 */
22
23#include <linux/backing-dev.h>
24#include <linux/bitmap.h>
25#include <linux/debugfs.h>
26#include <linux/delay.h>
27#include <linux/kernel.h>
28#include <linux/miscdevice.h>
29#include <linux/mm.h>
30#include <linux/oom.h>
31#include <linux/platform_device.h>
32#include <linux/seq_file.h>
33#include <linux/slab.h>
34#include <linux/spinlock.h>
35#include <linux/uaccess.h>
36#include <linux/vmalloc.h>
37
38#include <asm/cacheflush.h>
39#include <asm/tlbflush.h>
40
41#include <mach/iovmm.h>
42#include <mach/nvmap.h>
43
44#include "nvmap.h"
45#include "nvmap_ioctl.h"
46#include "nvmap_mru.h"
47#include "nvmap_common.h"
48
49#define NVMAP_NUM_PTES 64
50#define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
51
52#ifdef CONFIG_NVMAP_CARVEOUT_KILLER
53static bool carveout_killer = true;
54#else
55static bool carveout_killer;
56#endif
57module_param(carveout_killer, bool, 0640);
58
59struct nvmap_carveout_node {
60 unsigned int heap_bit;
61 struct nvmap_heap *carveout;
62 int index;
63 struct list_head clients;
64 spinlock_t clients_lock;
65};
66
67struct nvmap_device {
68 struct vm_struct *vm_rgn;
69 pte_t *ptes[NVMAP_NUM_PTES];
70 unsigned long ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
71 unsigned int lastpte;
72 spinlock_t ptelock;
73
74 struct rb_root handles;
75 spinlock_t handle_lock;
76 wait_queue_head_t pte_wait;
77 struct miscdevice dev_super;
78 struct miscdevice dev_user;
79 struct nvmap_carveout_node *heaps;
80 int nr_carveouts;
81 struct nvmap_share iovmm_master;
82 struct list_head clients;
83 spinlock_t clients_lock;
84};
85
86struct nvmap_device *nvmap_dev;
87
88static struct backing_dev_info nvmap_bdi = {
89 .ra_pages = 0,
90 .capabilities = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
91 BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
92};
93
94static int nvmap_open(struct inode *inode, struct file *filp);
95static int nvmap_release(struct inode *inode, struct file *filp);
96static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
97static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
98static void nvmap_vma_open(struct vm_area_struct *vma);
99static void nvmap_vma_close(struct vm_area_struct *vma);
100static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
101
102static const struct file_operations nvmap_user_fops = {
103 .owner = THIS_MODULE,
104 .open = nvmap_open,
105 .release = nvmap_release,
106 .unlocked_ioctl = nvmap_ioctl,
107 .mmap = nvmap_map,
108};
109
110static const struct file_operations nvmap_super_fops = {
111 .owner = THIS_MODULE,
112 .open = nvmap_open,
113 .release = nvmap_release,
114 .unlocked_ioctl = nvmap_ioctl,
115 .mmap = nvmap_map,
116};
117
118static struct vm_operations_struct nvmap_vma_ops = {
119 .open = nvmap_vma_open,
120 .close = nvmap_vma_close,
121 .fault = nvmap_vma_fault,
122};
123
124int is_nvmap_vma(struct vm_area_struct *vma)
125{
126 return vma->vm_ops == &nvmap_vma_ops;
127}
128
129struct device *nvmap_client_to_device(struct nvmap_client *client)
130{
131 if (client->super)
132 return client->dev->dev_super.this_device;
133 else
134 return client->dev->dev_user.this_device;
135}
136
137struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev)
138{
139 return &dev->iovmm_master;
140}
141
142/* allocates a PTE for the caller's use; returns a pointer to the PTE slot
143 * or an ERR_PTR-encoded errno. may be called from IRQ context */
144pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
145{
146 unsigned long flags;
147 unsigned long bit;
148
149 spin_lock_irqsave(&dev->ptelock, flags);
150 bit = find_next_zero_bit(dev->ptebits, NVMAP_NUM_PTES, dev->lastpte);
151 if (bit == NVMAP_NUM_PTES) {
152 bit = find_first_zero_bit(dev->ptebits, dev->lastpte);
153 if (bit == dev->lastpte)
154 bit = NVMAP_NUM_PTES;
155 }
156
157 if (bit == NVMAP_NUM_PTES) {
158 spin_unlock_irqrestore(&dev->ptelock, flags);
159 return ERR_PTR(-ENOMEM);
160 }
161
162 dev->lastpte = bit;
163 set_bit(bit, dev->ptebits);
164 spin_unlock_irqrestore(&dev->ptelock, flags);
165
166 *vaddr = dev->vm_rgn->addr + bit * PAGE_SIZE;
167 return &(dev->ptes[bit]);
168}
169
170/* allocates a PTE for the caller's use; returns a pointer to the PTE slot
171 * or an ERR_PTR-encoded errno. must be called from a sleepable context */
172pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr)
173{
174 int ret;
175 pte_t **pte;
176 ret = wait_event_interruptible(dev->pte_wait,
177 !IS_ERR(pte = nvmap_alloc_pte_irq(dev, vaddr)));
178
179 if (ret == -ERESTARTSYS)
180 return ERR_PTR(-EINTR);
181
182 return pte;
183}
184
185/* frees a PTE */
186void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
187{
188 unsigned long addr;
189 unsigned int bit = pte - dev->ptes;
190 unsigned long flags;
191
192 if (WARN_ON(bit >= NVMAP_NUM_PTES))
193 return;
194
195 addr = (unsigned long)dev->vm_rgn->addr + bit * PAGE_SIZE;
196 set_pte_at(&init_mm, addr, *pte, 0);
197
198 spin_lock_irqsave(&dev->ptelock, flags);
199 clear_bit(bit, dev->ptebits);
200 spin_unlock_irqrestore(&dev->ptelock, flags);
201 wake_up(&dev->pte_wait);
202}
203
204/* verifies that "id" refers to a valid handle ref owned by the client.
205 * the caller must hold the client's ref_lock prior to calling this function */
206struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *c,
207 unsigned long id)
208{
209 struct rb_node *n = c->handle_refs.rb_node;
210
211 while (n) {
212 struct nvmap_handle_ref *ref;
213 ref = rb_entry(n, struct nvmap_handle_ref, node);
214 if ((unsigned long)ref->handle == id)
215 return ref;
216 else if (id > (unsigned long)ref->handle)
217 n = n->rb_right;
218 else
219 n = n->rb_left;
220 }
221
222 return NULL;
223}
224
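/* looks up a client-local handle id and takes a reference on the underlying
 * handle; returns NULL if the id is not valid for this client */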
225struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
226 unsigned long id)
227{
228 struct nvmap_handle_ref *ref;
229 struct nvmap_handle *h = NULL;
230
231 nvmap_ref_lock(client);
232 ref = _nvmap_validate_id_locked(client, id);
233 if (ref)
234 h = ref->handle;
235 if (h)
236 h = nvmap_handle_get(h);
237 nvmap_ref_unlock(client);
238 return h;
239}
240
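/* returns the heap_bit of the carveout heap that contains block "b", or 0 if
 * the block does not belong to any registered carveout */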
241unsigned long nvmap_carveout_usage(struct nvmap_client *c,
242 struct nvmap_heap_block *b)
243{
244 struct nvmap_heap *h = nvmap_block_to_heap(b);
245 struct nvmap_carveout_node *n;
246 int i;
247
248 for (i = 0; i < c->dev->nr_carveouts; i++) {
249 n = &c->dev->heaps[i];
250 if (n->carveout == h)
251 return n->heap_bit;
252 }
253 return 0;
254}
255
256/*
257 * This routine flushes carveout memory from the cache.
258 * Why is a cache flush needed for carveout memory? Consider the case where a
259 * piece of carveout is allocated as cached and then released. If the same
260 * memory is later allocated for an uncached request without being flushed
261 * from the cache, the client might pass it to a H/W engine, which could start
262 * modifying the memory. Because the buffer was cached earlier, portions of it
263 * may still be resident in the cache. When the CPU later reads or writes
264 * other memory, those stale cache lines can be written back to main memory
265 * and corrupt the buffer if that happens after the H/W has written its data.
266 *
267 * However, blindly flushing the memory on each carveout allocation is redundant.
268 *
269 * In order to optimize carveout buffer cache flushes, the following
270 * strategy is used:
271 *
272 * The whole carveout is flushed from the cache during its initialization.
273 * During allocation, carveout buffers are not flushed from the cache.
274 * During deallocation, carveout buffers are flushed if they were allocated as
275 * cached; if they were allocated as uncached/writecombined, no cache flush is
276 * needed. Just draining the store buffers is enough.
277 */
278int nvmap_flush_heap_block(struct nvmap_client *client,
279 struct nvmap_heap_block *block, size_t len, unsigned int prot)
280{
281 pte_t **pte;
282 void *addr;
283 phys_addr_t kaddr;
284 phys_addr_t phys = block->base;
285 phys_addr_t end = block->base + len;
286
287 if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
288 goto out;
289
290 if (len >= FLUSH_CLEAN_BY_SET_WAY_THRESHOLD) {
291 inner_flush_cache_all();
292 if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
293 outer_flush_range(block->base, block->base + len);
294 goto out;
295 }
296
297 pte = nvmap_alloc_pte((client ? client->dev : nvmap_dev), &addr);
298 if (IS_ERR(pte))
299 return PTR_ERR(pte);
300
301 kaddr = (phys_addr_t)addr;
302
303 while (phys < end) {
304 phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
305 unsigned long pfn = __phys_to_pfn(phys);
306 void *base = (void *)kaddr + (phys & ~PAGE_MASK);
307
308 next = min(next, end);
309 set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, pgprot_kernel));
310 flush_tlb_kernel_page(kaddr);
311 __cpuc_flush_dcache_area(base, next - phys);
312 phys = next;
313 }
314
315 if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
316 outer_flush_range(block->base, block->base + len);
317
318 nvmap_free_pte((client ? client->dev : nvmap_dev), pte);
319out:
320 wmb();
321 return 0;
322}
323
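/* accounts "len" bytes of carveout usage to this client; the per-client
 * commit totals drive the carveout killer and the debugfs statistics */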
324void nvmap_carveout_commit_add(struct nvmap_client *client,
325 struct nvmap_carveout_node *node,
326 size_t len)
327{
328 unsigned long flags;
329
330 nvmap_ref_lock(client);
331 spin_lock_irqsave(&node->clients_lock, flags);
332 BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
333 client->carveout_commit[node->index].commit != 0);
334
335 client->carveout_commit[node->index].commit += len;
336 /* if this client isn't already on the list of nodes for this heap,
337 add it */
338 if (list_empty(&client->carveout_commit[node->index].list)) {
339 list_add(&client->carveout_commit[node->index].list,
340 &node->clients);
341 }
342 spin_unlock_irqrestore(&node->clients_lock, flags);
343 nvmap_ref_unlock(client);
344}
345
346void nvmap_carveout_commit_subtract(struct nvmap_client *client,
347 struct nvmap_carveout_node *node,
348 size_t len)
349{
350 unsigned long flags;
351
352 if (!client)
353 return;
354
355 spin_lock_irqsave(&node->clients_lock, flags);
356 BUG_ON(client->carveout_commit[node->index].commit < len);
357 client->carveout_commit[node->index].commit -= len;
358 /* if this client has no more allocations in this carveout, remove it from the list */
359 if (!client->carveout_commit[node->index].commit)
360 list_del_init(&client->carveout_commit[node->index].list);
361 spin_unlock_irqrestore(&node->clients_lock, flags);
362}
363
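/* the carveout_commit entries are allocated as an array at the tail of
 * struct nvmap_client, so stepping back node->index entries yields the first
 * entry and offsetof() then recovers the enclosing client */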
364static struct nvmap_client *get_client_from_carveout_commit(
365 struct nvmap_carveout_node *node, struct nvmap_carveout_commit *commit)
366{
367 struct nvmap_carveout_commit *first_commit = commit - node->index;
368 return (void *)first_commit - offsetof(struct nvmap_client,
369 carveout_commit);
370}
371
372static DECLARE_WAIT_QUEUE_HEAD(wait_reclaim);
373static int wait_count;
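/* selects the client with the highest oom_adj (lowest priority) and largest
 * carveout commit and sends it SIGKILL; returns true if a victim was found,
 * in which case the caller should wait for its memory to be reclaimed */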
374bool nvmap_shrink_carveout(struct nvmap_carveout_node *node)
375{
376 struct nvmap_carveout_commit *commit;
377 size_t selected_size = 0;
378 int selected_oom_adj = OOM_ADJUST_MIN;
379 struct task_struct *selected_task = NULL;
380 unsigned long flags;
381 bool wait = false;
382 int current_oom_adj = OOM_ADJUST_MIN;
383
384 task_lock(current);
385 if (current->signal)
386 current_oom_adj = current->signal->oom_adj;
387 task_unlock(current);
388
389 spin_lock_irqsave(&node->clients_lock, flags);
390 /* find the task with the smallest oom_adj (lowest priority)
391 * and largest carveout allocation -- ignore kernel allocations,
392 * there's no way to handle them */
393 list_for_each_entry(commit, &node->clients, list) {
394 struct nvmap_client *client =
395 get_client_from_carveout_commit(node, commit);
396 size_t size = commit->commit;
397 struct task_struct *task = client->task;
398 struct signal_struct *sig;
399
400 if (!task)
401 continue;
402
403 task_lock(task);
404 sig = task->signal;
405 if (!task->mm || !sig)
406 goto end;
407 /* don't try to kill current */
408 if (task == current->group_leader)
409 goto end;
410 /* don't try to kill higher priority tasks */
411 if (sig->oom_adj < current_oom_adj)
412 goto end;
413 if (sig->oom_adj < selected_oom_adj)
414 goto end;
415 if (sig->oom_adj == selected_oom_adj &&
416 size <= selected_size)
417 goto end;
418 selected_oom_adj = sig->oom_adj;
419 selected_size = size;
420 selected_task = task;
421end:
422 task_unlock(task);
423 }
424 if (selected_task) {
425 wait = true;
426 if (fatal_signal_pending(selected_task)) {
427 pr_warning("carveout_killer: process %d dying "
428 "slowly\n", selected_task->pid);
429 goto out;
430 }
431 pr_info("carveout_killer: killing process %d with oom_adj %d "
432 "to reclaim %zu (for process with oom_adj %d)\n",
433 selected_task->pid, selected_oom_adj,
434 selected_size, current_oom_adj);
435 force_sig(SIGKILL, selected_task);
436 }
437out:
438 spin_unlock_irqrestore(&node->clients_lock, flags);
439 return wait;
440}
441
442static
443struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
444 struct nvmap_handle *handle,
445 unsigned long type)
446{
447 struct nvmap_carveout_node *co_heap;
448 struct nvmap_device *dev = client->dev;
449 int i;
450
451 for (i = 0; i < dev->nr_carveouts; i++) {
452 struct nvmap_heap_block *block;
453 co_heap = &dev->heaps[i];
454
455 if (!(co_heap->heap_bit & type))
456 continue;
457
458 block = nvmap_heap_alloc(co_heap->carveout, handle);
459 if (block)
460 return block;
461 }
462 return NULL;
463}
464
465static bool nvmap_carveout_freed(int count)
466{
467 smp_rmb();
468 return count != wait_count;
469}
470
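/* attempts a carveout allocation; when the carveout killer is enabled, keeps
 * killing low-priority clients and retrying until the allocation succeeds or
 * NVMAP_CARVEOUT_KILLER_RETRY_TIME expires */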
471struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
472 struct nvmap_handle *handle,
473 unsigned long type)
474{
475 struct nvmap_heap_block *block;
476 struct nvmap_carveout_node *co_heap;
477 struct nvmap_device *dev = client->dev;
478 int i;
479 unsigned long end = jiffies +
480 msecs_to_jiffies(NVMAP_CARVEOUT_KILLER_RETRY_TIME);
481 int count = 0;
482
483 do {
484 block = do_nvmap_carveout_alloc(client, handle, type);
485 if (!carveout_killer)
486 return block;
487
488 if (block)
489 return block;
490
491 if (!count++) {
492 char task_comm[TASK_COMM_LEN];
493 if (client->task)
494 get_task_comm(task_comm, client->task);
495 else
496 task_comm[0] = 0;
497 pr_info("%s: failed to allocate %u bytes for "
498 "process %s, firing carveout "
499 "killer!\n", __func__, handle->size, task_comm);
500
501 } else {
502 pr_info("%s: still can't allocate %u bytes, "
503 "attempt %d!\n", __func__, handle->size, count);
504 }
505
506 /* shrink carveouts that matter and try again */
507 for (i = 0; i < dev->nr_carveouts; i++) {
508 int count;
509 co_heap = &dev->heaps[i];
510
511 if (!(co_heap->heap_bit & type))
512 continue;
513
514 count = wait_count;
515 /* indicates we didn't find anything to kill,
516 might as well stop trying */
517 if (!nvmap_shrink_carveout(co_heap))
518 return NULL;
519
520 if (time_is_after_jiffies(end))
521 wait_event_interruptible_timeout(wait_reclaim,
522 nvmap_carveout_freed(count),
523 end - jiffies);
524 }
525 } while (time_is_after_jiffies(end));
526
527 if (time_is_before_jiffies(end))
528 pr_info("carveout_killer: timeout expired without "
529 "allocation succeeding.\n");
530
531 return NULL;
532}
533
534/* remove a handle from the device's tree of all handles; called
535 * when freeing handles. */
536int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
537{
538 spin_lock(&dev->handle_lock);
539
540 /* re-test inside the spinlock if the handle really has no clients;
541 * only remove the handle if it is unreferenced */
542 if (atomic_add_return(0, &h->ref) > 0) {
543 spin_unlock(&dev->handle_lock);
544 return -EBUSY;
545 }
546 smp_rmb();
547 BUG_ON(atomic_read(&h->ref) < 0);
548 BUG_ON(atomic_read(&h->pin) != 0);
549
550 rb_erase(&h->node, &dev->handles);
551
552 spin_unlock(&dev->handle_lock);
553 return 0;
554}
555
556/* adds a newly-created handle to the device master tree */
557void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
558{
559 struct rb_node **p;
560 struct rb_node *parent = NULL;
561
562 spin_lock(&dev->handle_lock);
563 p = &dev->handles.rb_node;
564 while (*p) {
565 struct nvmap_handle *b;
566
567 parent = *p;
568 b = rb_entry(parent, struct nvmap_handle, node);
569 if (h > b)
570 p = &parent->rb_right;
571 else
572 p = &parent->rb_left;
573 }
574 rb_link_node(&h->node, parent, p);
575 rb_insert_color(&h->node, &dev->handles);
576 spin_unlock(&dev->handle_lock);
577}
578
579/* validates that a handle is in the device master tree, and that the
580 * client has permission to access it */
581struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
582 unsigned long id)
583{
584 struct nvmap_handle *h = NULL;
585 struct rb_node *n;
586
587 spin_lock(&client->dev->handle_lock);
588
589 n = client->dev->handles.rb_node;
590
591 while (n) {
592 h = rb_entry(n, struct nvmap_handle, node);
593 if ((unsigned long)h == id) {
594 if (client->super || h->global || (h->owner == client))
595 h = nvmap_handle_get(h);
596 else
597 h = NULL;
598 spin_unlock(&client->dev->handle_lock);
599 return h;
600 }
601 if (id > (unsigned long)h)
602 n = n->rb_right;
603 else
604 n = n->rb_left;
605 }
606 spin_unlock(&client->dev->handle_lock);
607 return NULL;
608}
609
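/* creates a new client for the device; the client is allocated with one
 * carveout_commit slot per carveout heap and, for user processes, holds a
 * reference on the group leader's task_struct (used by the carveout killer
 * and the debugfs statistics) */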
610struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
611 const char *name)
612{
613 struct nvmap_client *client;
614 struct task_struct *task;
615 int i;
616
617 if (WARN_ON(!dev))
618 return NULL;
619
620 client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
621 * dev->nr_carveouts), GFP_KERNEL);
622 if (!client)
623 return NULL;
624
625 client->name = name;
626 client->super = true;
627 client->dev = dev;
628 /* TODO: allocate unique IOVMM client for each nvmap client */
629 client->share = &dev->iovmm_master;
630 client->handle_refs = RB_ROOT;
631
632 atomic_set(&client->iovm_commit, 0);
633
634 client->iovm_limit = nvmap_mru_vm_size(client->share->iovmm);
635
636 for (i = 0; i < dev->nr_carveouts; i++) {
637 INIT_LIST_HEAD(&client->carveout_commit[i].list);
638 client->carveout_commit[i].commit = 0;
639 }
640
641 get_task_struct(current->group_leader);
642 task_lock(current->group_leader);
643 /* don't bother to store task struct for kernel threads,
644 they can't be killed anyway */
645 if (current->flags & PF_KTHREAD) {
646 put_task_struct(current->group_leader);
647 task = NULL;
648 } else {
649 task = current->group_leader;
650 }
651 task_unlock(current->group_leader);
652 client->task = task;
653
654 mutex_init(&client->ref_lock);
655 atomic_set(&client->count, 1);
656
657 spin_lock(&dev->clients_lock);
658 list_add(&client->list, &dev->clients);
659 spin_unlock(&dev->clients_lock);
660 return client;
661}
662
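/* tears down a client once its last reference is dropped: releases any pins
 * and duplicate references it still holds, detaches it from the per-carveout
 * commit lists, and frees the client structure */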
663static void destroy_client(struct nvmap_client *client)
664{
665 struct rb_node *n;
666 int i;
667
668 if (!client)
669 return;
670
671
672 while ((n = rb_first(&client->handle_refs))) {
673 struct nvmap_handle_ref *ref;
674 int pins, dupes;
675
676 ref = rb_entry(n, struct nvmap_handle_ref, node);
677 rb_erase(&ref->node, &client->handle_refs);
678
679 smp_rmb();
680 pins = atomic_read(&ref->pin);
681
682 if (ref->handle->owner == client)
683 ref->handle->owner = NULL;
684
685 while (pins--)
686 nvmap_unpin_handles(client, &ref->handle, 1);
687
688 dupes = atomic_read(&ref->dupes);
689 while (dupes--)
690 nvmap_handle_put(ref->handle);
691
692 kfree(ref);
693 }
694
695 if (carveout_killer) {
696 wait_count++;
697 smp_wmb();
698 wake_up_all(&wait_reclaim);
699 }
700
701 for (i = 0; i < client->dev->nr_carveouts; i++)
702 list_del(&client->carveout_commit[i].list);
703
704 if (client->task)
705 put_task_struct(client->task);
706
707 spin_lock(&client->dev->clients_lock);
708 list_del(&client->list);
709 spin_unlock(&client->dev->clients_lock);
710 kfree(client);
711}
712
713struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
714{
715 if (WARN_ON(!client))
716 return NULL;
717
718 if (WARN_ON(!atomic_add_unless(&client->count, 1, 0)))
719 return NULL;
720
721 return client;
722}
723
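/* resolves an open nvmap file descriptor to its client, verifying that the
 * fd really belongs to nvmap by comparing its file_operations; takes a
 * reference on the client which the caller must drop with nvmap_client_put */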
724struct nvmap_client *nvmap_client_get_file(int fd)
725{
726 struct nvmap_client *client = ERR_PTR(-EFAULT);
727 struct file *f = fget(fd);
728 if (!f)
729 return ERR_PTR(-EINVAL);
730
731 if ((f->f_op == &nvmap_user_fops) || (f->f_op == &nvmap_super_fops)) {
732 client = f->private_data;
733 atomic_inc(&client->count);
734 }
735
736 fput(f);
737 return client;
738}
739
740void nvmap_client_put(struct nvmap_client *client)
741{
742 if (!client)
743 return;
744
745 if (!atomic_dec_return(&client->count))
746 destroy_client(client);
747}
748
749static int nvmap_open(struct inode *inode, struct file *filp)
750{
751 struct miscdevice *miscdev = filp->private_data;
752 struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
753 struct nvmap_client *priv;
754 int ret;
755
756 ret = nonseekable_open(inode, filp);
757 if (unlikely(ret))
758 return ret;
759
760 BUG_ON(dev != nvmap_dev);
761 priv = nvmap_create_client(dev, "user");
762 if (!priv)
763 return -ENOMEM;
764
765 priv->super = (filp->f_op == &nvmap_super_fops);
766
767 filp->f_mapping->backing_dev_info = &nvmap_bdi;
768
769 filp->private_data = priv;
770 return 0;
771}
772
773static int nvmap_release(struct inode *inode, struct file *filp)
774{
775 nvmap_client_put(filp->private_data);
776 return 0;
777}
778
779static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
780{
781 struct nvmap_vma_priv *priv;
782
783 /* after NVMAP_IOC_MMAP, the handle that is mapped by this VMA
784 * will be stored in vm_private_data and faulted in. until the
785 * ioctl is made, the VMA is mapped no-access */
786 vma->vm_private_data = NULL;
787
788 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
789 if (!priv)
790 return -ENOMEM;
791
792 priv->offs = 0;
793 priv->handle = NULL;
794 atomic_set(&priv->count, 1);
795
796 vma->vm_flags |= VM_SHARED;
797 vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_MIXEDMAP | VM_RESERVED);
798 vma->vm_ops = &nvmap_vma_ops;
799 vma->vm_private_data = priv;
800
801 return 0;
802}
803
804static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
805{
806 int err = 0;
807 void __user *uarg = (void __user *)arg;
808
809 if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
810 return -ENOTTY;
811
812 if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
813 return -ENOTTY;
814
815 if (_IOC_DIR(cmd) & _IOC_READ)
816 err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
817 if (_IOC_DIR(cmd) & _IOC_WRITE)
818 err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
819
820 if (err)
821 return -EFAULT;
822
823 switch (cmd) {
824 case NVMAP_IOC_CLAIM:
825 nvmap_warn(filp->private_data, "preserved handles not "
826 "supported\n");
827 err = -ENODEV;
828 break;
829 case NVMAP_IOC_CREATE:
830 case NVMAP_IOC_FROM_ID:
831 err = nvmap_ioctl_create(filp, cmd, uarg);
832 break;
833
834 case NVMAP_IOC_GET_ID:
835 err = nvmap_ioctl_getid(filp, uarg);
836 break;
837
838 case NVMAP_IOC_PARAM:
839 err = nvmap_ioctl_get_param(filp, uarg);
840 break;
841
842 case NVMAP_IOC_UNPIN_MULT:
843 case NVMAP_IOC_PIN_MULT:
844 err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT, uarg);
845 break;
846
847 case NVMAP_IOC_ALLOC:
848 err = nvmap_ioctl_alloc(filp, uarg);
849 break;
850
851 case NVMAP_IOC_FREE:
852 err = nvmap_ioctl_free(filp, arg);
853 break;
854
855 case NVMAP_IOC_MMAP:
856 err = nvmap_map_into_caller_ptr(filp, uarg);
857 break;
858
859 case NVMAP_IOC_WRITE:
860 case NVMAP_IOC_READ:
861 err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg);
862 break;
863
864 case NVMAP_IOC_CACHE:
865 err = nvmap_ioctl_cache_maint(filp, uarg);
866 break;
867
868 default:
869 return -ENOTTY;
870 }
871 return err;
872}
873
874/* to ensure that the backing store for the VMA isn't freed while a fork'd
875 * reference still exists, nvmap_vma_open increments the reference count on
876 * the handle, and nvmap_vma_close decrements it. alternatively, we could
877 * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
878*/
879static void nvmap_vma_open(struct vm_area_struct *vma)
880{
881 struct nvmap_vma_priv *priv;
882
883 priv = vma->vm_private_data;
884
885 BUG_ON(!priv);
886
887 atomic_inc(&priv->count);
888}
889
890static void nvmap_vma_close(struct vm_area_struct *vma)
891{
892 struct nvmap_vma_priv *priv = vma->vm_private_data;
893
894 if (priv) {
895 if (priv->handle) {
896 nvmap_usecount_dec(priv->handle);
897 BUG_ON(priv->handle->usecount < 0);
898 }
899 if (!atomic_dec_return(&priv->count)) {
900 if (priv->handle)
901 nvmap_handle_put(priv->handle);
902 kfree(priv);
903 }
904 }
905 vma->vm_private_data = NULL;
906}
907
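/* fault handler for nvmap mappings: carveout-backed handles are mapped by
 * inserting the physical pfn directly, while page-allocated handles return
 * the backing struct page for the core mm to map */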
908static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
909{
910 struct nvmap_vma_priv *priv;
911 unsigned long offs;
912
913 offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
914 priv = vma->vm_private_data;
915 if (!priv || !priv->handle || !priv->handle->alloc)
916 return VM_FAULT_SIGBUS;
917
918 offs += priv->offs;
919 /* if the VMA was split for some reason, vm_pgoff will be the VMA's
920 * offset from the original VMA */
921 offs += (vma->vm_pgoff << PAGE_SHIFT);
922
923 if (offs >= priv->handle->size)
924 return VM_FAULT_SIGBUS;
925
926 if (!priv->handle->heap_pgalloc) {
927 unsigned long pfn;
928 BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
929 pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
930 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
931 return VM_FAULT_NOPAGE;
932 } else {
933 struct page *page;
934 offs >>= PAGE_SHIFT;
935 page = priv->handle->pgalloc.pages[offs];
936 if (page)
937 get_page(page);
938 vmf->page = page;
939 return (page) ? 0 : VM_FAULT_SIGBUS;
940 }
941}
942
943static ssize_t attr_show_usage(struct device *dev,
944 struct device_attribute *attr, char *buf)
945{
946 struct nvmap_carveout_node *node = nvmap_heap_device_to_arg(dev);
947
948 return sprintf(buf, "%08x\n", node->heap_bit);
949}
950
951static struct device_attribute heap_attr_show_usage =
952 __ATTR(usage, S_IRUGO, attr_show_usage, NULL);
953
954static struct attribute *heap_extra_attrs[] = {
955 &heap_attr_show_usage.attr,
956 NULL,
957};
958
959static struct attribute_group heap_extra_attr_group = {
960 .attrs = heap_extra_attrs,
961};
962
963static void client_stringify(struct nvmap_client *client, struct seq_file *s)
964{
965 char task_comm[TASK_COMM_LEN];
966 if (!client->task) {
967 seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0);
968 return;
969 }
970 get_task_comm(task_comm, client->task);
971 seq_printf(s, "%-18s %18s %8u", client->name, task_comm,
972 client->task->pid);
973}
974
975static void allocations_stringify(struct nvmap_client *client,
976 struct seq_file *s)
977{
978 unsigned long base = 0;
979 struct rb_node *n = rb_first(&client->handle_refs);
980
981 for (; n != NULL; n = rb_next(n)) {
982 struct nvmap_handle_ref *ref =
983 rb_entry(n, struct nvmap_handle_ref, node);
984 struct nvmap_handle *handle = ref->handle;
985 if (handle->alloc && !handle->heap_pgalloc) {
986 seq_printf(s, "%-18s %-18s %8lx %10u %8x\n", "", "",
987 (unsigned long)(handle->carveout->base),
988 handle->size, handle->userflags);
989 } else if (handle->alloc && handle->heap_pgalloc) {
990 seq_printf(s, "%-18s %-18s %8lx %10u %8x\n", "", "",
991 base, handle->size, handle->userflags);
992 }
993 }
994}
995
996static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
997{
998 struct nvmap_carveout_node *node = s->private;
999 struct nvmap_carveout_commit *commit;
1000 unsigned long flags;
1001 unsigned int total = 0;
1002
1003 spin_lock_irqsave(&node->clients_lock, flags);
1004 seq_printf(s, "%-18s %18s %8s %10s %8s\n", "CLIENT", "PROCESS", "PID",
1005 "SIZE", "FLAGS");
1006 seq_printf(s, "%-18s %18s %8s %10s\n", "", "",
1007 "BASE", "SIZE");
1008 list_for_each_entry(commit, &node->clients, list) {
1009 struct nvmap_client *client =
1010 get_client_from_carveout_commit(node, commit);
1011 client_stringify(client, s);
1012 seq_printf(s, " %10u\n", commit->commit);
1013 allocations_stringify(client, s);
1014 seq_printf(s, "\n");
1015 total += commit->commit;
1016 }
1017 seq_printf(s, "%-18s %-18s %8u %10u\n", "total", "", 0, total);
1018 spin_unlock_irqrestore(&node->clients_lock, flags);
1019
1020 return 0;
1021}
1022
1023static int nvmap_debug_allocations_open(struct inode *inode, struct file *file)
1024{
1025 return single_open(file, nvmap_debug_allocations_show,
1026 inode->i_private);
1027}
1028
1029static const struct file_operations debug_allocations_fops = {
1030 .open = nvmap_debug_allocations_open,
1031 .read = seq_read,
1032 .llseek = seq_lseek,
1033 .release = single_release,
1034};
1035
1036static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
1037{
1038 struct nvmap_carveout_node *node = s->private;
1039 struct nvmap_carveout_commit *commit;
1040 unsigned long flags;
1041 unsigned int total = 0;
1042
1043 spin_lock_irqsave(&node->clients_lock, flags);
1044 seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
1045 "SIZE");
1046 list_for_each_entry(commit, &node->clients, list) {
1047 struct nvmap_client *client =
1048 get_client_from_carveout_commit(node, commit);
1049 client_stringify(client, s);
1050 seq_printf(s, " %10u\n", commit->commit);
1051 total += commit->commit;
1052 }
1053 seq_printf(s, "%-18s %18s %8u %10u\n", "total", "", 0, total);
1054 spin_unlock_irqrestore(&node->clients_lock, flags);
1055
1056 return 0;
1057}
1058
1059static int nvmap_debug_clients_open(struct inode *inode, struct file *file)
1060{
1061 return single_open(file, nvmap_debug_clients_show, inode->i_private);
1062}
1063
1064static const struct file_operations debug_clients_fops = {
1065 .open = nvmap_debug_clients_open,
1066 .read = seq_read,
1067 .llseek = seq_lseek,
1068 .release = single_release,
1069};
1070
1071static int nvmap_debug_iovmm_clients_show(struct seq_file *s, void *unused)
1072{
1073 unsigned long flags;
1074 unsigned int total = 0;
1075 struct nvmap_client *client;
1076 struct nvmap_device *dev = s->private;
1077
1078 spin_lock_irqsave(&dev->clients_lock, flags);
1079 seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
1080 "SIZE");
1081 list_for_each_entry(client, &dev->clients, list) {
1082 client_stringify(client, s);
1083 seq_printf(s, " %10u\n", atomic_read(&client->iovm_commit));
1084 total += atomic_read(&client->iovm_commit);
1085 }
1086 seq_printf(s, "%-18s %18s %8u %10u\n", "total", "", 0, total);
1087 spin_unlock_irqrestore(&dev->clients_lock, flags);
1088
1089 return 0;
1090}
1091
1092static int nvmap_debug_iovmm_clients_open(struct inode *inode,
1093 struct file *file)
1094{
1095 return single_open(file, nvmap_debug_iovmm_clients_show,
1096 inode->i_private);
1097}
1098
1099static const struct file_operations debug_iovmm_clients_fops = {
1100 .open = nvmap_debug_iovmm_clients_open,
1101 .read = seq_read,
1102 .llseek = seq_lseek,
1103 .release = single_release,
1104};
1105
1106static int nvmap_debug_iovmm_allocations_show(struct seq_file *s, void *unused)
1107{
1108 unsigned long flags;
1109 unsigned int total = 0;
1110 struct nvmap_client *client;
1111 struct nvmap_device *dev = s->private;
1112
1113 spin_lock_irqsave(&dev->clients_lock, flags);
1114 seq_printf(s, "%-18s %18s %8s %10s\n", "CLIENT", "PROCESS", "PID",
1115 "SIZE");
1116 seq_printf(s, "%-18s %18s %8s %10s\n", "", "",
1117 "BASE", "SIZE");
1118 list_for_each_entry(client, &dev->clients, list) {
1119 client_stringify(client, s);
1120 seq_printf(s, " %10u\n", atomic_read(&client->iovm_commit));
1121 allocations_stringify(client, s);
1122 seq_printf(s, "\n");
1123 total += atomic_read(&client->iovm_commit);
1124 }
1125 seq_printf(s, "%-18s %-18s %8u %10u\n", "total", "", 0, total);
1126 spin_unlock_irqrestore(&dev->clients_lock, flags);
1127
1128 return 0;
1129}
1130
1131static int nvmap_debug_iovmm_allocations_open(struct inode *inode,
1132 struct file *file)
1133{
1134 return single_open(file, nvmap_debug_iovmm_allocations_show,
1135 inode->i_private);
1136}
1137
1138static const struct file_operations debug_iovmm_allocations_fops = {
1139 .open = nvmap_debug_iovmm_allocations_open,
1140 .read = seq_read,
1141 .llseek = seq_lseek,
1142 .release = single_release,
1143};
1144
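/* probe: registers the user ("nvmap") and superuser ("knvmap") misc devices,
 * reserves a kernel VM region backed by NVMAP_NUM_PTES page table entries for
 * temporary mappings, creates the carveout heaps described by the platform
 * data, and exposes per-heap and iovmm statistics through debugfs */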
1145static int nvmap_probe(struct platform_device *pdev)
1146{
1147 struct nvmap_platform_data *plat = pdev->dev.platform_data;
1148 struct nvmap_device *dev;
1149 struct dentry *nvmap_debug_root;
1150 unsigned int i;
1151 int e;
1152
1153 if (!plat) {
1154 dev_err(&pdev->dev, "no platform data?\n");
1155 return -ENODEV;
1156 }
1157
1158 if (WARN_ON(nvmap_dev != NULL)) {
1159 dev_err(&pdev->dev, "only one nvmap device may be present\n");
1160 return -ENODEV;
1161 }
1162
1163 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1164 if (!dev) {
1165 dev_err(&pdev->dev, "out of memory for device\n");
1166 return -ENOMEM;
1167 }
1168
1169 dev->dev_user.minor = MISC_DYNAMIC_MINOR;
1170 dev->dev_user.name = "nvmap";
1171 dev->dev_user.fops = &nvmap_user_fops;
1172 dev->dev_user.parent = &pdev->dev;
1173
1174 dev->dev_super.minor = MISC_DYNAMIC_MINOR;
1175 dev->dev_super.name = "knvmap";
1176 dev->dev_super.fops = &nvmap_super_fops;
1177 dev->dev_super.parent = &pdev->dev;
1178
1179 dev->handles = RB_ROOT;
1180
1181 init_waitqueue_head(&dev->pte_wait);
1182
1183 init_waitqueue_head(&dev->iovmm_master.pin_wait);
1184 mutex_init(&dev->iovmm_master.pin_lock);
1185 for (i = 0; i < NVMAP_NUM_POOLS; i++)
1186 nvmap_page_pool_init(&dev->iovmm_master.pools[i], i);
1187
1188 dev->iovmm_master.iovmm =
1189 tegra_iovmm_alloc_client(dev_name(&pdev->dev), NULL,
1190 &(dev->dev_user));
1191#ifdef CONFIG_TEGRA_IOVMM
1192 if (!dev->iovmm_master.iovmm) {
1193 e = PTR_ERR(dev->iovmm_master.iovmm);
1194 dev_err(&pdev->dev, "couldn't create iovmm client\n");
1195 goto fail;
1196 }
1197#endif
1198 dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE);
1199 if (!dev->vm_rgn) {
1200 e = -ENOMEM;
1201 dev_err(&pdev->dev, "couldn't allocate remapping region\n");
1202 goto fail;
1203 }
1204 e = nvmap_mru_init(&dev->iovmm_master);
1205 if (e) {
1206 dev_err(&pdev->dev, "couldn't initialize MRU lists\n");
1207 goto fail;
1208 }
1209
1210 spin_lock_init(&dev->ptelock);
1211 spin_lock_init(&dev->handle_lock);
1212 INIT_LIST_HEAD(&dev->clients);
1213 spin_lock_init(&dev->clients_lock);
1214
1215 for (i = 0; i < NVMAP_NUM_PTES; i++) {
1216 unsigned long addr;
1217 pgd_t *pgd;
1218 pud_t *pud;
1219 pmd_t *pmd;
1220
1221 addr = (unsigned long)dev->vm_rgn->addr + (i * PAGE_SIZE);
1222 pgd = pgd_offset_k(addr);
1223 pud = pud_alloc(&init_mm, pgd, addr);
1224 if (!pud) {
1225 e = -ENOMEM;
1226 dev_err(&pdev->dev, "couldn't allocate page tables\n");
1227 goto fail;
1228 }
1229 pmd = pmd_alloc(&init_mm, pud, addr);
1230 if (!pmd) {
1231 e = -ENOMEM;
1232 dev_err(&pdev->dev, "couldn't allocate page tables\n");
1233 goto fail;
1234 }
1235 dev->ptes[i] = pte_alloc_kernel(pmd, addr);
1236 if (!dev->ptes[i]) {
1237 e = -ENOMEM;
1238 dev_err(&pdev->dev, "couldn't allocate page tables\n");
1239 goto fail;
1240 }
1241 }
1242
1243 e = misc_register(&dev->dev_user);
1244 if (e) {
1245 dev_err(&pdev->dev, "unable to register miscdevice %s\n",
1246 dev->dev_user.name);
1247 goto fail;
1248 }
1249
1250 e = misc_register(&dev->dev_super);
1251 if (e) {
1252 dev_err(&pdev->dev, "unable to register miscdevice %s\n",
1253 dev->dev_super.name);
1254 goto fail;
1255 }
1256
1257 dev->nr_carveouts = 0;
1258 dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
1259 plat->nr_carveouts, GFP_KERNEL);
1260 if (!dev->heaps) {
1261 e = -ENOMEM;
1262 dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
1263 goto fail;
1264 }
1265
1266 nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
1267 if (IS_ERR_OR_NULL(nvmap_debug_root))
1268 dev_err(&pdev->dev, "couldn't create debug files\n");
1269
1270 for (i = 0; i < plat->nr_carveouts; i++) {
1271 struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
1272 const struct nvmap_platform_carveout *co = &plat->carveouts[i];
1273 if (!co->size)
1274 continue;
1275 node->carveout = nvmap_heap_create(dev->dev_user.this_device,
1276 co->name, co->base, co->size,
1277 co->buddy_size, node);
1278 if (!node->carveout) {
1279 e = -ENOMEM;
1280 dev_err(&pdev->dev, "couldn't create %s\n", co->name);
1281 goto fail_heaps;
1282 }
1283 node->index = dev->nr_carveouts;
1284 dev->nr_carveouts++;
1285 spin_lock_init(&node->clients_lock);
1286 INIT_LIST_HEAD(&node->clients);
1287 node->heap_bit = co->usage_mask;
1288 if (nvmap_heap_create_group(node->carveout,
1289 &heap_extra_attr_group))
1290 dev_warn(&pdev->dev, "couldn't add extra attributes\n");
1291
1292 dev_info(&pdev->dev, "created carveout %s (%uKiB)\n",
1293 co->name, co->size / 1024);
1294
1295 if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1296 struct dentry *heap_root =
1297 debugfs_create_dir(co->name, nvmap_debug_root);
1298 if (!IS_ERR_OR_NULL(heap_root)) {
1299 debugfs_create_file("clients", 0664, heap_root,
1300 node, &debug_clients_fops);
1301 debugfs_create_file("allocations", 0664,
1302 heap_root, node, &debug_allocations_fops);
1303 }
1304 }
1305 }
1306 if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1307 struct dentry *iovmm_root =
1308 debugfs_create_dir("iovmm", nvmap_debug_root);
1309 if (!IS_ERR_OR_NULL(iovmm_root)) {
1310 debugfs_create_file("clients", 0664, iovmm_root,
1311 dev, &debug_iovmm_clients_fops);
1312 debugfs_create_file("allocations", 0664, iovmm_root,
1313 dev, &debug_iovmm_allocations_fops);
1314 for (i = 0; i < NVMAP_NUM_POOLS; i++) {
1315 char name[40];
1316 char *memtype_string[] = {"uc", "wc",
1317 "iwb", "wb"};
1318 sprintf(name, "%s_page_pool_available_pages",
1319 memtype_string[i]);
1320 debugfs_create_u32(name, S_IRUGO|S_IWUSR,
1321 iovmm_root,
1322 &dev->iovmm_master.pools[i].npages);
1323 }
1324 }
1325 }
1326
1327 platform_set_drvdata(pdev, dev);
1328 nvmap_dev = dev;
1329
1330 return 0;
1331fail_heaps:
1332 for (i = 0; i < dev->nr_carveouts; i++) {
1333 struct nvmap_carveout_node *node = &dev->heaps[i];
1334 nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
1335 nvmap_heap_destroy(node->carveout);
1336 }
1337fail:
1338 kfree(dev->heaps);
1339 nvmap_mru_destroy(&dev->iovmm_master);
1340 if (dev->dev_super.minor != MISC_DYNAMIC_MINOR)
1341 misc_deregister(&dev->dev_super);
1342 if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
1343 misc_deregister(&dev->dev_user);
1344 if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
1345 tegra_iovmm_free_client(dev->iovmm_master.iovmm);
1346 if (dev->vm_rgn)
1347 free_vm_area(dev->vm_rgn);
1348 kfree(dev);
1349 nvmap_dev = NULL;
1350 return e;
1351}
1352
1353static int nvmap_remove(struct platform_device *pdev)
1354{
1355 struct nvmap_device *dev = platform_get_drvdata(pdev);
1356 struct rb_node *n;
1357 struct nvmap_handle *h;
1358 int i;
1359
1360 misc_deregister(&dev->dev_super);
1361 misc_deregister(&dev->dev_user);
1362
1363 while ((n = rb_first(&dev->handles))) {
1364 h = rb_entry(n, struct nvmap_handle, node);
1365 rb_erase(&h->node, &dev->handles);
1366 kfree(h);
1367 }
1368
1369 if (!IS_ERR_OR_NULL(dev->iovmm_master.iovmm))
1370 tegra_iovmm_free_client(dev->iovmm_master.iovmm);
1371
1372 nvmap_mru_destroy(&dev->iovmm_master);
1373
1374 for (i = 0; i < dev->nr_carveouts; i++) {
1375 struct nvmap_carveout_node *node = &dev->heaps[i];
1376 nvmap_heap_remove_group(node->carveout, &heap_extra_attr_group);
1377 nvmap_heap_destroy(node->carveout);
1378 }
1379 kfree(dev->heaps);
1380
1381 free_vm_area(dev->vm_rgn);
1382 kfree(dev);
1383 nvmap_dev = NULL;
1384 return 0;
1385}
1386
1387static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
1388{
1389 return 0;
1390}
1391
1392static int nvmap_resume(struct platform_device *pdev)
1393{
1394 return 0;
1395}
1396
1397static struct platform_driver nvmap_driver = {
1398 .probe = nvmap_probe,
1399 .remove = nvmap_remove,
1400 .suspend = nvmap_suspend,
1401 .resume = nvmap_resume,
1402
1403 .driver = {
1404 .name = "tegra-nvmap",
1405 .owner = THIS_MODULE,
1406 },
1407};
1408
1409static int __init nvmap_init_driver(void)
1410{
1411 int e;
1412
1413 nvmap_dev = NULL;
1414
1415 e = nvmap_heap_init();
1416 if (e)
1417 goto fail;
1418
1419 e = platform_driver_register(&nvmap_driver);
1420 if (e) {
1421 nvmap_heap_deinit();
1422 goto fail;
1423 }
1424
1425fail:
1426 return e;
1427}
1428fs_initcall(nvmap_init_driver);
1429
1430static void __exit nvmap_exit_driver(void)
1431{
1432 platform_driver_unregister(&nvmap_driver);
1433 nvmap_heap_deinit();
1434 nvmap_dev = NULL;
1435}
1436module_exit(nvmap_exit_driver);