diff options
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/vm.c | 129 |
1 files changed, 129 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c new file mode 100644 index 00000000..eaf30fd0 --- /dev/null +++ b/drivers/gpu/nvgpu/common/mm/vm.c | |||
@@ -0,0 +1,129 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | */ | ||
16 | |||
17 | #include <nvgpu/vm.h> | ||
18 | #include <nvgpu/lock.h> | ||
19 | #include <nvgpu/list.h> | ||
20 | #include <nvgpu/rbtree.h> | ||
21 | #include <nvgpu/semaphore.h> | ||
22 | |||
23 | #include "gk20a/gk20a.h" | ||
24 | #include "gk20a/mm_gk20a.h" | ||
25 | |||
26 | void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch) | ||
27 | { | ||
28 | memset(mapping_batch, 0, sizeof(*mapping_batch)); | ||
29 | mapping_batch->gpu_l2_flushed = false; | ||
30 | mapping_batch->need_tlb_invalidate = false; | ||
31 | } | ||
32 | |||
33 | void nvgpu_vm_mapping_batch_finish_locked( | ||
34 | struct vm_gk20a *vm, struct vm_gk20a_mapping_batch *mapping_batch) | ||
35 | { | ||
36 | /* hanging kref_put batch pointer? */ | ||
37 | WARN_ON(vm->kref_put_batch == mapping_batch); | ||
38 | |||
39 | if (mapping_batch->need_tlb_invalidate) { | ||
40 | struct gk20a *g = gk20a_from_vm(vm); | ||
41 | g->ops.fb.tlb_invalidate(g, &vm->pdb.mem); | ||
42 | } | ||
43 | } | ||
44 | |||
/*
 * Finish a mapping batch: take the VM's update_gmmu_lock and delegate to
 * the _locked variant, which performs the deferred TLB invalidate if one
 * is pending.  Use this from paths that do not already hold the lock.
 */
void nvgpu_vm_mapping_batch_finish(struct vm_gk20a *vm,
	struct vm_gk20a_mapping_batch *mapping_batch)
{
	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
	nvgpu_vm_mapping_batch_finish_locked(vm, mapping_batch);
	nvgpu_mutex_release(&vm->update_gmmu_lock);
}
52 | |||
/*
 * Tear down everything owned by @vm — the semaphore pool mapping, all
 * mapped buffers, all reserved VA areas and (via nvgpu_deinit_vm()) the
 * remaining VM state — WITHOUT freeing the vm_gk20a structure itself.
 * Callers that own the allocation free it separately (see
 * nvgpu_vm_remove_support()).
 */
void nvgpu_vm_remove_support_nofree(struct vm_gk20a *vm)
{
	struct mapped_buffer_node *mapped_buffer;
	struct vm_reserved_va_node *va_node, *va_node_tmp;
	struct nvgpu_rbtree_node *node = NULL;
	struct gk20a *g = vm->mm->g;

	gk20a_dbg_fn("");

	/*
	 * Do this outside of the update_gmmu_lock since unmapping the semaphore
	 * pool involves unmapping a GMMU mapping which means acquiring the
	 * update_gmmu_lock.
	 */
	if (!(g->gpu_characteristics.flags & NVGPU_GPU_FLAGS_HAS_SYNCPOINTS)) {
		if (vm->sema_pool) {
			nvgpu_semaphore_pool_unmap(vm->sema_pool, vm);
			nvgpu_semaphore_pool_put(vm->sema_pool);
		}
	}

	nvgpu_mutex_acquire(&vm->update_gmmu_lock);

	/*
	 * Unmap every remaining buffer.  Each unmap removes its node from the
	 * rbtree, so restart the enumeration from the beginning after every
	 * iteration instead of advancing through a tree that just changed.
	 */
	nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
	while (node) {
		mapped_buffer = mapped_buffer_from_rbtree_node(node);
		nvgpu_vm_unmap_locked(mapped_buffer, NULL);
		nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
	}

	/* destroy remaining reserved memory areas */
	nvgpu_list_for_each_entry_safe(va_node, va_node_tmp,
			&vm->reserved_va_list,
			vm_reserved_va_node, reserved_va_list) {
		nvgpu_list_del(&va_node->reserved_va_list);
		nvgpu_kfree(vm->mm->g, va_node);
	}

	nvgpu_deinit_vm(vm);

	nvgpu_mutex_release(&vm->update_gmmu_lock);
}
95 | |||
96 | void nvgpu_vm_remove_support(struct vm_gk20a *vm) | ||
97 | { | ||
98 | nvgpu_vm_remove_support_nofree(vm); | ||
99 | /* vm is not used anymore. release it. */ | ||
100 | nvgpu_kfree(vm->mm->g, vm); | ||
101 | } | ||
102 | |||
103 | static void nvgpu_vm_remove_support_kref(struct kref *ref) | ||
104 | { | ||
105 | struct vm_gk20a *vm = container_of(ref, struct vm_gk20a, ref); | ||
106 | struct gk20a *g = gk20a_from_vm(vm); | ||
107 | |||
108 | g->ops.mm.vm_remove(vm); | ||
109 | } | ||
110 | |||
/*
 * Take a reference on @vm.  Balanced by nvgpu_vm_put().
 */
void nvgpu_vm_get(struct vm_gk20a *vm)
{
	kref_get(&vm->ref);
}
115 | |||
/*
 * Drop a reference on @vm.  When the last reference goes away the VM is
 * torn down and freed through the HAL's vm_remove callback (see
 * nvgpu_vm_remove_support_kref()).
 */
void nvgpu_vm_put(struct vm_gk20a *vm)
{
	kref_put(&vm->ref, nvgpu_vm_remove_support_kref);
}
120 | |||
121 | void gk20a_remove_vm(struct vm_gk20a *vm, struct nvgpu_mem *inst_block) | ||
122 | { | ||
123 | struct gk20a *g = vm->mm->g; | ||
124 | |||
125 | gk20a_dbg_fn(""); | ||
126 | |||
127 | gk20a_free_inst_block(g, inst_block); | ||
128 | nvgpu_vm_remove_support_nofree(vm); | ||
129 | } | ||