path: root/drivers/gpu/nvgpu/common/mm/vm.c
author:    Alex Waterman <alexw@nvidia.com>  2017-04-24 18:26:00 -0400
committer: mobile promotions <svcmobile_promotions@nvidia.com>  2017-05-19 18:34:06 -0400
commit:    014ace5a85f274de7debb4c6168d69c803445e19
tree:      4028be3294b95e38659f1ebba4a14457748e59f1  /drivers/gpu/nvgpu/common/mm/vm.c
parent:    d37e8f7dcf190f31f9c0c12583db2bb0c0d313c0
gpu: nvgpu: Split VM implementation out
This patch begins splitting the VM implementation out of mm_gk20a.c and moves it to common/linux/vm.c and common/mm/vm.c. This split is necessary because the VM code has two portions: first, an interface for the OS-specific code to use (i.e. userspace mappings), and second, a set of APIs for the driver to use (init, cleanup, etc.) which are not OS-specific. This is only the beginning of the split - there is still a lot that needs to be carefully moved around.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I3b57cba245d7daf9e4326a143b9c6217e0f28c96
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477743
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
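The shape of that split is easiest to see at a call site. The sketch below is illustrative only, not code from this patch: example_linux_map() is a hypothetical name, and the dma_buf step stands in for whatever OS-specific translation the Linux layer performs before handing off to the OS-independent helpers in common/mm/vm.c (such as nvgpu_insert_mapped_buf(), added below).

/*
 * Hypothetical sketch of the layering this patch begins; not code
 * from the patch. The Linux layer deals with the OS-specific object
 * (e.g. a dma_buf fd from userspace), then records the mapping via
 * the OS-independent helpers that live in common/mm/vm.c.
 */
static int example_linux_map(struct vm_gk20a *vm,
			     struct nvgpu_mapped_buf *mapped_buffer)
{
	/*
	 * common/linux/vm.c: fd -> dma_buf -> sgt translation and the
	 * actual GMMU programming would happen around here; that half
	 * of the code is inherently OS-specific.
	 */

	/* common/mm/vm.c: OS-independent bookkeeping of the mapping. */
	return nvgpu_insert_mapped_buf(vm, mapped_buffer);
}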
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vm.c | 65
1 file changed, 63 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index eaf30fd0..635ac0fb 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -23,6 +23,11 @@
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
 
+int vm_aspace_id(struct vm_gk20a *vm)
+{
+	return vm->as_share ? vm->as_share->id : -1;
+}
+
 void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *mapping_batch)
 {
 	memset(mapping_batch, 0, sizeof(*mapping_batch));
@@ -52,7 +57,7 @@ void nvgpu_vm_mapping_batch_finish(struct vm_gk20a *vm,
 
 void nvgpu_vm_remove_support_nofree(struct vm_gk20a *vm)
 {
-	struct mapped_buffer_node *mapped_buffer;
+	struct nvgpu_mapped_buf *mapped_buffer;
 	struct vm_reserved_va_node *va_node, *va_node_tmp;
 	struct nvgpu_rbtree_node *node = NULL;
 	struct gk20a *g = vm->mm->g;
@@ -118,7 +123,7 @@ void nvgpu_vm_put(struct vm_gk20a *vm)
 	kref_put(&vm->ref, nvgpu_vm_remove_support_kref);
 }
 
-void gk20a_remove_vm(struct vm_gk20a *vm, struct nvgpu_mem *inst_block)
+void nvgpu_remove_vm(struct vm_gk20a *vm, struct nvgpu_mem *inst_block)
 {
 	struct gk20a *g = vm->mm->g;
 
@@ -127,3 +132,59 @@ void gk20a_remove_vm(struct vm_gk20a *vm, struct nvgpu_mem *inst_block)
 	gk20a_free_inst_block(g, inst_block);
 	nvgpu_vm_remove_support_nofree(vm);
 }
+
+int nvgpu_insert_mapped_buf(struct vm_gk20a *vm,
+			    struct nvgpu_mapped_buf *mapped_buffer)
+{
+	mapped_buffer->node.key_start = mapped_buffer->addr;
+	mapped_buffer->node.key_end = mapped_buffer->addr + mapped_buffer->size;
+
+	nvgpu_rbtree_insert(&mapped_buffer->node, &vm->mapped_buffers);
+
+	return 0;
+}
+
+void nvgpu_remove_mapped_buf(struct vm_gk20a *vm,
+			     struct nvgpu_mapped_buf *mapped_buffer)
+{
+	nvgpu_rbtree_unlink(&mapped_buffer->node, &vm->mapped_buffers);
+}
+
+struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf(
+	struct vm_gk20a *vm, u64 addr)
+{
+	struct nvgpu_rbtree_node *node = NULL;
+	struct nvgpu_rbtree_node *root = vm->mapped_buffers;
+
+	nvgpu_rbtree_search(addr, &node, root);
+	if (!node)
+		return NULL;
+
+	return mapped_buffer_from_rbtree_node(node);
+}
+
+struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_range(
+	struct vm_gk20a *vm, u64 addr)
+{
+	struct nvgpu_rbtree_node *node = NULL;
+	struct nvgpu_rbtree_node *root = vm->mapped_buffers;
+
+	nvgpu_rbtree_range_search(addr, &node, root);
+	if (!node)
+		return NULL;
+
+	return mapped_buffer_from_rbtree_node(node);
+}
+
+struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_less_than(
+	struct vm_gk20a *vm, u64 addr)
+{
+	struct nvgpu_rbtree_node *node = NULL;
+	struct nvgpu_rbtree_node *root = vm->mapped_buffers;
+
+	nvgpu_rbtree_less_than_search(addr, &node, root);
+	if (!node)
+		return NULL;
+
+	return mapped_buffer_from_rbtree_node(node);
+}
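
Taken together, the new helpers give each VM an interval-keyed red-black tree of its mappings: nvgpu_insert_mapped_buf() keys every node on [addr, addr + size), and the three lookup variants then cover exact, in-range, and predecessor queries. A minimal usage sketch follows; it assumes, going by the function names, that nvgpu_rbtree_search() matches a node's key_start exactly while nvgpu_rbtree_range_search() matches any address inside [key_start, key_end), and it leaves out the locking that would normally guard vm->mapped_buffers.

/*
 * Minimal sketch, not driver code: classify a GPU virtual address
 * against the VM's mapped-buffer tree using the lookups added above.
 * Assumes exact search keys on a mapping's base address and range
 * search matches anywhere inside the mapping; locking of
 * vm->mapped_buffers is omitted.
 */
static bool addr_is_mapped(struct vm_gk20a *vm, u64 addr)
{
	/* Exact hit: addr is the base address of some mapping. */
	if (__nvgpu_vm_find_mapped_buf(vm, addr))
		return true;

	/* Interval hit: addr falls somewhere inside a mapping. */
	return __nvgpu_vm_find_mapped_buf_range(vm, addr) != NULL;
}

The less-than variant would similarly return the nearest mapping below a given address, which is the kind of query a VA-space scan needs; how the driver actually uses it is outside this patch.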