path: root/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
author	Alex Waterman <alexw@nvidia.com>	2017-05-05 18:00:23 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-05-26 06:33:57 -0400
commit	0bb47c3675d2030545d40353931e2b8120541de4 (patch)
tree	1a23b45c1ac19dbc98e1d4a585822eb47b7dfeb2 /drivers/gpu/nvgpu/vgpu/mm_vgpu.c
parent	fbafc7eba41ba7654dfdadf51a53acf1638e9fa1 (diff)
gpu: nvgpu: Add and use VM init/deinit APIs
Remove the VM init/de-init from the HAL and instead use a single set of
routines that init/de-init VMs. This prevents code divergence between
vGPUs and regular GPUs.

This patch also clears up the naming of the routines a little bit. Since
some VMs are used in place and others are dynamically allocated, the APIs
for freeing them were confusing. Also, some free calls also clean up an
instance block (this is API abuse - but this is how it currently exists).

The new API looks like this:

  void __nvgpu_vm_remove(struct vm_gk20a *vm);
  void nvgpu_vm_remove(struct vm_gk20a *vm);
  void nvgpu_vm_remove_inst(struct vm_gk20a *vm, struct nvgpu_mem *inst_block);
  void nvgpu_vm_remove_vgpu(struct vm_gk20a *vm);

  int nvgpu_init_vm(struct mm_gk20a *mm,
                    struct vm_gk20a *vm,
                    u32 big_page_size,
                    u64 low_hole,
                    u64 kernel_reserved,
                    u64 aperture_size,
                    bool big_pages,
                    bool userspace_managed,
                    char *name);

  void nvgpu_deinit_vm(struct vm_gk20a *vm);

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: Ia4016384c54746bfbcaa4bdd0d29d03d5d7f7f1b
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477747
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
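As a usage illustration only (not part of this change), a caller of the new
init/deinit pair might look roughly like the sketch below. The function name
example_setup_vm, the size values, and the error handling are assumptions for
the example, not taken from the patch:

	/* Hypothetical sketch of using the new VM init/deinit API. */
	static int example_setup_vm(struct gk20a *g, struct vm_gk20a *vm)
	{
		u64 low_hole = SZ_64K;			/* assumed layout values */
		u64 kernel_reserved = 16ULL << 30;
		u64 aperture_size = 128ULL << 30;
		int err;

		err = nvgpu_init_vm(&g->mm, vm,
				    SZ_64K,		/* big_page_size */
				    low_hole,
				    kernel_reserved,
				    aperture_size,
				    true,		/* big_pages */
				    false,		/* userspace_managed */
				    "example");
		if (err)
			return err;

		/* ... use the VM ... */

		nvgpu_deinit_vm(vm);
		return 0;
	}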
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/mm_vgpu.c')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/mm_vgpu.c	40
1 file changed, 3 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 63490aa5..db120d76 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -18,6 +18,7 @@
 #include <nvgpu/kmem.h>
 #include <nvgpu/dma.h>
 #include <nvgpu/bug.h>
+#include <nvgpu/vm.h>
 #include <nvgpu/vm_area.h>
 
 #include "vgpu/vgpu.h"
@@ -200,52 +201,18 @@ static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 	/* TLB invalidate handled on server side */
 }
 
-static void vgpu_vm_remove_support(struct vm_gk20a *vm)
+void nvgpu_vm_remove_vgpu(struct vm_gk20a *vm)
 {
-	struct gk20a *g = vm->mm->g;
-	struct nvgpu_mapped_buf *mapped_buffer;
-	struct nvgpu_vm_area *vm_area, *vm_area_tmp;
+	struct gk20a *g = gk20a_from_vm(vm);
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
-	struct nvgpu_rbtree_node *node = NULL;
 	int err;
 
-	gk20a_dbg_fn("");
-	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-
-	/* TBD: add a flag here for the unmap code to recognize teardown
-	 * and short-circuit any otherwise expensive operations. */
-
-	nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
-	while (node) {
-		mapped_buffer = mapped_buffer_from_rbtree_node(node);
-		nvgpu_vm_unmap_locked(mapped_buffer, NULL);
-		nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
-	}
-
-	/* destroy remaining reserved memory areas */
-	nvgpu_list_for_each_entry_safe(vm_area, vm_area_tmp,
-		&vm->vm_area_list,
-		nvgpu_vm_area, vm_area_list) {
-		nvgpu_list_del(&vm_area->vm_area_list);
-		nvgpu_kfree(g, vm_area);
-	}
-
 	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
 	msg.handle = vgpu_get_handle(g);
 	p->handle = vm->handle;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
-
-	if (nvgpu_alloc_initialized(&vm->kernel))
-		nvgpu_alloc_destroy(&vm->kernel);
-	if (nvgpu_alloc_initialized(&vm->user))
-		nvgpu_alloc_destroy(&vm->user);
-
-	nvgpu_mutex_release(&vm->update_gmmu_lock);
-
-	/* vm is not used anymore. release it. */
-	nvgpu_kfree(g, vm);
 }
 
 u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size)
@@ -534,7 +501,6 @@ void vgpu_init_mm_ops(struct gpu_ops *gops)
 	gops->fb.set_debug_mode = vgpu_mm_mmu_set_debug_mode;
 	gops->mm.gmmu_map = vgpu_locked_gmmu_map;
 	gops->mm.gmmu_unmap = vgpu_locked_gmmu_unmap;
-	gops->mm.vm_remove = vgpu_vm_remove_support;
 	gops->mm.vm_alloc_share = vgpu_vm_alloc_share;
 	gops->mm.vm_bind_channel = vgpu_vm_bind_channel;
 	gops->mm.fb_flush = vgpu_mm_fb_flush;
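For reference, the routine that results from this change (reconstructed from
the new side of the second hunk above) reduces to freeing the server-side AS
share; per the commit message, the mapped-buffer, vm_area, and allocator
teardown that was removed here is now handled by the shared VM removal
routines rather than by this vGPU-specific path:

	void nvgpu_vm_remove_vgpu(struct vm_gk20a *vm)
	{
		struct gk20a *g = gk20a_from_vm(vm);
		struct tegra_vgpu_cmd_msg msg;
		struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
		int err;

		/* Ask the vGPU server to release the address space share. */
		msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
		msg.handle = vgpu_get_handle(g);
		p->handle = vm->handle;
		err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
		WARN_ON(err || msg.ret);
	}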