From 57fb527a7e33384341fc18f1f918d5a8225057f5 Mon Sep 17 00:00:00 2001
From: Peter Daifuku
Date: Fri, 6 Oct 2017 16:27:14 -0700
Subject: gpu: nvgpu: vgpu: flatten out vgpu hal

Instead of calling the native HAL init function and then adding
multiple layers of modification for VGPU, flatten out the sequence
so that all entry points are set statically and visible in a single
file.

JIRA ESRM-30

Change-Id: Ie424abb48bce5038874851d399baac5e4bb7d27c
Signed-off-by: Peter Daifuku
Reviewed-on: https://git-master.nvidia.com/r/1574616
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/vgpu/mm_vgpu.c | 33 +++++++++------------------------
 1 file changed, 9 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 49517b9a..8dcca0a1 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -35,6 +35,7 @@
 #include

 #include "vgpu/vgpu.h"
+#include "vgpu/mm_vgpu.h"
 #include "gk20a/mm_gk20a.h"
 #include "gm20b/mm_gm20b.h"

@@ -85,7 +86,7 @@ int vgpu_init_mm_support(struct gk20a *g)
 	return err;
 }

-static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
+u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 			u64 map_offset,
 			struct nvgpu_sgt *sgt,
 			u64 buffer_offset,
@@ -171,7 +172,7 @@ fail:
 	return 0;
 }

-static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
+void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
 			u64 vaddr,
 			u64 size,
 			int pgsz_idx,
@@ -274,7 +275,7 @@ u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size)
 	return addr;
 }

-static int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
+int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
 			struct channel_gk20a *ch)
 {
 	struct vm_gk20a *vm = as_share->vm;
@@ -315,7 +316,7 @@ static void vgpu_cache_maint(u64 handle, u8 op)
 	WARN_ON(err || msg.ret);
 }

-static int vgpu_mm_fb_flush(struct gk20a *g)
+int vgpu_mm_fb_flush(struct gk20a *g)
 {
 	gk20a_dbg_fn("");

@@ -324,7 +325,7 @@ static int vgpu_mm_fb_flush(struct gk20a *g)
 	return 0;
 }

-static void vgpu_mm_l2_invalidate(struct gk20a *g)
+void vgpu_mm_l2_invalidate(struct gk20a *g)
 {
 	gk20a_dbg_fn("");

@@ -332,7 +333,7 @@ static void vgpu_mm_l2_invalidate(struct gk20a *g)
 	vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
 }

-static void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
+void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 {
 	u8 op;

@@ -346,14 +347,14 @@ static void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
 	vgpu_cache_maint(vgpu_get_handle(g), op);
 }

-static void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
+void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 {
 	gk20a_dbg_fn("");

 	nvgpu_err(g, "call to RM server not supported");
 }

-static void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
+void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
 {
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode;
@@ -367,19 +368,3 @@ static void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	WARN_ON(err || msg.ret);
 }
-
-void vgpu_init_mm_ops(struct gpu_ops *gops)
-{
-	gops->fb.is_debug_mode_enabled = NULL;
-	gops->fb.set_debug_mode = vgpu_mm_mmu_set_debug_mode;
-	gops->mm.gmmu_map = vgpu_locked_gmmu_map;
-	gops->mm.gmmu_unmap = vgpu_locked_gmmu_unmap;
-	gops->mm.vm_bind_channel = vgpu_vm_bind_channel;
-	gops->mm.fb_flush = vgpu_mm_fb_flush;
-	gops->mm.l2_invalidate = vgpu_mm_l2_invalidate;
-	gops->mm.l2_flush = vgpu_mm_l2_flush;
-	gops->fb.tlb_invalidate = vgpu_mm_tlb_invalidate;
-	gops->mm.get_iommu_bit = gk20a_mm_get_iommu_bit;
-	gops->mm.gpu_phys_addr = gm20b_gpu_phys_addr;
-	gops->mm.init_mm_setup_hw = NULL;
-}
--
cgit v1.2.2
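
For readers unfamiliar with the pattern this commit completes: the removed
vgpu_init_mm_ops() patched a gpu_ops table at runtime after a native init
function had already filled it, whereas a flattened HAL assigns every entry
point in one static initializer. Below is a minimal, self-contained sketch of
that "after" shape, using hypothetical types and function names (struct
mm_ops, vgpu_fb_flush, etc. are illustrations, not the actual nvgpu
structures):

```c
#include <stdio.h>

/* Hypothetical ops table, standing in for the relevant slice of gpu_ops. */
struct mm_ops {
	int  (*fb_flush)(void);
	void (*l2_flush)(int invalidate);
	void (*set_debug_mode)(int enable);
};

/* Stand-ins for the vgpu_* implementations that forward to the RM server. */
static int vgpu_fb_flush(void)
{
	puts("vgpu: fb_flush");
	return 0;
}

static void vgpu_l2_flush(int invalidate)
{
	printf("vgpu: l2_flush, invalidate=%d\n", invalidate);
}

static void vgpu_set_debug_mode(int enable)
{
	printf("vgpu: mmu debug mode %s\n", enable ? "on" : "off");
}

/*
 * Flattened HAL: the whole table is filled statically in one place, so
 * every entry point is visible in a single file -- no native init call
 * followed by layers of runtime overrides.
 */
static const struct mm_ops vgpu_mm_ops = {
	.fb_flush       = vgpu_fb_flush,
	.l2_flush       = vgpu_l2_flush,
	.set_debug_mode = vgpu_set_debug_mode,
};

int main(void)
{
	/* Callers dispatch through the table, never through layer-specific init. */
	vgpu_mm_ops.fb_flush();
	vgpu_mm_ops.l2_flush(1);
	vgpu_mm_ops.set_debug_mode(0);
	return 0;
}
```

This also explains why the diff drops `static` from the vgpu_* functions and
adds the vgpu/mm_vgpu.h include: the functions now need external linkage so a
flat ops table defined in another file can reference them by name.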