summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
diff options
context:
space:
mode:
authorPeter Daifuku <pdaifuku@nvidia.com>2017-10-06 19:27:14 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-10-13 18:20:18 -0400
commit57fb527a7e33384341fc18f1f918d5a8225057f5 (patch)
tree23bb49f879ac495834237c99564f0589d637f07e /drivers/gpu/nvgpu/vgpu/mm_vgpu.c
parent3d343c9eeaa3415851d1c71b8815eb7dc2677b5a (diff)
gpu: nvgpu: vgpu: flatten out vgpu hal
Instead of calling the native HAL init function then adding multiple layers of modification for VGPU, flatten out the sequence so that all entry points are set statically and visible in a single file. JIRA ESRM-30 Change-Id: Ie424abb48bce5038874851d399baac5e4bb7d27c Signed-off-by: Peter Daifuku <pdaifuku@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1574616 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/mm_vgpu.c')
-rw-r--r--drivers/gpu/nvgpu/vgpu/mm_vgpu.c33
1 file changed, 9 insertions, 24 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 49517b9a..8dcca0a1 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -35,6 +35,7 @@
35#include <nvgpu/linux/nvgpu_mem.h> 35#include <nvgpu/linux/nvgpu_mem.h>
36 36
37#include "vgpu/vgpu.h" 37#include "vgpu/vgpu.h"
38#include "vgpu/mm_vgpu.h"
38#include "gk20a/mm_gk20a.h" 39#include "gk20a/mm_gk20a.h"
39#include "gm20b/mm_gm20b.h" 40#include "gm20b/mm_gm20b.h"
40 41
@@ -85,7 +86,7 @@ int vgpu_init_mm_support(struct gk20a *g)
85 return err; 86 return err;
86} 87}
87 88
88static u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm, 89u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
89 u64 map_offset, 90 u64 map_offset,
90 struct nvgpu_sgt *sgt, 91 struct nvgpu_sgt *sgt,
91 u64 buffer_offset, 92 u64 buffer_offset,
@@ -171,7 +172,7 @@ fail:
171 return 0; 172 return 0;
172} 173}
173 174
174static void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm, 175void vgpu_locked_gmmu_unmap(struct vm_gk20a *vm,
175 u64 vaddr, 176 u64 vaddr,
176 u64 size, 177 u64 size,
177 int pgsz_idx, 178 int pgsz_idx,
@@ -274,7 +275,7 @@ u64 vgpu_bar1_map(struct gk20a *g, struct sg_table **sgt, u64 size)
274 return addr; 275 return addr;
275} 276}
276 277
277static int vgpu_vm_bind_channel(struct gk20a_as_share *as_share, 278int vgpu_vm_bind_channel(struct gk20a_as_share *as_share,
278 struct channel_gk20a *ch) 279 struct channel_gk20a *ch)
279{ 280{
280 struct vm_gk20a *vm = as_share->vm; 281 struct vm_gk20a *vm = as_share->vm;
@@ -315,7 +316,7 @@ static void vgpu_cache_maint(u64 handle, u8 op)
315 WARN_ON(err || msg.ret); 316 WARN_ON(err || msg.ret);
316} 317}
317 318
318static int vgpu_mm_fb_flush(struct gk20a *g) 319int vgpu_mm_fb_flush(struct gk20a *g)
319{ 320{
320 321
321 gk20a_dbg_fn(""); 322 gk20a_dbg_fn("");
@@ -324,7 +325,7 @@ static int vgpu_mm_fb_flush(struct gk20a *g)
324 return 0; 325 return 0;
325} 326}
326 327
327static void vgpu_mm_l2_invalidate(struct gk20a *g) 328void vgpu_mm_l2_invalidate(struct gk20a *g)
328{ 329{
329 330
330 gk20a_dbg_fn(""); 331 gk20a_dbg_fn("");
@@ -332,7 +333,7 @@ static void vgpu_mm_l2_invalidate(struct gk20a *g)
332 vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV); 333 vgpu_cache_maint(vgpu_get_handle(g), TEGRA_VGPU_L2_MAINT_INV);
333} 334}
334 335
335static void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate) 336void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
336{ 337{
337 u8 op; 338 u8 op;
338 339
@@ -346,14 +347,14 @@ static void vgpu_mm_l2_flush(struct gk20a *g, bool invalidate)
346 vgpu_cache_maint(vgpu_get_handle(g), op); 347 vgpu_cache_maint(vgpu_get_handle(g), op);
347} 348}
348 349
349static void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb) 350void vgpu_mm_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
350{ 351{
351 gk20a_dbg_fn(""); 352 gk20a_dbg_fn("");
352 353
353 nvgpu_err(g, "call to RM server not supported"); 354 nvgpu_err(g, "call to RM server not supported");
354} 355}
355 356
356static void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable) 357void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
357{ 358{
358 struct tegra_vgpu_cmd_msg msg; 359 struct tegra_vgpu_cmd_msg msg;
359 struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode; 360 struct tegra_vgpu_mmu_debug_mode *p = &msg.params.mmu_debug_mode;
@@ -367,19 +368,3 @@ static void vgpu_mm_mmu_set_debug_mode(struct gk20a *g, bool enable)
367 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); 368 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
368 WARN_ON(err || msg.ret); 369 WARN_ON(err || msg.ret);
369} 370}
370
371void vgpu_init_mm_ops(struct gpu_ops *gops)
372{
373 gops->fb.is_debug_mode_enabled = NULL;
374 gops->fb.set_debug_mode = vgpu_mm_mmu_set_debug_mode;
375 gops->mm.gmmu_map = vgpu_locked_gmmu_map;
376 gops->mm.gmmu_unmap = vgpu_locked_gmmu_unmap;
377 gops->mm.vm_bind_channel = vgpu_vm_bind_channel;
378 gops->mm.fb_flush = vgpu_mm_fb_flush;
379 gops->mm.l2_invalidate = vgpu_mm_l2_invalidate;
380 gops->mm.l2_flush = vgpu_mm_l2_flush;
381 gops->fb.tlb_invalidate = vgpu_mm_tlb_invalidate;
382 gops->mm.get_iommu_bit = gk20a_mm_get_iommu_bit;
383 gops->mm.gpu_phys_addr = gm20b_gpu_phys_addr;
384 gops->mm.init_mm_setup_hw = NULL;
385}