path: root/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
author	Terje Bergstrom <tbergstrom@nvidia.com>	2018-07-10 12:54:10 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-07-12 23:44:04 -0400
commit	b07a304ba3e747c80fe3e0a16caec88c8e1e8b28 (patch)
tree	7c800fa3ae75c1d33f806b4e1d6f522173801f44	/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
parent	96d4842c0dbae051258408480b981ed034163c13 (diff)
gpu: nvgpu: Use HAL for calls from MM to FB
mm_gv11b.c has several direct calls to fb_gv11b.h. Redirect them to go via a HAL. Also make sure the HALs use parameters with the correct signedness, and prefix the parameter constants with NVGPU_FB_MMU_.

MMU buffer table indices were also defined in fb_gv11b.h, even though the tables themselves are defined in include/nvgpu/mm.h. Move the indices to include/nvgpu/mm.h and prefix them with NVGPU_MM_MMU_.

JIRA NVGPU-714

Change-Id: Ieeae7c5664b8f53f8313cfad0a771d14637caa08
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1776131
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
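The change below swaps direct gv11b_fb_* calls for calls through the per-chip ops table (g->ops.fb.*). The following is a minimal standalone sketch of that HAL-indirection pattern, not the actual nvgpu code: the names fake_gpu, fb_ops, gv11b_init_hal and the *_impl functions are illustrative stand-ins for nvgpu's much larger gk20a/gpu_ops structures, and the real pointer wiring happens in the chip's HAL init code.

/*
 * Sketch of the HAL pattern: common code calls through a function-pointer
 * table; chip-specific code fills the table in at init time.
 * All names here are illustrative, not real nvgpu identifiers.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_gpu;

/* Per-chip FB operations, a stand-in for g->ops.fb. */
struct fb_ops {
	bool (*mmu_fault_pending)(struct fake_gpu *g);
	void (*fault_buf_configure_hw)(struct fake_gpu *g, unsigned int index);
};

struct fake_gpu {
	struct fb_ops fb;
};

/* gv11b-style implementations; common code never calls these directly. */
static bool gv11b_fb_mmu_fault_pending_impl(struct fake_gpu *g)
{
	(void)g;
	return false;
}

static void gv11b_fb_fault_buf_configure_hw_impl(struct fake_gpu *g,
						 unsigned int index)
{
	(void)g;
	printf("configure fault buffer %u\n", index);
}

/* Chip-specific HAL init wires the pointers; callers only see g->fb.* */
static void gv11b_init_hal(struct fake_gpu *g)
{
	g->fb.mmu_fault_pending = gv11b_fb_mmu_fault_pending_impl;
	g->fb.fault_buf_configure_hw = gv11b_fb_fault_buf_configure_hw_impl;
}

int main(void)
{
	struct fake_gpu g = { 0 };

	gv11b_init_hal(&g);
	/* Mirrors the patched call sites: always go through the ops table. */
	if (!g.fb.mmu_fault_pending(&g))
		g.fb.fault_buf_configure_hw(&g, 0U);
	return 0;
}

With the indirection in place, mm_gv11b.c no longer needs fb_gv11b.h or the gv11b FB hardware header (both #includes are dropped in the diff), and another chip can supply its own fb callbacks without touching the common MM code.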
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/mm_gv11b.c')
-rw-r--r--  drivers/gpu/nvgpu/gv11b/mm_gv11b.c | 46
1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
index 394ff0ed..c7556394 100644
--- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
@@ -35,10 +35,8 @@
 #include "gp10b/mc_gp10b.h"
 
 #include "mm_gv11b.h"
-#include "fb_gv11b.h"
 #include "subctx_gv11b.h"
 
-#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
 #include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
 
 #define NVGPU_L3_ALLOC_BIT BIT(36)
@@ -66,7 +64,7 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
 
 bool gv11b_mm_mmu_fault_pending(struct gk20a *g)
 {
-	return gv11b_fb_mmu_fault_pending(g);
+	return g->ops.fb.mmu_fault_pending(g);
 }
 
 void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
@@ -79,23 +77,27 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 
 	g->ops.fb.disable_hub_intr(g);
 
-	if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) {
-		gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
-				FAULT_BUF_DISABLED);
+	if ((g->ops.fb.is_fault_buf_enabled(g,
+			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX))) {
+		g->ops.fb.fault_buf_set_state_hw(g,
+			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX,
+			NVGPU_FB_MMU_FAULT_BUF_DISABLED);
 	}
 
-	if ((gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))) {
-		gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX,
-				FAULT_BUF_DISABLED);
+	if ((g->ops.fb.is_fault_buf_enabled(g,
+			NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX))) {
+		g->ops.fb.fault_buf_set_state_hw(g,
+			NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX,
+			NVGPU_FB_MMU_FAULT_BUF_DISABLED);
 	}
 
 	if (nvgpu_mem_is_valid(
-		&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]))
+		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]))
 		nvgpu_dma_unmap_free(vm,
-			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
-	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]))
+			&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]);
+	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]))
 		nvgpu_dma_unmap_free(vm,
-			&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
+			&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]);
 
 	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
 	nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
@@ -117,10 +119,10 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 		gmmu_fault_buf_size_v();
 
 	if (!nvgpu_mem_is_valid(
-		&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY])) {
+		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) {
 
 		err = nvgpu_dma_alloc_map_sys(vm, fb_size,
-			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
+			&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]);
 		if (err) {
 			nvgpu_err(g,
 				"Error in hw mmu fault buf [0] alloc in bar2 vm ");
@@ -130,9 +132,9 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 	}
 
 	if (!nvgpu_mem_is_valid(
-		&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY])) {
+		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) {
 		err = nvgpu_dma_alloc_map_sys(vm, fb_size,
-			&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
+			&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]);
 		if (err) {
 			nvgpu_err(g,
 				"Error in hw mmu fault buf [1] alloc in bar2 vm ");
@@ -145,10 +147,12 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
 {
 	if (nvgpu_mem_is_valid(
-		&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]))
-		gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX);
-	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]))
-		gv11b_fb_fault_buf_configure_hw(g, REPLAY_REG_INDEX);
+		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]))
+		g->ops.fb.fault_buf_configure_hw(g,
+			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
+	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]))
+		g->ops.fb.fault_buf_configure_hw(g,
+			NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
 }
 
 static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)