author	Terje Bergstrom <tbergstrom@nvidia.com>	2018-07-03 14:20:52 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-07-06 00:49:00 -0400
commit	d7c78df466c487140af902b98f79ced367198e2d (patch)
tree	2c31600a00929030091e2ec3126084fc96121df5
parent	26783b85bfee4c2ef4f2ccc2d16ebc783dd0aa7d (diff)
gpu: nvgpu: Combine the fault buffer deinit seqs
gv11b_mm_fault_info_mem_destroy() and gv11b_mm_mmu_hw_fault_buf_deinit()
serve a similar purpose: both disable hub interrupts and deinitialize
memory related to MMU fault handling. Of the two, the latter was called
from BAR2 deinitialization, and the former from nvgpu_remove_mm_support().
Combine the functions and keep only the call from
nvgpu_remove_mm_support(). This way BAR2 deinitialization can be
combined with the gp10b version.

JIRA NVGPU-714

Change-Id: I4050865eaba404b049c621ac2ce54c963e1aea44
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1769627
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
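For orientation, here is the combined teardown sequence as it reads after
this patch, condensed from the mm_gv11b.c hunks below (the nvgpu_log_fn()
call is omitted for brevity; all names are taken from the diff):

	void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
	{
		struct vm_gk20a *vm = g->mm.bar2.vm;

		nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);

		/* Mask all hub interrupt sources before touching the buffers. */
		g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_OTHER |
				HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
		g->mm.hub_intr_types &= ~(HUB_INTR_TYPE_NONREPLAY |
				HUB_INTR_TYPE_REPLAY);

		/* Disable any fault buffer still enabled in hardware. */
		if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))
			gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
					FAULT_BUF_DISABLED);
		if (gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))
			gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX,
					FAULT_BUF_DISABLED);

		/* Unmap and free the DMA-backed fault buffers from the BAR2 VM. */
		if (nvgpu_mem_is_valid(
				&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]))
			nvgpu_dma_unmap_free(vm,
				&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
		if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]))
			nvgpu_dma_unmap_free(vm,
				&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);

		nvgpu_mutex_release(&g->mm.hub_isr_mutex);
		nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
	}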
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fb_gv11b.c    |  4
-rw-r--r--  drivers/gpu/nvgpu/gv11b/mm_gv11b.c    | 78
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/mm.h  |  2
3 files changed, 24 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
index d50885f9..39903405 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
@@ -1019,7 +1019,7 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
 	nvgpu_log(g, gpu_dbg_intr, "get ptr = %d", get_indx);
 
 	mem = &g->mm.hw_fault_buf[index];
-	mmfault = g->mm.fault_info[index];
+	mmfault = &g->mm.fault_info[index];
 
 	entries = gv11b_fb_fault_buffer_size_val(g, index);
 	nvgpu_log(g, gpu_dbg_intr, "buffer num entries = %d", entries);
@@ -1251,7 +1251,7 @@ void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
 	struct mmu_fault_info *mmfault;
 	u32 invalidate_replay_val = 0;
 
-	mmfault = g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY];
+	mmfault = &g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY];
 
 	gv11b_mm_copy_from_fault_snap_reg(g, fault_status, mmfault);
 
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
index 0cf08d4a..aa2e2cf1 100644
--- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
@@ -71,6 +71,8 @@ bool gv11b_mm_mmu_fault_pending(struct gk20a *g)
 
 void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 {
+	struct vm_gk20a *vm = g->mm.bar2.vm;
+
 	nvgpu_log_fn(g, " ");
 
 	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
@@ -78,10 +80,26 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_OTHER |
 			HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
 
-	nvgpu_kfree(g, g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY]);
+	g->mm.hub_intr_types &= (~(HUB_INTR_TYPE_NONREPLAY |
+			HUB_INTR_TYPE_REPLAY));
+
+	if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) {
+		gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
+				FAULT_BUF_DISABLED);
+	}
+
+	if ((gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))) {
+		gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX,
+				FAULT_BUF_DISABLED);
+	}
 
-	g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] = NULL;
-	g->mm.fault_info[FAULT_TYPE_REPLAY] = NULL;
+	if (nvgpu_mem_is_valid(
+			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]))
+		nvgpu_dma_unmap_free(vm,
+			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
+	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]))
+		nvgpu_dma_unmap_free(vm,
+			&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
 
 	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
 	nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
@@ -90,27 +108,6 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g,
 		u32 *hub_intr_types)
 {
-	struct mmu_fault_info *fault_info_mem;
-
-	if (g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] != NULL &&
-		g->mm.fault_info[FAULT_TYPE_REPLAY] != NULL) {
-		*hub_intr_types |= HUB_INTR_TYPE_OTHER;
-		return 0;
-	}
-
-	fault_info_mem = nvgpu_kzalloc(g, sizeof(struct mmu_fault_info) *
-			FAULT_TYPE_NUM);
-	if (!fault_info_mem) {
-		nvgpu_log_info(g, "failed to alloc shadow fault info");
-		return -ENOMEM;
-	}
-	/* shadow buffer for copying mmu fault info */
-	g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] =
-		&fault_info_mem[FAULT_TYPE_OTHER_AND_NONREPLAY];
-
-	g->mm.fault_info[FAULT_TYPE_REPLAY] =
-		&fault_info_mem[FAULT_TYPE_REPLAY];
-
 	*hub_intr_types |= HUB_INTR_TYPE_OTHER;
 	return 0;
 }
@@ -156,45 +153,12 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g,
 	*hub_intr_types |= HUB_INTR_TYPE_REPLAY;
 }
 
-static void gv11b_mm_mmu_hw_fault_buf_deinit(struct gk20a *g)
-{
-	struct vm_gk20a *vm = g->mm.bar2.vm;
-
-	nvgpu_log_fn(g, " ");
-
-	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_NONREPLAY |
-			HUB_INTR_TYPE_REPLAY);
-
-	g->mm.hub_intr_types &= (~(HUB_INTR_TYPE_NONREPLAY |
-			HUB_INTR_TYPE_REPLAY));
-
-	if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) {
-		gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
-				FAULT_BUF_DISABLED);
-	}
-
-	if ((gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))) {
-		gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX,
-				FAULT_BUF_DISABLED);
-	}
-
-	if (nvgpu_mem_is_valid(
-			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]))
-		nvgpu_dma_unmap_free(vm,
-			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
-	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]))
-		nvgpu_dma_unmap_free(vm,
-			&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
-}
-
 void gv11b_mm_remove_bar2_vm(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
 
 	nvgpu_log_fn(g, " ");
 
-	gv11b_mm_mmu_hw_fault_buf_deinit(g);
-
 	nvgpu_free_inst_block(g, &mm->bar2.inst_block);
 	nvgpu_vm_put(mm->bar2.vm);
 }
diff --git a/drivers/gpu/nvgpu/include/nvgpu/mm.h b/drivers/gpu/nvgpu/include/nvgpu/mm.h
index 3c9da601..ace22742 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/mm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/mm.h
@@ -129,7 +129,7 @@ struct mm_gk20a {
 	struct nvgpu_mem bar2_desc;
 
 	struct nvgpu_mem hw_fault_buf[FAULT_TYPE_NUM];
-	struct mmu_fault_info *fault_info[FAULT_TYPE_NUM];
+	struct mmu_fault_info fault_info[FAULT_TYPE_NUM];
 	struct nvgpu_mutex hub_isr_mutex;
 	u32 hub_intr_types;
 
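The mm.h change above is what makes the separately allocated shadow
buffer unnecessary: fault_info[] is now an array of struct
mmu_fault_info embedded in struct mm_gk20a rather than a table of
pointers into an nvgpu_kzalloc()'d block. A minimal sketch of the
resulting access-pattern change, using the names from the fb_gv11b.c
hunks above:

	/* Before: the entry was a pointer into a separately kzalloc'd block. */
	mmfault = g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY];

	/* After: entries are embedded, so callers take the address in place
	 * and the teardown path no longer has anything to nvgpu_kfree(). */
	mmfault = &g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY];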