author	Mahantesh Kumbar <mkumbar@nvidia.com>	2017-06-15 12:40:43 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-23 04:14:23 -0400
commit	94cb4b635fba4c01b58f36a6d3384db729e4c9f6 (patch)
tree	faa4d026519d04005204c172f1b708e5c1188ab2
parent	be04b9b1b56d6dd478fe521277c079367c03f39d (diff)
gpu: nvgpu: use nvgpu_flcn_* interfaces
- set nvgpu_flcn_reset() to point to gk20a_pmu_reset()
- set PMU interrupt using nvgpu_flcn_enable_irq()
- replace pmu_idle with nvgpu_flcn_wait_idle()

JIRA NVGPU-57

Change-Id: I50d0310ae78ad266da3c1e662f1598d61ff7abb6
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1469478
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--	drivers/gpu/nvgpu/gk20a/flcn_gk20a.c	21
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.c	6
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	118
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.h	1
-rw-r--r--	drivers/gpu/nvgpu/gm20b/acr_gm20b.c	4
-rw-r--r--	drivers/gpu/nvgpu/gm20b/pmu_gm20b.c	2
-rw-r--r--	drivers/gpu/nvgpu/gp106/pmu_gp106.c	10
-rw-r--r--	drivers/gpu/nvgpu/gp10b/pmu_gp10b.c	4
8 files changed, 65 insertions, 101 deletions
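Note: the common falcon layer these call sites target (nvgpu_flcn_reset(), nvgpu_flcn_wait_idle(), nvgpu_flcn_sw_init()) is not part of this diff. A minimal sketch of the dispatch the patch relies on; the g back-pointer and the exact fallback path are assumptions inferred from the call sites and from the comment in flcn_gk20a.c below:

	/*
	 * Sketch only, not the committed implementation: nvgpu_flcn_reset()
	 * is presumed to prefer an engine-specific reset when one is
	 * registered (reset_eng, set to gk20a_pmu_reset() for FALCON_ID_PMU
	 * in this patch) and to fall back to the falcon CPU hard reset in
	 * gk20a_flcn_reset() when reset_eng is NULL.
	 */
	int nvgpu_flcn_reset(struct nvgpu_falcon *flcn)
	{
		if (flcn->flcn_engine_dep_ops.reset_eng != NULL)
			/* engine-specific reset, e.g. gk20a_pmu_reset() */
			return flcn->flcn_engine_dep_ops.reset_eng(flcn->g);

		/* generic falcon CPU hard reset */
		return gk20a_flcn_reset(flcn);
	}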
diff --git a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
index 9568b979..0b140802 100644
--- a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
@@ -107,6 +107,25 @@ static bool gk20a_is_falcon_scrubbing_done(struct nvgpu_falcon *flcn)
 	return status;
 }
 
+static void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
+{
+	struct nvgpu_falcon_engine_dependency_ops *flcn_eng_dep_ops =
+		&flcn->flcn_engine_dep_ops;
+
+	switch (flcn->flcn_id) {
+	case FALCON_ID_PMU:
+		flcn_eng_dep_ops->reset_eng = gk20a_pmu_reset;
+		break;
+	default:
+		/* NULL assignment make sure
+		 * CPU hard reset in gk20a_flcn_reset() gets execute
+		 * if falcon doesn't need specific reset implementation
+		 */
+		flcn_eng_dep_ops->reset_eng = NULL;
+		break;
+	}
+}
+
 static void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
 {
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
@@ -116,6 +135,8 @@ static void gk20a_falcon_ops(struct nvgpu_falcon *flcn)
 	flcn_ops->is_falcon_cpu_halted = gk20a_is_falcon_cpu_halted;
 	flcn_ops->is_falcon_idle = gk20a_is_falcon_idle;
 	flcn_ops->is_falcon_scrubbing_done = gk20a_is_falcon_scrubbing_done;
+
+	gk20a_falcon_engine_dependency_ops(flcn);
 }
 
 static void gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index 13635706..7106ea03 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -178,6 +178,9 @@ int gk20a_finalize_poweron(struct gk20a *g)
 		g->gpu_reset_done = true;
 	}
 
+	/* init interface layer support for PMU falcon */
+	nvgpu_flcn_sw_init(g, FALCON_ID_PMU);
+
 	if (g->ops.bios_init)
 		err = g->ops.bios_init(g);
 	if (err)
@@ -237,9 +240,6 @@ int gk20a_finalize_poweron(struct gk20a *g)
 		goto done;
 	}
 
-	/* init interface layer support for PMU falcon */
-	nvgpu_flcn_sw_init(g, FALCON_ID_PMU);
-
 	if (g->ops.pmu.is_pmu_supported(g)) {
 		if (g->ops.pmu.prepare_ucode)
 			err = g->ops.pmu.prepare_ucode(g);
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 2f8e456f..fefa77fc 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -200,38 +200,11 @@ void pmu_copy_to_dmem(struct nvgpu_pmu *pmu,
 	return;
 }
 
-int pmu_idle(struct nvgpu_pmu *pmu)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	struct nvgpu_timeout timeout;
-	u32 idle_stat;
-
-	nvgpu_timeout_init(g, &timeout, 2000, NVGPU_TIMER_RETRY_TIMER);
-
-	/* wait for pmu idle */
-	do {
-		idle_stat = gk20a_readl(g, pwr_falcon_idlestate_r());
-
-		if (pwr_falcon_idlestate_falcon_busy_v(idle_stat) == 0 &&
-			pwr_falcon_idlestate_ext_busy_v(idle_stat) == 0) {
-			break;
-		}
-
-		if (nvgpu_timeout_expired_msg(&timeout,
-			"waiting for pmu idle: 0x%08x",
-			idle_stat))
-			return -EBUSY;
-
-		nvgpu_usleep_range(100, 200);
-	} while (1);
-
-	gk20a_dbg_fn("done");
-	return 0;
-}
-
 void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
+	u32 intr_mask;
+	u32 intr_dest;
 
 	gk20a_dbg_fn("");
 
@@ -240,21 +213,11 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
 	g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, false,
 			mc_intr_mask_1_pmu_enabled_f());
 
-	gk20a_writel(g, pwr_falcon_irqmclr_r(),
-		pwr_falcon_irqmclr_gptmr_f(1) |
-		pwr_falcon_irqmclr_wdtmr_f(1) |
-		pwr_falcon_irqmclr_mthd_f(1) |
-		pwr_falcon_irqmclr_ctxsw_f(1) |
-		pwr_falcon_irqmclr_halt_f(1) |
-		pwr_falcon_irqmclr_exterr_f(1) |
-		pwr_falcon_irqmclr_swgen0_f(1) |
-		pwr_falcon_irqmclr_swgen1_f(1) |
-		pwr_falcon_irqmclr_ext_f(0xff));
+	nvgpu_flcn_set_irq(pmu->flcn, false, 0x0, 0x0);
 
 	if (enable) {
 		/* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
-		gk20a_writel(g, pwr_falcon_irqdest_r(),
-			pwr_falcon_irqdest_host_gptmr_f(0) |
+		intr_dest = pwr_falcon_irqdest_host_gptmr_f(0) |
 			pwr_falcon_irqdest_host_wdtmr_f(1) |
 			pwr_falcon_irqdest_host_mthd_f(0) |
 			pwr_falcon_irqdest_host_ctxsw_f(0) |
@@ -271,18 +234,19 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
 			pwr_falcon_irqdest_target_exterr_f(0) |
 			pwr_falcon_irqdest_target_swgen0_f(0) |
 			pwr_falcon_irqdest_target_swgen1_f(0) |
-			pwr_falcon_irqdest_target_ext_f(0xff));
+			pwr_falcon_irqdest_target_ext_f(0xff);
 
 		/* 0=disable, 1=enable */
-		gk20a_writel(g, pwr_falcon_irqmset_r(),
-			pwr_falcon_irqmset_gptmr_f(1) |
+		intr_mask = pwr_falcon_irqmset_gptmr_f(1) |
 			pwr_falcon_irqmset_wdtmr_f(1) |
 			pwr_falcon_irqmset_mthd_f(0) |
 			pwr_falcon_irqmset_ctxsw_f(0) |
 			pwr_falcon_irqmset_halt_f(1) |
 			pwr_falcon_irqmset_exterr_f(1) |
 			pwr_falcon_irqmset_swgen0_f(1) |
-			pwr_falcon_irqmset_swgen1_f(1));
+			pwr_falcon_irqmset_swgen1_f(1);
+
+		nvgpu_flcn_set_irq(pmu->flcn, true, intr_mask, intr_dest);
 
 		g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_ENABLE, true,
 				mc_intr_mask_0_pmu_enabled_f());
@@ -295,6 +259,7 @@ int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct nvgpu_timeout timeout;
+	int err = 0;
 
 	gk20a_dbg_fn("");
 
@@ -313,13 +278,9 @@ int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
 			PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT,
 			NVGPU_TIMER_RETRY_TIMER);
 		do {
-			u32 w = gk20a_readl(g, pwr_falcon_dmactl_r()) &
-				(pwr_falcon_dmactl_dmem_scrubbing_m() |
-				 pwr_falcon_dmactl_imem_scrubbing_m());
-
-			if (!w) {
+			if (nvgpu_flcn_get_mem_scrubbing_status(pmu->flcn)) {
 				gk20a_dbg_fn("done");
-				return 0;
+				goto exit;
 			}
 			nvgpu_udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
 		} while (!nvgpu_timeout_expired(&timeout));
@@ -327,11 +288,12 @@ int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
 		g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
 		nvgpu_err(g, "Falcon mem scrubbing timeout");
 
-		return -ETIMEDOUT;
-	} else {
+		err = -ETIMEDOUT;
+	} else
 		g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
-		return 0;
-	}
+
+exit:
+	return err;
 }
 
 static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
@@ -357,7 +319,7 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
 
 	/* TBD: post reset */
 
-	err = pmu_idle(pmu);
+	err = nvgpu_flcn_wait_idle(pmu->flcn);
 	if (err)
 		return err;
 
@@ -368,31 +330,6 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
 	return 0;
 }
 
-int pmu_reset(struct nvgpu_pmu *pmu)
-{
-	int err;
-
-	err = pmu_idle(pmu);
-	if (err)
-		return err;
-
-	/* TBD: release pmu hw mutex */
-
-	err = pmu_enable(pmu, false);
-	if (err)
-		return err;
-
-	/* TBD: cancel all sequences */
-	/* TBD: init all sequences and state tables */
-	/* TBD: restore pre-init message handler */
-
-	err = pmu_enable(pmu, true);
-	if (err)
-		return err;
-
-	return 0;
-}
-
 int pmu_bootstrap(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
@@ -704,7 +641,7 @@ static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
 	gk20a_dbg_fn("");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	g->ops.pmu.reset(g);
+	nvgpu_flcn_reset(pmu->flcn);
 	pmu->isr_enabled = true;
 	nvgpu_mutex_release(&pmu->isr_mutex);
 
@@ -737,11 +674,22 @@ static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
 
 int gk20a_pmu_reset(struct gk20a *g)
 {
-	int err;
 	struct nvgpu_pmu *pmu = &g->pmu;
+	int err;
+
+	err = nvgpu_flcn_wait_idle(pmu->flcn);
+	if (err)
+		goto exit;
+
+	err = pmu_enable(pmu, false);
+	if (err)
+		goto exit;
 
-	err = pmu_reset(pmu);
+	err = pmu_enable(pmu, true);
+	if (err)
+		goto exit;
 
+exit:
 	return err;
 }
 
@@ -799,7 +747,7 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.alloc_blob_space = NULL;
 	gops->pmu.pmu_populate_loader_cfg = NULL;
 	gops->pmu.flcn_populate_bl_dmem_desc = NULL;
-	gops->pmu.reset = gk20a_pmu_reset;
+	gops->pmu.reset = NULL;
 }
 
 static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
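The reworked pmu_enable_irq() above now funnels all falcon interrupt mask/destination programming through nvgpu_flcn_set_irq(). A minimal sketch of the semantics implied by the call sites; the actual helper lives in the common falcon code outside this diff, and the cached intr_mask/intr_dest fields and the per-chip set_irq op are assumptions:

	/*
	 * Sketch inferred from the call sites in this patch: enable=false
	 * passes 0x0/0x0 and masks everything off; enable=true programs the
	 * supplied mask and destination via the chip-specific set_irq op.
	 */
	void nvgpu_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable,
		u32 intr_mask, u32 intr_dest)
	{
		if (flcn->flcn_ops.set_irq == NULL)
			return; /* no interrupt support wired for this falcon */

		flcn->intr_mask = intr_mask;
		flcn->intr_dest = intr_dest;
		flcn->flcn_ops.set_irq(flcn, enable);
	}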
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index a88bc404..147458af 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -60,7 +60,6 @@ void pmu_copy_to_dmem(struct nvgpu_pmu *pmu,
 		u32 dst, u8 *src, u32 size, u8 port);
 void pmu_copy_from_dmem(struct nvgpu_pmu *pmu,
 		u32 src, u8 *dst, u32 size, u8 port);
-int pmu_reset(struct nvgpu_pmu *pmu);
 int pmu_bootstrap(struct nvgpu_pmu *pmu);
 
 void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu);
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
index 02da3de9..43df8f24 100644
--- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c
@@ -1291,7 +1291,7 @@ int gm20b_init_nspmu_setup_hw1(struct gk20a *g)
 	gk20a_dbg_fn("");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	g->ops.pmu.reset(g);
+	nvgpu_flcn_reset(pmu->flcn);
 	pmu->isr_enabled = true;
 	nvgpu_mutex_release(&pmu->isr_mutex);
 
@@ -1326,7 +1326,7 @@ static int gm20b_init_pmu_setup_hw1(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	g->ops.pmu.reset(g);
+	nvgpu_flcn_reset(pmu->flcn);
 	pmu->isr_enabled = true;
 	nvgpu_mutex_release(&pmu->isr_mutex);
 
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index 5609a8cc..b486acfd 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -310,5 +310,5 @@ void gm20b_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.pmu_pg_param_post_init = NULL;
 	gops->pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd = NULL;
 	gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gm20b;
-	gops->pmu.reset = gk20a_pmu_reset;
+	gops->pmu.reset = NULL;
 }
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
index 57ccd269..7662f4eb 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -64,11 +64,7 @@ static int gp106_pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
 
 	/* wait for Scrubbing to complete */
 	do {
-		u32 w = gk20a_readl(g, pwr_falcon_dmactl_r()) &
-			(pwr_falcon_dmactl_dmem_scrubbing_m() |
-			 pwr_falcon_dmactl_imem_scrubbing_m());
-
-		if (!w) {
+		if (nvgpu_flcn_get_mem_scrubbing_status(pmu->flcn)) {
 			gk20a_dbg_fn("done");
 			return 0;
 		}
@@ -112,7 +108,7 @@ static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
 	/* TBD: post reset */
 
 	/*idle the PMU and enable interrupts on the Falcon*/
-	err = pmu_idle(pmu);
+	err = nvgpu_flcn_wait_idle(pmu->flcn);
 	if (err)
 		return err;
 	nvgpu_udelay(5);
@@ -130,7 +126,7 @@ int gp106_pmu_reset(struct gk20a *g)
 
 	gk20a_dbg_fn("");
 
-	err = pmu_idle(pmu);
+	err = nvgpu_flcn_wait_idle(pmu->flcn);
 	if (err)
 		return err;
 
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
index 2222cc17..c3ad8978 100644
--- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
@@ -307,7 +307,7 @@ static int gp10b_init_pmu_setup_hw1(struct gk20a *g)
 	gk20a_dbg_fn("");
 
 	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	g->ops.pmu.reset(g);
+	nvgpu_flcn_reset(pmu->flcn);
 	pmu->isr_enabled = true;
 	nvgpu_mutex_release(&pmu->isr_mutex);
 
@@ -430,6 +430,6 @@ void gp10b_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.pmu_lpwr_disable_pg = NULL;
 	gops->pmu.pmu_pg_param_post_init = NULL;
 	gops->pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd = NULL;
-	gops->pmu.reset = gk20a_pmu_reset;
+	gops->pmu.reset = NULL;
 	gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gp10b;
 }