summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author	Mahantesh Kumbar <mkumbar@nvidia.com>	2017-06-23 07:40:13 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-29 16:29:52 -0400
commit	268721975c6c72418e2282126e7f594f62e6e118 (patch)
tree	62c64561775f42513a9dda74244f613dc5a59543
parent	97aea977e25c17ecb44448eb19dc15e740036958 (diff)
gpu: nvgpu: PMU reset reorg
- nvgpu_pmu_reset() as pmu reset for all chips & removed gk20a_pmu_reset() & gp106_pmu_reset() along with dependent code.
- Created ops to do PMU engine reset & to know the engine reset status
- Removed pmu.reset ops & replaced with nvgpu_flcn_reset(pmu->flcn)
- Moved sec2 reset to sec2_gp106 from pmu_gp106 & cleaned PMU code part of sec2.

JIRA NVGPU-99

Change-Id: I7575e4ca2b34922d73d171f6a41bfcdc2f40dc96
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master/r/1507881
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--drivers/gpu/nvgpu/common/falcon/falcon.c1
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu.c114
-rw-r--r--drivers/gpu/nvgpu/gk20a/flcn_gk20a.c3
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a.h3
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.c108
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.h5
-rw-r--r--drivers/gpu/nvgpu/gm206/bios_gm206.c4
-rw-r--r--drivers/gpu/nvgpu/gm20b/pmu_gm20b.c3
-rw-r--r--drivers/gpu/nvgpu/gp106/pmu_gp106.c145
-rw-r--r--drivers/gpu/nvgpu/gp106/pmu_gp106.h3
-rw-r--r--drivers/gpu/nvgpu/gp106/sec2_gp106.c23
-rw-r--r--drivers/gpu/nvgpu/gp10b/pmu_gp10b.c3
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/pmu.h3
13 files changed, 173 insertions, 245 deletions
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon.c b/drivers/gpu/nvgpu/common/falcon/falcon.c
index 375a9cee..b6589bd1 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon.c
+++ b/drivers/gpu/nvgpu/common/falcon/falcon.c
@@ -142,6 +142,7 @@ void nvgpu_flcn_sw_init(struct gk20a *g, u32 flcn_id)
142 flcn = &g->pmu_flcn; 142 flcn = &g->pmu_flcn;
143 flcn->flcn_id = flcn_id; 143 flcn->flcn_id = flcn_id;
144 g->pmu.flcn = &g->pmu_flcn; 144 g->pmu.flcn = &g->pmu_flcn;
145 g->pmu.g = g;
145 break; 146 break;
146 case FALCON_ID_SEC2: 147 case FALCON_ID_SEC2:
147 flcn = &g->sec2_flcn; 148 flcn = &g->sec2_flcn;
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index eb1c83fb..cc87c89b 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -18,8 +18,109 @@
18 18
19#include "gk20a/gk20a.h" 19#include "gk20a/gk20a.h"
20 20
21#define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000
22#define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10
23
21static int nvgpu_pg_init_task(void *arg); 24static int nvgpu_pg_init_task(void *arg);
22 25
26static int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
27{
28 struct gk20a *g = pmu->g;
29 struct nvgpu_timeout timeout;
30 int err = 0;
31
32 nvgpu_log_fn(g, " %s ", g->name);
33
34 if (enable) {
35 /* bring PMU falcon/engine out of reset */
36 g->ops.pmu.reset_engine(g, true);
37
38 if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
39 g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
40 g->slcg_enabled);
41
42 if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
43 g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
44 g->blcg_enabled);
45
46 /* check for PMU IMEM/DMEM scrubbing complete status */
47 nvgpu_timeout_init(g, &timeout,
48 PMU_MEM_SCRUBBING_TIMEOUT_MAX /
49 PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT,
50 NVGPU_TIMER_RETRY_TIMER);
51 do {
52 if (nvgpu_flcn_get_mem_scrubbing_status(pmu->flcn))
53 goto exit;
54
55 nvgpu_udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
56 } while (!nvgpu_timeout_expired(&timeout));
57
58 /* keep PMU falcon/engine in reset
59 * if IMEM/DMEM scrubbing fails
60 */
61 g->ops.pmu.reset_engine(g, false);
62 nvgpu_err(g, "Falcon mem scrubbing timeout");
63 err = -ETIMEDOUT;
64 } else
65 /* keep PMU falcon/engine in reset */
66 g->ops.pmu.reset_engine(g, false);
67
68exit:
69 nvgpu_log_fn(g, "%s Done, status - %d ", g->name, err);
70 return err;
71}
72
73static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
74{
75 struct gk20a *g = pmu->g;
76 int err = 0;
77
78 nvgpu_log_fn(g, " ");
79
80 if (!enable) {
81 if (!g->ops.pmu.is_engine_in_reset(g)) {
82 pmu_enable_irq(pmu, false);
83 pmu_enable_hw(pmu, false);
84 }
85 } else {
86 err = pmu_enable_hw(pmu, true);
87 if (err)
88 goto exit;
89
90 err = nvgpu_flcn_wait_idle(pmu->flcn);
91 if (err)
92 goto exit;
93
94 pmu_enable_irq(pmu, true);
95 }
96
97exit:
98 nvgpu_log_fn(g, "Done, status - %d ", err);
99 return err;
100}
101
102int nvgpu_pmu_reset(struct gk20a *g)
103{
104 struct nvgpu_pmu *pmu = &g->pmu;
105 int err = 0;
106
107 nvgpu_log_fn(g, " %s ", g->name);
108
109 err = nvgpu_flcn_wait_idle(pmu->flcn);
110 if (err)
111 goto exit;
112
113 err = pmu_enable(pmu, false);
114 if (err)
115 goto exit;
116
117 err = pmu_enable(pmu, true);
118
119exit:
120 nvgpu_log_fn(g, " %s Done, status - %d ", g->name, err);
121 return err;
122}
123
23static int nvgpu_init_task_pg_init(struct gk20a *g) 124static int nvgpu_init_task_pg_init(struct gk20a *g)
24{ 125{
25 struct nvgpu_pmu *pmu = &g->pmu; 126 struct nvgpu_pmu *pmu = &g->pmu;
@@ -139,17 +240,6 @@ skip_init:
139 return err; 240 return err;
140} 241}
141 242
142static int nvgpu_init_pmu_reset_enable_hw(struct gk20a *g)
143{
144 struct nvgpu_pmu *pmu = &g->pmu;
145
146 nvgpu_log_fn(g, " ");
147
148 pmu_enable_hw(pmu, true);
149
150 return 0;
151}
152
153int nvgpu_init_pmu_support(struct gk20a *g) 243int nvgpu_init_pmu_support(struct gk20a *g)
154{ 244{
155 struct nvgpu_pmu *pmu = &g->pmu; 245 struct nvgpu_pmu *pmu = &g->pmu;
@@ -160,7 +250,7 @@ int nvgpu_init_pmu_support(struct gk20a *g)
160 if (pmu->initialized) 250 if (pmu->initialized)
161 return 0; 251 return 0;
162 252
163 err = nvgpu_init_pmu_reset_enable_hw(g); 253 err = pmu_enable_hw(pmu, true);
164 if (err) 254 if (err)
165 return err; 255 return err;
166 256
diff --git a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
index 9d378248..2a246fdc 100644
--- a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c
@@ -11,6 +11,7 @@
11 * more details. 11 * more details.
12 */ 12 */
13#include <nvgpu/falcon.h> 13#include <nvgpu/falcon.h>
14#include <nvgpu/pmu.h>
14 15
15#include "gk20a/gk20a.h" 16#include "gk20a/gk20a.h"
16 17
@@ -256,7 +257,7 @@ static void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
256 257
257 switch (flcn->flcn_id) { 258 switch (flcn->flcn_id) {
258 case FALCON_ID_PMU: 259 case FALCON_ID_PMU:
259 flcn_eng_dep_ops->reset_eng = gk20a_pmu_reset; 260 flcn_eng_dep_ops->reset_eng = nvgpu_pmu_reset;
260 break; 261 break;
261 default: 262 default:
262 /* NULL assignment make sure 263 /* NULL assignment make sure
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 191d1c39..ff37d9f3 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -755,7 +755,8 @@ struct gpu_ops {
755 int (*pmu_lpwr_disable_pg)(struct gk20a *g, bool pstate_lock); 755 int (*pmu_lpwr_disable_pg)(struct gk20a *g, bool pstate_lock);
756 u32 (*pmu_pg_param_post_init)(struct gk20a *g); 756 u32 (*pmu_pg_param_post_init)(struct gk20a *g);
757 void (*dump_secure_fuses)(struct gk20a *g); 757 void (*dump_secure_fuses)(struct gk20a *g);
758 int (*reset)(struct gk20a *g); 758 int (*reset_engine)(struct gk20a *g, bool do_reset);
759 bool (*is_engine_in_reset)(struct gk20a *g);
759 int (*falcon_wait_for_halt)(struct gk20a *g, 760 int (*falcon_wait_for_halt)(struct gk20a *g,
760 unsigned int timeout); 761 unsigned int timeout);
761 int (*falcon_clear_halt_interrupt_status)(struct gk20a *g, 762 int (*falcon_clear_halt_interrupt_status)(struct gk20a *g,
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 4a676b82..3fc73e42 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -38,8 +38,6 @@
38#define gk20a_dbg_pmu(fmt, arg...) \ 38#define gk20a_dbg_pmu(fmt, arg...) \
39 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) 39 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
40 40
41#define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000
42#define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10
43 41
44bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) 42bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
45{ 43{
@@ -159,80 +157,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
159 gk20a_dbg_fn("done"); 157 gk20a_dbg_fn("done");
160} 158}
161 159
162int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
163{
164 struct gk20a *g = gk20a_from_pmu(pmu);
165 struct nvgpu_timeout timeout;
166 int err = 0;
167
168 gk20a_dbg_fn("");
169
170 if (enable) {
171 g->ops.mc.enable(g, mc_enable_pwr_enabled_f());
172
173 if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
174 g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
175 g->slcg_enabled);
176 if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
177 g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
178 g->blcg_enabled);
179
180 nvgpu_timeout_init(g, &timeout,
181 PMU_MEM_SCRUBBING_TIMEOUT_MAX /
182 PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT,
183 NVGPU_TIMER_RETRY_TIMER);
184 do {
185 if (nvgpu_flcn_get_mem_scrubbing_status(pmu->flcn)) {
186 gk20a_dbg_fn("done");
187 goto exit;
188 }
189 nvgpu_udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
190 } while (!nvgpu_timeout_expired(&timeout));
191
192 g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
193 nvgpu_err(g, "Falcon mem scrubbing timeout");
194
195 err = -ETIMEDOUT;
196 } else
197 g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
198
199exit:
200 return err;
201}
202 160
203static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
204{
205 struct gk20a *g = gk20a_from_pmu(pmu);
206 u32 pmc_enable;
207 int err;
208
209 gk20a_dbg_fn("");
210
211 if (!enable) {
212 pmc_enable = gk20a_readl(g, mc_enable_r());
213 if (mc_enable_pwr_v(pmc_enable) !=
214 mc_enable_pwr_disabled_v()) {
215
216 pmu_enable_irq(pmu, false);
217 pmu_enable_hw(pmu, false);
218 }
219 } else {
220 err = pmu_enable_hw(pmu, true);
221 if (err)
222 return err;
223
224 /* TBD: post reset */
225
226 err = nvgpu_flcn_wait_idle(pmu->flcn);
227 if (err)
228 return err;
229
230 pmu_enable_irq(pmu, true);
231 }
232
233 gk20a_dbg_fn("done");
234 return 0;
235}
236 161
237int pmu_bootstrap(struct nvgpu_pmu *pmu) 162int pmu_bootstrap(struct nvgpu_pmu *pmu)
238{ 163{
@@ -576,25 +501,27 @@ static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
576 gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr); 501 gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr);
577} 502}
578 503
579int gk20a_pmu_reset(struct gk20a *g) 504bool gk20a_pmu_is_engine_in_reset(struct gk20a *g)
580{ 505{
581 struct nvgpu_pmu *pmu = &g->pmu; 506 u32 pmc_enable;
582 int err; 507 bool status = false;
583 508
584 err = nvgpu_flcn_wait_idle(pmu->flcn); 509 pmc_enable = gk20a_readl(g, mc_enable_r());
585 if (err) 510 if (mc_enable_pwr_v(pmc_enable) ==
586 goto exit; 511 mc_enable_pwr_disabled_v())
512 status = true;
587 513
588 err = pmu_enable(pmu, false); 514 return status;
589 if (err) 515}
590 goto exit;
591 516
592 err = pmu_enable(pmu, true); 517int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset)
593 if (err) 518{
594 goto exit; 519 if (do_reset)
520 g->ops.mc.enable(g, mc_enable_pwr_enabled_f());
521 else
522 g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
595 523
596exit: 524 return 0;
597 return err;
598} 525}
599 526
600static bool gk20a_is_pmu_supported(struct gk20a *g) 527static bool gk20a_is_pmu_supported(struct gk20a *g)
@@ -650,7 +577,8 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
650 gops->pmu.alloc_blob_space = NULL; 577 gops->pmu.alloc_blob_space = NULL;
651 gops->pmu.pmu_populate_loader_cfg = NULL; 578 gops->pmu.pmu_populate_loader_cfg = NULL;
652 gops->pmu.flcn_populate_bl_dmem_desc = NULL; 579 gops->pmu.flcn_populate_bl_dmem_desc = NULL;
653 gops->pmu.reset = NULL; 580 gops->pmu.reset_engine = gk20a_pmu_engine_reset;
581 gops->pmu.is_engine_in_reset = gk20a_pmu_is_engine_in_reset;
654} 582}
655 583
656static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg, 584static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index 8f337855..997a88d2 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -70,9 +70,10 @@ void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg,
70 void *param, u32 handle, u32 status); 70 void *param, u32 handle, u32 status);
71void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, 71void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
72 struct pmu_pg_stats_data *pg_stat_data); 72 struct pmu_pg_stats_data *pg_stat_data);
73int gk20a_pmu_reset(struct gk20a *g); 73bool gk20a_pmu_is_engine_in_reset(struct gk20a *g);
74int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset);
75
74int pmu_idle(struct nvgpu_pmu *pmu); 76int pmu_idle(struct nvgpu_pmu *pmu);
75int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable);
76 77
77bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos); 78bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos);
78 79
diff --git a/drivers/gpu/nvgpu/gm206/bios_gm206.c b/drivers/gpu/nvgpu/gm206/bios_gm206.c
index c32959a3..567b01ce 100644
--- a/drivers/gpu/nvgpu/gm206/bios_gm206.c
+++ b/drivers/gpu/nvgpu/gm206/bios_gm206.c
@@ -105,7 +105,7 @@ static int gm206_bios_devinit(struct gk20a *g)
105 struct nvgpu_timeout timeout; 105 struct nvgpu_timeout timeout;
106 106
107 gk20a_dbg_fn(""); 107 gk20a_dbg_fn("");
108 g->ops.pmu.reset(g); 108 nvgpu_flcn_reset(g->pmu.flcn);
109 109
110 nvgpu_timeout_init(g, &timeout, 110 nvgpu_timeout_init(g, &timeout,
111 PMU_BOOT_TIMEOUT_MAX / 111 PMU_BOOT_TIMEOUT_MAX /
@@ -187,7 +187,7 @@ static int gm206_bios_preos(struct gk20a *g)
187 struct nvgpu_timeout timeout; 187 struct nvgpu_timeout timeout;
188 188
189 gk20a_dbg_fn(""); 189 gk20a_dbg_fn("");
190 g->ops.pmu.reset(g); 190 nvgpu_flcn_reset(g->pmu.flcn);
191 191
192 nvgpu_timeout_init(g, &timeout, 192 nvgpu_timeout_init(g, &timeout,
193 PMU_BOOT_TIMEOUT_MAX / 193 PMU_BOOT_TIMEOUT_MAX /
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index 6c5a2502..3b655b62 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -309,5 +309,6 @@ void gm20b_init_pmu_ops(struct gpu_ops *gops)
309 gops->pmu.pmu_lpwr_disable_pg = NULL; 309 gops->pmu.pmu_lpwr_disable_pg = NULL;
310 gops->pmu.pmu_pg_param_post_init = NULL; 310 gops->pmu.pmu_pg_param_post_init = NULL;
311 gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gm20b; 311 gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gm20b;
312 gops->pmu.reset = NULL; 312 gops->pmu.reset_engine = gk20a_pmu_engine_reset;
313 gops->pmu.is_engine_in_reset = gk20a_pmu_is_engine_in_reset;
313} 314}
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
index 88d68220..56f1e194 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -32,151 +32,43 @@
32#include <nvgpu/hw/gp106/hw_mc_gp106.h> 32#include <nvgpu/hw/gp106/hw_mc_gp106.h>
33#include <nvgpu/hw/gp106/hw_pwr_gp106.h> 33#include <nvgpu/hw/gp106/hw_pwr_gp106.h>
34 34
35#define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000 35static bool gp106_is_pmu_supported(struct gk20a *g)
36#define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10 36{
37 return true;
38}
37 39
38static int gp106_pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable) 40bool gp106_pmu_is_engine_in_reset(struct gk20a *g)
39{ 41{
40 struct gk20a *g = gk20a_from_pmu(pmu); 42 u32 reg_reset;
43 bool status = false;
41 44
42 gk20a_dbg_fn(""); 45 reg_reset = gk20a_readl(g, pwr_falcon_engine_r());
46 if (reg_reset == pwr_falcon_engine_reset_true_f())
47 status = true;
48
49 return status;
50}
43 51
52int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset)
53{
44 /* 54 /*
45 * From GP10X onwards, we are using PPWR_FALCON_ENGINE for reset. And as 55 * From GP10X onwards, we are using PPWR_FALCON_ENGINE for reset. And as
46 * it may come into same behaviour, reading NV_PPWR_FALCON_ENGINE again 56 * it may come into same behavior, reading NV_PPWR_FALCON_ENGINE again
47 * after Reset. 57 * after Reset.
48 */ 58 */
49 59 if (do_reset) {
50 if (enable) {
51 int retries = PMU_MEM_SCRUBBING_TIMEOUT_MAX /
52 PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT;
53 gk20a_writel(g, pwr_falcon_engine_r(), 60 gk20a_writel(g, pwr_falcon_engine_r(),
54 pwr_falcon_engine_reset_false_f()); 61 pwr_falcon_engine_reset_false_f());
55 gk20a_readl(g, pwr_falcon_engine_r()); 62 gk20a_readl(g, pwr_falcon_engine_r());
56
57 /* make sure ELPG is in a good state */
58 if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
59 g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
60 g->slcg_enabled);
61 if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
62 g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
63 g->blcg_enabled);
64
65 /* wait for Scrubbing to complete */
66 do {
67 if (nvgpu_flcn_get_mem_scrubbing_status(pmu->flcn)) {
68 gk20a_dbg_fn("done");
69 return 0;
70 }
71 nvgpu_udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
72 } while (--retries);
73
74 /* If scrubbing timeout, keep PMU in reset state */
75 gk20a_writel(g, pwr_falcon_engine_r(),
76 pwr_falcon_engine_reset_true_f());
77 gk20a_readl(g, pwr_falcon_engine_r());
78 nvgpu_err(g, "Falcon mem scrubbing timeout");
79 return -ETIMEDOUT;
80 } else { 63 } else {
81 /* DISBALE */
82 gk20a_writel(g, pwr_falcon_engine_r(), 64 gk20a_writel(g, pwr_falcon_engine_r(),
83 pwr_falcon_engine_reset_true_f()); 65 pwr_falcon_engine_reset_true_f());
84 gk20a_readl(g, pwr_falcon_engine_r()); 66 gk20a_readl(g, pwr_falcon_engine_r());
85 return 0;
86 } 67 }
87}
88 68
89static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
90{
91 struct gk20a *g = gk20a_from_pmu(pmu);
92 u32 reg_reset;
93 int err;
94
95 gk20a_dbg_fn("");
96
97 if (!enable) {
98 reg_reset = gk20a_readl(g, pwr_falcon_engine_r());
99 if (reg_reset !=
100 pwr_falcon_engine_reset_true_f()) {
101
102 pmu_enable_irq(pmu, false);
103 gp106_pmu_enable_hw(pmu, false);
104 nvgpu_udelay(10);
105 }
106 } else {
107 gp106_pmu_enable_hw(pmu, true);
108 /* TBD: post reset */
109
110 /*idle the PMU and enable interrupts on the Falcon*/
111 err = nvgpu_flcn_wait_idle(pmu->flcn);
112 if (err)
113 return err;
114 nvgpu_udelay(5);
115 pmu_enable_irq(pmu, true);
116 }
117
118 gk20a_dbg_fn("done");
119 return 0; 69 return 0;
120} 70}
121 71
122int gp106_pmu_reset(struct gk20a *g)
123{
124 struct nvgpu_pmu *pmu = &g->pmu;
125 int err = 0;
126
127 gk20a_dbg_fn("");
128
129 err = nvgpu_flcn_wait_idle(pmu->flcn);
130 if (err)
131 return err;
132
133 /* TBD: release pmu hw mutex */
134
135 err = pmu_enable(pmu, false);
136 if (err)
137 return err;
138
139 /* TBD: cancel all sequences */
140 /* TBD: init all sequences and state tables */
141 /* TBD: restore pre-init message handler */
142
143 err = pmu_enable(pmu, true);
144 if (err)
145 return err;
146
147 return err;
148}
149
150static int gp106_sec2_reset(struct gk20a *g)
151{
152 gk20a_dbg_fn("");
153 //sec2 reset
154 gk20a_writel(g, psec_falcon_engine_r(),
155 pwr_falcon_engine_reset_true_f());
156 nvgpu_udelay(10);
157 gk20a_writel(g, psec_falcon_engine_r(),
158 pwr_falcon_engine_reset_false_f());
159
160 gk20a_dbg_fn("done");
161 return 0;
162}
163
164static int gp106_falcon_reset(struct gk20a *g)
165{
166 gk20a_dbg_fn("");
167
168 gp106_pmu_reset(g);
169 gp106_sec2_reset(g);
170
171 gk20a_dbg_fn("done");
172 return 0;
173}
174
175static bool gp106_is_pmu_supported(struct gk20a *g)
176{
177 return true;
178}
179
180static u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id) 72static u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
181{ 73{
182 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) 74 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
@@ -439,10 +331,11 @@ void gp106_init_pmu_ops(struct gpu_ops *gops)
439 gops->pmu.pmu_lpwr_disable_pg = nvgpu_lpwr_disable_pg; 331 gops->pmu.pmu_lpwr_disable_pg = nvgpu_lpwr_disable_pg;
440 gops->pmu.pmu_pg_param_post_init = nvgpu_lpwr_post_init; 332 gops->pmu.pmu_pg_param_post_init = nvgpu_lpwr_post_init;
441 gops->pmu.dump_secure_fuses = NULL; 333 gops->pmu.dump_secure_fuses = NULL;
442 gops->pmu.reset = gp106_falcon_reset;
443 gops->pmu.mclk_init = gp106_mclk_init; 334 gops->pmu.mclk_init = gp106_mclk_init;
444 gops->pmu.mclk_deinit = gp106_mclk_deinit; 335 gops->pmu.mclk_deinit = gp106_mclk_deinit;
445 gops->pmu.is_pmu_supported = gp106_is_pmu_supported; 336 gops->pmu.is_pmu_supported = gp106_is_pmu_supported;
337 gops->pmu.reset_engine = gp106_pmu_engine_reset;
338 gops->pmu.is_engine_in_reset = gp106_pmu_is_engine_in_reset;
446 339
447 gk20a_dbg_fn("done"); 340 gk20a_dbg_fn("done");
448} 341}
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.h b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
index 3213b25c..5f399b89 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
@@ -18,8 +18,9 @@
18 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) 18 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
19 19
20void gp106_init_pmu_ops(struct gpu_ops *gops); 20void gp106_init_pmu_ops(struct gpu_ops *gops);
21int gp106_pmu_reset(struct gk20a *g);
22void gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, 21void gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
23 struct pmu_pg_stats_data *pg_stat_data); 22 struct pmu_pg_stats_data *pg_stat_data);
23bool gp106_pmu_is_engine_in_reset(struct gk20a *g);
24int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset);
24 25
25#endif /*__PMU_GP106_H_*/ 26#endif /*__PMU_GP106_H_*/
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index a25fc990..f49d56c4 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -330,6 +330,20 @@ void init_pmu_setup_hw1(struct gk20a *g)
330 330
331} 331}
332 332
333static int gp106_sec2_reset(struct gk20a *g)
334{
335 nvgpu_log_fn(g, " ");
336
337 gk20a_writel(g, psec_falcon_engine_r(),
338 pwr_falcon_engine_reset_true_f());
339 nvgpu_udelay(10);
340 gk20a_writel(g, psec_falcon_engine_r(),
341 pwr_falcon_engine_reset_false_f());
342
343 nvgpu_log_fn(g, "done");
344 return 0;
345}
346
333int init_sec2_setup_hw1(struct gk20a *g, 347int init_sec2_setup_hw1(struct gk20a *g,
334 void *desc, u32 bl_sz) 348 void *desc, u32 bl_sz)
335{ 349{
@@ -339,10 +353,7 @@ int init_sec2_setup_hw1(struct gk20a *g,
339 353
340 gk20a_dbg_fn(""); 354 gk20a_dbg_fn("");
341 355
342 nvgpu_mutex_acquire(&pmu->isr_mutex); 356 gp106_sec2_reset(g);
343 g->ops.pmu.reset(g);
344 pmu->isr_enabled = true;
345 nvgpu_mutex_release(&pmu->isr_mutex);
346 357
347 data = gk20a_readl(g, psec_fbif_ctl_r()); 358 data = gk20a_readl(g, psec_fbif_ctl_r());
348 data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f(); 359 data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f();
@@ -370,11 +381,7 @@ int init_sec2_setup_hw1(struct gk20a *g,
370 psec_fbif_transcfg_target_noncoherent_sysmem_f()); 381 psec_fbif_transcfg_target_noncoherent_sysmem_f());
371 382
372 /*disable irqs for hs falcon booting as we will poll for halt*/ 383 /*disable irqs for hs falcon booting as we will poll for halt*/
373 nvgpu_mutex_acquire(&pmu->isr_mutex);
374 pmu_enable_irq(pmu, false);
375 sec_enable_irq(pmu, false); 384 sec_enable_irq(pmu, false);
376 pmu->isr_enabled = false;
377 nvgpu_mutex_release(&pmu->isr_mutex);
378 err = bl_bootstrap_sec2(pmu, desc, bl_sz); 385 err = bl_bootstrap_sec2(pmu, desc, bl_sz);
379 if (err) 386 if (err)
380 return err; 387 return err;
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
index 3d02f475..b086bf1f 100644
--- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
@@ -428,6 +428,7 @@ void gp10b_init_pmu_ops(struct gpu_ops *gops)
428 gops->pmu.pmu_lpwr_enable_pg = NULL; 428 gops->pmu.pmu_lpwr_enable_pg = NULL;
429 gops->pmu.pmu_lpwr_disable_pg = NULL; 429 gops->pmu.pmu_lpwr_disable_pg = NULL;
430 gops->pmu.pmu_pg_param_post_init = NULL; 430 gops->pmu.pmu_pg_param_post_init = NULL;
431 gops->pmu.reset = NULL;
432 gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gp10b; 431 gops->pmu.dump_secure_fuses = pmu_dump_security_fuses_gp10b;
432 gops->pmu.reset_engine = gk20a_pmu_engine_reset;
433 gops->pmu.is_engine_in_reset = gk20a_pmu_is_engine_in_reset;
433} 434}
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
index ede238a0..107d2b2d 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
@@ -432,6 +432,9 @@ int nvgpu_pmu_init_powergating(struct gk20a *g);
432int nvgpu_pmu_init_bind_fecs(struct gk20a *g); 432int nvgpu_pmu_init_bind_fecs(struct gk20a *g);
433void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g); 433void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g);
434 434
435/* PMU reset */
436int nvgpu_pmu_reset(struct gk20a *g);
437
435/* PG enable/disable */ 438/* PG enable/disable */
436int nvgpu_pmu_enable_elpg(struct gk20a *g); 439int nvgpu_pmu_enable_elpg(struct gk20a *g);
437int nvgpu_pmu_disable_elpg(struct gk20a *g); 440int nvgpu_pmu_disable_elpg(struct gk20a *g);