Diffstat (limited to 'drivers/gpu/nvgpu/gp106')
 -rw-r--r--  drivers/gpu/nvgpu/gp106/pmu_gp106.c  | 145
 -rw-r--r--  drivers/gpu/nvgpu/gp106/pmu_gp106.h  |   3
 -rw-r--r--  drivers/gpu/nvgpu/gp106/sec2_gp106.c |  23
 3 files changed, 36 insertions(+), 135 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
index 88d68220..56f1e194 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -32,151 +32,43 @@
 #include <nvgpu/hw/gp106/hw_mc_gp106.h>
 #include <nvgpu/hw/gp106/hw_pwr_gp106.h>
 
-#define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000
-#define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10
+static bool gp106_is_pmu_supported(struct gk20a *g)
+{
+	return true;
+}
 
-static int gp106_pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
+bool gp106_pmu_is_engine_in_reset(struct gk20a *g)
 {
-	struct gk20a *g = gk20a_from_pmu(pmu);
+	u32 reg_reset;
+	bool status = false;
 
-	gk20a_dbg_fn("");
+	reg_reset = gk20a_readl(g, pwr_falcon_engine_r());
+	if (reg_reset == pwr_falcon_engine_reset_true_f())
+		status = true;
+
+	return status;
+}
 
+int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset)
+{
 	/*
 	 * From GP10X onwards, we are using PPWR_FALCON_ENGINE for reset. And as
-	 * it may come into same behaviour, reading NV_PPWR_FALCON_ENGINE again
+	 * it may come into same behavior, reading NV_PPWR_FALCON_ENGINE again
 	 * after Reset.
 	 */
-
-	if (enable) {
-		int retries = PMU_MEM_SCRUBBING_TIMEOUT_MAX /
-				PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT;
+	if (do_reset) {
 		gk20a_writel(g, pwr_falcon_engine_r(),
 			pwr_falcon_engine_reset_false_f());
 		gk20a_readl(g, pwr_falcon_engine_r());
-
-		/* make sure ELPG is in a good state */
-		if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
-			g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
-				g->slcg_enabled);
-		if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
-			g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
-				g->blcg_enabled);
-
-		/* wait for Scrubbing to complete */
-		do {
-			if (nvgpu_flcn_get_mem_scrubbing_status(pmu->flcn)) {
-				gk20a_dbg_fn("done");
-				return 0;
-			}
-			nvgpu_udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
-		} while (--retries);
-
-		/* If scrubbing timeout, keep PMU in reset state */
-		gk20a_writel(g, pwr_falcon_engine_r(),
-			pwr_falcon_engine_reset_true_f());
-		gk20a_readl(g, pwr_falcon_engine_r());
-		nvgpu_err(g, "Falcon mem scrubbing timeout");
-		return -ETIMEDOUT;
 	} else {
-		/* DISBALE */
 		gk20a_writel(g, pwr_falcon_engine_r(),
 			pwr_falcon_engine_reset_true_f());
 		gk20a_readl(g, pwr_falcon_engine_r());
-		return 0;
 	}
-}
 
-static int pmu_enable(struct nvgpu_pmu *pmu, bool enable)
-{
-	struct gk20a *g = gk20a_from_pmu(pmu);
-	u32 reg_reset;
-	int err;
-
-	gk20a_dbg_fn("");
-
-	if (!enable) {
-		reg_reset = gk20a_readl(g, pwr_falcon_engine_r());
-		if (reg_reset !=
-			pwr_falcon_engine_reset_true_f()) {
-
-			pmu_enable_irq(pmu, false);
-			gp106_pmu_enable_hw(pmu, false);
-			nvgpu_udelay(10);
-		}
-	} else {
-		gp106_pmu_enable_hw(pmu, true);
-		/* TBD: post reset */
-
-		/*idle the PMU and enable interrupts on the Falcon*/
-		err = nvgpu_flcn_wait_idle(pmu->flcn);
-		if (err)
-			return err;
-		nvgpu_udelay(5);
-		pmu_enable_irq(pmu, true);
-	}
-
-	gk20a_dbg_fn("done");
 	return 0;
 }
 
-int gp106_pmu_reset(struct gk20a *g)
-{
-	struct nvgpu_pmu *pmu = &g->pmu;
-	int err = 0;
-
-	gk20a_dbg_fn("");
-
-	err = nvgpu_flcn_wait_idle(pmu->flcn);
-	if (err)
-		return err;
-
-	/* TBD: release pmu hw mutex */
-
-	err = pmu_enable(pmu, false);
-	if (err)
-		return err;
-
-	/* TBD: cancel all sequences */
-	/* TBD: init all sequences and state tables */
-	/* TBD: restore pre-init message handler */
-
-	err = pmu_enable(pmu, true);
-	if (err)
-		return err;
-
-	return err;
-}
-
-static int gp106_sec2_reset(struct gk20a *g)
-{
-	gk20a_dbg_fn("");
-	//sec2 reset
-	gk20a_writel(g, psec_falcon_engine_r(),
-		pwr_falcon_engine_reset_true_f());
-	nvgpu_udelay(10);
-	gk20a_writel(g, psec_falcon_engine_r(),
-		pwr_falcon_engine_reset_false_f());
-
-	gk20a_dbg_fn("done");
-	return 0;
-}
-
-static int gp106_falcon_reset(struct gk20a *g)
-{
-	gk20a_dbg_fn("");
-
-	gp106_pmu_reset(g);
-	gp106_sec2_reset(g);
-
-	gk20a_dbg_fn("done");
-	return 0;
-}
-
-static bool gp106_is_pmu_supported(struct gk20a *g)
-{
-	return true;
-}
-
 static u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
 {
 	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
@@ -439,10 +331,11 @@ void gp106_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.pmu_lpwr_disable_pg = nvgpu_lpwr_disable_pg;
 	gops->pmu.pmu_pg_param_post_init = nvgpu_lpwr_post_init;
 	gops->pmu.dump_secure_fuses = NULL;
-	gops->pmu.reset = gp106_falcon_reset;
 	gops->pmu.mclk_init = gp106_mclk_init;
 	gops->pmu.mclk_deinit = gp106_mclk_deinit;
 	gops->pmu.is_pmu_supported = gp106_is_pmu_supported;
+	gops->pmu.reset_engine = gp106_pmu_engine_reset;
+	gops->pmu.is_engine_in_reset = gp106_pmu_is_engine_in_reset;
 
 	gk20a_dbg_fn("done");
 }
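
Note: with the combined gops->pmu.reset hook (gp106_falcon_reset) removed, common PMU code is expected to drive the falcon through the split reset_engine/is_engine_in_reset pair instead. A minimal caller sketch, assuming only what the hunks above establish; example_pmu_reset() is a hypothetical name, not a function added by this patch:

/* Hypothetical caller of the split reset HAL ops (illustrative only). */
static int example_pmu_reset(struct gk20a *g)
{
	int err;

	/* Put the falcon into reset unless it is already held there;
	 * do_reset == false writes pwr_falcon_engine_reset_true_f(). */
	if (!g->ops.pmu.is_engine_in_reset(g)) {
		err = g->ops.pmu.reset_engine(g, false);
		if (err)
			return err;
	}

	/* Release reset; do_reset == true writes
	 * pwr_falcon_engine_reset_false_f(). */
	return g->ops.pmu.reset_engine(g, true);
}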
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.h b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
index 3213b25c..5f399b89 100644
--- a/drivers/gpu/nvgpu/gp106/pmu_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.h
@@ -18,8 +18,9 @@
 	gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
 
 void gp106_init_pmu_ops(struct gpu_ops *gops);
-int gp106_pmu_reset(struct gk20a *g);
 void gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
 		struct pmu_pg_stats_data *pg_stat_data);
+bool gp106_pmu_is_engine_in_reset(struct gk20a *g);
+int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset);
 
 #endif /*__PMU_GP106_H_*/
diff --git a/drivers/gpu/nvgpu/gp106/sec2_gp106.c b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
index a25fc990..f49d56c4 100644
--- a/drivers/gpu/nvgpu/gp106/sec2_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/sec2_gp106.c
@@ -330,6 +330,20 @@ void init_pmu_setup_hw1(struct gk20a *g)
 
 }
 
+static int gp106_sec2_reset(struct gk20a *g)
+{
+	nvgpu_log_fn(g, " ");
+
+	gk20a_writel(g, psec_falcon_engine_r(),
+		pwr_falcon_engine_reset_true_f());
+	nvgpu_udelay(10);
+	gk20a_writel(g, psec_falcon_engine_r(),
+		pwr_falcon_engine_reset_false_f());
+
+	nvgpu_log_fn(g, "done");
+	return 0;
+}
+
 int init_sec2_setup_hw1(struct gk20a *g,
 	void *desc, u32 bl_sz)
 {
@@ -339,10 +353,7 @@ int init_sec2_setup_hw1(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
-	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	g->ops.pmu.reset(g);
-	pmu->isr_enabled = true;
-	nvgpu_mutex_release(&pmu->isr_mutex);
+	gp106_sec2_reset(g);
 
 	data = gk20a_readl(g, psec_fbif_ctl_r());
 	data |= psec_fbif_ctl_allow_phys_no_ctx_allow_f();
@@ -370,11 +381,7 @@ int init_sec2_setup_hw1(struct gk20a *g,
 		psec_fbif_transcfg_target_noncoherent_sysmem_f());
 
 	/*disable irqs for hs falcon booting as we will poll for halt*/
-	nvgpu_mutex_acquire(&pmu->isr_mutex);
-	pmu_enable_irq(pmu, false);
 	sec_enable_irq(pmu, false);
-	pmu->isr_enabled = false;
-	nvgpu_mutex_release(&pmu->isr_mutex);
 	err = bl_bootstrap_sec2(pmu, desc, bl_sz);
 	if (err)
 		return err;
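
Net effect on the SEC2 path: init_sec2_setup_hw1() no longer takes pmu->isr_mutex or toggles pmu->isr_enabled, and the reset pulse is a local helper rather than g->ops.pmu.reset. A condensed sketch of the resulting flow, mirroring the hunks above (register programming elided; example_sec2_bootstrap_flow() is a hypothetical name):

static int example_sec2_bootstrap_flow(struct gk20a *g, void *desc, u32 bl_sz)
{
	struct nvgpu_pmu *pmu = &g->pmu;

	/* Local SEC2 falcon reset pulse; PMU ISR state is untouched. */
	gp106_sec2_reset(g);

	/* ... psec_fbif_ctl / psec_fbif_transcfg programming as before ... */

	/* Keep IRQs off for the HS falcon boot; the caller polls for halt. */
	sec_enable_irq(pmu, false);

	return bl_bootstrap_sec2(pmu, desc, bl_sz);
}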