author	Vijayakumar <vsubbu@nvidia.com>	2014-09-26 01:24:09 -0400
committer	Dan Willemsen <dwillemsen@nvidia.com>	2015-03-18 15:11:43 -0400
commit	17ce09bb0537bd21b8b64ea9f963fc6b420563f9 (patch)
tree	9d1b520b335752f38fb5c3a3a4a6b09032abf49f
parent	026781c82ca42371c9263f449c2fd1d45d60dc20 (diff)
gpu: nvgpu: send ELPG init cmd after GR is ready
bug 200040021
bug 200032923

Change-Id: I5aa7f4efb1b675e9a3faaf73a80452e55cded89e
Signed-off-by: Vijayakumar <vsubbu@nvidia.com>
Change-Id: Ic162902bd2f05abab9ebd37392ed56dc4c164ba8
Reviewed-on: http://git-master/r/539995
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
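For orientation, below is a small standalone C model (illustrative only, not part of the patch) of the bring-up order this change establishes: pmu_process_init_msg() now only records PMU_STATE_INIT_RECEIVED and schedules the pg_init work, and pmu_init_powergating() waits for GR via gk20a_gr_wait_initialized() before posting the ELPG init command. The enum values mirror pmu_gk20a.h as changed by this patch; the main() walk is a hypothetical sketch, not driver code.

/*
 * Illustrative model of the PMU power-gating bring-up sequence after this
 * patch. State names and values mirror pmu_gk20a.h below.
 */
#include <stdio.h>

enum pmu_state {
	PMU_STATE_OFF = 0,		/* PMU is off */
	PMU_STATE_STARTING = 1,		/* PMU is on, but not booted */
	PMU_STATE_INIT_RECEIVED = 2,	/* PMU init message received (new) */
	PMU_STATE_ELPG_BOOTING = 3,	/* PMU is booting */
	PMU_STATE_ELPG_BOOTED = 4,	/* ELPG is initialized */
	PMU_STATE_LOADING_PG_BUF = 5,	/* Loading PG buf */
	PMU_STATE_LOADING_ZBC = 6,	/* Loading ZBC buf */
	PMU_STATE_STARTED = 7,		/* Fully initialized */
};

int main(void)
{
	/*
	 * After this patch the init message handler only marks
	 * INIT_RECEIVED and schedules the pg_init work; the work item
	 * waits for GR init and only then sends the ELPG init command,
	 * after which the remaining states follow in order.
	 */
	static const enum pmu_state seq[] = {
		PMU_STATE_INIT_RECEIVED,
		PMU_STATE_ELPG_BOOTING,
		PMU_STATE_ELPG_BOOTED,
		PMU_STATE_LOADING_PG_BUF,
		PMU_STATE_LOADING_ZBC,
		PMU_STATE_STARTED,
	};
	size_t i;

	for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
		printf("pmu_state -> %d\n", seq[i]);
	return 0;
}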
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	59
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.h	11
2 files changed, 51 insertions, 19 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 7878e1e2..395afdd4 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -42,6 +42,8 @@ static void ap_callback_init_and_enable_ctrl(
 	struct gk20a *g, struct pmu_msg *msg,
 	void *param, u32 seq_desc, u32 status);
 
+static int pmu_init_powergating(struct gk20a *g);
+
 static u32 pmu_perfmon_cntr_sz_v0(struct pmu_gk20a *pmu)
 {
 	return sizeof(struct pmu_perfmon_counter_v0);
@@ -2089,12 +2091,13 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
 		return;
 	}
 
-	if (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_FAILED) {
+	pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
+	if ((!pmu->buf_loaded) &&
+		(pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
 		gk20a_err(dev_from_gk20a(g), "failed to load PGENG buffer");
+	else {
+		schedule_work(&pmu->pg_init);
 	}
-
-	pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
-	schedule_work(&pmu->pg_init);
 }
 
 int gk20a_init_pmu_setup_hw1(struct gk20a *g)
@@ -2143,6 +2146,10 @@ void pmu_setup_hw(struct work_struct *work)
 	struct gk20a *g = gk20a_from_pmu(pmu);
 
 	switch (pmu->pmu_state) {
+	case PMU_STATE_INIT_RECEIVED:
+		gk20a_dbg_pmu("pmu starting");
+		pmu_init_powergating(g);
+		break;
 	case PMU_STATE_ELPG_BOOTED:
 		gk20a_dbg_pmu("elpg booted");
 		gk20a_init_pmu_bind_fecs(g);
@@ -2180,14 +2187,19 @@ int gk20a_init_pmu_bind_fecs(struct gk20a *g)
 	gk20a_dbg_fn("");
 
 	size = 0;
-	gk20a_gr_wait_initialized(g);
 	err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
-	if (err) {
+	if (err && (pmu->pmu_state == PMU_STATE_ELPG_BOOTED)) {
 		gk20a_err(dev_from_gk20a(g),
 			"fail to query fecs pg buffer size");
 		return err;
 	}
 
+	if (err) {
+		gk20a_err(dev_from_gk20a(g),
+			"fail to query fecs pg buffer size invalid boot state");
+		return err;
+	}
+
 	if (!pmu->pg_buf.cpuva) {
 		pmu->pg_buf.cpuva = dma_alloc_coherent(d, size,
 				&iova,
@@ -2224,19 +2236,31 @@ int gk20a_init_pmu_bind_fecs(struct gk20a *g)
 	}
 
 	err = gr_gk20a_fecs_set_reglist_bind_inst(g, mm->pmu.inst_block.cpu_pa);
-	if (err) {
+	if (err && (pmu->pmu_state == PMU_STATE_ELPG_BOOTED)) {
 		gk20a_err(dev_from_gk20a(g),
 			"fail to bind pmu inst to gr");
 		return err;
 	}
 
-	err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.pmu_va);
 	if (err) {
 		gk20a_err(dev_from_gk20a(g),
+			"fail to bind pmu inst to gr invalid boot state");
+		return err;
+	}
+
+	err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.pmu_va);
+	if (err && (pmu->pmu_state == PMU_STATE_ELPG_BOOTED)) {
+		gk20a_err(dev_from_gk20a(g),
 			"fail to set pg buffer pmu va");
 		return err;
 	}
 
+	if (err) {
+		gk20a_err(dev_from_gk20a(g),
+			"fail to set pg buffer pmu va invalid boot state");
+		return err;
+	}
+
 	memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_eng_buf_load);
@@ -2384,9 +2408,10 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 	case PMU_PG_ELPG_MSG_DISALLOW_ACK:
 		gk20a_dbg_pmu("DISALLOW is acknowledged from PMU");
 		pmu->elpg_stat = PMU_ELPG_STAT_OFF;
-		if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING)
+		if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) {
 			pmu->pmu_state = PMU_STATE_ELPG_BOOTED;
-		schedule_work(&pmu->pg_init);
+			schedule_work(&pmu->pg_init);
+		}
 		break;
 	default:
 		gk20a_err(dev_from_gk20a(g),
@@ -2419,9 +2444,9 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
 	}
 }
 
-static int pmu_init_powergating(struct pmu_gk20a *pmu)
+static int pmu_init_powergating(struct gk20a *g)
 {
-	struct gk20a *g = gk20a_from_pmu(pmu);
+	struct pmu_gk20a *pmu = &g->pmu;
 	struct pmu_cmd cmd;
 	u32 seq;
 
@@ -2441,6 +2466,8 @@ static int pmu_init_powergating(struct pmu_gk20a *pmu)
 			PMU_PG_POST_POWERUP_IDLE_THRESHOLD);
 	}
 
+	gk20a_gr_wait_initialized(g);
+
 	/* init ELPG */
 	memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
@@ -2481,7 +2508,8 @@ static int pmu_init_powergating(struct pmu_gk20a *pmu)
 	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 			pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
 
-	pmu->pmu_state = PMU_STATE_ELPG_BOOTING;
+	if (pmu->pmu_state == PMU_STATE_INIT_RECEIVED)
+		pmu->pmu_state = PMU_STATE_ELPG_BOOTING;
 
 	return 0;
 }
@@ -2596,6 +2624,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
 	union pmu_init_msg_pmu *init;
 	struct pmu_sha1_gid_data gid_data;
 	u32 i, tail = 0;
+	gk20a_dbg_pmu("init received\n");
 
 	tail = pwr_pmu_msgq_tail_val_v(
 		gk20a_readl(g, pwr_pmu_msgq_tail_r()));
@@ -2653,6 +2682,9 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
 		PMU_DMEM_ALLOC_ALIGNMENT);
 
 	pmu->pmu_ready = true;
+	pmu->pmu_state = PMU_STATE_INIT_RECEIVED;
+	schedule_work(&pmu->pg_init);
+	gk20a_dbg_pmu("init received end\n");
 
 	return 0;
 }
@@ -2964,7 +2996,6 @@ static int pmu_process_message(struct pmu_gk20a *pmu)
 
 	if (unlikely(!pmu->pmu_ready)) {
 		pmu_process_init_msg(pmu, &msg);
-		pmu_init_powergating(pmu);
 		pmu_init_perfmon(pmu);
 		return 0;
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index bc5e474a..0745136c 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -1016,11 +1016,12 @@ struct pmu_pg_stats {
1016/* Choices for pmu_state */ 1016/* Choices for pmu_state */
1017#define PMU_STATE_OFF 0 /* PMU is off */ 1017#define PMU_STATE_OFF 0 /* PMU is off */
1018#define PMU_STATE_STARTING 1 /* PMU is on, but not booted */ 1018#define PMU_STATE_STARTING 1 /* PMU is on, but not booted */
1019#define PMU_STATE_ELPG_BOOTING 2 /* PMU is booting */ 1019#define PMU_STATE_INIT_RECEIVED 2 /* PMU init message received */
1020#define PMU_STATE_ELPG_BOOTED 3 /* ELPG is initialized */ 1020#define PMU_STATE_ELPG_BOOTING 3 /* PMU is booting */
1021#define PMU_STATE_LOADING_PG_BUF 4 /* Loading PG buf */ 1021#define PMU_STATE_ELPG_BOOTED 4 /* ELPG is initialized */
1022#define PMU_STATE_LOADING_ZBC 5 /* Loading ZBC buf */ 1022#define PMU_STATE_LOADING_PG_BUF 5 /* Loading PG buf */
1023#define PMU_STATE_STARTED 6 /* Fully unitialized */ 1023#define PMU_STATE_LOADING_ZBC 6 /* Loading ZBC buf */
1024#define PMU_STATE_STARTED 7 /* Fully unitialized */
1024 1025
1025 1026
1026 1027