author	Mahantesh Kumbar <mkumbar@nvidia.com>	2017-05-23 07:33:28 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-02 13:53:47 -0400
commit	96cf9748a776e06ae9d9827b5e9adc58a6c25d80 (patch)
tree	a0714ebd4b90f5c10e32e6bacbec34a659ff9c3b /drivers/gpu/nvgpu
parent	6090a8a7ee347f92d806f104d3a0082208f5df64 (diff)
gpu: nvgpu: nvgpu_thread to handle PMU state changes
- Replaced schedule_work() with an nvgpu_thread plus nvgpu_cond_wait()
  to handle PMU state machine changes during boot.
- Added new state "PMU_STATE_EXIT" to exit the PMU state machine loop &
  wait for the thread stop signal.
- In gk20a_init_pmu_setup_sw(), create & start the thread that handles
  PMU state changes.
- In pmu_destroy(), post PMU_STATE_EXIT & wait for pending operations to
  complete before destroying the thread.

JIRA NVGPU-56

Change-Id: I951208bf88e82c281e3e678ddc603d58aec5ab10
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1487882
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
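Editor's note: the pattern this change adopts, a dedicated thread parked on a
condition variable that is woken each time the state machine advances, can be
sketched in isolation. Below is a minimal standalone sketch using plain POSIX
threads rather than the nvgpu_thread/nvgpu_cond wrappers; all names here
(demo_state_change, demo_task, STATE_*) are illustrative, not driver API:

/*
 * Minimal sketch of the state-change producer/consumer pattern,
 * assuming POSIX threads instead of nvgpu_thread/nvgpu_cond.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define STATE_INIT_RECEIVED	1
#define STATE_EXIT		8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static bool state_change;
static int state;

/* Analogous to nvgpu_pmu_state_change(): record the new state and,
 * when requested, wake the worker thread. */
static void demo_state_change(int new_state, bool post_change_event)
{
	pthread_mutex_lock(&lock);
	state = new_state;
	if (post_change_event) {
		state_change = true;
		pthread_cond_signal(&wq);
	}
	pthread_mutex_unlock(&lock);
}

/* Analogous to nvgpu_pg_init_task(): sleep until a change is posted,
 * handle it, and leave the loop when STATE_EXIT is seen. */
static void *demo_task(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (true) {
		while (!state_change)
			pthread_cond_wait(&wq, &lock);
		state_change = false;

		if (state == STATE_EXIT)
			break;
		printf("handling state %d\n", state);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t task;

	pthread_create(&task, NULL, demo_task, NULL);
	demo_state_change(STATE_INIT_RECEIVED, true);	/* boot-time event */
	demo_state_change(STATE_EXIT, true);		/* teardown */
	pthread_join(task, NULL);			/* wait for thread stop */
	return 0;
}

Relative to a workqueue item, the dedicated thread serializes all state
transitions in one context and gives teardown an explicit join point, which
is what gk20a_pmu_destroy() relies on below when it posts PMU_STATE_EXIT and
waits for the thread to stop.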
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	169
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.h	10
2 files changed, 132 insertions, 47 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 552d5d73..fc46db91 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -50,6 +50,10 @@ static void ap_callback_init_and_enable_ctrl(
 	struct gk20a *g, struct pmu_msg *msg,
 	void *param, u32 seq_desc, u32 status);
 
+static int nvgpu_init_task_pg_init(struct gk20a *g);
+static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state,
+		bool post_change_event);
+
 static int pmu_init_powergating(struct gk20a *g);
 
 static u32 pmu_perfmon_cntr_sz_v0(struct pmu_gk20a *pmu)
@@ -3176,6 +3180,9 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 	/* start with elpg disabled until first enable call */
 	pmu->elpg_refcnt = 0;
 
+	/* Create thread to handle PMU state machine */
+	nvgpu_init_task_pg_init(g);
+
 	if (pmu->sw_ready) {
 		for (i = 0; i < pmu->mutex_cnt; i++) {
 			pmu->mutex[i].id = i;
@@ -3216,8 +3223,6 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
 	pmu_seq_init(pmu);
 
-	INIT_WORK(&pmu->pg_init, pmu_setup_hw);
-
 	err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
 			&pmu->seq_buf);
 	if (err) {
@@ -3278,7 +3283,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
 			(pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
 		nvgpu_err(g, "failed to load PGENG buffer");
 	else {
-		schedule_work(&pmu->pg_init);
+		nvgpu_pmu_state_change(g, pmu->pmu_state, true);
 	}
 }
 
@@ -3319,36 +3324,93 @@ static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
 static void pmu_setup_hw_load_zbc(struct gk20a *g);
 static void pmu_setup_hw_enable_elpg(struct gk20a *g);
 
-void pmu_setup_hw(struct work_struct *work)
+static void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state,
+		bool post_change_event)
 {
-	struct pmu_gk20a *pmu = container_of(work, struct pmu_gk20a, pg_init);
-	struct gk20a *g = gk20a_from_pmu(pmu);
+	struct pmu_gk20a *pmu = &g->pmu;
 
-	switch (pmu->pmu_state) {
-	case PMU_STATE_INIT_RECEIVED:
-		gk20a_dbg_pmu("pmu starting");
-		if (g->can_elpg)
-			pmu_init_powergating(g);
-		break;
-	case PMU_STATE_ELPG_BOOTED:
-		gk20a_dbg_pmu("elpg booted");
-		gk20a_init_pmu_bind_fecs(g);
-		break;
-	case PMU_STATE_LOADING_PG_BUF:
-		gk20a_dbg_pmu("loaded pg buf");
-		pmu_setup_hw_load_zbc(g);
-		break;
-	case PMU_STATE_LOADING_ZBC:
-		gk20a_dbg_pmu("loaded zbc");
-		pmu_setup_hw_enable_elpg(g);
-		break;
-	case PMU_STATE_STARTED:
-		gk20a_dbg_pmu("PMU booted");
-		break;
-	default:
-		gk20a_dbg_pmu("invalid state");
-		break;
+	pmu->pmu_state = pmu_state;
+
+	if (post_change_event) {
+		pmu->pg_init.state_change = true;
+		nvgpu_cond_signal(&pmu->pg_init.wq);
 	}
+
+	/* make status visible */
+	smp_mb();
+}
+
+static int nvgpu_pg_init_task(void *arg)
+{
+	struct gk20a *g = (struct gk20a *)arg;
+	struct pmu_gk20a *pmu = &g->pmu;
+	struct nvgpu_pg_init *pg_init = &pmu->pg_init;
+	u32 pmu_state = 0;
+
+	while (true) {
+
+		NVGPU_COND_WAIT(&pg_init->wq,
+			(pg_init->state_change == true), 0);
+
+		pmu->pg_init.state_change = false;
+		pmu_state = ACCESS_ONCE(pmu->pmu_state);
+
+		if (pmu_state == PMU_STATE_EXIT) {
+			gk20a_dbg_pmu("pmu state exit");
+			break;
+		}
+
+		switch (pmu_state) {
+		case PMU_STATE_INIT_RECEIVED:
+			gk20a_dbg_pmu("pmu starting");
+			if (g->can_elpg)
+				pmu_init_powergating(g);
+			break;
+		case PMU_STATE_ELPG_BOOTED:
+			gk20a_dbg_pmu("elpg booted");
+			gk20a_init_pmu_bind_fecs(g);
+			break;
+		case PMU_STATE_LOADING_PG_BUF:
+			gk20a_dbg_pmu("loaded pg buf");
+			pmu_setup_hw_load_zbc(g);
+			break;
+		case PMU_STATE_LOADING_ZBC:
+			gk20a_dbg_pmu("loaded zbc");
+			pmu_setup_hw_enable_elpg(g);
+			break;
+		case PMU_STATE_STARTED:
+			gk20a_dbg_pmu("PMU booted");
+			break;
+		default:
+			gk20a_dbg_pmu("invalid state");
+			break;
+		}
+
+	}
+
+	while (!nvgpu_thread_should_stop(&pg_init->state_task))
+		nvgpu_msleep(5);
+
+	return 0;
+}
+
+static int nvgpu_init_task_pg_init(struct gk20a *g)
+{
+	struct pmu_gk20a *pmu = &g->pmu;
+	char thread_name[64];
+	int err = 0;
+
+	nvgpu_cond_init(&pmu->pg_init.wq);
+
+	snprintf(thread_name, sizeof(thread_name),
+		"nvgpu_pg_init_%s", g->name);
+
+	err = nvgpu_thread_create(&pmu->pg_init.state_task, g,
+			nvgpu_pg_init_task, thread_name);
+	if (err)
+		nvgpu_err(g, "failed to start nvgpu_pg_init thread");
+
+	return err;
 }
 
 int gk20a_init_pmu_bind_fecs(struct gk20a *g)
@@ -3386,7 +3448,7 @@ int gk20a_init_pmu_bind_fecs(struct gk20a *g)
 	gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
 	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
 			pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
-	pmu->pmu_state = PMU_STATE_LOADING_PG_BUF;
+	nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false);
 	return err;
 }
 
@@ -3422,7 +3484,7 @@ static void pmu_setup_hw_load_zbc(struct gk20a *g)
 	gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
 	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
 			pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
-	pmu->pmu_state = PMU_STATE_LOADING_ZBC;
+	nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false);
 }
 
 static void pmu_setup_hw_enable_elpg(struct gk20a *g)
@@ -3438,7 +3500,7 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g)
 		gk20a_writel(g, 0x10a164, 0x109ff);
 
 	pmu->initialized = true;
-	pmu->pmu_state = PMU_STATE_STARTED;
+	nvgpu_pmu_state_change(g, PMU_STATE_STARTED, false);
 
 	if (g->ops.pmu_ver.is_pmu_zbc_save_supported) {
 		/* Save zbc table after PMU is initialized. */
@@ -3550,7 +3612,7 @@ int gk20a_init_pmu_support(struct gk20a *g)
 		if (err)
 			return err;
 
-		pmu->pmu_state = PMU_STATE_STARTING;
+		nvgpu_pmu_state_change(g, PMU_STATE_STARTING, false);
 	}
 
 	return err;
@@ -3598,14 +3660,14 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
 				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
 				PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
 				pmu->initialized = true;
-				pmu->pmu_state = PMU_STATE_STARTED;
+				nvgpu_pmu_state_change(g, PMU_STATE_STARTED,
+					false);
 				WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
 				/* make status visible */
 				smp_mb();
-			} else {
-				pmu->pmu_state = PMU_STATE_ELPG_BOOTED;
-				schedule_work(&pmu->pg_init);
-			}
+			} else
+				nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTED,
+					true);
 		}
 		break;
 	default:
@@ -3722,7 +3784,8 @@ static int pmu_init_powergating(struct gk20a *g)
 		if (BIT(pg_engine_id) & pg_engine_id_list) {
 			pmu_pg_init_send(g, pg_engine_id);
 			if (pmu->pmu_state == PMU_STATE_INIT_RECEIVED)
-				pmu->pmu_state = PMU_STATE_ELPG_BOOTING;
+				nvgpu_pmu_state_change(g,
+					PMU_STATE_ELPG_BOOTING, false);
 		}
 	}
 
@@ -3931,8 +3994,9 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
 	}
 
 	pmu->pmu_ready = true;
-	pmu->pmu_state = PMU_STATE_INIT_RECEIVED;
-	schedule_work(&pmu->pg_init);
+
+	nvgpu_pmu_state_change(g, PMU_STATE_INIT_RECEIVED, true);
+
 	gk20a_dbg_pmu("init received end\n");
 
 	return 0;
@@ -5178,6 +5242,7 @@ int gk20a_pmu_destroy(struct gk20a *g)
 {
 	struct pmu_gk20a *pmu = &g->pmu;
 	struct pmu_pg_stats_data pg_stat_data = { 0 };
+	struct nvgpu_timeout timeout;
 	int i;
 
 	gk20a_dbg_fn("");
@@ -5186,7 +5251,23 @@ int gk20a_pmu_destroy(struct gk20a *g)
 		return 0;
 
 	/* make sure the pending operations are finished before we continue */
-	cancel_work_sync(&pmu->pg_init);
+	if (nvgpu_thread_is_running(&pmu->pg_init.state_task)) {
+
+		/* post PMU_STATE_EXIT to exit PMU state machine loop */
+		nvgpu_pmu_state_change(g, PMU_STATE_EXIT, true);
+
+		/* make thread stop */
+		nvgpu_thread_stop(&pmu->pg_init.state_task);
+
+		/* wait to confirm thread stopped */
+		nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
+		do {
+			if (!nvgpu_thread_is_running(&pmu->pg_init.state_task))
+				break;
+			nvgpu_udelay(2);
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+			"timeout - waiting PMU state machine thread stop"));
+	}
 
 	gk20a_pmu_get_pg_stats(g,
 		PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
@@ -5206,7 +5287,7 @@ int gk20a_pmu_destroy(struct gk20a *g)
 	for (i = 0; i < PMU_QUEUE_COUNT; i++)
 		nvgpu_mutex_destroy(&pmu->queue[i].mutex);
 
-	pmu->pmu_state = PMU_STATE_OFF;
+	nvgpu_pmu_state_change(g, PMU_STATE_OFF, false);
 	pmu->pmu_ready = false;
 	pmu->perfmon_ready = false;
 	pmu->zbc_ready = false;
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index cefb6577..cfcf3947 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -307,8 +307,13 @@ struct pmu_pg_stats_data {
 #define PMU_STATE_LOADING_PG_BUF	5 /* Loading PG buf */
 #define PMU_STATE_LOADING_ZBC		6 /* Loading ZBC buf */
 #define PMU_STATE_STARTED		7 /* Fully unitialized */
+#define PMU_STATE_EXIT			8 /* Exit PMU state machine */
 
-
+struct nvgpu_pg_init {
+	bool state_change;
+	struct nvgpu_cond wq;
+	struct nvgpu_thread state_task;
+};
 
 struct pmu_gk20a {
 
@@ -356,7 +361,7 @@ struct pmu_gk20a {
 	int pmu_state;
 
 #define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC	1 /* msec */
-	struct work_struct pg_init;
+	struct nvgpu_pg_init pg_init;
 	struct nvgpu_mutex pg_mutex; /* protect pg-RPPG/MSCG enable/disable */
 	struct nvgpu_mutex elpg_mutex; /* protect elpg enable/disable */
 	int elpg_refcnt; /* disable -1, enable +1, <=0 elpg disabled, > 0 elpg enabled */
@@ -440,7 +445,6 @@ int pmu_bootstrap(struct pmu_gk20a *pmu);
 int gk20a_init_pmu(struct pmu_gk20a *pmu);
 void pmu_dump_falcon_stats(struct pmu_gk20a *pmu);
 void gk20a_remove_pmu_support(struct pmu_gk20a *pmu);
-void pmu_setup_hw(struct work_struct *work);
 void pmu_seq_init(struct pmu_gk20a *pmu);
 
 int gk20a_init_pmu(struct pmu_gk20a *pmu);