summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
diff options
context:
space:
mode:
authorMahantesh Kumbar <mkumbar@nvidia.com>2016-06-08 07:39:02 -0400
committerTerje Bergstrom <tbergstrom@nvidia.com>2016-06-09 19:55:23 -0400
commitb5f2cff0239ee7e9f8ae54c271a6d447ea83df49 (patch)
tree41d31c96d2b751b3c5386e57de0c7472fd2e282b /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent3735dba6f83e8de21eb2db620402d0ea9fd28835 (diff)
gpu: nvgpu: update PMU version, interface & code
- update PMU interface/code to support latest version of secure boot FW - Add PMU FW version for next GPU support - can_elpg check in pmu_setup_hw helps to fix queue error JIRA DNVGPU-34 Change-Id: Iecf47fbc5b71cbf0f4bcdfeafad5c635cb6bff82 Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com> Reviewed-on: http://git-master/r/1161107 GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.c73
1 file changed, 61 insertions, 12 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index f6e2df00..31cae4fd 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -576,6 +576,27 @@ static void pmu_allocation_set_dmem_offset_v0(struct pmu_gk20a *pmu,
576 pmu_a_ptr->alloc.dmem.offset = offset; 576 pmu_a_ptr->alloc.dmem.offset = offset;
577} 577}
578 578
579static void *get_pmu_msg_pmu_init_msg_ptr_v3(struct pmu_init_msg *init)
580{
581 return (void *)(&(init->pmu_init_v3));
582}
583
584static u16 get_pmu_init_msg_pmu_sw_mg_off_v3(union pmu_init_msg_pmu *init_msg)
585{
586 struct pmu_init_msg_pmu_v3 *init =
587 (struct pmu_init_msg_pmu_v3 *)(&init_msg->v3);
588
589 return init->sw_managed_area_offset;
590}
591
592static u16 get_pmu_init_msg_pmu_sw_mg_size_v3(union pmu_init_msg_pmu *init_msg)
593{
594 struct pmu_init_msg_pmu_v3 *init =
595 (struct pmu_init_msg_pmu_v3 *)(&init_msg->v3);
596
597 return init->sw_managed_area_size;
598}
599
579static void *get_pmu_msg_pmu_init_msg_ptr_v2(struct pmu_init_msg *init) 600static void *get_pmu_msg_pmu_init_msg_ptr_v2(struct pmu_init_msg *init)
580{ 601{
581 return (void *)(&(init->pmu_init_v2)); 602 return (void *)(&(init->pmu_init_v2));
@@ -1114,6 +1135,17 @@ static void get_pmu_init_msg_pmu_queue_params_v2(struct pmu_queue *queue,
1114 queue->size = init->queue_info[id].size; 1135 queue->size = init->queue_info[id].size;
1115} 1136}
1116 1137
1138static void get_pmu_init_msg_pmu_queue_params_v3(struct pmu_queue *queue,
1139 u32 id, void *pmu_init_msg)
1140{
1141 struct pmu_init_msg_pmu_v3 *init =
1142 (struct pmu_init_msg_pmu_v3 *)pmu_init_msg;
1143
1144 queue->index = init->queue_index[id];
1145 queue->offset = init->queue_offset;
1146 queue->size = init->queue_size[id];
1147}
1148
1117static void *get_pmu_sequence_in_alloc_ptr_v3(struct pmu_sequence *seq) 1149static void *get_pmu_sequence_in_alloc_ptr_v3(struct pmu_sequence *seq)
1118{ 1150{
1119 return (void *)(&seq->in_v3); 1151 return (void *)(&seq->in_v3);
@@ -1409,6 +1441,7 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu)
1409 get_pmu_sequence_out_alloc_ptr_v1; 1441 get_pmu_sequence_out_alloc_ptr_v1;
1410 break; 1442 break;
1411 case APP_VERSION_GM206: 1443 case APP_VERSION_GM206:
1444 case APP_VERSION_NV_GPU:
1412 g->ops.pmu_ver.pg_cmd_eng_buf_load_size = 1445 g->ops.pmu_ver.pg_cmd_eng_buf_load_size =
1413 pg_cmd_eng_buf_load_size_v2; 1446 pg_cmd_eng_buf_load_size_v2;
1414 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = 1447 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type =
@@ -1466,14 +1499,28 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu)
1466 pmu_allocation_get_dmem_offset_addr_v3; 1499 pmu_allocation_get_dmem_offset_addr_v3;
1467 g->ops.pmu_ver.pmu_allocation_set_dmem_offset = 1500 g->ops.pmu_ver.pmu_allocation_set_dmem_offset =
1468 pmu_allocation_set_dmem_offset_v3; 1501 pmu_allocation_set_dmem_offset_v3;
1469 g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = 1502
1470 get_pmu_init_msg_pmu_queue_params_v2; 1503 if(pmu->desc->app_version != APP_VERSION_NV_GPU) {
1471 g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = 1504 g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params =
1472 get_pmu_msg_pmu_init_msg_ptr_v2; 1505 get_pmu_init_msg_pmu_queue_params_v2;
1473 g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = 1506 g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr =
1474 get_pmu_init_msg_pmu_sw_mg_off_v2; 1507 get_pmu_msg_pmu_init_msg_ptr_v2;
1475 g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = 1508 g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off =
1476 get_pmu_init_msg_pmu_sw_mg_size_v2; 1509 get_pmu_init_msg_pmu_sw_mg_off_v2;
1510 g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size =
1511 get_pmu_init_msg_pmu_sw_mg_size_v2;
1512 }
1513 else
1514 {
1515 g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params =
1516 get_pmu_init_msg_pmu_queue_params_v3;
1517 g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr =
1518 get_pmu_msg_pmu_init_msg_ptr_v3;
1519 g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off =
1520 get_pmu_init_msg_pmu_sw_mg_off_v3;
1521 g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size =
1522 get_pmu_init_msg_pmu_sw_mg_size_v3;
1523 }
1477 g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = 1524 g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size =
1478 get_pmu_perfmon_cmd_start_size_v3; 1525 get_pmu_perfmon_cmd_start_size_v3;
1479 g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = 1526 g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar =
@@ -2866,7 +2913,6 @@ static int gk20a_prepare_ucode(struct gk20a *g)
2866 2913
2867static int gk20a_init_pmu_setup_sw(struct gk20a *g) 2914static int gk20a_init_pmu_setup_sw(struct gk20a *g)
2868{ 2915{
2869 struct gk20a_platform *platform = dev_get_drvdata(g->dev);
2870 struct pmu_gk20a *pmu = &g->pmu; 2916 struct pmu_gk20a *pmu = &g->pmu;
2871 struct mm_gk20a *mm = &g->mm; 2917 struct mm_gk20a *mm = &g->mm;
2872 struct vm_gk20a *vm = &mm->pmu.vm; 2918 struct vm_gk20a *vm = &mm->pmu.vm;
@@ -2919,8 +2965,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
2919 2965
2920 pmu_seq_init(pmu); 2966 pmu_seq_init(pmu);
2921 2967
2922 if (platform->can_elpg) 2968 INIT_WORK(&pmu->pg_init, pmu_setup_hw);
2923 INIT_WORK(&pmu->pg_init, pmu_setup_hw);
2924 2969
2925 err = gk20a_gmmu_alloc_map(vm, GK20A_PMU_SEQ_BUF_SIZE, &pmu->seq_buf); 2970 err = gk20a_gmmu_alloc_map(vm, GK20A_PMU_SEQ_BUF_SIZE, &pmu->seq_buf);
2926 if (err) { 2971 if (err) {
@@ -3028,11 +3073,13 @@ void pmu_setup_hw(struct work_struct *work)
3028{ 3073{
3029 struct pmu_gk20a *pmu = container_of(work, struct pmu_gk20a, pg_init); 3074 struct pmu_gk20a *pmu = container_of(work, struct pmu_gk20a, pg_init);
3030 struct gk20a *g = gk20a_from_pmu(pmu); 3075 struct gk20a *g = gk20a_from_pmu(pmu);
3076 struct gk20a_platform *platform = dev_get_drvdata(g->dev);
3031 3077
3032 switch (pmu->pmu_state) { 3078 switch (pmu->pmu_state) {
3033 case PMU_STATE_INIT_RECEIVED: 3079 case PMU_STATE_INIT_RECEIVED:
3034 gk20a_dbg_pmu("pmu starting"); 3080 gk20a_dbg_pmu("pmu starting");
3035 pmu_init_powergating(g); 3081 if (platform->can_elpg)
3082 pmu_init_powergating(g);
3036 break; 3083 break;
3037 case PMU_STATE_ELPG_BOOTED: 3084 case PMU_STATE_ELPG_BOOTED:
3038 gk20a_dbg_pmu("elpg booted"); 3085 gk20a_dbg_pmu("elpg booted");
@@ -3380,6 +3427,8 @@ static u8 get_perfmon_id(struct pmu_gk20a *pmu)
3380 break; 3427 break;
3381#if defined(CONFIG_ARCH_TEGRA_18x_SOC) 3428#if defined(CONFIG_ARCH_TEGRA_18x_SOC)
3382 case TEGRA_18x_GPUID: 3429 case TEGRA_18x_GPUID:
3430 case TEGRA_18x_GPUID2:
3431 case TEGRA_18x_GPUID3:
3383 unit_id = PMU_UNIT_PERFMON_T18X; 3432 unit_id = PMU_UNIT_PERFMON_T18X;
3384 break; 3433 break;
3385#endif 3434#endif