summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 347d7158..a3898993 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2856,8 +2856,8 @@ void gk20a_remove_pmu_support(struct pmu_gk20a *pmu)
2856{ 2856{
2857 gk20a_dbg_fn(""); 2857 gk20a_dbg_fn("");
2858 2858
2859 if (pmu->dmem.init) 2859 if (gk20a_alloc_initialized(&pmu->dmem))
2860 gk20a_allocator_destroy(&pmu->dmem); 2860 gk20a_alloc_destroy(&pmu->dmem);
2861} 2861}
2862 2862
2863static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g) 2863static int gk20a_init_pmu_reset_enable_hw(struct gk20a *g)
@@ -3503,7 +3503,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
3503 gk20a_writel(g, pwr_pmu_idle_ctrl_r(2), data); 3503 gk20a_writel(g, pwr_pmu_idle_ctrl_r(2), data);
3504 3504
3505 if (!pmu->sample_buffer) 3505 if (!pmu->sample_buffer)
3506 pmu->sample_buffer = gk20a_balloc(&pmu->dmem, 3506 pmu->sample_buffer = gk20a_alloc(&pmu->dmem,
3507 2 * sizeof(u16)); 3507 2 * sizeof(u16));
3508 if (!pmu->sample_buffer) { 3508 if (!pmu->sample_buffer) {
3509 gk20a_err(dev_from_gk20a(g), 3509 gk20a_err(dev_from_gk20a(g),
@@ -3605,7 +3605,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
3605 for (i = 0; i < PMU_QUEUE_COUNT; i++) 3605 for (i = 0; i < PMU_QUEUE_COUNT; i++)
3606 pmu_queue_init(pmu, i, init); 3606 pmu_queue_init(pmu, i, init);
3607 3607
3608 if (!pmu->dmem.init) { 3608 if (!gk20a_alloc_initialized(&pmu->dmem)) {
3609 /* Align start and end addresses */ 3609 /* Align start and end addresses */
3610 u32 start = ALIGN(pv->get_pmu_init_msg_pmu_sw_mg_off(init), 3610 u32 start = ALIGN(pv->get_pmu_init_msg_pmu_sw_mg_off(init),
3611 PMU_DMEM_ALLOC_ALIGNMENT); 3611 PMU_DMEM_ALLOC_ALIGNMENT);
@@ -3613,9 +3613,9 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
3613 pv->get_pmu_init_msg_pmu_sw_mg_size(init)) & 3613 pv->get_pmu_init_msg_pmu_sw_mg_size(init)) &
3614 ~(PMU_DMEM_ALLOC_ALIGNMENT - 1); 3614 ~(PMU_DMEM_ALLOC_ALIGNMENT - 1);
3615 u32 size = end - start; 3615 u32 size = end - start;
3616 __gk20a_allocator_init(&pmu->dmem, NULL, "gk20a_pmu_dmem", 3616 gk20a_buddy_allocator_init(&pmu->dmem, "gk20a_pmu_dmem",
3617 start, size, 3617 start, size,
3618 PMU_DMEM_ALLOC_ALIGNMENT, 4, 0); 3618 PMU_DMEM_ALLOC_ALIGNMENT, 0);
3619 } 3619 }
3620 3620
3621 pmu->pmu_ready = true; 3621 pmu->pmu_ready = true;
@@ -3752,12 +3752,12 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
3752 seq->callback = NULL; 3752 seq->callback = NULL;
3753 if (pv->pmu_allocation_get_dmem_size(pmu, 3753 if (pv->pmu_allocation_get_dmem_size(pmu,
3754 pv->get_pmu_seq_in_a_ptr(seq)) != 0) 3754 pv->get_pmu_seq_in_a_ptr(seq)) != 0)
3755 gk20a_bfree(&pmu->dmem, 3755 gk20a_free(&pmu->dmem,
3756 pv->pmu_allocation_get_dmem_offset(pmu, 3756 pv->pmu_allocation_get_dmem_offset(pmu,
3757 pv->get_pmu_seq_in_a_ptr(seq))); 3757 pv->get_pmu_seq_in_a_ptr(seq)));
3758 if (pv->pmu_allocation_get_dmem_size(pmu, 3758 if (pv->pmu_allocation_get_dmem_size(pmu,
3759 pv->get_pmu_seq_out_a_ptr(seq)) != 0) 3759 pv->get_pmu_seq_out_a_ptr(seq)) != 0)
3760 gk20a_bfree(&pmu->dmem, 3760 gk20a_free(&pmu->dmem,
3761 pv->pmu_allocation_get_dmem_offset(pmu, 3761 pv->pmu_allocation_get_dmem_offset(pmu,
3762 pv->get_pmu_seq_out_a_ptr(seq))); 3762 pv->get_pmu_seq_out_a_ptr(seq)));
3763 3763
@@ -4418,7 +4418,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
4418 (u16)max(payload->in.size, payload->out.size)); 4418 (u16)max(payload->in.size, payload->out.size));
4419 4419
4420 *(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) = 4420 *(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) =
4421 gk20a_balloc(&pmu->dmem, 4421 gk20a_alloc(&pmu->dmem,
4422 pv->pmu_allocation_get_dmem_size(pmu, in)); 4422 pv->pmu_allocation_get_dmem_size(pmu, in));
4423 if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in))) 4423 if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)))
4424 goto clean_up; 4424 goto clean_up;
@@ -4443,7 +4443,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
4443 if (payload->out.buf != payload->in.buf) { 4443 if (payload->out.buf != payload->in.buf) {
4444 4444
4445 *(pv->pmu_allocation_get_dmem_offset_addr(pmu, out)) = 4445 *(pv->pmu_allocation_get_dmem_offset_addr(pmu, out)) =
4446 gk20a_balloc(&pmu->dmem, 4446 gk20a_alloc(&pmu->dmem,
4447 pv->pmu_allocation_get_dmem_size(pmu, out)); 4447 pv->pmu_allocation_get_dmem_size(pmu, out));
4448 if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, 4448 if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
4449 out))) 4449 out)))
@@ -4474,10 +4474,10 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
4474clean_up: 4474clean_up:
4475 gk20a_dbg_fn("fail"); 4475 gk20a_dbg_fn("fail");
4476 if (in) 4476 if (in)
4477 gk20a_bfree(&pmu->dmem, 4477 gk20a_free(&pmu->dmem,
4478 pv->pmu_allocation_get_dmem_offset(pmu, in)); 4478 pv->pmu_allocation_get_dmem_offset(pmu, in));
4479 if (out) 4479 if (out)
4480 gk20a_bfree(&pmu->dmem, 4480 gk20a_free(&pmu->dmem,
4481 pv->pmu_allocation_get_dmem_offset(pmu, out)); 4481 pv->pmu_allocation_get_dmem_offset(pmu, out));
4482 4482
4483 pmu_seq_release(pmu, seq); 4483 pmu_seq_release(pmu, seq);