Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c  26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index e221be11..56ebda1a 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2896,8 +2896,8 @@ void gk20a_remove_pmu_support(struct pmu_gk20a *pmu)
 {
 	gk20a_dbg_fn("");
 
-	if (gk20a_alloc_initialized(&pmu->dmem))
-		gk20a_alloc_destroy(&pmu->dmem);
+	if (nvgpu_alloc_initialized(&pmu->dmem))
+		nvgpu_alloc_destroy(&pmu->dmem);
 
 	release_firmware(pmu->fw);
 }
@@ -3607,7 +3607,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
 	gk20a_writel(g, pwr_pmu_idle_ctrl_r(2), data);
 
 	if (!pmu->sample_buffer)
-		pmu->sample_buffer = gk20a_alloc(&pmu->dmem,
+		pmu->sample_buffer = nvgpu_alloc(&pmu->dmem,
 						2 * sizeof(u16));
 	if (!pmu->sample_buffer) {
 		gk20a_err(dev_from_gk20a(g),
@@ -3708,7 +3708,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
 	for (i = 0; i < PMU_QUEUE_COUNT; i++)
 		pmu_queue_init(pmu, i, init);
 
-	if (!gk20a_alloc_initialized(&pmu->dmem)) {
+	if (!nvgpu_alloc_initialized(&pmu->dmem)) {
 		/* Align start and end addresses */
 		u32 start = ALIGN(pv->get_pmu_init_msg_pmu_sw_mg_off(init),
 			PMU_DMEM_ALLOC_ALIGNMENT);
@@ -3716,9 +3716,9 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
 			pv->get_pmu_init_msg_pmu_sw_mg_size(init)) &
 			~(PMU_DMEM_ALLOC_ALIGNMENT - 1);
 		u32 size = end - start;
-		gk20a_bitmap_allocator_init(g, &pmu->dmem, "gk20a_pmu_dmem",
-			start, size,
-			PMU_DMEM_ALLOC_ALIGNMENT, 0);
+		nvgpu_bitmap_allocator_init(g, &pmu->dmem, "gk20a_pmu_dmem",
+			start, size,
+			PMU_DMEM_ALLOC_ALIGNMENT, 0);
 	}
 
 	pmu->pmu_ready = true;
@@ -3855,12 +3855,12 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
 	seq->callback = NULL;
 	if (pv->pmu_allocation_get_dmem_size(pmu,
 			pv->get_pmu_seq_in_a_ptr(seq)) != 0)
-		gk20a_free(&pmu->dmem,
+		nvgpu_free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
 			pv->get_pmu_seq_in_a_ptr(seq)));
 	if (pv->pmu_allocation_get_dmem_size(pmu,
 			pv->get_pmu_seq_out_a_ptr(seq)) != 0)
-		gk20a_free(&pmu->dmem,
+		nvgpu_free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
 			pv->get_pmu_seq_out_a_ptr(seq)));
 
@@ -4601,7 +4601,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 			(u16)max(payload->in.size, payload->out.size));
 
 		*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) =
-			gk20a_alloc(&pmu->dmem,
+			nvgpu_alloc(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_size(pmu, in));
 		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)))
 			goto clean_up;
@@ -4644,7 +4644,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 
 	if (payload->in.buf != payload->out.buf) {
 		*(pv->pmu_allocation_get_dmem_offset_addr(pmu, out)) =
-			gk20a_alloc(&pmu->dmem,
+			nvgpu_alloc(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_size(pmu, out));
 		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
 				out)))
@@ -4694,10 +4694,10 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 clean_up:
 	gk20a_dbg_fn("fail");
 	if (in)
-		gk20a_free(&pmu->dmem,
+		nvgpu_free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu, in));
 	if (out)
-		gk20a_free(&pmu->dmem,
+		nvgpu_free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu, out));
 
 	pmu_seq_release(pmu, seq);
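
Taken together, the hunks above touch every stage of the PMU DMEM allocator's
lifecycle under its new nvgpu_* names. A rough sketch of that lifecycle follows;
signatures are inferred only from the call sites in this diff (not from the
nvgpu allocator headers), and size_in_bytes is a hypothetical placeholder:

/* Condensed, hypothetical view of how pmu_gk20a.c drives the allocator
 * after this rename; types and return conventions are assumptions based
 * on the call sites in the diff above. */

/* pmu_process_init_msg(): one-time init over the PMU-managed DMEM
 * window, with start/size aligned to PMU_DMEM_ALLOC_ALIGNMENT. */
if (!nvgpu_alloc_initialized(&pmu->dmem))
	nvgpu_bitmap_allocator_init(g, &pmu->dmem, "gk20a_pmu_dmem",
			start, size, PMU_DMEM_ALLOC_ALIGNMENT, 0);

/* pmu_init_perfmon() / gk20a_pmu_cmd_post(): carve out per-command
 * buffers; every call site in this file treats a zero offset as failure. */
u32 offset = nvgpu_alloc(&pmu->dmem, size_in_bytes);
if (!offset)
	goto clean_up;

/* pmu_response_handle() and the clean_up path: return the offset. */
nvgpu_free(&pmu->dmem, offset);

/* gk20a_remove_pmu_support(): tear the allocator down. */
if (nvgpu_alloc_initialized(&pmu->dmem))
	nvgpu_alloc_destroy(&pmu->dmem);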