diff options
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 68 |
1 file changed, 29 insertions, 39 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index 2456c784..11322293 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | |||
@@ -2816,7 +2816,6 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu) | |||
2816 | struct pmu_payload payload; | 2816 | struct pmu_payload payload; |
2817 | u32 seq; | 2817 | u32 seq; |
2818 | u32 data; | 2818 | u32 data; |
2819 | int err = 0; | ||
2820 | 2819 | ||
2821 | gk20a_dbg_fn(""); | 2820 | gk20a_dbg_fn(""); |
2822 | 2821 | ||
@@ -2867,12 +2866,11 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu) | |||
2867 | gk20a_writel(g, pwr_pmu_idle_ctrl_r(2), data); | 2866 | gk20a_writel(g, pwr_pmu_idle_ctrl_r(2), data); |
2868 | 2867 | ||
2869 | if (!pmu->sample_buffer) | 2868 | if (!pmu->sample_buffer) |
2870 | err = pmu->dmem.alloc(&pmu->dmem, | 2869 | pmu->sample_buffer = gk20a_balloc(&pmu->dmem, |
2871 | &pmu->sample_buffer, 2 * sizeof(u16), | 2870 | 2 * sizeof(u16)); |
2872 | PMU_DMEM_ALLOC_ALIGNMENT); | 2871 | if (!pmu->sample_buffer) { |
2873 | if (err) { | ||
2874 | gk20a_err(dev_from_gk20a(g), | 2872 | gk20a_err(dev_from_gk20a(g), |
2875 | "failed to allocate perfmon sample buffer"); | 2873 | "failed to allocate perfmon sample buffer"); |
2876 | return -ENOMEM; | 2874 | return -ENOMEM; |
2877 | } | 2875 | } |
2878 | 2876 | ||
@@ -2970,15 +2968,17 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu, | |||
2970 | for (i = 0; i < PMU_QUEUE_COUNT; i++) | 2968 | for (i = 0; i < PMU_QUEUE_COUNT; i++) |
2971 | pmu_queue_init(pmu, i, init); | 2969 | pmu_queue_init(pmu, i, init); |
2972 | 2970 | ||
2973 | if (!pmu->dmem.alloc) { | 2971 | if (!pmu->dmem.init) { |
2974 | /*Align start and end addresses*/ | 2972 | /* Align start and end addresses */ |
2975 | u32 start = ALIGN(pv->get_pmu_init_msg_pmu_sw_mg_off(init), | 2973 | u32 start = ALIGN(pv->get_pmu_init_msg_pmu_sw_mg_off(init), |
2976 | PMU_DMEM_ALLOC_ALIGNMENT); | 2974 | PMU_DMEM_ALLOC_ALIGNMENT); |
2977 | u32 end = (pv->get_pmu_init_msg_pmu_sw_mg_off(init) + | 2975 | u32 end = (pv->get_pmu_init_msg_pmu_sw_mg_off(init) + |
2978 | pv->get_pmu_init_msg_pmu_sw_mg_size(init)) & | 2976 | pv->get_pmu_init_msg_pmu_sw_mg_size(init)) & |
2979 | ~(PMU_DMEM_ALLOC_ALIGNMENT - 1); | 2977 | ~(PMU_DMEM_ALLOC_ALIGNMENT - 1); |
2980 | u32 size = end - start; | 2978 | u32 size = end - start; |
2981 | gk20a_allocator_init(&pmu->dmem, "gk20a_pmu_dmem", start, size); | 2979 | __gk20a_allocator_init(&pmu->dmem, NULL, "gk20a_pmu_dmem", |
2980 | start, size, | ||
2981 | PMU_DMEM_ALLOC_ALIGNMENT, 4, 0); | ||
2982 | } | 2982 | } |
2983 | 2983 | ||
2984 | pmu->pmu_ready = true; | 2984 | pmu->pmu_ready = true; |
@@ -3115,20 +3115,14 @@ static int pmu_response_handle(struct pmu_gk20a *pmu, | |||
3115 | seq->callback = NULL; | 3115 | seq->callback = NULL; |
3116 | if (pv->pmu_allocation_get_dmem_size(pmu, | 3116 | if (pv->pmu_allocation_get_dmem_size(pmu, |
3117 | pv->get_pmu_seq_in_a_ptr(seq)) != 0) | 3117 | pv->get_pmu_seq_in_a_ptr(seq)) != 0) |
3118 | pmu->dmem.free(&pmu->dmem, | 3118 | gk20a_bfree(&pmu->dmem, |
3119 | pv->pmu_allocation_get_dmem_offset(pmu, | 3119 | pv->pmu_allocation_get_dmem_offset(pmu, |
3120 | pv->get_pmu_seq_in_a_ptr(seq)), | 3120 | pv->get_pmu_seq_in_a_ptr(seq))); |
3121 | pv->pmu_allocation_get_dmem_size(pmu, | ||
3122 | pv->get_pmu_seq_in_a_ptr(seq)), | ||
3123 | PMU_DMEM_ALLOC_ALIGNMENT); | ||
3124 | if (pv->pmu_allocation_get_dmem_size(pmu, | 3121 | if (pv->pmu_allocation_get_dmem_size(pmu, |
3125 | pv->get_pmu_seq_out_a_ptr(seq)) != 0) | 3122 | pv->get_pmu_seq_out_a_ptr(seq)) != 0) |
3126 | pmu->dmem.free(&pmu->dmem, | 3123 | gk20a_bfree(&pmu->dmem, |
3127 | pv->pmu_allocation_get_dmem_offset(pmu, | 3124 | pv->pmu_allocation_get_dmem_offset(pmu, |
3128 | pv->get_pmu_seq_out_a_ptr(seq)), | 3125 | pv->get_pmu_seq_out_a_ptr(seq))); |
3129 | pv->pmu_allocation_get_dmem_size(pmu, | ||
3130 | pv->get_pmu_seq_out_a_ptr(seq)), | ||
3131 | PMU_DMEM_ALLOC_ALIGNMENT); | ||
3132 | 3126 | ||
3133 | if (seq->callback) | 3127 | if (seq->callback) |
3134 | seq->callback(g, msg, seq->cb_params, seq->desc, ret); | 3128 | seq->callback(g, msg, seq->cb_params, seq->desc, ret); |
@@ -3769,11 +3763,10 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, | |||
3769 | pv->pmu_allocation_set_dmem_size(pmu, in, | 3763 | pv->pmu_allocation_set_dmem_size(pmu, in, |
3770 | (u16)max(payload->in.size, payload->out.size)); | 3764 | (u16)max(payload->in.size, payload->out.size)); |
3771 | 3765 | ||
3772 | err = pmu->dmem.alloc(&pmu->dmem, | 3766 | *(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) = |
3773 | pv->pmu_allocation_get_dmem_offset_addr(pmu, in), | 3767 | gk20a_balloc(&pmu->dmem, |
3774 | pv->pmu_allocation_get_dmem_size(pmu, in), | 3768 | pv->pmu_allocation_get_dmem_size(pmu, in)); |
3775 | PMU_DMEM_ALLOC_ALIGNMENT); | 3769 | if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in))) |
3776 | if (err) | ||
3777 | goto clean_up; | 3770 | goto clean_up; |
3778 | 3771 | ||
3779 | pmu_copy_to_dmem(pmu, (pv->pmu_allocation_get_dmem_offset(pmu, | 3772 | pmu_copy_to_dmem(pmu, (pv->pmu_allocation_get_dmem_offset(pmu, |
@@ -3794,11 +3787,12 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, | |||
3794 | (u16)payload->out.size); | 3787 | (u16)payload->out.size); |
3795 | 3788 | ||
3796 | if (payload->out.buf != payload->in.buf) { | 3789 | if (payload->out.buf != payload->in.buf) { |
3797 | err = pmu->dmem.alloc(&pmu->dmem, | 3790 | |
3798 | pv->pmu_allocation_get_dmem_offset_addr(pmu, out), | 3791 | *(pv->pmu_allocation_get_dmem_offset_addr(pmu, out)) = |
3799 | pv->pmu_allocation_get_dmem_size(pmu, out), | 3792 | gk20a_balloc(&pmu->dmem, |
3800 | PMU_DMEM_ALLOC_ALIGNMENT); | 3793 | pv->pmu_allocation_get_dmem_size(pmu, out)); |
3801 | if (err) | 3794 | if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, |
3795 | out))) | ||
3802 | goto clean_up; | 3796 | goto clean_up; |
3803 | } else { | 3797 | } else { |
3804 | BUG_ON(in == NULL); | 3798 | BUG_ON(in == NULL); |
@@ -3826,15 +3820,11 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd, | |||
3826 | clean_up: | 3820 | clean_up: |
3827 | gk20a_dbg_fn("fail"); | 3821 | gk20a_dbg_fn("fail"); |
3828 | if (in) | 3822 | if (in) |
3829 | pmu->dmem.free(&pmu->dmem, | 3823 | gk20a_bfree(&pmu->dmem, |
3830 | pv->pmu_allocation_get_dmem_offset(pmu, in), | 3824 | pv->pmu_allocation_get_dmem_offset(pmu, in)); |
3831 | pv->pmu_allocation_get_dmem_size(pmu, in), | ||
3832 | PMU_DMEM_ALLOC_ALIGNMENT); | ||
3833 | if (out) | 3825 | if (out) |
3834 | pmu->dmem.free(&pmu->dmem, | 3826 | gk20a_bfree(&pmu->dmem, |
3835 | pv->pmu_allocation_get_dmem_offset(pmu, out), | 3827 | pv->pmu_allocation_get_dmem_offset(pmu, out)); |
3836 | pv->pmu_allocation_get_dmem_size(pmu, out), | ||
3837 | PMU_DMEM_ALLOC_ALIGNMENT); | ||
3838 | 3828 | ||
3839 | pmu_seq_release(pmu, seq); | 3829 | pmu_seq_release(pmu, seq); |
3840 | return err; | 3830 | return err; |