author     Bharat Nihalani <bnihalani@nvidia.com>   2015-05-29 06:56:23 -0400
committer  Bharat Nihalani <bnihalani@nvidia.com>   2015-06-02 23:18:55 -0400
commit     1d8fdf56959240622073dd771dd9bfccf31b8f8e (patch)
tree       5c670e604825ddc25d6b6b0cce32cb3e7dc6871a /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent     38cee4d7effe5a2079a08b3c9a216b3197893959 (diff)
Revert "Revert "Revert "gpu: nvgpu: New allocator for VA space"""
This reverts commit ce1cf06b9a8eb6314ba0ca294e8cb430e1e141c0 since it
causes a GPU pbdma interrupt to be generated.

Bug 200106514

Change-Id: If3ed9a914c4e3e7f3f98c6609c6dbf57e1eb9aad
Signed-off-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-on: http://git-master/r/749291
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c  68
1 file changed, 39 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 11322293..2456c784 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2816,6 +2816,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
 	struct pmu_payload payload;
 	u32 seq;
 	u32 data;
+	int err = 0;
 
 	gk20a_dbg_fn("");
 
@@ -2866,11 +2867,12 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
 	gk20a_writel(g, pwr_pmu_idle_ctrl_r(2), data);
 
 	if (!pmu->sample_buffer)
-		pmu->sample_buffer = gk20a_balloc(&pmu->dmem,
-						2 * sizeof(u16));
-	if (!pmu->sample_buffer) {
+		err = pmu->dmem.alloc(&pmu->dmem,
+			&pmu->sample_buffer, 2 * sizeof(u16),
+			PMU_DMEM_ALLOC_ALIGNMENT);
+	if (err) {
 		gk20a_err(dev_from_gk20a(g),
 			"failed to allocate perfmon sample buffer");
 		return -ENOMEM;
 	}
 
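For orientation: the post-revert call sites imply a small allocator object
embedded in the PMU whose alloc and free members are function pointers
taking an explicit size and alignment, with alloc returning an errno-style
status and writing the DMEM offset through an out-parameter. The sketch
below is inferred purely from those call sites; the struct layout and the
bump-pointer body are illustrative assumptions, not the actual
gk20a_allocator.

/*
 * Illustrative only: interface shape inferred from the call sites in
 * this diff; the bump-pointer body is an invented stand-in for the
 * real gk20a DMEM allocator.
 */
#include <stdio.h>

typedef unsigned short u16;
typedef unsigned int u32;

struct toy_allocator {
	u32 start;  /* first managed DMEM offset    */
	u32 end;    /* one past last managed offset */
	u32 next;   /* next free offset             */
	int (*alloc)(struct toy_allocator *a, u32 *addr, u32 len, u32 align);
	void (*free)(struct toy_allocator *a, u32 addr, u32 len, u32 align);
};

static int toy_alloc(struct toy_allocator *a, u32 *addr, u32 len, u32 align)
{
	u32 off = (a->next + align - 1) & ~(align - 1);  /* round up */

	if (off + len > a->end)
		return -1;          /* caller maps this to -ENOMEM */
	*addr = off;                /* offset returned via out-parameter */
	a->next = off + len;
	return 0;
}

static void toy_free(struct toy_allocator *a, u32 addr, u32 len, u32 align)
{
	(void)align;
	/* A real allocator would return the block to a free list; a bump
	 * allocator can only undo the most recent allocation. */
	if (addr + len == a->next)
		a->next = addr;
}

int main(void)
{
	struct toy_allocator dmem = {
		.start = 0x800, .end = 0xc00, .next = 0x800,
		.alloc = toy_alloc, .free = toy_free,
	};
	u32 sample_buffer = 0;

	/* Mirrors the sample-buffer call in the hunk above. */
	if (dmem.alloc(&dmem, &sample_buffer, 2 * sizeof(u16), 0x20))
		return 1;
	printf("sample buffer at DMEM offset 0x%x\n", sample_buffer);
	dmem.free(&dmem, sample_buffer, 2 * sizeof(u16), 0x20);
	return 0;
}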
@@ -2968,17 +2970,15 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
 	for (i = 0; i < PMU_QUEUE_COUNT; i++)
 		pmu_queue_init(pmu, i, init);
 
-	if (!pmu->dmem.init) {
-		/* Align start and end addresses */
+	if (!pmu->dmem.alloc) {
+		/*Align start and end addresses*/
 		u32 start = ALIGN(pv->get_pmu_init_msg_pmu_sw_mg_off(init),
 			PMU_DMEM_ALLOC_ALIGNMENT);
 		u32 end = (pv->get_pmu_init_msg_pmu_sw_mg_off(init) +
 			pv->get_pmu_init_msg_pmu_sw_mg_size(init)) &
 			~(PMU_DMEM_ALLOC_ALIGNMENT - 1);
 		u32 size = end - start;
-		__gk20a_allocator_init(&pmu->dmem, NULL, "gk20a_pmu_dmem",
-				       start, size,
-				       PMU_DMEM_ALLOC_ALIGNMENT, 4, 0);
+		gk20a_allocator_init(&pmu->dmem, "gk20a_pmu_dmem", start, size);
 	}
 
 	pmu->pmu_ready = true;
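The managed region is derived by rounding its start up and its end down to
the allocation alignment, so every block handed out stays inside the
PMU-reported managed DMEM window. A worked example follows; the offsets and
the alignment value are made up for illustration, and ALIGN is the usual
round-up macro.

#include <stdio.h>

typedef unsigned int u32;

#define ALIGN(x, a)       (((x) + (a) - 1) & ~((a) - 1)) /* round up   */
#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))             /* round down */

int main(void)
{
	/* Made-up values for illustration only. */
	u32 off = 0x1234, size = 0x2000, align = 32;
	u32 start = ALIGN(off, align);             /* 0x1240 */
	u32 end   = ALIGN_DOWN(off + size, align); /* 0x3220 */

	printf("managed DMEM: [0x%x, 0x%x), %u bytes\n",
	       start, end, end - start);           /* 0x1fe0 bytes */
	return 0;
}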
@@ -3115,14 +3115,20 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
 		seq->callback = NULL;
 	if (pv->pmu_allocation_get_dmem_size(pmu,
 			pv->get_pmu_seq_in_a_ptr(seq)) != 0)
-		gk20a_bfree(&pmu->dmem,
+		pmu->dmem.free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
-			pv->get_pmu_seq_in_a_ptr(seq)));
+			pv->get_pmu_seq_in_a_ptr(seq)),
+			pv->pmu_allocation_get_dmem_size(pmu,
+			pv->get_pmu_seq_in_a_ptr(seq)),
+			PMU_DMEM_ALLOC_ALIGNMENT);
 	if (pv->pmu_allocation_get_dmem_size(pmu,
 			pv->get_pmu_seq_out_a_ptr(seq)) != 0)
-		gk20a_bfree(&pmu->dmem,
+		pmu->dmem.free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
-			pv->get_pmu_seq_out_a_ptr(seq)));
+			pv->get_pmu_seq_out_a_ptr(seq)),
+			pv->pmu_allocation_get_dmem_size(pmu,
+			pv->get_pmu_seq_out_a_ptr(seq)),
+			PMU_DMEM_ALLOC_ALIGNMENT);
 
 	if (seq->callback)
 		seq->callback(g, msg, seq->cb_params, seq->desc, ret);
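Note the contract change in this hunk: gk20a_bfree() took only the offset
(the reverted buddy allocator tracked block sizes internally), while the
restored dmem.free() needs the size and alignment supplied by the caller,
which is why each free re-queries the size from the sequence's allocation
descriptor. A minimal runnable sketch of why a header-less fixed-block
allocator needs the length at free time; the block size and block count
are invented numbers, not gk20a values.

#include <stdio.h>
#include <string.h>

typedef unsigned int u32;

/*
 * Illustrative fixed-block allocator: free() must be told the length
 * because no per-block header records how many blocks an allocation
 * spans.
 */
#define BLK   256u
#define NBLKS 64u
static unsigned char used[NBLKS];

static int blk_alloc(u32 *addr, u32 len)
{
	u32 n = (len + BLK - 1) / BLK;   /* blocks needed */
	u32 i, j;

	for (i = 0; i + n <= NBLKS; i++) {
		for (j = 0; j < n && !used[i + j]; j++)
			;
		if (j == n) {            /* found a free run */
			memset(&used[i], 1, n);
			*addr = i * BLK;
			return 0;
		}
	}
	return -1;
}

static void blk_free(u32 addr, u32 len)
{
	/* Without len the allocator cannot know how many blocks to
	 * release. */
	memset(&used[addr / BLK], 0, (len + BLK - 1) / BLK);
}

int main(void)
{
	u32 a;

	if (blk_alloc(&a, 600) == 0) {   /* spans 3 blocks */
		printf("allocated at offset 0x%x\n", a);
		blk_free(a, 600);
	}
	return 0;
}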
@@ -3763,10 +3769,11 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 		pv->pmu_allocation_set_dmem_size(pmu, in,
 			(u16)max(payload->in.size, payload->out.size));
 
-		*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) =
-			gk20a_balloc(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_size(pmu, in));
-		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)))
+		err = pmu->dmem.alloc(&pmu->dmem,
+			pv->pmu_allocation_get_dmem_offset_addr(pmu, in),
+			pv->pmu_allocation_get_dmem_size(pmu, in),
+			PMU_DMEM_ALLOC_ALIGNMENT);
+		if (err)
 			goto clean_up;
 
 		pmu_copy_to_dmem(pmu, (pv->pmu_allocation_get_dmem_offset(pmu,
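Also visible here: gk20a_balloc() signalled failure in-band (the call site
tested the stored offset for a falsy value), whereas the restored
dmem.alloc() returns an errno-style status and delivers the offset through
an out-parameter, so a zero offset remains a usable allocation. A toy
comparison of the two conventions; both bodies are invented for
illustration.

#include <stdio.h>
#include <errno.h>

typedef unsigned int u32;

/* In-band sentinel, as with gk20a_balloc(): a falsy return means
 * failure, so offset 0 can never be handed out as a real block. */
static u32 sentinel_alloc(u32 len)
{
	return len <= 64 ? 0x40 : 0;     /* toy body */
}

/* Separate status, as with the restored dmem.alloc(): offset 0 is a
 * valid result because failure travels in the return code. */
static int status_alloc(u32 *addr, u32 len)
{
	if (len > 64)
		return -ENOMEM;
	*addr = 0x0;                     /* toy body: offset 0 is legal */
	return 0;
}

int main(void)
{
	u32 off;

	if (!sentinel_alloc(128))
		printf("sentinel: allocation failed\n");
	if (status_alloc(&off, 32) == 0)
		printf("status: got offset 0x%x\n", off);
	return 0;
}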
@@ -3787,12 +3794,11 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 			(u16)payload->out.size);
 
 		if (payload->out.buf != payload->in.buf) {
-
-			*(pv->pmu_allocation_get_dmem_offset_addr(pmu, out)) =
-				gk20a_balloc(&pmu->dmem,
-				pv->pmu_allocation_get_dmem_size(pmu, out));
-			if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
-					out)))
+			err = pmu->dmem.alloc(&pmu->dmem,
+				pv->pmu_allocation_get_dmem_offset_addr(pmu, out),
+				pv->pmu_allocation_get_dmem_size(pmu, out),
+				PMU_DMEM_ALLOC_ALIGNMENT);
+			if (err)
 				goto clean_up;
 		} else {
 			BUG_ON(in == NULL);
@@ -3820,11 +3826,15 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 clean_up:
 	gk20a_dbg_fn("fail");
 	if (in)
-		gk20a_bfree(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_offset(pmu, in));
+		pmu->dmem.free(&pmu->dmem,
+			pv->pmu_allocation_get_dmem_offset(pmu, in),
+			pv->pmu_allocation_get_dmem_size(pmu, in),
+			PMU_DMEM_ALLOC_ALIGNMENT);
 	if (out)
-		gk20a_bfree(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_offset(pmu, out));
+		pmu->dmem.free(&pmu->dmem,
+			pv->pmu_allocation_get_dmem_offset(pmu, out),
+			pv->pmu_allocation_get_dmem_size(pmu, out),
+			PMU_DMEM_ALLOC_ALIGNMENT);
 
 	pmu_seq_release(pmu, seq);
 	return err;
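The clean_up label is the standard kernel unwind idiom: every failure after
an allocation jumps to a single exit path that releases whatever was
actually acquired. A generic runnable sketch of the pattern; the names and
heap buffers are invented stand-ins for the in/out DMEM allocations, not
driver code.

#include <stdio.h>
#include <stdlib.h>

static int do_post(int fail_second)
{
	int err = 0;
	char *in = NULL, *out = NULL;

	in = malloc(64);
	if (!in) {
		err = -1;
		goto clean_up;
	}
	if (fail_second) {        /* simulate the second allocation failing */
		err = -1;
		goto clean_up;
	}
	out = malloc(64);
	if (!out) {
		err = -1;
		goto clean_up;
	}

	/* ... post the command using in/out ... */
	free(out);
	free(in);
	return 0;

clean_up:
	/* Single exit path releases only what was acquired; free(NULL)
	 * is a no-op, mirroring the if (in)/if (out) tests above. */
	free(out);
	free(in);
	return err;
}

int main(void)
{
	printf("ok path: %d, failure path: %d\n", do_post(0), do_post(1));
	return 0;
}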