From 1d8fdf56959240622073dd771dd9bfccf31b8f8e Mon Sep 17 00:00:00 2001
From: Bharat Nihalani
Date: Fri, 29 May 2015 16:26:23 +0530
Subject: Revert "Revert "Revert "gpu: nvgpu: New allocator for VA space"""

This reverts commit ce1cf06b9a8eb6314ba0ca294e8cb430e1e141c0 since it
causes GPU pbdma interrupt to be generated.

Bug 200106514

Change-Id: If3ed9a914c4e3e7f3f98c6609c6dbf57e1eb9aad
Signed-off-by: Bharat Nihalani
Reviewed-on: http://git-master/r/749291
---
 drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 68 +++++++++++++++++++++----------------
 1 file changed, 39 insertions(+), 29 deletions(-)

(limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')

diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 11322293..2456c784 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2816,6 +2816,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
 	struct pmu_payload payload;
 	u32 seq;
 	u32 data;
+	int err = 0;
 
 	gk20a_dbg_fn("");
 
@@ -2866,11 +2867,12 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
 	gk20a_writel(g, pwr_pmu_idle_ctrl_r(2), data);
 
 	if (!pmu->sample_buffer)
-		pmu->sample_buffer = gk20a_balloc(&pmu->dmem,
-						  2 * sizeof(u16));
-	if (!pmu->sample_buffer) {
+		err = pmu->dmem.alloc(&pmu->dmem,
+				&pmu->sample_buffer, 2 * sizeof(u16),
+				PMU_DMEM_ALLOC_ALIGNMENT);
+	if (err) {
 		gk20a_err(dev_from_gk20a(g),
-			"failed to allocate perfmon sample buffer");
+			  "failed to allocate perfmon sample buffer");
 		return -ENOMEM;
 	}
 
@@ -2968,17 +2970,15 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
 	for (i = 0; i < PMU_QUEUE_COUNT; i++)
 		pmu_queue_init(pmu, i, init);
 
-	if (!pmu->dmem.init) {
-		/* Align start and end addresses */
+	if (!pmu->dmem.alloc) {
+		/*Align start and end addresses*/
 		u32 start = ALIGN(pv->get_pmu_init_msg_pmu_sw_mg_off(init),
-				PMU_DMEM_ALLOC_ALIGNMENT);
+			PMU_DMEM_ALLOC_ALIGNMENT);
 		u32 end = (pv->get_pmu_init_msg_pmu_sw_mg_off(init) +
-				pv->get_pmu_init_msg_pmu_sw_mg_size(init)) &
+			pv->get_pmu_init_msg_pmu_sw_mg_size(init)) &
 			~(PMU_DMEM_ALLOC_ALIGNMENT - 1);
 		u32 size = end - start;
-		__gk20a_allocator_init(&pmu->dmem, NULL, "gk20a_pmu_dmem",
-				start, size,
-				PMU_DMEM_ALLOC_ALIGNMENT, 4, 0);
+		gk20a_allocator_init(&pmu->dmem, "gk20a_pmu_dmem", start, size);
 	}
 
 	pmu->pmu_ready = true;
@@ -3115,14 +3115,20 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
 		seq->callback = NULL;
 	if (pv->pmu_allocation_get_dmem_size(pmu,
 			pv->get_pmu_seq_in_a_ptr(seq)) != 0)
-		gk20a_bfree(&pmu->dmem,
+		pmu->dmem.free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
-			pv->get_pmu_seq_in_a_ptr(seq)));
+			pv->get_pmu_seq_in_a_ptr(seq)),
+			pv->pmu_allocation_get_dmem_size(pmu,
+			pv->get_pmu_seq_in_a_ptr(seq)),
+			PMU_DMEM_ALLOC_ALIGNMENT);
 	if (pv->pmu_allocation_get_dmem_size(pmu,
 			pv->get_pmu_seq_out_a_ptr(seq)) != 0)
-		gk20a_bfree(&pmu->dmem,
+		pmu->dmem.free(&pmu->dmem,
 			pv->pmu_allocation_get_dmem_offset(pmu,
-			pv->get_pmu_seq_out_a_ptr(seq)));
+			pv->get_pmu_seq_out_a_ptr(seq)),
+			pv->pmu_allocation_get_dmem_size(pmu,
+			pv->get_pmu_seq_out_a_ptr(seq)),
+			PMU_DMEM_ALLOC_ALIGNMENT);
 
 	if (seq->callback)
 		seq->callback(g, msg, seq->cb_params, seq->desc, ret);
@@ -3763,10 +3769,11 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 		pv->pmu_allocation_set_dmem_size(pmu, in,
 		(u16)max(payload->in.size, payload->out.size));
 
-		*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) =
-			gk20a_balloc(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_size(pmu, in));
-		if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)))
+		err = pmu->dmem.alloc(&pmu->dmem,
+			pv->pmu_allocation_get_dmem_offset_addr(pmu, in),
+			pv->pmu_allocation_get_dmem_size(pmu, in),
+			PMU_DMEM_ALLOC_ALIGNMENT);
+		if (err)
 			goto clean_up;
 
 		pmu_copy_to_dmem(pmu, (pv->pmu_allocation_get_dmem_offset(pmu,
@@ -3787,12 +3794,11 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 		(u16)payload->out.size);
 
 		if (payload->out.buf != payload->in.buf) {
-
-			*(pv->pmu_allocation_get_dmem_offset_addr(pmu, out)) =
-				gk20a_balloc(&pmu->dmem,
-				pv->pmu_allocation_get_dmem_size(pmu, out));
-			if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
-					out)))
+			err = pmu->dmem.alloc(&pmu->dmem,
+				pv->pmu_allocation_get_dmem_offset_addr(pmu, out),
+				pv->pmu_allocation_get_dmem_size(pmu, out),
+				PMU_DMEM_ALLOC_ALIGNMENT);
+			if (err)
 				goto clean_up;
 		} else {
 			BUG_ON(in == NULL);
@@ -3820,11 +3826,15 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 clean_up:
 	gk20a_dbg_fn("fail");
 	if (in)
-		gk20a_bfree(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_offset(pmu, in));
+		pmu->dmem.free(&pmu->dmem,
+			pv->pmu_allocation_get_dmem_offset(pmu, in),
+			pv->pmu_allocation_get_dmem_size(pmu, in),
+			PMU_DMEM_ALLOC_ALIGNMENT);
 	if (out)
-		gk20a_bfree(&pmu->dmem,
-			pv->pmu_allocation_get_dmem_offset(pmu, out));
+		pmu->dmem.free(&pmu->dmem,
+			pv->pmu_allocation_get_dmem_offset(pmu, out),
+			pv->pmu_allocation_get_dmem_size(pmu, out),
+			PMU_DMEM_ALLOC_ALIGNMENT);
 
 	pmu_seq_release(pmu, seq);
 	return err;
--
cgit v1.2.2
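
For readers comparing the two interfaces in the diff above, the sketch below is a minimal, self-contained illustration (not part of the patch) of the calling convention this revert restores: the caller passes an output offset pointer, an explicit size and PMU_DMEM_ALLOC_ALIGNMENT, gets an error code back, and must supply the same size and alignment again when freeing. The helpers dmem_alloc_legacy()/dmem_free_legacy() and the alignment value are hypothetical stand-ins for illustration only, not the real nvgpu allocator code.

/*
 * Illustrative sketch only -- toy stand-ins for the DMEM allocator calling
 * convention restored by this revert. dmem_alloc_legacy()/dmem_free_legacy()
 * and the alignment value are hypothetical, not the nvgpu implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define PMU_DMEM_ALLOC_ALIGNMENT 32u	/* example value only */

typedef uint32_t u32;
typedef uint16_t u16;

/* Caller passes an out-parameter, a size and an alignment; gets 0 or -errno. */
static int dmem_alloc_legacy(u32 *offset, u32 size, u32 align)
{
	static u32 next = 0x100;		/* toy bump allocator */

	next = (next + align - 1) & ~(align - 1);
	*offset = next;
	next += size;
	return 0;
}

/* Freeing needs the same size/alignment the caller used for the allocation. */
static void dmem_free_legacy(u32 offset, u32 size, u32 align)
{
	(void)offset;
	(void)size;
	(void)align;				/* toy allocator: nothing to reclaim */
}

int main(void)
{
	u32 sample_buffer;
	u32 size = 2 * sizeof(u16);
	int err;

	/* Mirrors pmu_init_perfmon() after the revert:
	 * err = pmu->dmem.alloc(&pmu->dmem, &pmu->sample_buffer,
	 *			 2 * sizeof(u16), PMU_DMEM_ALLOC_ALIGNMENT);
	 */
	err = dmem_alloc_legacy(&sample_buffer, size, PMU_DMEM_ALLOC_ALIGNMENT);
	if (err)
		return 1;
	printf("sample buffer at DMEM offset 0x%x\n", (unsigned)sample_buffer);

	dmem_free_legacy(sample_buffer, size, PMU_DMEM_ALLOC_ALIGNMENT);
	return 0;
}

The caller-tracked size and alignment are why every free site in the diff gains pv->pmu_allocation_get_dmem_size() arguments; the gk20a_balloc()/gk20a_bfree() interface being reverted instead returned the offset directly (with failure signalled by a zero offset) and tracked block sizes inside the allocator.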