From 8a4e6945302e35204eac1dd1c88cac615825217a Mon Sep 17 00:00:00 2001
From: Vaikundanathan S
Date: Tue, 24 Apr 2018 11:32:43 +0530
Subject: gpu: nvgpu: effective freq load changes

Read clk frequency through PMU RPC

Bug 200399373

Change-Id: I9e887dcb1c5b622110eb4c1584f2f34434efd674
Signed-off-by: Vaikundanathan S
Signed-off-by: Mahantesh Kumbar
Reviewed-on: https://git-master.nvidia.com/r/1701276
Reviewed-by: svc-mobile-coverity
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/clk/clk.c | 143 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 142 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/nvgpu/clk/clk.c')

diff --git a/drivers/gpu/nvgpu/clk/clk.c b/drivers/gpu/nvgpu/clk/clk.c
index 5d6ae19d..5e77f376 100644
--- a/drivers/gpu/nvgpu/clk/clk.c
+++ b/drivers/gpu/nvgpu/clk/clk.c
@@ -55,6 +55,137 @@ static void clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
 	phandlerparams->success = 1;
 }
 
+
+int clk_pmu_freq_effective_avg_load(struct gk20a *g, bool bload)
+{
+	struct pmu_cmd cmd;
+	struct pmu_msg msg;
+	struct pmu_payload payload;
+	u32 status;
+	u32 seqdesc;
+	struct nv_pmu_clk_rpc rpccall;
+	struct clkrpc_pmucmdhandler_params handler;
+	struct nv_pmu_clk_load *clkload;
+
+	memset(&payload, 0, sizeof(struct pmu_payload));
+	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
+	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));
+	memset(&cmd, 0, sizeof(struct pmu_cmd));
+
+	rpccall.function = NV_PMU_CLK_RPC_ID_LOAD;
+	clkload = &rpccall.params.clk_load;
+	clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_EFFECTIVE_AVG;
+	clkload->action_mask = bload ?
+		NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_YES :
+		NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_NO;
+
+	cmd.hdr.unit_id = PMU_UNIT_CLK;
+	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
+			(u32)sizeof(struct pmu_hdr);
+
+	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;
+	msg.hdr.size = sizeof(struct pmu_msg);
+
+	payload.in.buf = (u8 *)&rpccall;
+	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
+	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
+	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;
+
+	payload.out.buf = (u8 *)&rpccall;
+	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
+	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
+	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;
+
+	handler.prpccall = &rpccall;
+	handler.success = 0;
+
+	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
+			PMU_COMMAND_QUEUE_LPQ,
+			clkrpc_pmucmdhandler, (void *)&handler,
+			&seqdesc, ~0);
+	if (status) {
+		nvgpu_err(g, "unable to post clk RPC cmd %x",
+			cmd.cmd.clk.cmd_type);
+		goto done;
+	}
+
+	pmu_wait_message_cond(&g->pmu,
+			gk20a_get_gr_idle_timeout(g),
+			&handler.success, 1);
+	if (handler.success == 0) {
+		nvgpu_err(g, "rpc call to load Effective avg clk domain freq failed");
+		status = -EINVAL;
+	}
+
+done:
+	return status;
+}
+
+u32 clk_freq_effective_avg(struct gk20a *g, u32 clkDomainMask) {
+
+	struct pmu_cmd cmd;
+	struct pmu_msg msg;
+	struct pmu_payload payload;
+	u32 status;
+	u32 seqdesc;
+	struct nv_pmu_clk_rpc rpccall;
+	struct clkrpc_pmucmdhandler_params handler;
+	struct nv_pmu_clk_freq_effective_avg *clk_freq_effective_avg;
+
+	memset(&payload, 0, sizeof(struct pmu_payload));
+	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
+	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));
+	memset(&cmd, 0, sizeof(struct pmu_cmd));
+
+	rpccall.function = NV_PMU_CLK_RPC_ID_CLK_FREQ_EFF_AVG;
+	clk_freq_effective_avg = &rpccall.params.clk_freq_effective_avg;
+	clk_freq_effective_avg->clkDomainMask = clkDomainMask;
+
+	cmd.hdr.unit_id = PMU_UNIT_CLK;
+	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
+			(u32)sizeof(struct pmu_hdr);
+
+	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;
+	msg.hdr.size = sizeof(struct pmu_msg);
+
+	payload.in.buf = (u8 *)&rpccall;
+	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
+	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
+	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;
+
+	payload.out.buf = (u8 *)&rpccall;
+	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
+	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
+	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;
+
+	handler.prpccall = &rpccall;
+	handler.success = 0;
+
+	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
+			PMU_COMMAND_QUEUE_LPQ,
+			clkrpc_pmucmdhandler, (void *)&handler,
+			&seqdesc, ~0);
+	if (status) {
+		nvgpu_err(g, "unable to post clk RPC cmd %x",
+			cmd.cmd.clk.cmd_type);
+		goto done;
+	}
+
+	pmu_wait_message_cond(&g->pmu,
+			gk20a_get_gr_idle_timeout(g),
+			&handler.success, 1);
+	if (handler.success == 0) {
+		nvgpu_err(g, "rpc call to get clk frequency average failed");
+		status = -EINVAL;
+		goto done;
+	}
+
+	return rpccall.params.clk_freq_effective_avg.freqkHz[clkDomainMask];
+
+done:
+	return status;
+}
+
 int clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx)
 {
 	struct pmu_cmd cmd;
@@ -676,7 +807,6 @@ u32 nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g)
 	status = clk_domain_get_f_or_v(g, CTRL_CLK_DOMAIN_GPCCLK,
 		&gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC);
 	if (status) {
-		nvgpu_err(g,"failed 1");
 		return status;
 	}
 
@@ -695,6 +825,17 @@ u32 nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g)
 	if (status)
 		nvgpu_err(g, "attempt to set boot gpcclk failed");
 
+	status = clk_pmu_freq_effective_avg_load(g, true);
+
+	/*
+	 * Read clocks after some delay with below method
+	 * & extract clock data from buffer
+	 * clk_freq_effective_avg(g, CTRL_CLK_DOMAIN_GPCCLK |
+	 *		CTRL_CLK_DOMAIN_XBARCLK |
+	 *		CTRL_CLK_DOMAIN_SYSCLK |
+	 *		CTRL_CLK_DOMAIN_NVDCLK)
+	 * */
+
 	return status;
 }
-- 
cgit v1.2.2
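
The comment added to nvgpu_clk_set_boot_fll_clk_gv10x() describes the intended
call sequence, but the patch itself only arms the feature. For reference, a
minimal caller sketch of that sequence follows: arm the effective-average
callback, give the PMU time to accumulate samples, then query a domain. This
is hypothetical and not part of the change; the helper name and the 1 ms
settle delay are illustrative assumptions. Note that clk_freq_effective_avg()
indexes freqkHz[] with the raw mask value, so the sketch queries a single
domain at a time.

/*
 * Hypothetical usage sketch (not part of this patch).
 * Assumes the PMU is booted and nvgpu_udelay() is available.
 */
static u32 sample_effective_gpcclk(struct gk20a *g)
{
	u32 freq_khz;

	/* Arm the PMU's effective-average sampling callback. */
	if (clk_pmu_freq_effective_avg_load(g, true) != 0)
		return 0;

	/* Let the PMU accumulate samples; 1 ms is an arbitrary choice. */
	nvgpu_udelay(1000);

	/* Read the averaged GPC clock; the mask value indexes freqkHz[]. */
	freq_khz = clk_freq_effective_avg(g, CTRL_CLK_DOMAIN_GPCCLK);

	/* Disarm the callback once sampling is done. */
	clk_pmu_freq_effective_avg_load(g, false);

	return freq_khz;
}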