author	Vaikundanathan S <vaikuns@nvidia.com>	2018-04-24 02:02:43 -0400
committer	Tejal Kudav <tkudav@nvidia.com>	2018-06-14 09:44:08 -0400
commit	8a4e6945302e35204eac1dd1c88cac615825217a (patch)
tree	76987e7367d48a543a6e66474857668e57d91315 /drivers/gpu/nvgpu/clk
parent	0aa8d6e27394ec15c1816943996daf8f8ffab438 (diff)
gpu: nvgpu: effective freq load changes
Read clk frequency through PMU RPC

Bug 200399373

Change-Id: I9e887dcb1c5b622110eb4c1584f2f34434efd674
Signed-off-by: Vaikundanathan S <vaikuns@nvidia.com>
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1701276
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/clk')
-rw-r--r--	drivers/gpu/nvgpu/clk/clk.c	143
-rw-r--r--	drivers/gpu/nvgpu/clk/clk.h	2
-rw-r--r--	drivers/gpu/nvgpu/clk/clk_fll.c	1
3 files changed, 145 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/clk/clk.c b/drivers/gpu/nvgpu/clk/clk.c
index 5d6ae19d..5e77f376 100644
--- a/drivers/gpu/nvgpu/clk/clk.c
+++ b/drivers/gpu/nvgpu/clk/clk.c
@@ -55,6 +55,137 @@ static void clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
 	phandlerparams->success = 1;
 }
 
+
+int clk_pmu_freq_effective_avg_load(struct gk20a *g, bool bload)
+{
+	struct pmu_cmd cmd;
+	struct pmu_msg msg;
+	struct pmu_payload payload;
+	u32 status;
+	u32 seqdesc;
+	struct nv_pmu_clk_rpc rpccall;
+	struct clkrpc_pmucmdhandler_params handler;
+	struct nv_pmu_clk_load *clkload;
+
+	memset(&payload, 0, sizeof(struct pmu_payload));
+	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
+	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));
+	memset(&cmd, 0, sizeof(struct pmu_cmd));
+
+	rpccall.function = NV_PMU_CLK_RPC_ID_LOAD;
+	clkload = &rpccall.params.clk_load;
+	clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_EFFECTIVE_AVG;
+	clkload->action_mask = bload ?
+		NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_YES :
+		NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_NO;
+
+	cmd.hdr.unit_id = PMU_UNIT_CLK;
+	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
+			(u32)sizeof(struct pmu_hdr);
+
+	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;
+	msg.hdr.size = sizeof(struct pmu_msg);
+
+	payload.in.buf = (u8 *)&rpccall;
+	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
+	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
+	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;
+
+	payload.out.buf = (u8 *)&rpccall;
+	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
+	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
+	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;
+
+	handler.prpccall = &rpccall;
+	handler.success = 0;
+
+	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
+			PMU_COMMAND_QUEUE_LPQ,
+			clkrpc_pmucmdhandler, (void *)&handler,
+			&seqdesc, ~0);
+	if (status) {
+		nvgpu_err(g, "unable to post clk RPC cmd %x",
+			cmd.cmd.clk.cmd_type);
+		goto done;
+	}
+
+	pmu_wait_message_cond(&g->pmu,
+			gk20a_get_gr_idle_timeout(g),
+			&handler.success, 1);
+	if (handler.success == 0) {
+		nvgpu_err(g, "rpc call to load Effective avg clk domain freq failed");
+		status = -EINVAL;
+	}
+
+done:
+	return status;
+}
+
+u32 clk_freq_effective_avg(struct gk20a *g, u32 clkDomainMask) {
+
+	struct pmu_cmd cmd;
+	struct pmu_msg msg;
+	struct pmu_payload payload;
+	u32 status;
+	u32 seqdesc;
+	struct nv_pmu_clk_rpc rpccall;
+	struct clkrpc_pmucmdhandler_params handler;
+	struct nv_pmu_clk_freq_effective_avg *clk_freq_effective_avg;
+
+	memset(&payload, 0, sizeof(struct pmu_payload));
+	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
+	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));
+	memset(&cmd, 0, sizeof(struct pmu_cmd));
+
+	rpccall.function = NV_PMU_CLK_RPC_ID_CLK_FREQ_EFF_AVG;
+	clk_freq_effective_avg = &rpccall.params.clk_freq_effective_avg;
+	clk_freq_effective_avg->clkDomainMask = clkDomainMask;
+
+	cmd.hdr.unit_id = PMU_UNIT_CLK;
+	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
+			(u32)sizeof(struct pmu_hdr);
+
+	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;
+	msg.hdr.size = sizeof(struct pmu_msg);
+
+	payload.in.buf = (u8 *)&rpccall;
+	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
+	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
+	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;
+
+	payload.out.buf = (u8 *)&rpccall;
+	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
+	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
+	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;
+
+	handler.prpccall = &rpccall;
+	handler.success = 0;
+
+	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
+			PMU_COMMAND_QUEUE_LPQ,
+			clkrpc_pmucmdhandler, (void *)&handler,
+			&seqdesc, ~0);
+	if (status) {
+		nvgpu_err(g, "unable to post clk RPC cmd %x",
+			cmd.cmd.clk.cmd_type);
+		goto done;
+	}
+
+	pmu_wait_message_cond(&g->pmu,
+			gk20a_get_gr_idle_timeout(g),
+			&handler.success, 1);
+	if (handler.success == 0) {
+		nvgpu_err(g, "rpc call to get clk frequency average failed");
+		status = -EINVAL;
+		goto done;
+	}
+
+	return rpccall.params.clk_freq_effective_avg.freqkHz[clkDomainMask];
+
+done:
+	return status;
+}
+
 int clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx)
 {
 	struct pmu_cmd cmd;
@@ -676,7 +807,6 @@ u32 nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g)
 	status = clk_domain_get_f_or_v(g, CTRL_CLK_DOMAIN_GPCCLK,
 			&gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC);
 	if (status) {
-		nvgpu_err(g,"failed 1");
 		return status;
 	}
 
@@ -695,6 +825,17 @@ u32 nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g)
 	if (status)
 		nvgpu_err(g, "attempt to set boot gpcclk failed");
 
+	status = clk_pmu_freq_effective_avg_load(g, true);
+
+	/*
+	 * Read clocks after some delay with below method
+	 * & extract clock data from buffer
+	 * clk_freq_effective_avg(g, CTRL_CLK_DOMAIN_GPCCLK |
+	 *		CTRL_CLK_DOMAIN_XBARCLK |
+	 *		CTRL_CLK_DOMAIN_SYSCLK |
+	 *		CTRL_CLK_DOMAIN_NVDCLK)
+	 * */
+
 	return status;
 }
 
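Note on intended usage: the commented-out call in the hunk above suggests the averages are meant to be read some time after clk_pmu_freq_effective_avg_load() arms the PMU-side callback. A minimal caller might look like the sketch below; the helper name sample_effective_gpcclk, the 20 ms settling delay, and the use of nvgpu_msleep() are illustrative assumptions, not part of this change.

/* Hypothetical sketch: arm effective-frequency averaging, wait, then read. */
static void sample_effective_gpcclk(struct gk20a *g)
{
	u32 freq_khz;

	/* Added by this change: enable the FREQ_EFFECTIVE_AVG PMU callback. */
	if (clk_pmu_freq_effective_avg_load(g, true) != 0)
		return;

	/* Assumption: give the PMU some time to accumulate samples. */
	nvgpu_msleep(20);

	/* Added by this change: read the averaged frequency for one domain, in kHz. */
	freq_khz = clk_freq_effective_avg(g, CTRL_CLK_DOMAIN_GPCCLK);
	nvgpu_info(g, "effective avg gpcclk: %u kHz", freq_khz);
}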
diff --git a/drivers/gpu/nvgpu/clk/clk.h b/drivers/gpu/nvgpu/clk/clk.h
index 70b04fc4..cd65f6f5 100644
--- a/drivers/gpu/nvgpu/clk/clk.h
+++ b/drivers/gpu/nvgpu/clk/clk.h
@@ -143,4 +143,6 @@ u32 nvgpu_clk_vf_change_inject_data_fill_gp10x(struct gk20a *g,
 		struct nv_pmu_clk_rpc *rpccall,
 		struct set_fll_clk *setfllclk);
 u32 nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g);
+int clk_pmu_freq_effective_avg_load(struct gk20a *g, bool bload);
+u32 clk_freq_effective_avg(struct gk20a *g, u32 clkDomainMask);
 #endif
diff --git a/drivers/gpu/nvgpu/clk/clk_fll.c b/drivers/gpu/nvgpu/clk/clk_fll.c
index 87222b90..a05fdf22 100644
--- a/drivers/gpu/nvgpu/clk/clk_fll.c
+++ b/drivers/gpu/nvgpu/clk/clk_fll.c
@@ -340,6 +340,7 @@ static u32 devinit_get_fll_device_table(struct gk20a *g,
 			CTRL_CLK_FLL_REGIME_ID_FFR;
 		fll_dev_data.regime_desc.fixed_freq_regime_limit_mhz =
 			(u16)fll_desc_table_entry.ffr_cutoff_freq_mhz;
+		fll_dev_data.regime_desc.target_regime_id_override=0;
 
 		/*construct fll device*/
 		pfll_dev = construct_fll_device(g, (void *)&fll_dev_data);