summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/clk/clk_mclk.c
diff options
context:
space:
mode:
authorDeepak Nibade <dnibade@nvidia.com>2017-03-29 04:59:29 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-03-30 15:36:21 -0400
commit6c3370a588108ba920c952d63699670905e16449 (patch)
treef2fdcca18c9a7e127a7d71f601c70da52858b76d /drivers/gpu/nvgpu/clk/clk_mclk.c
parent1ca4c5f069f8b055248aab61619c9a2490b1fe9c (diff)
gpu: nvgpu: check return value of mutex_init in mclk code
- check return value of nvgpu_mutex_init in clk_mclk.c
- declare new callback g->ops.pmu.mclk_deinit() to deinitialize mclk mutexes
- and define this callback for gp106
- add corresponding nvgpu_mutex_destroy calls in deinitialization

Jira NVGPU-13

Change-Id: I1491c084d330ac9756c9520477e6fe494560e651
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1321294
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/clk/clk_mclk.c')
-rw-r--r-- drivers/gpu/nvgpu/clk/clk_mclk.c | 48
1 file changed, 36 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/nvgpu/clk/clk_mclk.c b/drivers/gpu/nvgpu/clk/clk_mclk.c
index 16852b5e..f973a696 100644
--- a/drivers/gpu/nvgpu/clk/clk_mclk.c
+++ b/drivers/gpu/nvgpu/clk/clk_mclk.c
@@ -2174,20 +2174,33 @@ done:
2174 return status; 2174 return status;
2175} 2175}
2176 2176
2177void clk_mclkseq_deinit_mclk_gddr5(struct gk20a *g)
2178{
2179 struct clk_mclk_state *mclk = &g->clk_pmu.clk_mclk;
2180
2181 nvgpu_mutex_destroy(&mclk->data_lock);
2182 nvgpu_mutex_destroy(&mclk->mclk_lock);
2183}
2184
2177int clk_mclkseq_init_mclk_gddr5(struct gk20a *g) 2185int clk_mclkseq_init_mclk_gddr5(struct gk20a *g)
2178{ 2186{
2179 struct clk_mclk_state *mclk; 2187 struct clk_mclk_state *mclk;
2180 int status; 2188 int status;
2181 struct clk_set_info *p5_info; 2189 struct clk_set_info *p5_info;
2182 struct clk_set_info *p0_info; 2190 struct clk_set_info *p0_info;
2183 2191 int err;
2184 2192
2185 gk20a_dbg_fn(""); 2193 gk20a_dbg_fn("");
2186 2194
2187 mclk = &g->clk_pmu.clk_mclk; 2195 mclk = &g->clk_pmu.clk_mclk;
2188 2196
2189 nvgpu_mutex_init(&mclk->mclk_lock); 2197 err = nvgpu_mutex_init(&mclk->mclk_lock);
2190 nvgpu_mutex_init(&mclk->data_lock); 2198 if (err)
2199 return err;
2200
2201 err = nvgpu_mutex_init(&mclk->data_lock);
2202 if (err)
2203 goto fail_mclk_mutex;
2191 2204
2192 /* FBPA gain WAR */ 2205 /* FBPA gain WAR */
2193 gk20a_writel(g, fb_fbpa_fbio_iref_byte_rx_ctrl_r(), 0x22222222); 2206 gk20a_writel(g, fb_fbpa_fbio_iref_byte_rx_ctrl_r(), 0x22222222);
@@ -2196,32 +2209,37 @@ int clk_mclkseq_init_mclk_gddr5(struct gk20a *g)
2196 2209
2197 /* Parse VBIOS */ 2210 /* Parse VBIOS */
2198 status = mclk_get_memclk_table(g); 2211 status = mclk_get_memclk_table(g);
2199 if (status < 0) 2212 if (status < 0) {
2200 return status; 2213 err = status;
2214 goto fail_data_mutex;
2215 }
2201 2216
2202 /* Load RAM pattern */ 2217 /* Load RAM pattern */
2203 mclk_memory_load_training_pattern(g); 2218 mclk_memory_load_training_pattern(g);
2204 2219
2205 p5_info = pstate_get_clk_set_info(g, 2220 p5_info = pstate_get_clk_set_info(g,
2206 CTRL_PERF_PSTATE_P5, clkwhich_mclk); 2221 CTRL_PERF_PSTATE_P5, clkwhich_mclk);
2207 if (!p5_info) 2222 if (!p5_info) {
2208 return -EINVAL; 2223 err = -EINVAL;
2224 goto fail_data_mutex;
2225 }
2209 2226
2210 p0_info = pstate_get_clk_set_info(g, 2227 p0_info = pstate_get_clk_set_info(g,
2211 CTRL_PERF_PSTATE_P0, clkwhich_mclk); 2228 CTRL_PERF_PSTATE_P0, clkwhich_mclk);
2212 if (!p0_info) 2229 if (!p0_info) {
2213 return -EINVAL; 2230 err = -EINVAL;
2214 2231 goto fail_data_mutex;
2232 }
2215 2233
2216 mclk->p5_min = p5_info->min_mhz; 2234 mclk->p5_min = p5_info->min_mhz;
2217 mclk->p0_min = p0_info->min_mhz; 2235 mclk->p0_min = p0_info->min_mhz;
2218 2236
2219
2220 mclk->vreg_buf = nvgpu_kcalloc(g, VREG_COUNT, sizeof(u32)); 2237 mclk->vreg_buf = nvgpu_kcalloc(g, VREG_COUNT, sizeof(u32));
2221 if (!mclk->vreg_buf) { 2238 if (!mclk->vreg_buf) {
2222 gk20a_err(dev_from_gk20a(g), 2239 gk20a_err(dev_from_gk20a(g),
2223 "unable to allocate memory for VREG"); 2240 "unable to allocate memory for VREG");
2224 return -ENOMEM; 2241 err = -ENOMEM;
2242 goto fail_data_mutex;
2225 } 2243 }
2226 2244
2227#ifdef CONFIG_DEBUG_FS 2245#ifdef CONFIG_DEBUG_FS
@@ -2235,6 +2253,12 @@ int clk_mclkseq_init_mclk_gddr5(struct gk20a *g)
2235 mclk->init = true; 2253 mclk->init = true;
2236 2254
2237 return 0; 2255 return 0;
2256
2257fail_data_mutex:
2258 nvgpu_mutex_destroy(&mclk->data_lock);
2259fail_mclk_mutex:
2260 nvgpu_mutex_destroy(&mclk->mclk_lock);
2261 return err;
2238} 2262}
2239 2263
2240int clk_mclkseq_change_mclk_gddr5(struct gk20a *g, u16 val) 2264int clk_mclkseq_change_mclk_gddr5(struct gk20a *g, u16 val)