summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
diff options
context:
space:
mode:
authorDeepak Nibade <dnibade@nvidia.com>2014-11-04 08:14:28 -0500
committerDan Willemsen <dwillemsen@nvidia.com>2015-03-18 15:12:01 -0400
commitb3f575074b66e8af1a9943874f9782b793fa7edc (patch)
treec4aaa1defc512cf5a896edc25445f169de184ece /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent797e4dd319bd2b9e13ce0e44a3bbbb75e4820330 (diff)
gpu: nvgpu: fix sparse warnings
Fix below sparse warnings : warning: Using plain integer as NULL pointer warning: symbol <variable/function> was not declared. Should it be static? warning: Initializer entry defined twice Also, remove dead functions Bug 1573254 Change-Id: I29d71ecc01c841233cf6b26c9088ca8874773469 Signed-off-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-on: http://git-master/r/593363 Reviewed-by: Amit Sharma (SW-TEGRA) <amisharma@nvidia.com> Reviewed-by: Automatic_Commit_Validation_User Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/gr_gk20a.c24
1 files changed, 13 insertions, 11 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index f87608d1..a6432f41 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -491,7 +491,7 @@ struct fecs_method_op_gk20a {
491 491
492}; 492};
493 493
494int gr_gk20a_submit_fecs_method_op(struct gk20a *g, 494static int gr_gk20a_submit_fecs_method_op(struct gk20a *g,
495 struct fecs_method_op_gk20a op) 495 struct fecs_method_op_gk20a op)
496{ 496{
497 struct gr_gk20a *gr = &g->gr; 497 struct gr_gk20a *gr = &g->gr;
@@ -524,7 +524,7 @@ int gr_gk20a_submit_fecs_method_op(struct gk20a *g,
524 return ret; 524 return ret;
525} 525}
526 526
527int gr_gk20a_ctrl_ctxsw(struct gk20a *g, u32 fecs_method, u32 *ret) 527static int gr_gk20a_ctrl_ctxsw(struct gk20a *g, u32 fecs_method, u32 *ret)
528{ 528{
529 return gr_gk20a_submit_fecs_method_op(g, 529 return gr_gk20a_submit_fecs_method_op(g,
530 (struct fecs_method_op_gk20a) { 530 (struct fecs_method_op_gk20a) {
@@ -544,14 +544,16 @@ int gr_gk20a_ctrl_ctxsw(struct gk20a *g, u32 fecs_method, u32 *ret)
544int gr_gk20a_disable_ctxsw(struct gk20a *g) 544int gr_gk20a_disable_ctxsw(struct gk20a *g)
545{ 545{
546 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 546 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
547 return gr_gk20a_ctrl_ctxsw(g, gr_fecs_method_push_adr_stop_ctxsw_v(), 0); 547 return gr_gk20a_ctrl_ctxsw(g,
548 gr_fecs_method_push_adr_stop_ctxsw_v(), NULL);
548} 549}
549 550
550/* Start processing (continue) context switches at FECS */ 551/* Start processing (continue) context switches at FECS */
551int gr_gk20a_enable_ctxsw(struct gk20a *g) 552int gr_gk20a_enable_ctxsw(struct gk20a *g)
552{ 553{
553 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); 554 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
554 return gr_gk20a_ctrl_ctxsw(g, gr_fecs_method_push_adr_start_ctxsw_v(), 0); 555 return gr_gk20a_ctrl_ctxsw(g,
556 gr_fecs_method_push_adr_start_ctxsw_v(), NULL);
555} 557}
556 558
557 559
@@ -2105,7 +2107,7 @@ void gr_gk20a_load_ctxsw_ucode_boot(struct gk20a *g, u64 addr_base,
2105 gr_fecs_bootvec_vec_f(segments->boot_entry)); 2107 gr_fecs_bootvec_vec_f(segments->boot_entry));
2106} 2108}
2107 2109
2108int gr_gk20a_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base, 2110static int gr_gk20a_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
2109 struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset) 2111 struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset)
2110{ 2112{
2111 gk20a_writel(g, reg_offset + gr_fecs_dmactl_r(), 2113 gk20a_writel(g, reg_offset + gr_fecs_dmactl_r(),
@@ -2176,7 +2178,7 @@ static int gr_gk20a_wait_ctxsw_ready(struct gk20a *g)
2176 2178
2177 gk20a_dbg_fn(""); 2179 gk20a_dbg_fn("");
2178 2180
2179 ret = gr_gk20a_ctx_wait_ucode(g, 0, 0, 2181 ret = gr_gk20a_ctx_wait_ucode(g, 0, NULL,
2180 GR_IS_UCODE_OP_EQUAL, 2182 GR_IS_UCODE_OP_EQUAL,
2181 eUcodeHandshakeInitComplete, 2183 eUcodeHandshakeInitComplete,
2182 GR_IS_UCODE_OP_SKIP, 0); 2184 GR_IS_UCODE_OP_SKIP, 0);
@@ -3794,7 +3796,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
3794 return 0; 3796 return 0;
3795} 3797}
3796 3798
3797int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr) 3799static int gr_gk20a_load_zbc_table(struct gk20a *g, struct gr_gk20a *gr)
3798{ 3800{
3799 int i, ret; 3801 int i, ret;
3800 3802
@@ -4453,7 +4455,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
4453 return -ETIMEDOUT; 4455 return -ETIMEDOUT;
4454} 4456}
4455 4457
4456int gr_gk20a_init_ctxsw(struct gk20a *g) 4458static int gr_gk20a_init_ctxsw(struct gk20a *g)
4457{ 4459{
4458 struct gr_gk20a *gr = &g->gr; 4460 struct gr_gk20a *gr = &g->gr;
4459 u32 err = 0; 4461 u32 err = 0;
@@ -4481,7 +4483,7 @@ out:
4481 return 0; 4483 return 0;
4482} 4484}
4483 4485
4484int gk20a_init_gr_reset_enable_hw(struct gk20a *g) 4486static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
4485{ 4487{
4486 struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load; 4488 struct av_list_gk20a *sw_non_ctx_load = &g->gr.ctx_vars.sw_non_ctx_load;
4487 unsigned long end_jiffies = jiffies + 4489 unsigned long end_jiffies = jiffies +
@@ -5859,7 +5861,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
5859 u32 *priv_offset); 5861 u32 *priv_offset);
5860 5862
5861/* This function will decode a priv address and return the partition type and numbers. */ 5863/* This function will decode a priv address and return the partition type and numbers. */
5862int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr, 5864static int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr,
5863 int *addr_type, /* enum ctxsw_addr_type */ 5865 int *addr_type, /* enum ctxsw_addr_type */
5864 u32 *gpc_num, u32 *tpc_num, u32 *ppc_num, u32 *be_num, 5866 u32 *gpc_num, u32 *tpc_num, u32 *ppc_num, u32 *be_num,
5865 u32 *broadcast_flags) 5867 u32 *broadcast_flags)
@@ -6154,7 +6156,7 @@ static void init_sm_dsm_reg_info(void)
6154 * which makes it impossible to know externally whether a ctx 6156 * which makes it impossible to know externally whether a ctx
6155 * write will actually occur. so later we should put a lazy, 6157 * write will actually occur. so later we should put a lazy,
6156 * map-and-hold system in the patch write state */ 6158 * map-and-hold system in the patch write state */
6157int gr_gk20a_ctx_patch_smpc(struct gk20a *g, 6159static int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
6158 struct channel_ctx_gk20a *ch_ctx, 6160 struct channel_ctx_gk20a *ch_ctx,
6159 u32 addr, u32 data, 6161 u32 addr, u32 data,
6160 u8 *context) 6162 u8 *context)