From bc92e2fb972e039ee33c1f1477204a4d145a8b96 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Wed, 8 Mar 2017 17:08:32 -0800
Subject: gpu: nvgpu: Use new kmem API functions (gk20a core)

Use the new kmem API functions in core gk20a code. Also add a struct
gk20a pointer to several functions to ensure that the kmem APIs can be
used.

Bug 1799159
Bug 1823380

Change-Id: I41276509c4f0b68e80b989aa55cf94d8dbbdf156
Signed-off-by: Alex Waterman
Reviewed-on: http://git-master/r/1318322
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gk20a/debug_gk20a.c      |   8 +-
 drivers/gpu/nvgpu/gk20a/gk20a.c            |   2 +-
 drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c     | 129 ++++++++++----------
 drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.h     |  15 ++-
 drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c |  35 +++---
 drivers/gpu/nvgpu/gk20a/gr_gk20a.c         | 185 ++++++++++++++---------------
 6 files changed, 187 insertions(+), 187 deletions(-)

diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
index a64bccf0..b666bb16 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
@@ -71,8 +71,8 @@ static void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
                struct channel_gk20a *ch = &f->channel[chid];
                if (gk20a_channel_get(ch)) {
                        ch_state[chid] =
-                               kmalloc(sizeof(struct ch_state) +
-                                       ram_in_alloc_size_v(), GFP_KERNEL);
+                               nvgpu_kmalloc(g, sizeof(struct ch_state) +
+                                             ram_in_alloc_size_v());
                        /* ref taken stays to below loop with
                         * successful allocs */
                        if (!ch_state[chid])
@@ -96,10 +96,10 @@ static void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
                if (ch_state[chid]) {
                        g->ops.fifo.dump_channel_status_ramfc(g, o, chid,
                                                 ch_state[chid]);
-                       kfree(ch_state[chid]);
+                       nvgpu_kfree(g, ch_state[chid]);
                }
        }
-       kfree(ch_state);
+       nvgpu_kfree(g, ch_state);
 }
 
 void gk20a_debug_show_dump(struct gk20a *g, struct gk20a_debug_output *o)
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index 0495e9d1..4ed7251a 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -340,7 +340,7 @@ void gk20a_remove_support(struct gk20a *g)
        tegra_unregister_idle_unidle();
 #endif
        if (g->dbg_regops_tmp_buf)
-               kfree(g->dbg_regops_tmp_buf);
+               nvgpu_kfree(g, g->dbg_regops_tmp_buf);
 
        if (g->pmu.remove_support)
                g->pmu.remove_support(&g->pmu);
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
index 7342cb1d..96185ee7 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
@@ -22,17 +22,18 @@
 #include
 #include
+#include <nvgpu/kmem.h>
 
 #include "gk20a.h"
 #include "gr_ctx_gk20a.h"
 
 #include
 
-static int gr_gk20a_alloc_load_netlist_u32(u32 *src, u32 len,
+static int gr_gk20a_alloc_load_netlist_u32(struct gk20a *g, u32 *src, u32 len,
                        struct u32_list_gk20a *u32_list)
 {
        u32_list->count = (len + sizeof(u32) - 1) / sizeof(u32);
-       if (!alloc_u32_list_gk20a(u32_list))
+       if (!alloc_u32_list_gk20a(g, u32_list))
                return -ENOMEM;
 
        memcpy(u32_list->l, src, len);
@@ -40,11 +41,11 @@ static int gr_gk20a_alloc_load_netlist_u32(u32 *src, u32 len,
        return 0;
 }
 
-static int gr_gk20a_alloc_load_netlist_av(u32 *src, u32 len,
+static int gr_gk20a_alloc_load_netlist_av(struct gk20a *g, u32 *src, u32 len,
                        struct av_list_gk20a *av_list)
 {
        av_list->count = len / sizeof(struct av_gk20a);
-       if (!alloc_av_list_gk20a(av_list))
+       if (!alloc_av_list_gk20a(g, av_list))
                return -ENOMEM;
 
        memcpy(av_list->l, src, len);
@@ -52,11 +53,11 @@ static int gr_gk20a_alloc_load_netlist_av(u32 *src, u32 len,
        return 0;
 }
 
-static int gr_gk20a_alloc_load_netlist_aiv(u32 *src, u32 len,
+static int gr_gk20a_alloc_load_netlist_aiv(struct gk20a *g, u32 *src, u32 len,
                        struct aiv_list_gk20a *aiv_list)
 {
        aiv_list->count = len / sizeof(struct aiv_gk20a);
-       if (!alloc_aiv_list_gk20a(aiv_list))
+       if (!alloc_aiv_list_gk20a(g, aiv_list))
                return -ENOMEM;
 
        memcpy(aiv_list->l, src, len);
@@ -153,56 +154,56 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
                switch (netlist->regions[i].region_id) {
                case NETLIST_REGIONID_FECS_UCODE_DATA:
                        gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_DATA");
-                       err = gr_gk20a_alloc_load_netlist_u32(
+                       err = gr_gk20a_alloc_load_netlist_u32(g,
                                src, size, &g->gr.ctx_vars.ucode.fecs.data);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_FECS_UCODE_INST:
                        gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_INST");
-                       err = gr_gk20a_alloc_load_netlist_u32(
+                       err = gr_gk20a_alloc_load_netlist_u32(g,
                                src, size, &g->gr.ctx_vars.ucode.fecs.inst);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_GPCCS_UCODE_DATA:
                        gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_DATA");
-                       err = gr_gk20a_alloc_load_netlist_u32(
+                       err = gr_gk20a_alloc_load_netlist_u32(g,
                                src, size, &g->gr.ctx_vars.ucode.gpccs.data);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_GPCCS_UCODE_INST:
                        gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_INST");
-                       err = gr_gk20a_alloc_load_netlist_u32(
+                       err = gr_gk20a_alloc_load_netlist_u32(g,
                                src, size, &g->gr.ctx_vars.ucode.gpccs.inst);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_SW_BUNDLE_INIT:
                        gk20a_dbg_info("NETLIST_REGIONID_SW_BUNDLE_INIT");
-                       err = gr_gk20a_alloc_load_netlist_av(
+                       err = gr_gk20a_alloc_load_netlist_av(g,
                                src, size, &g->gr.ctx_vars.sw_bundle_init);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_SW_METHOD_INIT:
                        gk20a_dbg_info("NETLIST_REGIONID_SW_METHOD_INIT");
-                       err = gr_gk20a_alloc_load_netlist_av(
+                       err = gr_gk20a_alloc_load_netlist_av(g,
                                src, size, &g->gr.ctx_vars.sw_method_init);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_SW_CTX_LOAD:
                        gk20a_dbg_info("NETLIST_REGIONID_SW_CTX_LOAD");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.sw_ctx_load);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_SW_NON_CTX_LOAD:
                        gk20a_dbg_info("NETLIST_REGIONID_SW_NON_CTX_LOAD");
-                       err = gr_gk20a_alloc_load_netlist_av(
+                       err = gr_gk20a_alloc_load_netlist_av(g,
                                src, size, &g->gr.ctx_vars.sw_non_ctx_load);
                        if (err)
                                goto clean_up;
@@ -210,7 +211,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
                case NETLIST_REGIONID_SWVEIDBUNDLEINIT:
                        gk20a_dbg_info(
                                "NETLIST_REGIONID_SW_VEID_BUNDLE_INIT");
-                       err = gr_gk20a_alloc_load_netlist_av(
+                       err = gr_gk20a_alloc_load_netlist_av(g,
                                src, size,
                                &g->gr.ctx_vars.sw_veid_bundle_init);
                        if (err)
@@ -218,56 +219,56 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
                        break;
                case NETLIST_REGIONID_CTXREG_SYS:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_SYS");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.sys);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_CTXREG_GPC:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_GPC");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.gpc);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_CTXREG_TPC:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_TPC");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.tpc);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_CTXREG_ZCULL_GPC:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ZCULL_GPC");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_CTXREG_PPC:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PPC");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.ppc);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_CTXREG_PM_SYS:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_SYS");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_CTXREG_PM_GPC:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_GPC");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_CTXREG_PM_TPC:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_TPC");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc);
                        if (err)
                                goto clean_up;
@@ -294,84 +295,84 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
                        break;
                case NETLIST_REGIONID_CTXREG_PMPPC:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMPPC");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_NVPERF_CTXREG_SYS:
                        gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_SYS");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_NVPERF_FBP_CTXREGS:
                        gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_CTXREGS");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.fbp);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_NVPERF_CTXREG_GPC:
                        gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_GPC");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_NVPERF_FBP_ROUTER:
                        gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_ROUTER");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_NVPERF_GPC_ROUTER:
                        gk20a_dbg_info("NETLIST_REGIONID_NVPERF_GPC_ROUTER");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_CTXREG_PMLTC:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMLTC");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_CTXREG_PMFBPA:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMFBPA");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_NVPERF_SYS_ROUTER:
                        gk20a_dbg_info("NETLIST_REGIONID_NVPERF_SYS_ROUTER");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size,
                                &g->gr.ctx_vars.ctxsw_regs.perf_sys_router);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_NVPERF_PMA:
                        gk20a_dbg_info("NETLIST_REGIONID_NVPERF_PMA");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_CTXREG_PMROP:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMROP");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop);
                        if (err)
                                goto clean_up;
                        break;
                case NETLIST_REGIONID_CTXREG_PMUCGPC:
                        gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMUCGPC");
-                       err = gr_gk20a_alloc_load_netlist_aiv(
+                       err = gr_gk20a_alloc_load_netlist_aiv(g,
                                src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc);
                        if (err)
                                goto clean_up;
@@ -397,35 +398,35 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
 clean_up:
        g->gr.ctx_vars.valid = false;
-       kfree(g->gr.ctx_vars.ucode.fecs.inst.l);
-       kfree(g->gr.ctx_vars.ucode.fecs.data.l);
-       kfree(g->gr.ctx_vars.ucode.gpccs.inst.l);
-       kfree(g->gr.ctx_vars.ucode.gpccs.data.l);
-       kfree(g->gr.ctx_vars.sw_bundle_init.l);
-       kfree(g->gr.ctx_vars.sw_method_init.l);
-       kfree(g->gr.ctx_vars.sw_ctx_load.l);
-       kfree(g->gr.ctx_vars.sw_non_ctx_load.l);
-       kfree(g->gr.ctx_vars.sw_veid_bundle_init.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.sys.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.gpc.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.tpc.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.zcull_gpc.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.ppc.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.pm_sys.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.pm_gpc.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.pm_tpc.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.pm_ppc.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.perf_sys.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.fbp.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.perf_gpc.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.fbp_router.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.gpc_router.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.pm_ltc.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.pm_fbpa.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.perf_sys_router.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.perf_pma.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.pm_rop.l);
-       kfree(g->gr.ctx_vars.ctxsw_regs.pm_ucgpc.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ucode.fecs.inst.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ucode.fecs.data.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ucode.gpccs.inst.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ucode.gpccs.data.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.sw_bundle_init.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.sw_method_init.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.sw_ctx_load.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.sw_non_ctx_load.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.sw_veid_bundle_init.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.sys.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.gpc.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.tpc.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.zcull_gpc.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.ppc.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_sys.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_gpc.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_tpc.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ppc.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_sys.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.fbp.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_gpc.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.fbp_router.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.gpc_router.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ltc.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_fbpa.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_sys_router.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_pma.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_rop.l);
+       nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ucgpc.l);
        release_firmware(netlist_fw);
        err = -ENOENT;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.h
index 4b3f3ae6..b82f5275 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.h
@@ -18,6 +18,8 @@
 #ifndef __GR_CTX_GK20A_H__
 #define __GR_CTX_GK20A_H__
 
+#include <nvgpu/kmem.h>
+
 struct gr_gk20a;
 
 /* production netlist, one and only one from below */
@@ -145,23 +147,24 @@ struct ctxsw_buf_offset_map_entry {
 };
 
 static inline
-struct av_gk20a *alloc_av_list_gk20a(struct av_list_gk20a *avl)
+struct av_gk20a *alloc_av_list_gk20a(struct gk20a *g, struct av_list_gk20a *avl)
 {
-       avl->l = kzalloc(avl->count * sizeof(*avl->l), GFP_KERNEL);
+       avl->l = nvgpu_kzalloc(g, avl->count * sizeof(*avl->l));
        return avl->l;
 }
 
 static inline
-struct aiv_gk20a *alloc_aiv_list_gk20a(struct aiv_list_gk20a *aivl)
+struct aiv_gk20a *alloc_aiv_list_gk20a(struct gk20a *g,
+                                      struct aiv_list_gk20a *aivl)
 {
-       aivl->l = kzalloc(aivl->count * sizeof(*aivl->l), GFP_KERNEL);
+       aivl->l = nvgpu_kzalloc(g, aivl->count * sizeof(*aivl->l));
        return aivl->l;
 }
 
 static inline
-u32 *alloc_u32_list_gk20a(struct u32_list_gk20a *u32l)
+u32 *alloc_u32_list_gk20a(struct gk20a *g, struct u32_list_gk20a *u32l)
 {
-       u32l->l = kzalloc(u32l->count * sizeof(*u32l->l), GFP_KERNEL);
+       u32l->l = nvgpu_kzalloc(g, u32l->count * sizeof(*u32l->l));
        return u32l->l;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
index 34b315e6..2fdbc01a 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
@@ -71,23 +71,23 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
        gk20a_sim_esc_readl(g, "GRCTX_REG_LIST_PM_TPC_COUNT", 0,
                            &g->gr.ctx_vars.ctxsw_regs.pm_tpc.count);
 
-       err |= !alloc_u32_list_gk20a(&g->gr.ctx_vars.ucode.fecs.inst);
-       err |= !alloc_u32_list_gk20a(&g->gr.ctx_vars.ucode.fecs.data);
-       err |= !alloc_u32_list_gk20a(&g->gr.ctx_vars.ucode.gpccs.inst);
-       err |= !alloc_u32_list_gk20a(&g->gr.ctx_vars.ucode.gpccs.data);
-       err |= !alloc_av_list_gk20a(&g->gr.ctx_vars.sw_bundle_init);
-       err |= !alloc_av_list_gk20a(&g->gr.ctx_vars.sw_method_init);
-       err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.sw_ctx_load);
-       err |= !alloc_av_list_gk20a(&g->gr.ctx_vars.sw_non_ctx_load);
-       err |= !alloc_av_list_gk20a(&g->gr.ctx_vars.sw_veid_bundle_init);
-       err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.sys);
-       err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.gpc);
-       err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.tpc);
-       err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.zcull_gpc);
-       err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.ppc);
-       err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.pm_sys);
-       err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.pm_gpc);
-       err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.pm_tpc);
+       err |= !alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.fecs.inst);
+       err |= !alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.fecs.data);
+       err |= !alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.gpccs.inst);
+       err |= !alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.gpccs.data);
+       err |= !alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_bundle_init);
+       err |= !alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_method_init);
+       err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.sw_ctx_load);
+       err |= !alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_non_ctx_load);
+       err |= !alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_veid_bundle_init);
+       err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.sys);
+       err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.gpc);
+       err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.tpc);
+       err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc);
+       err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.ppc);
+       err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.pm_sys);
+       err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.pm_gpc);
+       err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.pm_tpc);
 
        if (err)
                goto fail;
@@ -244,4 +244,3 @@ fail:
 
        return err;
 }
-
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 172931d7..0e3bcdbe 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -22,7 +22,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -1250,8 +1249,8 @@ static int gr_gk20a_setup_alpha_beta_tables(struct gk20a *g,
 
        gk20a_dbg_fn("");
 
-       map_alpha = kzalloc(3 * gr_pd_alpha_ratio_table__size_1_v() *
-                           sizeof(u32), GFP_KERNEL);
+       map_alpha = nvgpu_kzalloc(g, 3 * gr_pd_alpha_ratio_table__size_1_v() *
+                                 sizeof(u32));
        if (!map_alpha)
                return -ENOMEM;
        map_beta = map_alpha + gr_pd_alpha_ratio_table__size_1_v();
@@ -1321,7 +1320,7 @@ static int gr_gk20a_setup_alpha_beta_tables(struct gk20a *g,
                }
        }
 
-       kfree(map_alpha);
+       nvgpu_kfree(g, map_alpha);
        return 0;
 }
 
@@ -1744,14 +1743,14 @@ restore_fe_go_idle:
        if (err)
                goto clean_up;
 
-       kfree(gr->sm_error_states);
+       nvgpu_kfree(g, gr->sm_error_states);
 
        /* we need to allocate this after g->ops.gr.init_fs_state() since
         * we initialize gr->no_of_sm in this function
         */
-       gr->sm_error_states = kzalloc(
+       gr->sm_error_states = nvgpu_kzalloc(g,
                        sizeof(struct nvgpu_dbg_gpu_sm_error_state_record)
-                       * gr->no_of_sm, GFP_KERNEL);
+                       * gr->no_of_sm);
        if (!gr->sm_error_states) {
                err = -ENOMEM;
                goto restore_fe_go_idle;
        }
@@ -1794,7 +1793,7 @@
 
        if (gr->ctx_vars.local_golden_image == NULL) {
                gr->ctx_vars.local_golden_image =
-                       vzalloc(gr->ctx_vars.golden_image_size);
+                       nvgpu_vzalloc(g, gr->ctx_vars.golden_image_size);
 
                if (gr->ctx_vars.local_golden_image == NULL) {
                        err = -ENOMEM;
@@ -2949,7 +2948,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
        gr->ctx_vars.buffer_size = gr->ctx_vars.golden_image_size;
        gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size;
 
-       gr_ctx = kzalloc(sizeof(*gr_ctx), GFP_KERNEL);
+       gr_ctx = nvgpu_kzalloc(g, sizeof(*gr_ctx));
        if (!gr_ctx)
                return -ENOMEM;
@@ -2975,7 +2974,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 err_free_mem:
        gk20a_gmmu_free(g, &gr_ctx->mem);
 err_free_ctx:
-       kfree(gr_ctx);
+       nvgpu_kfree(g, gr_ctx);
        gr_ctx = NULL;
 
        return err;
@@ -3023,7 +3022,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
        gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
                        gr_ctx->mem.size, gk20a_mem_flag_none);
        gk20a_gmmu_free(g, &gr_ctx->mem);
-       kfree(gr_ctx);
+       nvgpu_kfree(g, gr_ctx);
 }
 
 void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg)
@@ -3370,18 +3369,18 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
        memset(&gr->compbit_store, 0, sizeof(struct compbit_store_desc));
 
-       kfree(gr->sm_error_states);
-       kfree(gr->gpc_tpc_count);
-       kfree(gr->gpc_zcb_count);
-       kfree(gr->gpc_ppc_count);
-       kfree(gr->pes_tpc_count[0]);
-       kfree(gr->pes_tpc_count[1]);
-       kfree(gr->pes_tpc_mask[0]);
-       kfree(gr->pes_tpc_mask[1]);
-       kfree(gr->sm_to_cluster);
-       kfree(gr->gpc_skip_mask);
-       kfree(gr->map_tiles);
-       kfree(gr->fbp_rop_l2_en_mask);
+       nvgpu_kfree(g, gr->sm_error_states);
+       nvgpu_kfree(g, gr->gpc_tpc_count);
+       nvgpu_kfree(g, gr->gpc_zcb_count);
+       nvgpu_kfree(g, gr->gpc_ppc_count);
+       nvgpu_kfree(g, gr->pes_tpc_count[0]);
+       nvgpu_kfree(g, gr->pes_tpc_count[1]);
+       nvgpu_kfree(g, gr->pes_tpc_mask[0]);
+       nvgpu_kfree(g, gr->pes_tpc_mask[1]);
+       nvgpu_kfree(g, gr->sm_to_cluster);
+       nvgpu_kfree(g, gr->gpc_skip_mask);
+       nvgpu_kfree(g, gr->map_tiles);
+       nvgpu_kfree(g, gr->fbp_rop_l2_en_mask);
        gr->gpc_tpc_count = NULL;
        gr->gpc_zcb_count = NULL;
        gr->gpc_ppc_count = NULL;
@@ -3394,31 +3393,31 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
        gr->fbp_rop_l2_en_mask = NULL;
 
        gr->ctx_vars.valid = false;
-       kfree(gr->ctx_vars.ucode.fecs.inst.l);
-       kfree(gr->ctx_vars.ucode.fecs.data.l);
-       kfree(gr->ctx_vars.ucode.gpccs.inst.l);
-       kfree(gr->ctx_vars.ucode.gpccs.data.l);
-       kfree(gr->ctx_vars.sw_bundle_init.l);
-       kfree(gr->ctx_vars.sw_veid_bundle_init.l);
-       kfree(gr->ctx_vars.sw_method_init.l);
-       kfree(gr->ctx_vars.sw_ctx_load.l);
-       kfree(gr->ctx_vars.sw_non_ctx_load.l);
-       kfree(gr->ctx_vars.ctxsw_regs.sys.l);
-       kfree(gr->ctx_vars.ctxsw_regs.gpc.l);
-       kfree(gr->ctx_vars.ctxsw_regs.tpc.l);
-       kfree(gr->ctx_vars.ctxsw_regs.zcull_gpc.l);
-       kfree(gr->ctx_vars.ctxsw_regs.ppc.l);
-       kfree(gr->ctx_vars.ctxsw_regs.pm_sys.l);
-       kfree(gr->ctx_vars.ctxsw_regs.pm_gpc.l);
-       kfree(gr->ctx_vars.ctxsw_regs.pm_tpc.l);
-       kfree(gr->ctx_vars.ctxsw_regs.pm_ppc.l);
-       kfree(gr->ctx_vars.ctxsw_regs.perf_sys.l);
-       kfree(gr->ctx_vars.ctxsw_regs.fbp.l);
-       kfree(gr->ctx_vars.ctxsw_regs.perf_gpc.l);
-       kfree(gr->ctx_vars.ctxsw_regs.fbp_router.l);
-       kfree(gr->ctx_vars.ctxsw_regs.gpc_router.l);
-       kfree(gr->ctx_vars.ctxsw_regs.pm_ltc.l);
-       kfree(gr->ctx_vars.ctxsw_regs.pm_fbpa.l);
+       nvgpu_kfree(g, gr->ctx_vars.ucode.fecs.inst.l);
+       nvgpu_kfree(g, gr->ctx_vars.ucode.fecs.data.l);
+       nvgpu_kfree(g, gr->ctx_vars.ucode.gpccs.inst.l);
+       nvgpu_kfree(g, gr->ctx_vars.ucode.gpccs.data.l);
+       nvgpu_kfree(g, gr->ctx_vars.sw_bundle_init.l);
+       nvgpu_kfree(g, gr->ctx_vars.sw_veid_bundle_init.l);
+       nvgpu_kfree(g, gr->ctx_vars.sw_method_init.l);
+       nvgpu_kfree(g, gr->ctx_vars.sw_ctx_load.l);
+       nvgpu_kfree(g, gr->ctx_vars.sw_non_ctx_load.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.sys.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.gpc.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.tpc.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.zcull_gpc.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.ppc.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_sys.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_gpc.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_tpc.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_ppc.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.perf_sys.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.fbp.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.perf_gpc.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.fbp_router.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.gpc_router.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_ltc.l);
+       nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_fbpa.l);
 
        vfree(gr->ctx_vars.local_golden_image);
        gr->ctx_vars.local_golden_image = NULL;
@@ -3464,7 +3463,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
        gr->fbp_en_mask = g->ops.gr.get_fbp_en_mask(g);
 
        gr->fbp_rop_l2_en_mask =
-               kzalloc(gr->max_fbps_count * sizeof(u32), GFP_KERNEL);
+               nvgpu_kzalloc(g, gr->max_fbps_count * sizeof(u32));
        if (!gr->fbp_rop_l2_en_mask)
                goto clean_up;
@@ -3491,14 +3490,14 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
                goto clean_up;
        }
 
-       gr->gpc_tpc_count = kzalloc(gr->gpc_count * sizeof(u32), GFP_KERNEL);
-       gr->gpc_tpc_mask = kzalloc(gr->gpc_count * sizeof(u32), GFP_KERNEL);
-       gr->gpc_zcb_count = kzalloc(gr->gpc_count * sizeof(u32), GFP_KERNEL);
-       gr->gpc_ppc_count = kzalloc(gr->gpc_count * sizeof(u32), GFP_KERNEL);
+       gr->gpc_tpc_count = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
+       gr->gpc_tpc_mask = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
+       gr->gpc_zcb_count = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
+       gr->gpc_ppc_count = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
        gr->gpc_skip_mask =
-               kzalloc(gr_pd_dist_skip_table__size_1_v() * 4 * sizeof(u32),
-                       GFP_KERNEL);
+               nvgpu_kzalloc(g, gr_pd_dist_skip_table__size_1_v() *
+                             4 * sizeof(u32));
 
        if (!gr->gpc_tpc_count || !gr->gpc_tpc_mask || !gr->gpc_zcb_count ||
            !gr->gpc_ppc_count || !gr->gpc_skip_mask)
@@ -3526,11 +3525,11 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
        for (pes_index = 0; pes_index < gr->pe_count_per_gpc; pes_index++) {
                if (!gr->pes_tpc_count[pes_index]) {
                        gr->pes_tpc_count[pes_index] =
-                               kzalloc(gr->gpc_count * sizeof(u32),
-                                       GFP_KERNEL);
+                               nvgpu_kzalloc(g, gr->gpc_count *
+                                             sizeof(u32));
                        gr->pes_tpc_mask[pes_index] =
-                               kzalloc(gr->gpc_count * sizeof(u32),
-                                       GFP_KERNEL);
+                               nvgpu_kzalloc(g, gr->gpc_count *
+                                             sizeof(u32));
                        if (!gr->pes_tpc_count[pes_index] ||
                            !gr->pes_tpc_mask[pes_index])
                                goto clean_up;
@@ -3585,8 +3584,8 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
                gr->gpc_skip_mask[gpc_index] = gpc_new_skip_mask;
        }
 
-       gr->sm_to_cluster = kzalloc(gr->gpc_count * gr->tpc_count *
-                       sizeof(struct sm_info), GFP_KERNEL);
+       gr->sm_to_cluster = nvgpu_kzalloc(g, gr->gpc_count * gr->tpc_count *
+                                         sizeof(struct sm_info));
        gr->no_of_sm = 0;
 
        gk20a_dbg_info("fbps: %d", gr->num_fbps);
@@ -3696,14 +3695,13 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
        int num_tpc_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_TPC_PER_GPC);
        int map_tile_count = num_gpcs * num_tpc_per_gpc;
 
-       init_frac = kzalloc(num_gpcs * sizeof(s32), GFP_KERNEL);
-       init_err = kzalloc(num_gpcs * sizeof(s32), GFP_KERNEL);
-       run_err = kzalloc(num_gpcs * sizeof(s32), GFP_KERNEL);
+       init_frac = nvgpu_kzalloc(g, num_gpcs * sizeof(s32));
+       init_err = nvgpu_kzalloc(g, num_gpcs * sizeof(s32));
+       run_err = nvgpu_kzalloc(g, num_gpcs * sizeof(s32));
        sorted_num_tpcs =
-               kzalloc(num_gpcs * num_tpc_per_gpc * sizeof(s32),
-                       GFP_KERNEL);
+               nvgpu_kzalloc(g, num_gpcs * num_tpc_per_gpc * sizeof(s32));
        sorted_to_unsorted_gpc_map =
-               kzalloc(num_gpcs * sizeof(s32), GFP_KERNEL);
+               nvgpu_kzalloc(g, num_gpcs * sizeof(s32));
 
        if (!(init_frac && init_err && run_err && sorted_num_tpcs &&
              sorted_to_unsorted_gpc_map)) {
@@ -3764,15 +3762,14 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
                }
 
                if (delete_map) {
-                       kfree(gr->map_tiles);
+                       nvgpu_kfree(g, gr->map_tiles);
                        gr->map_tiles = NULL;
                        gr->map_tile_count = 0;
                }
        }
 
        if (gr->map_tiles == NULL) {
-               gr->map_tiles = kzalloc(map_tile_count * sizeof(u8),
-                                       GFP_KERNEL);
+               gr->map_tiles = nvgpu_kzalloc(g, num_gpcs * sizeof(u8));
                if (gr->map_tiles == NULL) {
                        ret = -ENOMEM;
                        goto clean_up;
@@ -3838,11 +3835,11 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
        }
 
 clean_up:
-       kfree(init_frac);
-       kfree(init_err);
-       kfree(run_err);
-       kfree(sorted_num_tpcs);
-       kfree(sorted_to_unsorted_gpc_map);
+       nvgpu_kfree(g, init_frac);
+       nvgpu_kfree(g, init_err);
+       nvgpu_kfree(g, run_err);
+       nvgpu_kfree(g, sorted_num_tpcs);
+       nvgpu_kfree(g, sorted_to_unsorted_gpc_map);
 
        if (ret)
                gk20a_err(dev_from_gk20a(g), "fail");
@@ -4588,20 +4585,20 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
                /* Total 8 fields per map reg i.e. tile_0 to tile_7*/
                zcull_alloc_num += (zcull_alloc_num % 8);
        }
-       zcull_map_tiles = kzalloc(zcull_alloc_num *
-                       sizeof(u32), GFP_KERNEL);
+       zcull_map_tiles = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32));
+
        if (!zcull_map_tiles) {
                gk20a_err(dev_from_gk20a(g),
                        "failed to allocate zcull map titles");
                return -ENOMEM;
        }
-       zcull_bank_counters = kzalloc(zcull_alloc_num *
-                       sizeof(u32), GFP_KERNEL);
+
+       zcull_bank_counters = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32));
        if (!zcull_bank_counters) {
                gk20a_err(dev_from_gk20a(g),
                        "failed to allocate zcull bank counters");
-               kfree(zcull_map_tiles);
+               nvgpu_kfree(g, zcull_map_tiles);
                return -ENOMEM;
        }
@@ -4616,8 +4613,8 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
        g->ops.gr.program_zcull_mapping(g, zcull_alloc_num,
                        zcull_map_tiles);
 
-       kfree(zcull_map_tiles);
-       kfree(zcull_bank_counters);
+       nvgpu_kfree(g, zcull_map_tiles);
+       nvgpu_kfree(g, zcull_bank_counters);
 
        for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
                gpc_tpc_count = gr->gpc_tpc_count[gpc_index];
@@ -4891,14 +4888,14 @@ restore_fe_go_idle:
        if (err)
                goto out;
 
-       kfree(gr->sm_error_states);
+       nvgpu_kfree(g, gr->sm_error_states);
 
        /* we need to allocate this after g->ops.gr.init_fs_state() since
         * we initialize gr->no_of_sm in this function
         */
-       gr->sm_error_states = kzalloc(
-               sizeof(struct nvgpu_dbg_gpu_sm_error_state_record)
-               * gr->no_of_sm, GFP_KERNEL);
+       gr->sm_error_states = nvgpu_kzalloc(g,
+                       sizeof(struct nvgpu_dbg_gpu_sm_error_state_record) *
+                       gr->no_of_sm);
        if (!gr->sm_error_states) {
                err = -ENOMEM;
                goto restore_fe_go_idle;
        }
@@ -6945,7 +6942,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
        if (!g->gr.ctx_vars.golden_image_initialized)
                return -ENODEV;
 
-       priv_registers = kzalloc(sizeof(u32) * potential_offsets, GFP_KERNEL);
+       priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets);
        if (!priv_registers) {
                gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets);
                err = PTR_ERR(priv_registers);
@@ -6991,7 +6988,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
        *num_offsets = num_registers;
 cleanup:
        if (!IS_ERR_OR_NULL(priv_registers))
-               kfree(priv_registers);
+               nvgpu_kfree(g, priv_registers);
 
        return err;
 }
@@ -7019,7 +7016,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
        if (!g->gr.ctx_vars.golden_image_initialized)
                return -ENODEV;
 
-       priv_registers = kzalloc(sizeof(u32) * potential_offsets, GFP_KERNEL);
+       priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets);
        if (ZERO_OR_NULL_PTR(priv_registers)) {
                gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets);
                return -ENOMEM;
@@ -7060,7 +7057,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
        *num_offsets = num_registers;
 cleanup:
-       kfree(priv_registers);
+       nvgpu_kfree(g, priv_registers);
 
        return err;
 }
@@ -8352,7 +8349,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
        }
 
        /* they're the same size, so just use one alloc for both */
-       offsets = kzalloc(2 * sizeof(u32) * max_offsets, GFP_KERNEL);
+       offsets = nvgpu_kzalloc(g, 2 * sizeof(u32) * max_offsets);
        if (!offsets) {
                err = -ENOMEM;
                goto cleanup;
@@ -8502,7 +8499,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 
 cleanup:
        if (offsets)
-               kfree(offsets);
+               nvgpu_kfree(g, offsets);
 
        if (ch_ctx->patch_ctx.mem.cpu_va)
                gr_gk20a_ctx_patch_write_end(g, ch_ctx);
@@ -9025,7 +9022,7 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
        u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
        u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
 
-       ops = kcalloc(g->gr.no_of_sm, sizeof(*ops), GFP_KERNEL);
+       ops = nvgpu_kcalloc(g, g->gr.no_of_sm, sizeof(*ops));
        if (!ops)
                return -ENOMEM;
        for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) {
@@ -9068,7 +9065,7 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
        err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0);
        if (err)
                gk20a_err(dev_from_gk20a(g), "Failed to access register\n");
-       kfree(ops);
+       nvgpu_kfree(g, ops);
        return err;
 }
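
Note (illustration, not part of the commit): every hunk above applies the
same mechanical conversion. The sketch below shows the pattern under two
stated assumptions: that the new wrappers are declared in <nvgpu/kmem.h>
(the header this kmem series introduces), and that example_alloc() is a
hypothetical caller invented purely for this example.

        #include <nvgpu/kmem.h> /* assumed home of the nvgpu_k* wrappers */

        /* Hypothetical caller -- not a function from this patch. */
        static int example_alloc(struct gk20a *g, u32 count)
        {
                u32 *buf;

                /* Before: buf = kzalloc(count * sizeof(u32), GFP_KERNEL); */
                buf = nvgpu_kzalloc(g, count * sizeof(u32));
                if (!buf)
                        return -ENOMEM;

                /* ... use buf ... */

                /* Before: kfree(buf); */
                nvgpu_kfree(g, buf);
                return 0;
        }

As the call sites above show, the wrappers drop the GFP flags argument
(GFP_KERNEL is implied at these call sites) and take a struct gk20a
pointer as their first argument, presumably so the kmem layer can account
allocations per GPU; that is also why several static helpers gain a
struct gk20a *g parameter in this patch.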