author	Alex Waterman <alexw@nvidia.com>	2017-03-08 20:08:32 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-03-29 14:50:21 -0400
commit	bc92e2fb972e039ee33c1f1477204a4d145a8b96 (patch)
tree	a43df80fe921f3e4b50c70bf67aef30a0b5dd5bb
parent	4022b989aa2e91fe77ed52df49d45838f6d8b9bb (diff)
gpu: nvgpu: Use new kmem API functions (gk20a core)
Use the new kmem API functions in core gk20a code. Also add a struct
gk20a pointer to several functions to ensure that the kmem APIs can
be used.

Bug 1799159
Bug 1823380

Change-Id: I41276509c4f0b68e80b989aa55cf94d8dbbdf156
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1318322
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
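The change is mechanical throughout: each Linux slab call gains the struct
gk20a pointer and drops its GFP flags, so the allocation is attributed to
that GPU instance by the nvgpu kmem layer. Below is a minimal sketch of the
pattern, using only the nvgpu_k*() calls that actually appear in this diff;
the example_alloc() wrapper and the n parameter are illustrative, not part
of the driver.

	#include <nvgpu/kmem.h>

	static int example_alloc(struct gk20a *g, u32 n)
	{
		u32 *buf;

		/* Before this patch: kzalloc(n * sizeof(u32), GFP_KERNEL) */
		buf = nvgpu_kzalloc(g, n * sizeof(u32));
		if (!buf)
			return -ENOMEM;

		/* ... use buf ... */

		/* Frees go through the same API: kfree(buf) becomes: */
		nvgpu_kfree(g, buf);
		return 0;
	}

The same mapping covers kmalloc() -> nvgpu_kmalloc(), kcalloc() ->
nvgpu_kcalloc() and vzalloc() -> nvgpu_vzalloc() in the hunks below.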
-rw-r--r--	drivers/gpu/nvgpu/gk20a/debug_gk20a.c	|   8
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.c	|   2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c	| 129
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.h	|  15
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c	|  35
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	| 185
6 files changed, 187 insertions, 187 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
index a64bccf0..b666bb16 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
@@ -71,8 +71,8 @@ static void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
 			struct channel_gk20a *ch = &f->channel[chid];
 			if (gk20a_channel_get(ch)) {
 				ch_state[chid] =
-					kmalloc(sizeof(struct ch_state) +
-						ram_in_alloc_size_v(), GFP_KERNEL);
+					nvgpu_kmalloc(g, sizeof(struct ch_state) +
+						      ram_in_alloc_size_v());
 				/* ref taken stays to below loop with
 				 * successful allocs */
 				if (!ch_state[chid])
@@ -96,10 +96,10 @@ static void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
 		if (ch_state[chid]) {
 			g->ops.fifo.dump_channel_status_ramfc(g, o, chid,
 							      ch_state[chid]);
-			kfree(ch_state[chid]);
+			nvgpu_kfree(g, ch_state[chid]);
 		}
 	}
-	kfree(ch_state);
+	nvgpu_kfree(g, ch_state);
 }
 
 void gk20a_debug_show_dump(struct gk20a *g, struct gk20a_debug_output *o)
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index 0495e9d1..4ed7251a 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -340,7 +340,7 @@ void gk20a_remove_support(struct gk20a *g)
 	tegra_unregister_idle_unidle();
 #endif
 	if (g->dbg_regops_tmp_buf)
-		kfree(g->dbg_regops_tmp_buf);
+		nvgpu_kfree(g, g->dbg_regops_tmp_buf);
 
 	if (g->pmu.remove_support)
 		g->pmu.remove_support(&g->pmu);
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
index 7342cb1d..96185ee7 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.c
@@ -22,17 +22,18 @@
 #include <linux/firmware.h>
 
 #include <nvgpu/nvgpu_common.h>
+#include <nvgpu/kmem.h>
 
 #include "gk20a.h"
 #include "gr_ctx_gk20a.h"
 
 #include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
 
-static int gr_gk20a_alloc_load_netlist_u32(u32 *src, u32 len,
+static int gr_gk20a_alloc_load_netlist_u32(struct gk20a *g, u32 *src, u32 len,
 			struct u32_list_gk20a *u32_list)
 {
 	u32_list->count = (len + sizeof(u32) - 1) / sizeof(u32);
-	if (!alloc_u32_list_gk20a(u32_list))
+	if (!alloc_u32_list_gk20a(g, u32_list))
 		return -ENOMEM;
 
 	memcpy(u32_list->l, src, len);
@@ -40,11 +41,11 @@ static int gr_gk20a_alloc_load_netlist_u32(u32 *src, u32 len,
 	return 0;
 }
 
-static int gr_gk20a_alloc_load_netlist_av(u32 *src, u32 len,
+static int gr_gk20a_alloc_load_netlist_av(struct gk20a *g, u32 *src, u32 len,
 			struct av_list_gk20a *av_list)
 {
 	av_list->count = len / sizeof(struct av_gk20a);
-	if (!alloc_av_list_gk20a(av_list))
+	if (!alloc_av_list_gk20a(g, av_list))
 		return -ENOMEM;
 
 	memcpy(av_list->l, src, len);
@@ -52,11 +53,11 @@ static int gr_gk20a_alloc_load_netlist_av(u32 *src, u32 len,
 	return 0;
 }
 
-static int gr_gk20a_alloc_load_netlist_aiv(u32 *src, u32 len,
+static int gr_gk20a_alloc_load_netlist_aiv(struct gk20a *g, u32 *src, u32 len,
 			struct aiv_list_gk20a *aiv_list)
 {
 	aiv_list->count = len / sizeof(struct aiv_gk20a);
-	if (!alloc_aiv_list_gk20a(aiv_list))
+	if (!alloc_aiv_list_gk20a(g, aiv_list))
 		return -ENOMEM;
 
 	memcpy(aiv_list->l, src, len);
@@ -153,56 +154,56 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
 		switch (netlist->regions[i].region_id) {
 		case NETLIST_REGIONID_FECS_UCODE_DATA:
 			gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_DATA");
-			err = gr_gk20a_alloc_load_netlist_u32(
+			err = gr_gk20a_alloc_load_netlist_u32(g,
 				src, size, &g->gr.ctx_vars.ucode.fecs.data);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_FECS_UCODE_INST:
 			gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_INST");
-			err = gr_gk20a_alloc_load_netlist_u32(
+			err = gr_gk20a_alloc_load_netlist_u32(g,
 				src, size, &g->gr.ctx_vars.ucode.fecs.inst);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_GPCCS_UCODE_DATA:
 			gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_DATA");
-			err = gr_gk20a_alloc_load_netlist_u32(
+			err = gr_gk20a_alloc_load_netlist_u32(g,
 				src, size, &g->gr.ctx_vars.ucode.gpccs.data);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_GPCCS_UCODE_INST:
 			gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_INST");
-			err = gr_gk20a_alloc_load_netlist_u32(
+			err = gr_gk20a_alloc_load_netlist_u32(g,
 				src, size, &g->gr.ctx_vars.ucode.gpccs.inst);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_SW_BUNDLE_INIT:
 			gk20a_dbg_info("NETLIST_REGIONID_SW_BUNDLE_INIT");
-			err = gr_gk20a_alloc_load_netlist_av(
+			err = gr_gk20a_alloc_load_netlist_av(g,
 				src, size, &g->gr.ctx_vars.sw_bundle_init);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_SW_METHOD_INIT:
 			gk20a_dbg_info("NETLIST_REGIONID_SW_METHOD_INIT");
-			err = gr_gk20a_alloc_load_netlist_av(
+			err = gr_gk20a_alloc_load_netlist_av(g,
 				src, size, &g->gr.ctx_vars.sw_method_init);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_SW_CTX_LOAD:
 			gk20a_dbg_info("NETLIST_REGIONID_SW_CTX_LOAD");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.sw_ctx_load);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_SW_NON_CTX_LOAD:
 			gk20a_dbg_info("NETLIST_REGIONID_SW_NON_CTX_LOAD");
-			err = gr_gk20a_alloc_load_netlist_av(
+			err = gr_gk20a_alloc_load_netlist_av(g,
 				src, size, &g->gr.ctx_vars.sw_non_ctx_load);
 			if (err)
 				goto clean_up;
@@ -210,7 +211,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
 		case NETLIST_REGIONID_SWVEIDBUNDLEINIT:
 			gk20a_dbg_info(
 				"NETLIST_REGIONID_SW_VEID_BUNDLE_INIT");
-			err = gr_gk20a_alloc_load_netlist_av(
+			err = gr_gk20a_alloc_load_netlist_av(g,
 				src, size,
 				&g->gr.ctx_vars.sw_veid_bundle_init);
 			if (err)
@@ -218,56 +219,56 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
 			break;
 		case NETLIST_REGIONID_CTXREG_SYS:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_SYS");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.sys);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_CTXREG_GPC:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_GPC");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.gpc);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_CTXREG_TPC:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_TPC");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.tpc);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_CTXREG_ZCULL_GPC:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ZCULL_GPC");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_CTXREG_PPC:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PPC");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.ppc);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_CTXREG_PM_SYS:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_SYS");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_CTXREG_PM_GPC:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_GPC");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_CTXREG_PM_TPC:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_TPC");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc);
 			if (err)
 				goto clean_up;
@@ -294,84 +295,84 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
 			break;
 		case NETLIST_REGIONID_CTXREG_PMPPC:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMPPC");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ppc);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_NVPERF_CTXREG_SYS:
 			gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_SYS");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_NVPERF_FBP_CTXREGS:
 			gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_CTXREGS");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.fbp);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_NVPERF_CTXREG_GPC:
 			gk20a_dbg_info("NETLIST_REGIONID_NVPERF_CTXREG_GPC");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.perf_gpc);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_NVPERF_FBP_ROUTER:
 			gk20a_dbg_info("NETLIST_REGIONID_NVPERF_FBP_ROUTER");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.fbp_router);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_NVPERF_GPC_ROUTER:
 			gk20a_dbg_info("NETLIST_REGIONID_NVPERF_GPC_ROUTER");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.gpc_router);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_CTXREG_PMLTC:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMLTC");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ltc);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_CTXREG_PMFBPA:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMFBPA");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.pm_fbpa);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_NVPERF_SYS_ROUTER:
 			gk20a_dbg_info("NETLIST_REGIONID_NVPERF_SYS_ROUTER");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.perf_sys_router);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_NVPERF_PMA:
 			gk20a_dbg_info("NETLIST_REGIONID_NVPERF_PMA");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.perf_pma);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_CTXREG_PMROP:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMROP");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.pm_rop);
 			if (err)
 				goto clean_up;
 			break;
 		case NETLIST_REGIONID_CTXREG_PMUCGPC:
 			gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMUCGPC");
-			err = gr_gk20a_alloc_load_netlist_aiv(
+			err = gr_gk20a_alloc_load_netlist_aiv(g,
 				src, size, &g->gr.ctx_vars.ctxsw_regs.pm_ucgpc);
 			if (err)
 				goto clean_up;
@@ -397,35 +398,35 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
 
 clean_up:
 		g->gr.ctx_vars.valid = false;
-		kfree(g->gr.ctx_vars.ucode.fecs.inst.l);
-		kfree(g->gr.ctx_vars.ucode.fecs.data.l);
-		kfree(g->gr.ctx_vars.ucode.gpccs.inst.l);
-		kfree(g->gr.ctx_vars.ucode.gpccs.data.l);
-		kfree(g->gr.ctx_vars.sw_bundle_init.l);
-		kfree(g->gr.ctx_vars.sw_method_init.l);
-		kfree(g->gr.ctx_vars.sw_ctx_load.l);
-		kfree(g->gr.ctx_vars.sw_non_ctx_load.l);
-		kfree(g->gr.ctx_vars.sw_veid_bundle_init.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.sys.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.gpc.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.tpc.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.zcull_gpc.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.ppc.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.pm_sys.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.pm_gpc.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.pm_tpc.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.pm_ppc.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.perf_sys.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.fbp.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.perf_gpc.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.fbp_router.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.gpc_router.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.pm_ltc.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.pm_fbpa.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.perf_sys_router.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.perf_pma.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.pm_rop.l);
-		kfree(g->gr.ctx_vars.ctxsw_regs.pm_ucgpc.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ucode.fecs.inst.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ucode.fecs.data.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ucode.gpccs.inst.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ucode.gpccs.data.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.sw_bundle_init.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.sw_method_init.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.sw_ctx_load.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.sw_non_ctx_load.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.sw_veid_bundle_init.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.sys.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.gpc.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.tpc.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.zcull_gpc.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.ppc.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_sys.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_gpc.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_tpc.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ppc.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_sys.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.fbp.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_gpc.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.fbp_router.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.gpc_router.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ltc.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_fbpa.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_sys_router.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.perf_pma.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_rop.l);
+		nvgpu_kfree(g, g->gr.ctx_vars.ctxsw_regs.pm_ucgpc.l);
 		release_firmware(netlist_fw);
 		err = -ENOENT;
 	}
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.h
index 4b3f3ae6..b82f5275 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a.h
@@ -18,6 +18,8 @@
 #ifndef __GR_CTX_GK20A_H__
 #define __GR_CTX_GK20A_H__
 
+#include <nvgpu/kmem.h>
+
 struct gr_gk20a;
 
 /* production netlist, one and only one from below */
@@ -145,23 +147,24 @@ struct ctxsw_buf_offset_map_entry {
 };
 
 static inline
-struct av_gk20a *alloc_av_list_gk20a(struct av_list_gk20a *avl)
+struct av_gk20a *alloc_av_list_gk20a(struct gk20a *g, struct av_list_gk20a *avl)
 {
-	avl->l = kzalloc(avl->count * sizeof(*avl->l), GFP_KERNEL);
+	avl->l = nvgpu_kzalloc(g, avl->count * sizeof(*avl->l));
 	return avl->l;
 }
 
 static inline
-struct aiv_gk20a *alloc_aiv_list_gk20a(struct aiv_list_gk20a *aivl)
+struct aiv_gk20a *alloc_aiv_list_gk20a(struct gk20a *g,
+				       struct aiv_list_gk20a *aivl)
 {
-	aivl->l = kzalloc(aivl->count * sizeof(*aivl->l), GFP_KERNEL);
+	aivl->l = nvgpu_kzalloc(g, aivl->count * sizeof(*aivl->l));
 	return aivl->l;
 }
 
 static inline
-u32 *alloc_u32_list_gk20a(struct u32_list_gk20a *u32l)
+u32 *alloc_u32_list_gk20a(struct gk20a *g, struct u32_list_gk20a *u32l)
 {
-	u32l->l = kzalloc(u32l->count * sizeof(*u32l->l), GFP_KERNEL);
+	u32l->l = nvgpu_kzalloc(g, u32l->count * sizeof(*u32l->l));
 	return u32l->l;
 }
 
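A usage note on the helpers above: each returns the freshly allocated array
(NULL on failure), so callers test the return value directly; with the new
first parameter they also need a struct gk20a handle in scope, which is why
the netlist loaders in gr_ctx_gk20a.c and gr_ctx_gk20a_sim.c grew a g
argument. A hedged sketch of the calling pattern (the count of 16 is made
up for illustration):

	struct av_list_gk20a bundle = { .count = 16 };

	/* Zero-allocates bundle.count entries via nvgpu_kzalloc(g, ...)
	 * and returns bundle.l, or NULL if the allocation failed. */
	if (!alloc_av_list_gk20a(g, &bundle))
		return -ENOMEM;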
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
index 34b315e6..2fdbc01a 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_ctx_gk20a_sim.c
@@ -71,23 +71,23 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
 	gk20a_sim_esc_readl(g, "GRCTX_REG_LIST_PM_TPC_COUNT", 0,
 			    &g->gr.ctx_vars.ctxsw_regs.pm_tpc.count);
 
-	err |= !alloc_u32_list_gk20a(&g->gr.ctx_vars.ucode.fecs.inst);
-	err |= !alloc_u32_list_gk20a(&g->gr.ctx_vars.ucode.fecs.data);
-	err |= !alloc_u32_list_gk20a(&g->gr.ctx_vars.ucode.gpccs.inst);
-	err |= !alloc_u32_list_gk20a(&g->gr.ctx_vars.ucode.gpccs.data);
-	err |= !alloc_av_list_gk20a(&g->gr.ctx_vars.sw_bundle_init);
-	err |= !alloc_av_list_gk20a(&g->gr.ctx_vars.sw_method_init);
-	err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.sw_ctx_load);
-	err |= !alloc_av_list_gk20a(&g->gr.ctx_vars.sw_non_ctx_load);
-	err |= !alloc_av_list_gk20a(&g->gr.ctx_vars.sw_veid_bundle_init);
-	err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.sys);
-	err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.gpc);
-	err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.tpc);
-	err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.zcull_gpc);
-	err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.ppc);
-	err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.pm_sys);
-	err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.pm_gpc);
-	err |= !alloc_aiv_list_gk20a(&g->gr.ctx_vars.ctxsw_regs.pm_tpc);
+	err |= !alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.fecs.inst);
+	err |= !alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.fecs.data);
+	err |= !alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.gpccs.inst);
+	err |= !alloc_u32_list_gk20a(g, &g->gr.ctx_vars.ucode.gpccs.data);
+	err |= !alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_bundle_init);
+	err |= !alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_method_init);
+	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.sw_ctx_load);
+	err |= !alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_non_ctx_load);
+	err |= !alloc_av_list_gk20a(g, &g->gr.ctx_vars.sw_veid_bundle_init);
+	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.sys);
+	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.gpc);
+	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.tpc);
+	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc);
+	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.ppc);
+	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.pm_sys);
+	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.pm_gpc);
+	err |= !alloc_aiv_list_gk20a(g, &g->gr.ctx_vars.ctxsw_regs.pm_tpc);
 
 	if (err)
 		goto fail;
@@ -244,4 +244,3 @@ fail:
 	return err;
 
 }
-
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 172931d7..0e3bcdbe 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -22,7 +22,6 @@
 #include <linux/scatterlist.h>
 #include <linux/debugfs.h>
 #include <uapi/linux/nvgpu.h>
-#include <linux/vmalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
 #include <linux/nvhost.h>
@@ -1250,8 +1249,8 @@ static int gr_gk20a_setup_alpha_beta_tables(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
-	map_alpha = kzalloc(3 * gr_pd_alpha_ratio_table__size_1_v() *
-				sizeof(u32), GFP_KERNEL);
+	map_alpha = nvgpu_kzalloc(g, 3 * gr_pd_alpha_ratio_table__size_1_v() *
+				  sizeof(u32));
 	if (!map_alpha)
 		return -ENOMEM;
 	map_beta = map_alpha + gr_pd_alpha_ratio_table__size_1_v();
@@ -1321,7 +1320,7 @@ static int gr_gk20a_setup_alpha_beta_tables(struct gk20a *g,
 		}
 	}
 
-	kfree(map_alpha);
+	nvgpu_kfree(g, map_alpha);
 	return 0;
 }
 
@@ -1744,14 +1743,14 @@ restore_fe_go_idle:
 	if (err)
 		goto clean_up;
 
-	kfree(gr->sm_error_states);
+	nvgpu_kfree(g, gr->sm_error_states);
 
 	/* we need to allocate this after g->ops.gr.init_fs_state() since
 	 * we initialize gr->no_of_sm in this function
 	 */
-	gr->sm_error_states = kzalloc(
+	gr->sm_error_states = nvgpu_kzalloc(g,
 			sizeof(struct nvgpu_dbg_gpu_sm_error_state_record)
-			* gr->no_of_sm, GFP_KERNEL);
+			* gr->no_of_sm);
 	if (!gr->sm_error_states) {
 		err = -ENOMEM;
 		goto restore_fe_go_idle;
@@ -1794,7 +1793,7 @@ restore_fe_go_idle:
 	if (gr->ctx_vars.local_golden_image == NULL) {
 
 		gr->ctx_vars.local_golden_image =
-			vzalloc(gr->ctx_vars.golden_image_size);
+			nvgpu_vzalloc(g, gr->ctx_vars.golden_image_size);
 
 		if (gr->ctx_vars.local_golden_image == NULL) {
 			err = -ENOMEM;
@@ -2949,7 +2948,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	gr->ctx_vars.buffer_size = gr->ctx_vars.golden_image_size;
 	gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size;
 
-	gr_ctx = kzalloc(sizeof(*gr_ctx), GFP_KERNEL);
+	gr_ctx = nvgpu_kzalloc(g, sizeof(*gr_ctx));
 	if (!gr_ctx)
 		return -ENOMEM;
 
@@ -2975,7 +2974,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 err_free_mem:
 	gk20a_gmmu_free(g, &gr_ctx->mem);
 err_free_ctx:
-	kfree(gr_ctx);
+	nvgpu_kfree(g, gr_ctx);
 	gr_ctx = NULL;
 
 	return err;
@@ -3023,7 +3022,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 	gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
 			gr_ctx->mem.size, gk20a_mem_flag_none);
 	gk20a_gmmu_free(g, &gr_ctx->mem);
-	kfree(gr_ctx);
+	nvgpu_kfree(g, gr_ctx);
 }
 
 void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg)
@@ -3370,18 +3369,18 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
 
 	memset(&gr->compbit_store, 0, sizeof(struct compbit_store_desc));
 
-	kfree(gr->sm_error_states);
-	kfree(gr->gpc_tpc_count);
-	kfree(gr->gpc_zcb_count);
-	kfree(gr->gpc_ppc_count);
-	kfree(gr->pes_tpc_count[0]);
-	kfree(gr->pes_tpc_count[1]);
-	kfree(gr->pes_tpc_mask[0]);
-	kfree(gr->pes_tpc_mask[1]);
-	kfree(gr->sm_to_cluster);
-	kfree(gr->gpc_skip_mask);
-	kfree(gr->map_tiles);
-	kfree(gr->fbp_rop_l2_en_mask);
+	nvgpu_kfree(g, gr->sm_error_states);
+	nvgpu_kfree(g, gr->gpc_tpc_count);
+	nvgpu_kfree(g, gr->gpc_zcb_count);
+	nvgpu_kfree(g, gr->gpc_ppc_count);
+	nvgpu_kfree(g, gr->pes_tpc_count[0]);
+	nvgpu_kfree(g, gr->pes_tpc_count[1]);
+	nvgpu_kfree(g, gr->pes_tpc_mask[0]);
+	nvgpu_kfree(g, gr->pes_tpc_mask[1]);
+	nvgpu_kfree(g, gr->sm_to_cluster);
+	nvgpu_kfree(g, gr->gpc_skip_mask);
+	nvgpu_kfree(g, gr->map_tiles);
+	nvgpu_kfree(g, gr->fbp_rop_l2_en_mask);
 	gr->gpc_tpc_count = NULL;
 	gr->gpc_zcb_count = NULL;
 	gr->gpc_ppc_count = NULL;
@@ -3394,31 +3393,31 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
 	gr->fbp_rop_l2_en_mask = NULL;
 
 	gr->ctx_vars.valid = false;
-	kfree(gr->ctx_vars.ucode.fecs.inst.l);
-	kfree(gr->ctx_vars.ucode.fecs.data.l);
-	kfree(gr->ctx_vars.ucode.gpccs.inst.l);
-	kfree(gr->ctx_vars.ucode.gpccs.data.l);
-	kfree(gr->ctx_vars.sw_bundle_init.l);
-	kfree(gr->ctx_vars.sw_veid_bundle_init.l);
-	kfree(gr->ctx_vars.sw_method_init.l);
-	kfree(gr->ctx_vars.sw_ctx_load.l);
-	kfree(gr->ctx_vars.sw_non_ctx_load.l);
-	kfree(gr->ctx_vars.ctxsw_regs.sys.l);
-	kfree(gr->ctx_vars.ctxsw_regs.gpc.l);
-	kfree(gr->ctx_vars.ctxsw_regs.tpc.l);
-	kfree(gr->ctx_vars.ctxsw_regs.zcull_gpc.l);
-	kfree(gr->ctx_vars.ctxsw_regs.ppc.l);
-	kfree(gr->ctx_vars.ctxsw_regs.pm_sys.l);
-	kfree(gr->ctx_vars.ctxsw_regs.pm_gpc.l);
-	kfree(gr->ctx_vars.ctxsw_regs.pm_tpc.l);
-	kfree(gr->ctx_vars.ctxsw_regs.pm_ppc.l);
-	kfree(gr->ctx_vars.ctxsw_regs.perf_sys.l);
-	kfree(gr->ctx_vars.ctxsw_regs.fbp.l);
-	kfree(gr->ctx_vars.ctxsw_regs.perf_gpc.l);
-	kfree(gr->ctx_vars.ctxsw_regs.fbp_router.l);
-	kfree(gr->ctx_vars.ctxsw_regs.gpc_router.l);
-	kfree(gr->ctx_vars.ctxsw_regs.pm_ltc.l);
-	kfree(gr->ctx_vars.ctxsw_regs.pm_fbpa.l);
+	nvgpu_kfree(g, gr->ctx_vars.ucode.fecs.inst.l);
+	nvgpu_kfree(g, gr->ctx_vars.ucode.fecs.data.l);
+	nvgpu_kfree(g, gr->ctx_vars.ucode.gpccs.inst.l);
+	nvgpu_kfree(g, gr->ctx_vars.ucode.gpccs.data.l);
+	nvgpu_kfree(g, gr->ctx_vars.sw_bundle_init.l);
+	nvgpu_kfree(g, gr->ctx_vars.sw_veid_bundle_init.l);
+	nvgpu_kfree(g, gr->ctx_vars.sw_method_init.l);
+	nvgpu_kfree(g, gr->ctx_vars.sw_ctx_load.l);
+	nvgpu_kfree(g, gr->ctx_vars.sw_non_ctx_load.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.sys.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.gpc.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.tpc.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.zcull_gpc.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.ppc.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_sys.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_gpc.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_tpc.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_ppc.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.perf_sys.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.fbp.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.perf_gpc.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.fbp_router.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.gpc_router.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_ltc.l);
+	nvgpu_kfree(g, gr->ctx_vars.ctxsw_regs.pm_fbpa.l);
 
 	vfree(gr->ctx_vars.local_golden_image);
 	gr->ctx_vars.local_golden_image = NULL;
@@ -3464,7 +3463,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 	gr->fbp_en_mask = g->ops.gr.get_fbp_en_mask(g);
 
 	gr->fbp_rop_l2_en_mask =
-		kzalloc(gr->max_fbps_count * sizeof(u32), GFP_KERNEL);
+		nvgpu_kzalloc(g, gr->max_fbps_count * sizeof(u32));
 	if (!gr->fbp_rop_l2_en_mask)
 		goto clean_up;
 
@@ -3491,14 +3490,14 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 		goto clean_up;
 	}
 
-	gr->gpc_tpc_count = kzalloc(gr->gpc_count * sizeof(u32), GFP_KERNEL);
-	gr->gpc_tpc_mask = kzalloc(gr->gpc_count * sizeof(u32), GFP_KERNEL);
-	gr->gpc_zcb_count = kzalloc(gr->gpc_count * sizeof(u32), GFP_KERNEL);
-	gr->gpc_ppc_count = kzalloc(gr->gpc_count * sizeof(u32), GFP_KERNEL);
+	gr->gpc_tpc_count = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
+	gr->gpc_tpc_mask = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
+	gr->gpc_zcb_count = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
+	gr->gpc_ppc_count = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
 
 	gr->gpc_skip_mask =
-		kzalloc(gr_pd_dist_skip_table__size_1_v() * 4 * sizeof(u32),
-			GFP_KERNEL);
+		nvgpu_kzalloc(g, gr_pd_dist_skip_table__size_1_v() *
+			      4 * sizeof(u32));
 
 	if (!gr->gpc_tpc_count || !gr->gpc_tpc_mask || !gr->gpc_zcb_count ||
 	    !gr->gpc_ppc_count || !gr->gpc_skip_mask)
@@ -3526,11 +3525,11 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 	for (pes_index = 0; pes_index < gr->pe_count_per_gpc; pes_index++) {
 		if (!gr->pes_tpc_count[pes_index]) {
 			gr->pes_tpc_count[pes_index] =
-				kzalloc(gr->gpc_count * sizeof(u32),
-					GFP_KERNEL);
+				nvgpu_kzalloc(g, gr->gpc_count *
+					      sizeof(u32));
 			gr->pes_tpc_mask[pes_index] =
-				kzalloc(gr->gpc_count * sizeof(u32),
-					GFP_KERNEL);
+				nvgpu_kzalloc(g, gr->gpc_count *
+					      sizeof(u32));
 			if (!gr->pes_tpc_count[pes_index] ||
 			    !gr->pes_tpc_mask[pes_index])
 				goto clean_up;
@@ -3585,8 +3584,8 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 		gr->gpc_skip_mask[gpc_index] = gpc_new_skip_mask;
 	}
 
-	gr->sm_to_cluster = kzalloc(gr->gpc_count * gr->tpc_count *
-			sizeof(struct sm_info), GFP_KERNEL);
+	gr->sm_to_cluster = nvgpu_kzalloc(g, gr->gpc_count * gr->tpc_count *
+			sizeof(struct sm_info));
 	gr->no_of_sm = 0;
 
 	gk20a_dbg_info("fbps: %d", gr->num_fbps);
@@ -3696,14 +3695,13 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
 	int num_tpc_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_TPC_PER_GPC);
 	int map_tile_count = num_gpcs * num_tpc_per_gpc;
 
-	init_frac = kzalloc(num_gpcs * sizeof(s32), GFP_KERNEL);
-	init_err = kzalloc(num_gpcs * sizeof(s32), GFP_KERNEL);
-	run_err = kzalloc(num_gpcs * sizeof(s32), GFP_KERNEL);
+	init_frac = nvgpu_kzalloc(g, num_gpcs * sizeof(s32));
+	init_err = nvgpu_kzalloc(g, num_gpcs * sizeof(s32));
+	run_err = nvgpu_kzalloc(g, num_gpcs * sizeof(s32));
 	sorted_num_tpcs =
-		kzalloc(num_gpcs * num_tpc_per_gpc * sizeof(s32),
-			GFP_KERNEL);
+		nvgpu_kzalloc(g, num_gpcs * num_tpc_per_gpc * sizeof(s32));
 	sorted_to_unsorted_gpc_map =
-		kzalloc(num_gpcs * sizeof(s32), GFP_KERNEL);
+		nvgpu_kzalloc(g, num_gpcs * sizeof(s32));
 
 	if (!(init_frac && init_err && run_err && sorted_num_tpcs &&
 	      sorted_to_unsorted_gpc_map)) {
@@ -3764,15 +3762,14 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
 		}
 
 		if (delete_map) {
-			kfree(gr->map_tiles);
+			nvgpu_kfree(g, gr->map_tiles);
 			gr->map_tiles = NULL;
 			gr->map_tile_count = 0;
 		}
 	}
 
 	if (gr->map_tiles == NULL) {
-		gr->map_tiles = kzalloc(map_tile_count * sizeof(u8),
-					GFP_KERNEL);
+		gr->map_tiles = nvgpu_kzalloc(g, num_gpcs * sizeof(u8));
 		if (gr->map_tiles == NULL) {
 			ret = -ENOMEM;
 			goto clean_up;
@@ -3838,11 +3835,11 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
 	}
 
 clean_up:
-	kfree(init_frac);
-	kfree(init_err);
-	kfree(run_err);
-	kfree(sorted_num_tpcs);
-	kfree(sorted_to_unsorted_gpc_map);
+	nvgpu_kfree(g, init_frac);
+	nvgpu_kfree(g, init_err);
+	nvgpu_kfree(g, run_err);
+	nvgpu_kfree(g, sorted_num_tpcs);
+	nvgpu_kfree(g, sorted_to_unsorted_gpc_map);
 
 	if (ret)
 		gk20a_err(dev_from_gk20a(g), "fail");
@@ -4588,20 +4585,20 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
 		/* Total 8 fields per map reg i.e. tile_0 to tile_7*/
 		zcull_alloc_num += (zcull_alloc_num % 8);
 	}
-	zcull_map_tiles = kzalloc(zcull_alloc_num *
-			sizeof(u32), GFP_KERNEL);
+	zcull_map_tiles = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32));
+
 	if (!zcull_map_tiles) {
 		gk20a_err(dev_from_gk20a(g),
 			"failed to allocate zcull map titles");
 		return -ENOMEM;
 	}
-	zcull_bank_counters = kzalloc(zcull_alloc_num *
-			sizeof(u32), GFP_KERNEL);
+
+	zcull_bank_counters = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32));
 
 	if (!zcull_bank_counters) {
 		gk20a_err(dev_from_gk20a(g),
 			"failed to allocate zcull bank counters");
-		kfree(zcull_map_tiles);
+		nvgpu_kfree(g, zcull_map_tiles);
 		return -ENOMEM;
 	}
 
@@ -4616,8 +4613,8 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
 		g->ops.gr.program_zcull_mapping(g, zcull_alloc_num,
 			zcull_map_tiles);
 
-	kfree(zcull_map_tiles);
-	kfree(zcull_bank_counters);
+	nvgpu_kfree(g, zcull_map_tiles);
+	nvgpu_kfree(g, zcull_bank_counters);
 
 	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
 		gpc_tpc_count = gr->gpc_tpc_count[gpc_index];
@@ -4891,14 +4888,14 @@ restore_fe_go_idle:
 	if (err)
 		goto out;
 
-	kfree(gr->sm_error_states);
+	nvgpu_kfree(g, gr->sm_error_states);
 
 	/* we need to allocate this after g->ops.gr.init_fs_state() since
 	 * we initialize gr->no_of_sm in this function
 	 */
-	gr->sm_error_states = kzalloc(
-		sizeof(struct nvgpu_dbg_gpu_sm_error_state_record)
-		* gr->no_of_sm, GFP_KERNEL);
+	gr->sm_error_states = nvgpu_kzalloc(g,
+		sizeof(struct nvgpu_dbg_gpu_sm_error_state_record) *
+		gr->no_of_sm);
 	if (!gr->sm_error_states) {
 		err = -ENOMEM;
 		goto restore_fe_go_idle;
@@ -6945,7 +6942,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
 	if (!g->gr.ctx_vars.golden_image_initialized)
 		return -ENODEV;
 
-	priv_registers = kzalloc(sizeof(u32) * potential_offsets, GFP_KERNEL);
+	priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets);
 	if (!priv_registers) {
 		gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets);
 		err = PTR_ERR(priv_registers);
@@ -6991,7 +6988,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
 	*num_offsets = num_registers;
 cleanup:
 	if (!IS_ERR_OR_NULL(priv_registers))
-		kfree(priv_registers);
+		nvgpu_kfree(g, priv_registers);
 
 	return err;
 }
@@ -7019,7 +7016,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
 	if (!g->gr.ctx_vars.golden_image_initialized)
 		return -ENODEV;
 
-	priv_registers = kzalloc(sizeof(u32) * potential_offsets, GFP_KERNEL);
+	priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets);
 	if (ZERO_OR_NULL_PTR(priv_registers)) {
 		gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets);
 		return -ENOMEM;
@@ -7060,7 +7057,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g,
 
 	*num_offsets = num_registers;
 cleanup:
-	kfree(priv_registers);
+	nvgpu_kfree(g, priv_registers);
 
 	return err;
 }
@@ -8352,7 +8349,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 	}
 
 	/* they're the same size, so just use one alloc for both */
-	offsets = kzalloc(2 * sizeof(u32) * max_offsets, GFP_KERNEL);
+	offsets = nvgpu_kzalloc(g, 2 * sizeof(u32) * max_offsets);
 	if (!offsets) {
 		err = -ENOMEM;
 		goto cleanup;
@@ -8502,7 +8499,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 
 cleanup:
 	if (offsets)
-		kfree(offsets);
+		nvgpu_kfree(g, offsets);
 
 	if (ch_ctx->patch_ctx.mem.cpu_va)
 		gr_gk20a_ctx_patch_write_end(g, ch_ctx);
@@ -9025,7 +9022,7 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
 	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
 	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
 
-	ops = kcalloc(g->gr.no_of_sm, sizeof(*ops), GFP_KERNEL);
+	ops = nvgpu_kcalloc(g, g->gr.no_of_sm, sizeof(*ops));
 	if (!ops)
 		return -ENOMEM;
 	for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) {
@@ -9068,7 +9065,7 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
 	err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0);
 	if (err)
 		gk20a_err(dev_from_gk20a(g), "Failed to access register\n");
-	kfree(ops);
+	nvgpu_kfree(g, ops);
 	return err;
 }
 