Diffstat (limited to 'drivers/gpu/nvgpu')

 -rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c      | 488
 -rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h  |  36
 2 files changed, 269 insertions(+), 255 deletions(-)
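Note on the pattern: every hunk below makes the same mechanical, MISRA-C-style cleanup. Implicit boolean tests of pointers and integers become explicit comparisons (!ptr -> ptr == NULL, if (err) -> if (err != 0)), and integer literals used in unsigned arithmetic gain U suffixes. This summary is inferred from the hunks themselves; the commit message is not part of this page. A minimal compilable sketch of the before/after shape, using hypothetical names (struct item, check()) that do not exist in nvgpu:

	/* Hypothetical illustration only: 'struct item' and check() are
	 * invented for this note and are not nvgpu identifiers. */
	#include <stddef.h>

	struct item {
		unsigned int count;
	};

	static int check(const struct item *it)
	{
		/* Before: if (!it)         After: explicit NULL test */
		if (it == NULL) {
			return -1;
		}
		/* Before: if (it->count)   After: explicit compare; the U
		 * suffix keeps the comparison in unsigned arithmetic. */
		if (it->count != 0U) {
			return 1;
		}
		return 0;
	}

Both forms compile to the same code; the explicit version only removes the implicit conversion to a boolean.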
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index a9a87a54..4a460d02 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -102,7 +102,7 @@ int gr_gk20a_get_ctx_id(struct gk20a *g,
 	struct nvgpu_mem *mem = NULL;
 
 	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg) {
+	if (tsg == NULL) {
 		return -EINVAL;
 	}
 
@@ -268,7 +268,7 @@ static void gr_gk20a_load_falcon_imem(struct gk20a *g)
 	ucode_u32_data = (const u32 *)g->gr.ctx_vars.ucode.gpccs.inst.l;
 
 	for (i = 0, checksum = 0; i < ucode_u32_size; i++) {
-		if (i && ((i % (256/sizeof(u32))) == 0)) {
+		if ((i != 0U) && ((i % (256U/sizeof(u32))) == 0U)) {
 			tag++;
 			gk20a_writel(g, gr_gpccs_imemt_r(0),
 					gr_gpccs_imemt_tag_f(tag));
@@ -277,12 +277,12 @@ static void gr_gk20a_load_falcon_imem(struct gk20a *g)
 		checksum += ucode_u32_data[i];
 	}
 
-	pad_start = i*4;
-	pad_end = pad_start+(256-pad_start%256)+256;
+	pad_start = i * 4U;
+	pad_end = pad_start + (256U - pad_start % 256U) + 256U;
 	for (i = pad_start;
-	     (i < gpccs_imem_size * 256) && (i < pad_end);
-	     i += 4) {
-		if (i && ((i % 256) == 0)) {
+	     (i < gpccs_imem_size * 256U) && (i < pad_end);
+	     i += 4U) {
+		if ((i != 0U) && ((i % 256U) == 0U)) {
 			tag++;
 			gk20a_writel(g, gr_gpccs_imemt_r(0),
 					gr_gpccs_imemt_tag_f(tag));
@@ -302,7 +302,7 @@ static void gr_gk20a_load_falcon_imem(struct gk20a *g)
 	ucode_u32_data = (const u32 *)g->gr.ctx_vars.ucode.fecs.inst.l;
 
 	for (i = 0, checksum = 0; i < ucode_u32_size; i++) {
-		if (i && ((i % (256/sizeof(u32))) == 0)) {
+		if ((i != 0U) && ((i % (256U/sizeof(u32))) == 0U)) {
 			tag++;
 			gk20a_writel(g, gr_fecs_imemt_r(0),
 					gr_fecs_imemt_tag_f(tag));
@@ -311,10 +311,12 @@ static void gr_gk20a_load_falcon_imem(struct gk20a *g)
 		checksum += ucode_u32_data[i];
 	}
 
-	pad_start = i*4;
-	pad_end = pad_start+(256-pad_start%256)+256;
-	for (i = pad_start; (i < fecs_imem_size * 256) && i < pad_end; i += 4) {
-		if (i && ((i % 256) == 0)) {
+	pad_start = i * 4U;
+	pad_end = pad_start + (256U - pad_start % 256U) + 256U;
+	for (i = pad_start;
+	     (i < fecs_imem_size * 256U) && i < pad_end;
+	     i += 4U) {
+		if ((i != 0U) && ((i % 256U) == 0U)) {
 			tag++;
 			gk20a_writel(g, gr_fecs_imemt_r(0),
 					gr_fecs_imemt_tag_f(tag));
@@ -366,7 +368,7 @@ int gr_gk20a_wait_idle(struct gk20a *g, unsigned long duration_ms,
 		nvgpu_usleep_range(delay, delay * 2);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
 
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 
 	nvgpu_err(g,
 		"timeout, ctxsw busy : %d, gr busy : %d",
@@ -393,14 +395,14 @@ int gr_gk20a_wait_fe_idle(struct gk20a *g, unsigned long duration_ms,
 	do {
 		val = gk20a_readl(g, gr_status_r());
 
-		if (!gr_status_fe_method_lower_v(val)) {
+		if (gr_status_fe_method_lower_v(val) == 0U) {
 			nvgpu_log_fn(g, "done");
 			return 0;
 		}
 
 		nvgpu_usleep_range(delay, delay * 2);
 		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
-	} while (!nvgpu_timeout_expired(&timeout));
+	} while (nvgpu_timeout_expired(&timeout) == 0);
 
 	nvgpu_err(g,
 		"timeout, fe busy : %x", val);
@@ -832,7 +834,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg) {
+	if (tsg == NULL) {
 		return -EINVAL;
 	}
 
@@ -902,7 +904,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg) {
+	if (tsg == NULL) {
 		return -EINVAL;
 	}
 
@@ -910,7 +912,7 @@ int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
 	if (patch) {
 		int err;
 		err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false);
-		if (err) {
+		if (err != 0) {
 			return err;
 		}
 	}
@@ -1032,7 +1034,7 @@ int gr_gk20a_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr)
 	u32 coeff5_mod, coeff6_mod, coeff7_mod, coeff8_mod, coeff9_mod, coeff10_mod, coeff11_mod;
 	u32 map0, map1, map2, map3, map4, map5;
 
-	if (!gr->map_tiles) {
+	if (gr->map_tiles == NULL) {
 		return -1;
 	}
 
@@ -1225,7 +1227,7 @@ int gr_gk20a_init_fs_state(struct gk20a *g)
 
 	if (g->ops.gr.init_sm_id_table) {
 		err = g->ops.gr.init_sm_id_table(g);
-		if (err) {
+		if (err != 0) {
 			return err;
 		}
 
@@ -1275,15 +1277,15 @@ int gr_gk20a_init_fs_state(struct gk20a *g)
 		gpc_index += 4) {
 
 		gk20a_writel(g, gr_pd_dist_skip_table_r(gpc_index/4),
-			gr_pd_dist_skip_table_gpc_4n0_mask_f(gr->gpc_skip_mask[gpc_index]) ||
-			gr_pd_dist_skip_table_gpc_4n1_mask_f(gr->gpc_skip_mask[gpc_index + 1]) ||
-			gr_pd_dist_skip_table_gpc_4n2_mask_f(gr->gpc_skip_mask[gpc_index + 2]) ||
-			gr_pd_dist_skip_table_gpc_4n3_mask_f(gr->gpc_skip_mask[gpc_index + 3]));
+			(gr_pd_dist_skip_table_gpc_4n0_mask_f(gr->gpc_skip_mask[gpc_index]) != 0U) ||
+			(gr_pd_dist_skip_table_gpc_4n1_mask_f(gr->gpc_skip_mask[gpc_index + 1]) != 0U) ||
+			(gr_pd_dist_skip_table_gpc_4n2_mask_f(gr->gpc_skip_mask[gpc_index + 2]) != 0U) ||
+			(gr_pd_dist_skip_table_gpc_4n3_mask_f(gr->gpc_skip_mask[gpc_index + 3]) != 0U));
 	}
 
 	fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, 0);
-	if (g->tpc_fs_mask_user &&
-	    fuse_tpc_mask == (0x1U << gr->max_tpc_count) - 1U) {
+	if ((g->tpc_fs_mask_user != 0U) &&
+	    (fuse_tpc_mask == BIT32(gr->max_tpc_count) - 1U)) {
 		u32 val = g->tpc_fs_mask_user;
 		val &= (0x1U << gr->max_tpc_count) - 1U;
 		gk20a_writel(g, gr_cwd_fs_r(),
@@ -1358,28 +1360,28 @@ u32 gk20a_init_sw_bundle(struct gk20a *g)
 			err = gr_gk20a_wait_idle(g,
 				gk20a_get_gr_idle_timeout(g),
 				GR_IDLE_CHECK_DEFAULT);
-			if (err) {
+			if (err != 0U) {
 				goto error;
 			}
 		}
 
 		err = gr_gk20a_wait_fe_idle(g, gk20a_get_gr_idle_timeout(g),
 			GR_IDLE_CHECK_DEFAULT);
-		if (err) {
+		if (err != 0U) {
 			goto error;
 		}
 	}
 
-	if (!err && g->ops.gr.init_sw_veid_bundle) {
+	if ((err == 0U) && (g->ops.gr.init_sw_veid_bundle != NULL)) {
 		err = g->ops.gr.init_sw_veid_bundle(g);
-		if (err) {
+		if (err != 0U) {
 			goto error;
 		}
 	}
 
 	if (g->ops.gr.init_sw_bundle64) {
 		err = g->ops.gr.init_sw_bundle64(g);
-		if (err) {
+		if (err != 0U) {
 			goto error;
 		}
 	}
@@ -1431,7 +1433,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg) {
+	if (tsg == NULL) {
 		return -EINVAL;
 	}
 
@@ -1461,8 +1463,8 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 				break;
 			}
 			nvgpu_udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);
-		} while (!nvgpu_timeout_expired_msg(&timeout,
-				"timeout forcing FE on"));
+		} while (nvgpu_timeout_expired_msg(&timeout,
+				"timeout forcing FE on") == 0);
 	}
 
 
@@ -1508,8 +1510,8 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 				break;
 			}
 			nvgpu_udelay(FE_PWR_MODE_TIMEOUT_DEFAULT);
-		} while (!nvgpu_timeout_expired_msg(&timeout,
-				"timeout setting FE power to auto"));
+		} while (nvgpu_timeout_expired_msg(&timeout,
+				"timeout setting FE power to auto") == 0);
 	}
 
 	/* clear scc ram */
@@ -1517,7 +1519,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 		gr_scc_init_ram_trigger_f());
 
 	err = gr_gk20a_fecs_ctx_bind_channel(g, c);
-	if (err) {
+	if (err != 0U) {
 		goto clean_up;
 	}
 
@@ -1540,7 +1542,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 
 	err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
 			GR_IDLE_CHECK_DEFAULT);
-	if (err) {
+	if (err != 0U) {
 		goto clean_up;
 	}
 
@@ -1549,7 +1551,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 		gr_fe_go_idle_timeout_count_disabled_f());
 
 	err = g->ops.gr.commit_global_ctx_buffers(g, c, false);
-	if (err) {
+	if (err != 0U) {
 		goto clean_up;
 	}
 
@@ -1558,18 +1560,18 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 
 	/* floorsweep anything left */
 	err = g->ops.gr.init_fs_state(g);
-	if (err) {
+	if (err != 0U) {
 		goto clean_up;
 	}
 
 	err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
 			GR_IDLE_CHECK_DEFAULT);
-	if (err) {
+	if (err != 0U) {
 		goto restore_fe_go_idle;
 	}
 
 	err = gk20a_init_sw_bundle(g);
-	if (err) {
+	if (err != 0U) {
 		goto clean_up;
 	}
 
@@ -1578,8 +1580,8 @@ restore_fe_go_idle:
 	gk20a_writel(g, gr_fe_go_idle_timeout_r(),
 		gr_fe_go_idle_timeout_count_prod_f());
 
-	if (err || gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
-				      GR_IDLE_CHECK_DEFAULT)) {
+	if ((err != 0U) || (gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
+				      GR_IDLE_CHECK_DEFAULT) != 0)) {
 		goto clean_up;
 	}
 
@@ -1605,7 +1607,7 @@ restore_fe_go_idle:
 
 	err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g),
 			GR_IDLE_CHECK_DEFAULT);
-	if (err) {
+	if (err != 0U) {
 		goto clean_up;
 	}
 
@@ -1624,7 +1626,7 @@ restore_fe_go_idle:
 	g->ops.gr.write_zcull_ptr(g, gold_mem, 0);
 
 	err = g->ops.gr.commit_inst(c, gr_ctx->global_ctx_buffer_va[GOLDEN_CTX_VA]);
-	if (err) {
+	if (err != 0U) {
 		goto clean_up;
 	}
 
@@ -1648,7 +1650,7 @@ restore_fe_go_idle:
 	}
 
 	err = g->ops.gr.commit_inst(c, gr_mem->gpu_va);
-	if (err) {
+	if (err != 0U) {
 		goto clean_up;
 	}
 
@@ -1658,7 +1660,7 @@ restore_fe_go_idle:
 		gr_fecs_current_ctx_valid_false_f());
 
 clean_up:
-	if (err) {
+	if (err != 0U) {
 		nvgpu_err(g, "fail");
 	} else {
 		nvgpu_log_fn(g, "done");
@@ -1681,7 +1683,7 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg) {
+	if (tsg == NULL) {
 		return -EINVAL;
 	}
 
@@ -1741,7 +1743,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg) {
+	if (tsg == NULL) {
 		return -EINVAL;
 	}
 
@@ -1754,7 +1756,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 	}
 
 	if ((mode == NVGPU_DBG_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW) &&
-		(!g->ops.gr.get_hw_accessor_stream_out_mode)) {
+		(g->ops.gr.get_hw_accessor_stream_out_mode == NULL)) {
 		nvgpu_err(g, "Mode-E hwpm context switch mode is not supported");
 		return -EINVAL;
 	}
@@ -1817,7 +1819,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 					NVGPU_VM_MAP_CACHEABLE,
 					gk20a_mem_flag_none, true,
 					pm_ctx->mem.aperture);
-			if (!pm_ctx->mem.gpu_va) {
+			if (pm_ctx->mem.gpu_va == 0ULL) {
 				nvgpu_err(g,
 					"failed to map pm ctxt buffer");
 				nvgpu_dma_free(g, &pm_ctx->mem);
@@ -1826,8 +1828,8 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 			}
 		}
 
-		if (mode == NVGPU_DBG_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW &&
-			g->ops.gr.init_hwpm_pmm_register) {
+		if ((mode == NVGPU_DBG_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW) &&
+			(g->ops.gr.init_hwpm_pmm_register != NULL)) {
 			g->ops.gr.init_hwpm_pmm_register(g);
 		}
 	}
@@ -1891,7 +1893,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg) {
+	if (tsg == NULL) {
 		return -EINVAL;
 	}
 
@@ -1913,7 +1915,7 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 		g->ops.gr.init_ctxsw_hdr_data(g, mem);
 	}
 
-	if (g->ops.gr.enable_cde_in_fecs && c->cde) {
+	if ((g->ops.gr.enable_cde_in_fecs != NULL) && c->cde) {
 		g->ops.gr.enable_cde_in_fecs(g, mem);
 	}
 
@@ -2019,7 +2021,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
 	int err;
 
 	err = g->ops.mm.alloc_inst_block(g, &ucode_info->inst_blk_desc);
-	if (err) {
+	if (err != 0) {
 		return err;
 	}
 
@@ -2033,7 +2035,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
 					gk20a_mem_flag_read_only,
 					false,
 					ucode_info->surface_desc.aperture);
-	if (!ucode_info->surface_desc.gpu_va) {
+	if (ucode_info->surface_desc.gpu_va == 0ULL) {
 		nvgpu_err(g, "failed to update gmmu ptes");
 		return -ENOMEM;
 	}
@@ -2102,7 +2104,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 	int err = 0;
 
 	fecs_fw = nvgpu_request_firmware(g, GK20A_FECS_UCODE_IMAGE, 0);
-	if (!fecs_fw) {
+	if (fecs_fw == NULL) {
 		nvgpu_err(g, "failed to load fecs ucode!!");
 		return -ENOENT;
 	}
@@ -2112,7 +2114,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 		sizeof(struct gk20a_ctxsw_bootloader_desc));
 
 	gpccs_fw = nvgpu_request_firmware(g, GK20A_GPCCS_UCODE_IMAGE, 0);
-	if (!gpccs_fw) {
+	if (gpccs_fw == NULL) {
 		nvgpu_release_firmware(g, fecs_fw);
 		nvgpu_err(g, "failed to load gpccs ucode!!");
 		return -ENOENT;
@@ -2133,7 +2135,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 		g->gr.ctx_vars.ucode.gpccs.data.count * sizeof(u32));
 
 	err = nvgpu_dma_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
-	if (err) {
+	if (err != 0) {
 		goto clean_up;
 	}
 
@@ -2156,7 +2158,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 	gpccs_fw = NULL;
 
 	err = gr_gk20a_init_ctxsw_ucode_vaspace(g);
-	if (err) {
+	if (err != 0) {
 		goto clean_up;
 	}
 
@@ -2183,24 +2185,25 @@ static void gr_gk20a_wait_for_fecs_arb_idle(struct gk20a *g)
 	u32 val;
 
 	val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
-	while (gr_fecs_arb_ctx_cmd_cmd_v(val) && retries) {
+	while ((gr_fecs_arb_ctx_cmd_cmd_v(val) != 0U) && (retries != 0)) {
 		nvgpu_udelay(FECS_ARB_CMD_TIMEOUT_DEFAULT);
 		retries--;
 		val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
 	}
 
-	if (!retries) {
+	if (retries == 0) {
 		nvgpu_err(g, "arbiter cmd timeout, fecs arb ctx cmd: 0x%08x",
 			gk20a_readl(g, gr_fecs_arb_ctx_cmd_r()));
 	}
 
 	retries = FECS_ARB_CMD_TIMEOUT_MAX / FECS_ARB_CMD_TIMEOUT_DEFAULT;
-	while ((gk20a_readl(g, gr_fecs_ctxsw_status_1_r()) &
-			gr_fecs_ctxsw_status_1_arb_busy_m()) && retries) {
+	while (((gk20a_readl(g, gr_fecs_ctxsw_status_1_r()) &
+			gr_fecs_ctxsw_status_1_arb_busy_m()) != 0U) &&
+		(retries != 0)) {
 		nvgpu_udelay(FECS_ARB_CMD_TIMEOUT_DEFAULT);
 		retries--;
 	}
-	if (!retries) {
+	if (retries == 0) {
 		nvgpu_err(g,
 			"arbiter idle timeout, fecs ctxsw status: 0x%08x",
 			gk20a_readl(g, gr_fecs_ctxsw_status_1_r()));
@@ -2213,12 +2216,13 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
 	int retries = FECS_ARB_CMD_TIMEOUT_MAX / FECS_ARB_CMD_TIMEOUT_DEFAULT;
 	u64 inst_ptr;
 
-	while ((gk20a_readl(g, gr_fecs_ctxsw_status_1_r()) &
-			gr_fecs_ctxsw_status_1_arb_busy_m()) && retries) {
+	while (((gk20a_readl(g, gr_fecs_ctxsw_status_1_r()) &
+			gr_fecs_ctxsw_status_1_arb_busy_m()) != 0U) &&
+		(retries != 0)) {
 		nvgpu_udelay(FECS_ARB_CMD_TIMEOUT_DEFAULT);
 		retries--;
 	}
-	if (!retries) {
+	if (retries == 0) {
 		nvgpu_err(g,
 			"arbiter idle timeout, status: %08x",
 			gk20a_readl(g, gr_fecs_ctxsw_status_1_r()));
@@ -2425,7 +2429,7 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g)
 	if (!g->gr.skip_ucode_init) {
 		err = gr_gk20a_init_ctxsw_ucode(g);
 
-		if (err) {
+		if (err != 0) {
 			return err;
 		}
 	}
@@ -2521,7 +2525,7 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
 void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
 				 struct gr_ctx_buffer_desc *desc)
 {
-	if (!desc) {
+	if (desc == NULL) {
 		return;
 	}
 	nvgpu_dma_free(g, &desc->mem);
@@ -2541,7 +2545,7 @@ int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
 	}
 
 	err = nvgpu_dma_alloc_sys(g, size, &desc->mem);
-	if (err) {
+	if (err != 0) {
 		return err;
 	}
 
@@ -2585,7 +2589,7 @@ int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 
 	err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR],
 					cb_buffer_size);
-	if (err) {
+	if (err != 0) {
 		goto clean_up;
 	}
 
@@ -2593,7 +2597,7 @@ int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 		err = g->ops.secure_alloc(g,
 				&gr->global_ctx_buffer[CIRCULAR_VPR],
 				cb_buffer_size);
-		if (err) {
+		if (err != 0) {
 			goto clean_up;
 		}
 	}
@@ -2602,7 +2606,7 @@ int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 
 	err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL],
 					pagepool_buffer_size);
-	if (err) {
+	if (err != 0) {
 		goto clean_up;
 	}
 
@@ -2610,7 +2614,7 @@ int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 		err = g->ops.secure_alloc(g,
 				&gr->global_ctx_buffer[PAGEPOOL_VPR],
 				pagepool_buffer_size);
-		if (err) {
+		if (err != 0) {
 			goto clean_up;
 		}
 	}
@@ -2619,7 +2623,7 @@ int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 
 	err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE],
 					attr_buffer_size);
-	if (err) {
+	if (err != 0) {
 		goto clean_up;
 	}
 
@@ -2627,7 +2631,7 @@ int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 		err = g->ops.secure_alloc(g,
 				&gr->global_ctx_buffer[ATTRIBUTE_VPR],
 				attr_buffer_size);
-		if (err) {
+		if (err != 0) {
 			goto clean_up;
 		}
 	}
@@ -2638,7 +2642,7 @@ int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 	err = gk20a_gr_alloc_ctx_buffer(g,
 					&gr->global_ctx_buffer[GOLDEN_CTX],
 					gr->ctx_vars.golden_image_size);
-	if (err) {
+	if (err != 0) {
 		goto clean_up;
 	}
 
@@ -2649,7 +2653,7 @@ int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 					&gr->global_ctx_buffer[PRIV_ACCESS_MAP],
 					gr->ctx_vars.priv_access_map_size);
 
-	if (err) {
+	if (err != 0) {
 		goto clean_up;
 	}
 
@@ -2660,8 +2664,9 @@ int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 		err = nvgpu_dma_alloc_sys(g,
 				gr->ctx_vars.fecs_trace_buffer_size,
 				&gr->global_ctx_buffer[FECS_TRACE_BUFFER].mem);
-		if (err)
+		if (err != 0) {
 			goto clean_up;
+		}
 
 		gr->global_ctx_buffer[FECS_TRACE_BUFFER].destroy =
 			gk20a_gr_destroy_ctx_buffer;
@@ -2724,7 +2729,7 @@ int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg) {
+	if (tsg == NULL) {
 		return -EINVAL;
 	}
 
@@ -2745,7 +2750,7 @@ int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
 				NVGPU_VM_MAP_CACHEABLE,
 				gk20a_mem_flag_none, true, mem->aperture);
-	if (!gpu_va) {
+	if (gpu_va == 0ULL) {
 		goto clean_up;
 	}
 	g_bfr_va[CIRCULAR_VA] = gpu_va;
@@ -2764,7 +2769,7 @@ int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
 				NVGPU_VM_MAP_CACHEABLE,
 				gk20a_mem_flag_none, false, mem->aperture);
-	if (!gpu_va) {
+	if (gpu_va == 0ULL) {
 		goto clean_up;
 	}
 	g_bfr_va[ATTRIBUTE_VA] = gpu_va;
@@ -2783,7 +2788,7 @@ int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
 				NVGPU_VM_MAP_CACHEABLE,
 				gk20a_mem_flag_none, true, mem->aperture);
-	if (!gpu_va) {
+	if (gpu_va == 0ULL) {
 		goto clean_up;
 	}
 	g_bfr_va[PAGEPOOL_VA] = gpu_va;
@@ -2793,7 +2798,7 @@ int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	mem = &gr->global_ctx_buffer[GOLDEN_CTX].mem;
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 0,
 				gk20a_mem_flag_none, true, mem->aperture);
-	if (!gpu_va) {
+	if (gpu_va == 0ULL) {
 		goto clean_up;
 	}
 	g_bfr_va[GOLDEN_CTX_VA] = gpu_va;
@@ -2804,7 +2809,7 @@ int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	mem = &gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem;
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, 0,
 				gk20a_mem_flag_none, true, mem->aperture);
-	if (!gpu_va) {
+	if (gpu_va == 0ULL) {
 		goto clean_up;
 	}
 	g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
@@ -2854,7 +2859,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size;
 
 	err = nvgpu_dma_alloc(g, gr->ctx_vars.buffer_total_size, &gr_ctx->mem);
-	if (err) {
+	if (err != 0) {
 		return err;
 	}
 
@@ -2864,7 +2869,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 					0, /* not GPU-cacheable */
 					gk20a_mem_flag_none, true,
 					gr_ctx->mem.aperture);
-	if (!gr_ctx->mem.gpu_va) {
+	if (gr_ctx->mem.gpu_va == 0ULL) {
 		goto err_free_mem;
 	}
 
@@ -2882,13 +2887,13 @@ static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g,
 	struct nvgpu_gr_ctx *gr_ctx = &tsg->gr_ctx;
 	int err;
 
-	if (!tsg->vm) {
+	if (tsg->vm == NULL) {
 		nvgpu_err(tsg->g, "No address space bound");
 		return -ENOMEM;
 	}
 
 	err = g->ops.gr.alloc_gr_ctx(g, gr_ctx, tsg->vm, class, padding);
-	if (err) {
+	if (err != 0) {
 		return err;
 	}
 
@@ -2907,8 +2912,8 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 	gr_gk20a_free_channel_patch_ctx(g, vm, gr_ctx);
 	gr_gk20a_free_channel_pm_ctx(g, vm, gr_ctx);
 
-	if (g->ops.gr.dump_ctxsw_stats &&
+	if ((g->ops.gr.dump_ctxsw_stats != NULL) &&
 	    g->gr.ctx_vars.dump_ctxsw_stats_on_channel_close) {
 		g->ops.gr.dump_ctxsw_stats(g, vm, gr_ctx);
 	}
 
@@ -2926,7 +2931,7 @@ void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg)
 {
 	struct gk20a *g = tsg->g;
 
-	if (!tsg->vm) {
+	if (tsg->vm == NULL) {
 		nvgpu_err(g, "No address space bound");
 		return;
 	}
@@ -2950,7 +2955,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg) {
+	if (tsg == NULL) {
 		return -EINVAL;
 	}
 
@@ -2963,7 +2968,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
 
 	err = nvgpu_dma_alloc_map_sys(ch_vm,
 			alloc_size * sizeof(u32), &patch_ctx->mem);
-	if (err) {
+	if (err != 0) {
 		return err;
 	}
 
@@ -3014,7 +3019,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	nvgpu_log_fn(g, " ");
 
 	/* an address space needs to have been bound at this point.*/
-	if (!gk20a_channel_as_bound(c) && !c->vm) {
+	if (!gk20a_channel_as_bound(c) && (c->vm == NULL)) {
 		nvgpu_err(g,
 			   "not bound to address space at time"
 			   " of grctx allocation");
@@ -3042,7 +3047,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 		err = gr_gk20a_alloc_tsg_gr_ctx(g, tsg,
 						class_num,
 						flags);
-		if (err) {
+		if (err != 0) {
 			nvgpu_err(g,
 				"fail to allocate TSG gr ctx buffer");
 			nvgpu_vm_put(tsg->vm);
@@ -3054,7 +3059,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 		if (!nvgpu_mem_is_valid(&gr_ctx->patch_ctx.mem)) {
 			gr_ctx->patch_ctx.data_count = 0;
 			err = gr_gk20a_alloc_channel_patch_ctx(g, c);
-			if (err) {
+			if (err != 0) {
 				nvgpu_err(g,
 					"fail to allocate patch buffer");
 				goto out;
@@ -3063,7 +3068,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 
 		/* map global buffer to channel gpu_va and commit */
 		err = g->ops.gr.map_global_ctx_buffers(g, c);
-		if (err) {
+		if (err != 0) {
 			nvgpu_err(g,
 				"fail to map global ctx buffer");
 			goto out;
@@ -3072,7 +3077,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 
 		/* commit gr ctx buffer */
 		err = g->ops.gr.commit_inst(c, gr_ctx->mem.gpu_va);
-		if (err) {
+		if (err != 0) {
 			nvgpu_err(g,
 				"fail to commit gr ctx buffer");
 			goto out;
@@ -3080,7 +3085,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 
 		/* init golden image, ELPG enabled after this is done */
 		err = gr_gk20a_init_golden_ctx_image(g, c);
-		if (err) {
+		if (err != 0) {
 			nvgpu_err(g,
 				"fail to init golden ctx image");
 			goto out;
@@ -3088,7 +3093,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 
 		/* load golden image */
 		gr_gk20a_load_golden_ctx_image(g, c);
-		if (err) {
+		if (err != 0) {
 			nvgpu_err(g,
 				"fail to load golden ctx image");
 			goto out;
@@ -3096,9 +3101,10 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 #ifdef CONFIG_GK20A_CTXSW_TRACE
 		if (g->ops.fecs_trace.bind_channel && !c->vpr) {
 			err = g->ops.fecs_trace.bind_channel(g, c);
-			if (err)
+			if (err != 0) {
 				nvgpu_warn(g,
 					"fail to bind channel for ctxsw trace");
+			}
 		}
 #endif
 
@@ -3111,7 +3117,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	} else {
 		/* commit gr ctx buffer */
 		err = g->ops.gr.commit_inst(c, gr_ctx->mem.gpu_va);
-		if (err) {
+		if (err != 0) {
 			nvgpu_err(g,
 				"fail to commit gr ctx buffer");
 			goto out;
@@ -3119,9 +3125,10 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 #ifdef CONFIG_GK20A_CTXSW_TRACE
 		if (g->ops.fecs_trace.bind_channel && !c->vpr) {
 			err = g->ops.fecs_trace.bind_channel(g, c);
-			if (err)
+			if (err != 0) {
 				nvgpu_warn(g,
 					"fail to bind channel for ctxsw trace");
+			}
 		}
 #endif
 	}
@@ -3240,7 +3247,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 	if (gr->fbp_rop_l2_en_mask == NULL) {
 		gr->fbp_rop_l2_en_mask =
 			nvgpu_kzalloc(g, gr->max_fbps_count * sizeof(u32));
-		if (!gr->fbp_rop_l2_en_mask) {
+		if (gr->fbp_rop_l2_en_mask == NULL) {
 			goto clean_up;
 		}
 	} else {
@@ -3267,7 +3274,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 
 	gr->max_zcull_per_gpc_count = nvgpu_get_litter_value(g, GPU_LIT_NUM_ZCULL_BANKS);
 
-	if (!gr->gpc_count) {
+	if (gr->gpc_count == 0U) {
 		nvgpu_err(g, "gpc_count==0!");
 		goto clean_up;
 	}
@@ -3313,8 +3320,9 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 					    4 * sizeof(u32));
 	}
 
-	if (!gr->gpc_tpc_count || !gr->gpc_tpc_mask || !gr->gpc_zcb_count ||
-	    !gr->gpc_ppc_count || !gr->gpc_skip_mask) {
+	if ((gr->gpc_tpc_count == NULL) || (gr->gpc_tpc_mask == NULL) ||
+	    (gr->gpc_zcb_count == NULL) || (gr->gpc_ppc_count == NULL) ||
+	    (gr->gpc_skip_mask == NULL)) {
 		goto clean_up;
 	}
 
@@ -3341,15 +3349,15 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 		gr->zcb_count += gr->gpc_zcb_count[gpc_index];
 
 		for (pes_index = 0; pes_index < gr->pe_count_per_gpc; pes_index++) {
-			if (!gr->pes_tpc_count[pes_index]) {
+			if (gr->pes_tpc_count[pes_index] == NULL) {
 				gr->pes_tpc_count[pes_index] =
 					nvgpu_kzalloc(g, gr->gpc_count *
 						      sizeof(u32));
 				gr->pes_tpc_mask[pes_index] =
 					nvgpu_kzalloc(g, gr->gpc_count *
 						      sizeof(u32));
-				if (!gr->pes_tpc_count[pes_index] ||
-				    !gr->pes_tpc_mask[pes_index]) {
+				if ((gr->pes_tpc_count[pes_index] == NULL) ||
+				    (gr->pes_tpc_mask[pes_index] == NULL)) {
 					goto clean_up;
 				}
 			}
@@ -3520,8 +3528,11 @@ static int gr_gk20a_init_map_tiles(struct gk20a *g, struct gr_gk20a *gr)
 	sorted_to_unsorted_gpc_map =
 		nvgpu_kzalloc(g, num_gpcs * sizeof(s32));
 
-	if (!(init_frac && init_err && run_err && sorted_num_tpcs &&
-	      sorted_to_unsorted_gpc_map)) {
+	if (!((init_frac != NULL) &&
+	      (init_err != NULL) &&
+	      (run_err != NULL) &&
+	      (sorted_num_tpcs != NULL) &&
+	      (sorted_to_unsorted_gpc_map != NULL))) {
 		ret = -ENOMEM;
 		goto clean_up;
 	}
@@ -3712,7 +3723,7 @@ int gr_gk20a_bind_ctxsw_zcull(struct gk20a *g, struct gr_gk20a *gr,
 	struct zcull_ctx_desc *zcull_ctx;
 
 	tsg = tsg_gk20a_from_ch(c);
-	if (!tsg) {
+	if (tsg == NULL) {
 		return -EINVAL;
 	}
 
@@ -3881,11 +3892,12 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
 
 			c_tbl = &gr->zbc_col_tbl[i];
 
-			if (c_tbl->ref_cnt && c_tbl->format == zbc_val->format &&
-			    memcmp(c_tbl->color_ds, zbc_val->color_ds,
-				   sizeof(zbc_val->color_ds)) == 0 &&
-			    memcmp(c_tbl->color_l2, zbc_val->color_l2,
-				   sizeof(zbc_val->color_l2)) == 0) {
+			if ((c_tbl->ref_cnt != 0U) &&
+			    (c_tbl->format == zbc_val->format) &&
+			    (memcmp(c_tbl->color_ds, zbc_val->color_ds,
+				    sizeof(zbc_val->color_ds)) == 0) &&
+			    (memcmp(c_tbl->color_l2, zbc_val->color_l2,
+				    sizeof(zbc_val->color_l2)) == 0)) {
 
 				added = true;
 				c_tbl->ref_cnt++;
@@ -3904,7 +3916,7 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
 			ret = g->ops.gr.add_zbc_color(g, gr,
 				zbc_val, gr->max_used_color_index);
 
-			if (!ret) {
+			if (ret == 0) {
 				gr->max_used_color_index++;
 			}
 		}
@@ -3915,9 +3927,9 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
 
 			d_tbl = &gr->zbc_dep_tbl[i];
 
-			if (d_tbl->ref_cnt &&
-			    d_tbl->depth == zbc_val->depth &&
-			    d_tbl->format == zbc_val->format) {
+			if ((d_tbl->ref_cnt != 0U) &&
+			    (d_tbl->depth == zbc_val->depth) &&
+			    (d_tbl->format == zbc_val->format)) {
 				added = true;
 				d_tbl->ref_cnt++;
 				ret = 0;
@@ -3935,7 +3947,7 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
 			ret = g->ops.gr.add_zbc_depth(g, gr,
 				zbc_val, gr->max_used_depth_index);
 
-			if (!ret) {
+			if (ret == 0) {
 				gr->max_used_depth_index++;
 			}
 		}
@@ -4352,7 +4364,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
 	u32 zcull_alloc_num = num_gpcs * num_tpc_per_gpc;
 	u32 map_tile_count;
 
-	if (!gr->map_tiles) {
+	if (gr->map_tiles == NULL) {
 		return -1;
 	}
 
@@ -4362,7 +4374,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
 	}
 	zcull_map_tiles = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32));
 
-	if (!zcull_map_tiles) {
+	if (zcull_map_tiles == NULL) {
 		nvgpu_err(g,
 			"failed to allocate zcull map titles");
 		return -ENOMEM;
@@ -4370,7 +4382,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
 
 	zcull_bank_counters = nvgpu_kzalloc(g, zcull_alloc_num * sizeof(u32));
 
-	if (!zcull_bank_counters) {
+	if (zcull_bank_counters == NULL) {
 		nvgpu_err(g,
 			"failed to allocate zcull bank counters");
 		nvgpu_kfree(g, zcull_map_tiles);
@@ -4384,7 +4396,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
 		zcull_bank_counters[map_tile_count]++;
 	}
 
-	if (g->ops.gr.program_zcull_mapping) {
+	if (g->ops.gr.program_zcull_mapping != NULL) {
 		g->ops.gr.program_zcull_mapping(g, zcull_alloc_num,
 			zcull_map_tiles);
 	}
@@ -4512,7 +4524,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 
 	gr_gk20a_zcull_init_hw(g, gr);
 
-	if (g->ops.priv_ring.set_ppriv_timeout_settings) {
+	if (g->ops.priv_ring.set_ppriv_timeout_settings != NULL) {
 		g->ops.priv_ring.set_ppriv_timeout_settings(g);
 	}
 
@@ -4570,13 +4582,13 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g) | |||
4570 | 4582 | ||
4571 | err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), | 4583 | err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), |
4572 | GR_IDLE_CHECK_DEFAULT); | 4584 | GR_IDLE_CHECK_DEFAULT); |
4573 | if (err) { | 4585 | if (err != 0U) { |
4574 | goto out; | 4586 | goto out; |
4575 | } | 4587 | } |
4576 | 4588 | ||
4577 | if (g->ops.gr.init_preemption_state) { | 4589 | if (g->ops.gr.init_preemption_state) { |
4578 | err = g->ops.gr.init_preemption_state(g); | 4590 | err = g->ops.gr.init_preemption_state(g); |
4579 | if (err) { | 4591 | if (err != 0U) { |
4580 | goto out; | 4592 | goto out; |
4581 | } | 4593 | } |
4582 | } | 4594 | } |
@@ -4590,13 +4602,13 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g) | |||
4590 | 4602 | ||
4591 | /* floorsweep anything left */ | 4603 | /* floorsweep anything left */ |
4592 | err = g->ops.gr.init_fs_state(g); | 4604 | err = g->ops.gr.init_fs_state(g); |
4593 | if (err) { | 4605 | if (err != 0U) { |
4594 | goto out; | 4606 | goto out; |
4595 | } | 4607 | } |
4596 | 4608 | ||
4597 | err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), | 4609 | err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), |
4598 | GR_IDLE_CHECK_DEFAULT); | 4610 | GR_IDLE_CHECK_DEFAULT); |
4599 | if (err) { | 4611 | if (err != 0U) { |
4600 | goto restore_fe_go_idle; | 4612 | goto restore_fe_go_idle; |
4601 | } | 4613 | } |
4602 | 4614 | ||
@@ -4605,8 +4617,8 @@ restore_fe_go_idle: | |||
4605 | gk20a_writel(g, gr_fe_go_idle_timeout_r(), | 4617 | gk20a_writel(g, gr_fe_go_idle_timeout_r(), |
4606 | gr_fe_go_idle_timeout_count_prod_f()); | 4618 | gr_fe_go_idle_timeout_count_prod_f()); |
4607 | 4619 | ||
4608 | if (err || gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), | 4620 | if ((err != 0U) || (gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), |
4609 | GR_IDLE_CHECK_DEFAULT)) { | 4621 | GR_IDLE_CHECK_DEFAULT) != 0)) { |
4610 | goto out; | 4622 | goto out; |
4611 | } | 4623 | } |
4612 | 4624 | ||
@@ -4721,7 +4733,7 @@ static int gk20a_init_gr_prepare(struct gk20a *g) | |||
4721 | 4733 | ||
4722 | if (!g->gr.ctx_vars.valid) { | 4734 | if (!g->gr.ctx_vars.valid) { |
4723 | err = gr_gk20a_init_ctx_vars(g, &g->gr); | 4735 | err = gr_gk20a_init_ctx_vars(g, &g->gr); |
4724 | if (err) { | 4736 | if (err != 0U) { |
4725 | nvgpu_err(g, | 4737 | nvgpu_err(g, |
4726 | "fail to load gr init ctx"); | 4738 | "fail to load gr init ctx"); |
4727 | } | 4739 | } |
@@ -4756,7 +4768,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g) | |||
4756 | } | 4768 | } |
4757 | 4769 | ||
4758 | nvgpu_udelay(CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT); | 4770 | nvgpu_udelay(CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT); |
4759 | } while (!nvgpu_timeout_expired(&timeout)); | 4771 | } while (nvgpu_timeout_expired(&timeout) == 0); |
4760 | 4772 | ||
4761 | nvgpu_err(g, "Falcon mem scrubbing timeout"); | 4773 | nvgpu_err(g, "Falcon mem scrubbing timeout"); |
4762 | return -ETIMEDOUT; | 4774 | return -ETIMEDOUT; |
@@ -4767,17 +4779,17 @@ static int gr_gk20a_init_ctxsw(struct gk20a *g) | |||
4767 | u32 err = 0; | 4779 | u32 err = 0; |
4768 | 4780 | ||
4769 | err = g->ops.gr.load_ctxsw_ucode(g); | 4781 | err = g->ops.gr.load_ctxsw_ucode(g); |
4770 | if (err) { | 4782 | if (err != 0U) { |
4771 | goto out; | 4783 | goto out; |
4772 | } | 4784 | } |
4773 | 4785 | ||
4774 | err = gr_gk20a_wait_ctxsw_ready(g); | 4786 | err = gr_gk20a_wait_ctxsw_ready(g); |
4775 | if (err) { | 4787 | if (err != 0U) { |
4776 | goto out; | 4788 | goto out; |
4777 | } | 4789 | } |
4778 | 4790 | ||
4779 | out: | 4791 | out: |
4780 | if (err) { | 4792 | if (err != 0U) { |
4781 | nvgpu_err(g, "fail"); | 4793 | nvgpu_err(g, "fail"); |
4782 | } else { | 4794 | } else { |
4783 | nvgpu_log_fn(g, "done"); | 4795 | nvgpu_log_fn(g, "done"); |
@@ -4804,18 +4816,18 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g) | |||
4804 | } | 4816 | } |
4805 | 4817 | ||
4806 | err = gr_gk20a_wait_mem_scrubbing(g); | 4818 | err = gr_gk20a_wait_mem_scrubbing(g); |
4807 | if (err) { | 4819 | if (err != 0U) { |
4808 | goto out; | 4820 | goto out; |
4809 | } | 4821 | } |
4810 | 4822 | ||
4811 | err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), | 4823 | err = gr_gk20a_wait_idle(g, gk20a_get_gr_idle_timeout(g), |
4812 | GR_IDLE_CHECK_DEFAULT); | 4824 | GR_IDLE_CHECK_DEFAULT); |
4813 | if (err) { | 4825 | if (err != 0U) { |
4814 | goto out; | 4826 | goto out; |
4815 | } | 4827 | } |
4816 | 4828 | ||
4817 | out: | 4829 | out: |
4818 | if (err) { | 4830 | if (err != 0U) { |
4819 | nvgpu_err(g, "fail"); | 4831 | nvgpu_err(g, "fail"); |
4820 | } else { | 4832 | } else { |
4821 | nvgpu_log_fn(g, "done"); | 4833 | nvgpu_log_fn(g, "done"); |
@@ -4878,34 +4890,34 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) | |||
4878 | #endif | 4890 | #endif |
4879 | 4891 | ||
4880 | err = gr_gk20a_init_gr_config(g, gr); | 4892 | err = gr_gk20a_init_gr_config(g, gr); |
4881 | if (err) { | 4893 | if (err != 0) { |
4882 | goto clean_up; | 4894 | goto clean_up; |
4883 | } | 4895 | } |
4884 | 4896 | ||
4885 | err = gr_gk20a_init_map_tiles(g, gr); | 4897 | err = gr_gk20a_init_map_tiles(g, gr); |
4886 | if (err) { | 4898 | if (err != 0) { |
4887 | goto clean_up; | 4899 | goto clean_up; |
4888 | } | 4900 | } |
4889 | 4901 | ||
4890 | if (g->ops.ltc.init_comptags) { | 4902 | if (g->ops.ltc.init_comptags) { |
4891 | err = g->ops.ltc.init_comptags(g, gr); | 4903 | err = g->ops.ltc.init_comptags(g, gr); |
4892 | if (err) { | 4904 | if (err != 0) { |
4893 | goto clean_up; | 4905 | goto clean_up; |
4894 | } | 4906 | } |
4895 | } | 4907 | } |
4896 | 4908 | ||
4897 | err = gr_gk20a_init_zcull(g, gr); | 4909 | err = gr_gk20a_init_zcull(g, gr); |
4898 | if (err) { | 4910 | if (err != 0) { |
4899 | goto clean_up; | 4911 | goto clean_up; |
4900 | } | 4912 | } |
4901 | 4913 | ||
4902 | err = g->ops.gr.alloc_global_ctx_buffers(g); | 4914 | err = g->ops.gr.alloc_global_ctx_buffers(g); |
4903 | if (err) { | 4915 | if (err != 0) { |
4904 | goto clean_up; | 4916 | goto clean_up; |
4905 | } | 4917 | } |
4906 | 4918 | ||
4907 | err = gr_gk20a_init_access_map(g); | 4919 | err = gr_gk20a_init_access_map(g); |
4908 | if (err) { | 4920 | if (err != 0) { |
4909 | goto clean_up; | 4921 | goto clean_up; |
4910 | } | 4922 | } |
4911 | 4923 | ||
@@ -4931,7 +4943,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g) | |||
4931 | gr->sw_ready = true; | 4943 | gr->sw_ready = true; |
4932 | 4944 | ||
4933 | err = nvgpu_ecc_init_support(g); | 4945 | err = nvgpu_ecc_init_support(g); |
4934 | if (err) { | 4946 | if (err != 0) { |
4935 | goto clean_up; | 4947 | goto clean_up; |
4936 | } | 4948 | } |
4937 | 4949 | ||
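gk20a_init_gr_setup_sw is the kernel's usual goto-unwind ladder: every allocation step is checked, and any failure jumps to a single clean_up label that releases whatever was acquired so far. The explicit comparisons leave that structure untouched. A generic sketch of the pattern (all names are placeholders, not driver symbols):

    struct ctx;
    int alloc_a(struct ctx *c);
    int alloc_b(struct ctx *c);
    void release_all(struct ctx *c);

    int setup(struct ctx *c)
    {
            int err;

            err = alloc_a(c);
            if (err != 0) {
                    goto clean_up;
            }
            err = alloc_b(c);
            if (err != 0) {
                    goto clean_up;          /* alloc_a is undone below */
            }
            return 0;

    clean_up:
            release_all(c);                 /* must tolerate a partially built ctx */
            return err;
    }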
@@ -4958,15 +4970,15 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) | |||
4958 | size = 0; | 4970 | size = 0; |
4959 | 4971 | ||
4960 | err = gr_gk20a_fecs_get_reglist_img_size(g, &size); | 4972 | err = gr_gk20a_fecs_get_reglist_img_size(g, &size); |
4961 | if (err) { | 4973 | if (err != 0) { |
4962 | nvgpu_err(g, | 4974 | nvgpu_err(g, |
4963 | "fail to query fecs pg buffer size"); | 4975 | "fail to query fecs pg buffer size"); |
4964 | return err; | 4976 | return err; |
4965 | } | 4977 | } |
4966 | 4978 | ||
4967 | if (!pmu->pg_buf.cpu_va) { | 4979 | if (pmu->pg_buf.cpu_va == NULL) { |
4968 | err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf); | 4980 | err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf); |
4969 | if (err) { | 4981 | if (err != 0) { |
4970 | nvgpu_err(g, "failed to allocate memory"); | 4982 | nvgpu_err(g, "failed to allocate memory"); |
4971 | return -ENOMEM; | 4983 | return -ENOMEM; |
4972 | } | 4984 | } |
@@ -4974,14 +4986,14 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g) | |||
4974 | 4986 | ||
4975 | 4987 | ||
4976 | err = gr_gk20a_fecs_set_reglist_bind_inst(g, &mm->pmu.inst_block); | 4988 | err = gr_gk20a_fecs_set_reglist_bind_inst(g, &mm->pmu.inst_block); |
4977 | if (err) { | 4989 | if (err != 0) { |
4978 | nvgpu_err(g, | 4990 | nvgpu_err(g, |
4979 | "fail to bind pmu inst to gr"); | 4991 | "fail to bind pmu inst to gr"); |
4980 | return err; | 4992 | return err; |
4981 | } | 4993 | } |
4982 | 4994 | ||
4983 | err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.gpu_va); | 4995 | err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.gpu_va); |
4984 | if (err) { | 4996 | if (err != 0) { |
4985 | nvgpu_err(g, | 4997 | nvgpu_err(g, |
4986 | "fail to set pg buffer pmu va"); | 4998 | "fail to set pg buffer pmu va"); |
4987 | return err; | 4999 | return err; |
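gk20a_init_gr_bind_fecs_elpg allocates its PG buffer only on first use, and Rule 14.4 covers pointers too: "!pmu->pg_buf.cpu_va" becomes an explicit NULL comparison. The allocate-once guard, reduced to its essentials as shown in the hunk:

    if (pmu->pg_buf.cpu_va == NULL) {               /* not yet allocated */
            err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf);
            if (err != 0) {
                    return -ENOMEM;
            }
    }
    /* the reglist bind/virtual-address setup below runs on every call */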
@@ -5004,30 +5016,30 @@ int gk20a_init_gr_support(struct gk20a *g) | |||
5004 | } | 5016 | } |
5005 | 5017 | ||
5006 | err = gr_gk20a_init_ctxsw(g); | 5018 | err = gr_gk20a_init_ctxsw(g); |
5007 | if (err) { | 5019 | if (err != 0) { |
5008 | return err; | 5020 | return err; |
5009 | } | 5021 | } |
5010 | 5022 | ||
5011 | /* this appears query for sw states but fecs actually init | 5023 | /* this appears query for sw states but fecs actually init |
5012 | ramchain, etc so this is hw init */ | 5024 | ramchain, etc so this is hw init */ |
5013 | err = g->ops.gr.init_ctx_state(g); | 5025 | err = g->ops.gr.init_ctx_state(g); |
5014 | if (err) { | 5026 | if (err != 0) { |
5015 | return err; | 5027 | return err; |
5016 | } | 5028 | } |
5017 | 5029 | ||
5018 | err = gk20a_init_gr_setup_sw(g); | 5030 | err = gk20a_init_gr_setup_sw(g); |
5019 | if (err) { | 5031 | if (err != 0) { |
5020 | return err; | 5032 | return err; |
5021 | } | 5033 | } |
5022 | 5034 | ||
5023 | err = gk20a_init_gr_setup_hw(g); | 5035 | err = gk20a_init_gr_setup_hw(g); |
5024 | if (err) { | 5036 | if (err != 0) { |
5025 | return err; | 5037 | return err; |
5026 | } | 5038 | } |
5027 | 5039 | ||
5028 | if (g->can_elpg) { | 5040 | if (g->can_elpg) { |
5029 | err = gk20a_init_gr_bind_fecs_elpg(g); | 5041 | err = gk20a_init_gr_bind_fecs_elpg(g); |
5030 | if (err) { | 5042 | if (err != 0) { |
5031 | return err; | 5043 | return err; |
5032 | } | 5044 | } |
5033 | } | 5045 | } |
@@ -5105,12 +5117,12 @@ int gk20a_enable_gr_hw(struct gk20a *g) | |||
5105 | nvgpu_log_fn(g, " "); | 5117 | nvgpu_log_fn(g, " "); |
5106 | 5118 | ||
5107 | err = gk20a_init_gr_prepare(g); | 5119 | err = gk20a_init_gr_prepare(g); |
5108 | if (err) { | 5120 | if (err != 0) { |
5109 | return err; | 5121 | return err; |
5110 | } | 5122 | } |
5111 | 5123 | ||
5112 | err = gk20a_init_gr_reset_enable_hw(g); | 5124 | err = gk20a_init_gr_reset_enable_hw(g); |
5113 | if (err) { | 5125 | if (err != 0) { |
5114 | return err; | 5126 | return err; |
5115 | } | 5127 | } |
5116 | 5128 | ||
@@ -5136,19 +5148,19 @@ int gk20a_gr_reset(struct gk20a *g) | |||
5136 | nvgpu_mutex_acquire(&g->gr.fecs_mutex); | 5148 | nvgpu_mutex_acquire(&g->gr.fecs_mutex); |
5137 | 5149 | ||
5138 | err = gk20a_enable_gr_hw(g); | 5150 | err = gk20a_enable_gr_hw(g); |
5139 | if (err) { | 5151 | if (err != 0) { |
5140 | nvgpu_mutex_release(&g->gr.fecs_mutex); | 5152 | nvgpu_mutex_release(&g->gr.fecs_mutex); |
5141 | return err; | 5153 | return err; |
5142 | } | 5154 | } |
5143 | 5155 | ||
5144 | err = gk20a_init_gr_setup_hw(g); | 5156 | err = gk20a_init_gr_setup_hw(g); |
5145 | if (err) { | 5157 | if (err != 0) { |
5146 | nvgpu_mutex_release(&g->gr.fecs_mutex); | 5158 | nvgpu_mutex_release(&g->gr.fecs_mutex); |
5147 | return err; | 5159 | return err; |
5148 | } | 5160 | } |
5149 | 5161 | ||
5150 | err = gr_gk20a_init_ctxsw(g); | 5162 | err = gr_gk20a_init_ctxsw(g); |
5151 | if (err) { | 5163 | if (err != 0) { |
5152 | nvgpu_mutex_release(&g->gr.fecs_mutex); | 5164 | nvgpu_mutex_release(&g->gr.fecs_mutex); |
5153 | return err; | 5165 | return err; |
5154 | } | 5166 | } |
@@ -5158,27 +5170,27 @@ int gk20a_gr_reset(struct gk20a *g) | |||
5158 | /* this appears query for sw states but fecs actually init | 5170 | /* this appears query for sw states but fecs actually init |
5159 | ramchain, etc so this is hw init */ | 5171 | ramchain, etc so this is hw init */ |
5160 | err = g->ops.gr.init_ctx_state(g); | 5172 | err = g->ops.gr.init_ctx_state(g); |
5161 | if (err) { | 5173 | if (err != 0) { |
5162 | return err; | 5174 | return err; |
5163 | } | 5175 | } |
5164 | 5176 | ||
5165 | size = 0; | 5177 | size = 0; |
5166 | err = gr_gk20a_fecs_get_reglist_img_size(g, &size); | 5178 | err = gr_gk20a_fecs_get_reglist_img_size(g, &size); |
5167 | if (err) { | 5179 | if (err != 0) { |
5168 | nvgpu_err(g, | 5180 | nvgpu_err(g, |
5169 | "fail to query fecs pg buffer size"); | 5181 | "fail to query fecs pg buffer size"); |
5170 | return err; | 5182 | return err; |
5171 | } | 5183 | } |
5172 | 5184 | ||
5173 | err = gr_gk20a_fecs_set_reglist_bind_inst(g, &g->mm.pmu.inst_block); | 5185 | err = gr_gk20a_fecs_set_reglist_bind_inst(g, &g->mm.pmu.inst_block); |
5174 | if (err) { | 5186 | if (err != 0) { |
5175 | nvgpu_err(g, | 5187 | nvgpu_err(g, |
5176 | "fail to bind pmu inst to gr"); | 5188 | "fail to bind pmu inst to gr"); |
5177 | return err; | 5189 | return err; |
5178 | } | 5190 | } |
5179 | 5191 | ||
5180 | err = gr_gk20a_fecs_set_reglist_virtual_addr(g, g->pmu.pg_buf.gpu_va); | 5192 | err = gr_gk20a_fecs_set_reglist_virtual_addr(g, g->pmu.pg_buf.gpu_va); |
5181 | if (err) { | 5193 | if (err != 0) { |
5182 | nvgpu_err(g, | 5194 | nvgpu_err(g, |
5183 | "fail to set pg buffer pmu va"); | 5195 | "fail to set pg buffer pmu va"); |
5184 | return err; | 5196 | return err; |
@@ -5276,7 +5288,7 @@ int gk20a_gr_handle_fecs_error(struct gk20a *g, struct channel_gk20a *ch, | |||
5276 | u32 gr_fecs_intr = gk20a_readl(g, gr_fecs_host_int_status_r()); | 5288 | u32 gr_fecs_intr = gk20a_readl(g, gr_fecs_host_int_status_r()); |
5277 | int ret = 0; | 5289 | int ret = 0; |
5278 | 5290 | ||
5279 | if (!gr_fecs_intr) { | 5291 | if (gr_fecs_intr == 0U) { |
5280 | return 0; | 5292 | return 0; |
5281 | } | 5293 | } |
5282 | 5294 | ||
@@ -5560,7 +5572,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx( | |||
5560 | /* slow path */ | 5572 | /* slow path */ |
5561 | for (chid = 0; chid < f->num_channels; chid++) { | 5573 | for (chid = 0; chid < f->num_channels; chid++) { |
5562 | struct channel_gk20a *ch = &f->channel[chid]; | 5574 | struct channel_gk20a *ch = &f->channel[chid]; |
5563 | if (!gk20a_channel_get(ch)) { | 5575 | if (gk20a_channel_get(ch) == NULL) { |
5564 | continue; | 5576 | continue; |
5565 | } | 5577 | } |
5566 | 5578 | ||
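The slow path above walks the whole channel table, and gk20a_channel_get() is a get-or-fail primitive: it returns the channel with a reference held, or NULL when the channel is already on its way to being freed. The explicit comparison makes that contract visible; the loop's shape, with the body condensed:

    for (chid = 0; chid < f->num_channels; chid++) {
            struct channel_gk20a *ch = &f->channel[chid];

            if (gk20a_channel_get(ch) == NULL) {    /* dying channel, no ref taken */
                    continue;
            }
            /* ... compare the channel's inst block against curr_ctx ... */
            gk20a_channel_put(ch);                  /* drop the reference */
    }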
@@ -5575,7 +5587,7 @@ static struct channel_gk20a *gk20a_gr_get_channel_from_ctx( | |||
5575 | gk20a_channel_put(ch); | 5587 | gk20a_channel_put(ch); |
5576 | } | 5588 | } |
5577 | 5589 | ||
5578 | if (!ret) { | 5590 | if (ret == NULL) { |
5579 | goto unlock; | 5591 | goto unlock; |
5580 | } | 5592 | } |
5581 | 5593 | ||
@@ -5800,7 +5812,7 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
5800 | 5812 | ||
5801 | for (sm = 0; sm < sm_per_tpc; sm++) { | 5813 | for (sm = 0; sm < sm_per_tpc; sm++) { |
5802 | 5814 | ||
5803 | if (!(esr_sm_sel & (1 << sm))) { | 5815 | if ((esr_sm_sel & BIT32(sm)) == 0U) { |
5804 | continue; | 5816 | continue; |
5805 | } | 5817 | } |
5806 | 5818 | ||
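In the TPC exception handler the open-coded "1 << sm" becomes BIT32(sm). The distinction is not cosmetic: the literal 1 is a signed int, so "1 << 31" shifts into the sign bit and is undefined behaviour, while BIT32() performs the shift on an unsigned 32-bit value. The macro presumably has the usual nvgpu shape (the exact definition lives in the driver's bitops header):

    #define BIT32(i)        (U32(1) << (i))

    if ((esr_sm_sel & BIT32(sm)) == 0U) {   /* this SM not flagged in the ESR mask */
            continue;
    }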
@@ -5878,12 +5890,12 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event, | |||
5878 | } | 5890 | } |
5879 | 5891 | ||
5880 | /* Handle GCC exception */ | 5892 | /* Handle GCC exception */ |
5881 | if (gr_gpc0_gpccs_gpc_exception_gcc_v(gpc_exception) && | 5893 | if ((gr_gpc0_gpccs_gpc_exception_gcc_v(gpc_exception) != 0U) && |
5882 | g->ops.gr.handle_gcc_exception) { | 5894 | (g->ops.gr.handle_gcc_exception != NULL)) { |
5883 | int gcc_ret = 0; | 5895 | int gcc_ret = 0; |
5884 | gcc_ret = g->ops.gr.handle_gcc_exception(g, gpc, tpc, | 5896 | gcc_ret = g->ops.gr.handle_gcc_exception(g, gpc, tpc, |
5885 | post_event, fault_ch, hww_global_esr); | 5897 | post_event, fault_ch, hww_global_esr); |
5886 | ret |= ret ? ret : gcc_ret; | 5898 | ret |= (ret != 0) ? ret : gcc_ret; |
5887 | } | 5899 | } |
5888 | 5900 | ||
5889 | /* Handle GPCCS exceptions */ | 5901 | /* Handle GPCCS exceptions */ |
@@ -5891,7 +5903,7 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event, | |||
5891 | int ret_ecc = 0; | 5903 | int ret_ecc = 0; |
5892 | ret_ecc = g->ops.gr.handle_gpc_gpccs_exception(g, gpc, | 5904 | ret_ecc = g->ops.gr.handle_gpc_gpccs_exception(g, gpc, |
5893 | gpc_exception); | 5905 | gpc_exception); |
5894 | ret |= ret ? ret : ret_ecc; | 5906 | ret |= (ret != 0) ? ret : ret_ecc; |
5895 | } | 5907 | } |
5896 | 5908 | ||
5897 | /* Handle GPCMMU exceptions */ | 5909 | /* Handle GPCMMU exceptions */ |
@@ -5900,7 +5912,7 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g, bool *post_event, | |||
5900 | 5912 | ||
5901 | ret_mmu = g->ops.gr.handle_gpc_gpcmmu_exception(g, gpc, | 5913 | ret_mmu = g->ops.gr.handle_gpc_gpcmmu_exception(g, gpc, |
5902 | gpc_exception); | 5914 | gpc_exception); |
5903 | ret |= ret ? ret : ret_mmu; | 5915 | ret |= (ret != 0) ? ret : ret_mmu; |
5904 | } | 5916 | } |
5905 | 5917 | ||
5906 | } | 5918 | } |
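The "ret |= ret ? ret : gcc_ret;" construct used for the GCC/GPCCS/GPCMMU sub-handlers only looks like an OR-accumulation. Since "ret |= ret" is a no-op, it actually latches the first non-zero error and discards any later ones; the rewrite merely makes the condition explicit without changing that behaviour. An equivalent, more transparent form:

    if (ret == 0) {
            ret = gcc_ret;          /* keep the first error reported */
    }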
@@ -5939,7 +5951,7 @@ int gk20a_gr_isr(struct gk20a *g) | |||
5939 | nvgpu_log_fn(g, " "); | 5951 | nvgpu_log_fn(g, " "); |
5940 | nvgpu_log(g, gpu_dbg_intr, "pgraph intr %08x", gr_intr); | 5952 | nvgpu_log(g, gpu_dbg_intr, "pgraph intr %08x", gr_intr); |
5941 | 5953 | ||
5942 | if (!gr_intr) { | 5954 | if (gr_intr == 0U) { |
5943 | return 0; | 5955 | return 0; |
5944 | } | 5956 | } |
5945 | 5957 | ||
@@ -5974,7 +5986,7 @@ int gk20a_gr_isr(struct gk20a *g) | |||
5974 | nvgpu_err(g, "ch id is INVALID 0xffffffff"); | 5986 | nvgpu_err(g, "ch id is INVALID 0xffffffff"); |
5975 | } | 5987 | } |
5976 | 5988 | ||
5977 | if (ch && gk20a_is_channel_marked_as_tsg(ch)) { | 5989 | if ((ch != NULL) && gk20a_is_channel_marked_as_tsg(ch)) { |
5978 | tsg = &g->fifo.tsg[ch->tsgid]; | 5990 | tsg = &g->fifo.tsg[ch->tsgid]; |
5979 | } | 5991 | } |
5980 | 5992 | ||
@@ -6153,7 +6165,8 @@ int gk20a_gr_isr(struct gk20a *g) | |||
6153 | } | 6165 | } |
6154 | 6166 | ||
6155 | /* check if a gpc exception has occurred */ | 6167 | /* check if a gpc exception has occurred */ |
6156 | if (exception & gr_exception_gpc_m() && !need_reset) { | 6168 | if (((exception & gr_exception_gpc_m()) != 0U) && |
6169 | !need_reset) { | ||
6157 | bool post_event = false; | 6170 | bool post_event = false; |
6158 | 6171 | ||
6159 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, | 6172 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, |
@@ -6171,7 +6184,7 @@ int gk20a_gr_isr(struct gk20a *g) | |||
6171 | 6184 | ||
6172 | /* signal clients waiting on an event */ | 6185 | /* signal clients waiting on an event */ |
6173 | if (g->ops.gr.sm_debugger_attached(g) && | 6186 | if (g->ops.gr.sm_debugger_attached(g) && |
6174 | post_event && fault_ch) { | 6187 | post_event && (fault_ch != NULL)) { |
6175 | g->ops.debugger.post_events(fault_ch); | 6188 | g->ops.debugger.post_events(fault_ch); |
6176 | } | 6189 | } |
6177 | } | 6190 | } |
@@ -6202,7 +6215,7 @@ int gk20a_gr_isr(struct gk20a *g) | |||
6202 | } | 6215 | } |
6203 | } | 6216 | } |
6204 | 6217 | ||
6205 | if (gr_intr && !ch) { | 6218 | if ((gr_intr != 0U) && (ch == NULL)) { |
6206 | /* Clear interrupts for unused channel. This is | 6219 | /* Clear interrupts for unused channel. This is |
6207 | probably an interrupt during gk20a_free_channel() */ | 6220 | probably an interrupt during gk20a_free_channel() */ |
6208 | nvgpu_err(g, | 6221 | nvgpu_err(g, |
@@ -6222,7 +6235,7 @@ int gk20a_gr_isr(struct gk20a *g) | |||
6222 | } | 6235 | } |
6223 | 6236 | ||
6224 | /* Posting of BPT events should be the last thing in this function */ | 6237 | /* Posting of BPT events should be the last thing in this function */ |
6225 | if (global_esr && tsg) { | 6238 | if ((global_esr != 0U) && (tsg != NULL)) { |
6226 | gk20a_gr_post_bpt_events(g, tsg, global_esr); | 6239 | gk20a_gr_post_bpt_events(g, tsg, global_esr); |
6227 | } | 6240 | } |
6228 | 6241 | ||
@@ -6415,7 +6428,8 @@ int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr, | |||
6415 | return 0; | 6428 | return 0; |
6416 | } | 6429 | } |
6417 | return 0; | 6430 | return 0; |
6418 | } else if (g->ops.gr.is_egpc_addr && g->ops.gr.is_egpc_addr(g, addr)) { | 6431 | } else if ((g->ops.gr.is_egpc_addr != NULL) && |
6432 | g->ops.gr.is_egpc_addr(g, addr)) { | ||
6419 | return g->ops.gr.decode_egpc_addr(g, | 6433 | return g->ops.gr.decode_egpc_addr(g, |
6420 | addr, addr_type, gpc_num, | 6434 | addr, addr_type, gpc_num, |
6421 | tpc_num, broadcast_flags); | 6435 | tpc_num, broadcast_flags); |
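gr_gk20a_decode_priv_addr illustrates the HAL-op guard pattern: chip-specific handlers are optional function pointers in g->ops, so the pointer is tested against NULL first, and C's short-circuit && guarantees the call is never made through a NULL pointer:

    if ((g->ops.gr.is_egpc_addr != NULL) &&
        g->ops.gr.is_egpc_addr(g, addr)) {
            /* delegate to the chip-specific EGPC decoder */
            return g->ops.gr.decode_egpc_addr(g, addr, addr_type,
                                              gpc_num, tpc_num,
                                              broadcast_flags);
    }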
@@ -6485,7 +6499,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, | |||
6485 | &gpc_num, &tpc_num, &ppc_num, &be_num, | 6499 | &gpc_num, &tpc_num, &ppc_num, &be_num, |
6486 | &broadcast_flags); | 6500 | &broadcast_flags); |
6487 | nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); | 6501 | nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type = %d", addr_type); |
6488 | if (err) { | 6502 | if (err != 0) { |
6489 | return err; | 6503 | return err; |
6490 | } | 6504 | } |
6491 | 6505 | ||
@@ -6495,7 +6509,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, | |||
6495 | * table. Convert a BE unicast address to a broadcast address | 6509 | * table. Convert a BE unicast address to a broadcast address |
6496 | * so that we can look up the offset. */ | 6510 | * so that we can look up the offset. */ |
6497 | if ((addr_type == CTXSW_ADDR_TYPE_BE) && | 6511 | if ((addr_type == CTXSW_ADDR_TYPE_BE) && |
6498 | !(broadcast_flags & PRI_BROADCAST_FLAGS_BE)) { | 6512 | ((broadcast_flags & PRI_BROADCAST_FLAGS_BE) == 0U)) { |
6499 | priv_addr_table[t++] = pri_be_shared_addr(g, addr); | 6513 | priv_addr_table[t++] = pri_be_shared_addr(g, addr); |
6500 | } else { | 6514 | } else { |
6501 | priv_addr_table[t++] = addr; | 6515 | priv_addr_table[t++] = addr; |
@@ -6523,7 +6537,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, | |||
6523 | } else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) { | 6537 | } else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) { |
6524 | err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num, | 6538 | err = gr_gk20a_split_ppc_broadcast_addr(g, addr, gpc_num, |
6525 | priv_addr_table, &t); | 6539 | priv_addr_table, &t); |
6526 | if (err) { | 6540 | if (err != 0) { |
6527 | return err; | 6541 | return err; |
6528 | } | 6542 | } |
6529 | } else { | 6543 | } else { |
@@ -6542,7 +6556,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, | |||
6542 | } | 6556 | } |
6543 | } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || | 6557 | } else if (((addr_type == CTXSW_ADDR_TYPE_EGPC) || |
6544 | (addr_type == CTXSW_ADDR_TYPE_ETPC)) && | 6558 | (addr_type == CTXSW_ADDR_TYPE_ETPC)) && |
6545 | g->ops.gr.egpc_etpc_priv_addr_table) { | 6559 | (g->ops.gr.egpc_etpc_priv_addr_table != NULL)) { |
6546 | nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); | 6560 | nvgpu_log(g, gpu_dbg_gpu_dbg, "addr_type : EGPC/ETPC"); |
6547 | g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, | 6561 | g->ops.gr.egpc_etpc_priv_addr_table(g, addr, gpc_num, tpc_num, |
6548 | broadcast_flags, priv_addr_table, &t); | 6562 | broadcast_flags, priv_addr_table, &t); |
@@ -6556,7 +6570,7 @@ int gr_gk20a_create_priv_addr_table(struct gk20a *g, | |||
6556 | g->ops.gr.split_fbpa_broadcast_addr(g, addr, | 6570 | g->ops.gr.split_fbpa_broadcast_addr(g, addr, |
6557 | nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS), | 6571 | nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPAS), |
6558 | priv_addr_table, &t); | 6572 | priv_addr_table, &t); |
6559 | } else if (!(broadcast_flags & PRI_BROADCAST_FLAGS_GPC)) { | 6573 | } else if ((broadcast_flags & PRI_BROADCAST_FLAGS_GPC) == 0U) { |
6560 | if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) { | 6574 | if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC) { |
6561 | for (tpc_num = 0; | 6575 | for (tpc_num = 0; |
6562 | tpc_num < g->gr.gpc_tpc_count[gpc_num]; | 6576 | tpc_num < g->gr.gpc_tpc_count[gpc_num]; |
@@ -6607,7 +6621,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, | |||
6607 | } | 6621 | } |
6608 | 6622 | ||
6609 | priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); | 6623 | priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); |
6610 | if (!priv_registers) { | 6624 | if (priv_registers == NULL) { |
6611 | nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets); | 6625 | nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets); |
6612 | err = PTR_ERR(priv_registers); | 6626 | err = PTR_ERR(priv_registers); |
6613 | goto cleanup; | 6627 | goto cleanup; |
@@ -6630,7 +6644,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, | |||
6630 | num_registers = 1; | 6644 | num_registers = 1; |
6631 | } | 6645 | } |
6632 | 6646 | ||
6633 | if (!g->gr.ctx_vars.local_golden_image) { | 6647 | if (g->gr.ctx_vars.local_golden_image == NULL) { |
6634 | nvgpu_log_fn(g, "no context switch header info to work with"); | 6648 | nvgpu_log_fn(g, "no context switch header info to work with"); |
6635 | err = -EINVAL; | 6649 | err = -EINVAL; |
6636 | goto cleanup; | 6650 | goto cleanup; |
@@ -6643,7 +6657,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g, | |||
6643 | g->gr.ctx_vars.local_golden_image, | 6657 | g->gr.ctx_vars.local_golden_image, |
6644 | g->gr.ctx_vars.golden_image_size, | 6658 | g->gr.ctx_vars.golden_image_size, |
6645 | &priv_offset); | 6659 | &priv_offset); |
6646 | if (err) { | 6660 | if (err != 0) { |
6647 | nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x", | 6661 | nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x", |
6648 | addr); /*, grPriRegStr(addr)));*/ | 6662 | addr); /*, grPriRegStr(addr)));*/ |
6649 | goto cleanup; | 6663 | goto cleanup; |
@@ -6690,7 +6704,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, | |||
6690 | } | 6704 | } |
6691 | 6705 | ||
6692 | priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); | 6706 | priv_registers = nvgpu_kzalloc(g, sizeof(u32) * potential_offsets); |
6693 | if (!priv_registers) { | 6707 | if (priv_registers == NULL) { |
6694 | nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets); | 6708 | nvgpu_log_fn(g, "failed alloc for potential_offsets=%d", potential_offsets); |
6695 | return -ENOMEM; | 6709 | return -ENOMEM; |
6696 | } | 6710 | } |
@@ -6710,7 +6724,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, | |||
6710 | num_registers = 1; | 6724 | num_registers = 1; |
6711 | } | 6725 | } |
6712 | 6726 | ||
6713 | if (!g->gr.ctx_vars.local_golden_image) { | 6727 | if (g->gr.ctx_vars.local_golden_image == NULL) { |
6714 | nvgpu_log_fn(g, "no context switch header info to work with"); | 6728 | nvgpu_log_fn(g, "no context switch header info to work with"); |
6715 | err = -EINVAL; | 6729 | err = -EINVAL; |
6716 | goto cleanup; | 6730 | goto cleanup; |
@@ -6720,7 +6734,7 @@ int gr_gk20a_get_pm_ctx_buffer_offsets(struct gk20a *g, | |||
6720 | err = gr_gk20a_find_priv_offset_in_pm_buffer(g, | 6734 | err = gr_gk20a_find_priv_offset_in_pm_buffer(g, |
6721 | priv_registers[i], | 6735 | priv_registers[i], |
6722 | &priv_offset); | 6736 | &priv_offset); |
6723 | if (err) { | 6737 | if (err != 0) { |
6724 | nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x", | 6738 | nvgpu_log_fn(g, "Could not determine priv_offset for addr:0x%x", |
6725 | addr); /*, grPriRegStr(addr)));*/ | 6739 | addr); /*, grPriRegStr(addr)));*/ |
6726 | goto cleanup; | 6740 | goto cleanup; |
@@ -6799,7 +6813,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g, | |||
6799 | struct nvgpu_mem *ctxheader = &ch->ctx_header; | 6813 | struct nvgpu_mem *ctxheader = &ch->ctx_header; |
6800 | 6814 | ||
6801 | tsg = tsg_gk20a_from_ch(ch); | 6815 | tsg = tsg_gk20a_from_ch(ch); |
6802 | if (!tsg) { | 6816 | if (tsg == NULL) { |
6803 | return -EINVAL; | 6817 | return -EINVAL; |
6804 | } | 6818 | } |
6805 | 6819 | ||
@@ -6826,7 +6840,7 @@ static int gr_gk20a_ctx_patch_smpc(struct gk20a *g, | |||
6826 | tmp = nvgpu_mem_rd(g, mem, | 6840 | tmp = nvgpu_mem_rd(g, mem, |
6827 | ctxsw_prog_main_image_patch_count_o()); | 6841 | ctxsw_prog_main_image_patch_count_o()); |
6828 | 6842 | ||
6829 | if (!tmp) { | 6843 | if (tmp == 0U) { |
6830 | gr_ctx->patch_ctx.data_count = 0; | 6844 | gr_ctx->patch_ctx.data_count = 0; |
6831 | } | 6845 | } |
6832 | 6846 | ||
@@ -6940,7 +6954,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g, | |||
6940 | 6954 | ||
6941 | nvgpu_log_info(g, " gpc = %d tpc = %d", | 6955 | nvgpu_log_info(g, " gpc = %d tpc = %d", |
6942 | gpc_num, tpc_num); | 6956 | gpc_num, tpc_num); |
6943 | } else if ((g->ops.gr.is_etpc_addr) && | 6957 | } else if ((g->ops.gr.is_etpc_addr != NULL) && |
6944 | g->ops.gr.is_etpc_addr(g, addr)) { | 6958 | g->ops.gr.is_etpc_addr(g, addr)) { |
6945 | g->ops.gr.get_egpc_etpc_num(g, addr, &gpc_num, &tpc_num); | 6959 | g->ops.gr.get_egpc_etpc_num(g, addr, &gpc_num, &tpc_num); |
6946 | gpc_base = g->ops.gr.get_egpc_base(g); | 6960 | gpc_base = g->ops.gr.get_egpc_base(g); |
@@ -7197,7 +7211,7 @@ gr_gk20a_process_context_buffer_priv_segment(struct gk20a *g, | |||
7197 | } | 7211 | } |
7198 | } else if ((addr_type == CTXSW_ADDR_TYPE_EGPC) || | 7212 | } else if ((addr_type == CTXSW_ADDR_TYPE_EGPC) || |
7199 | (addr_type == CTXSW_ADDR_TYPE_ETPC)) { | 7213 | (addr_type == CTXSW_ADDR_TYPE_ETPC)) { |
7200 | if (!(g->ops.gr.get_egpc_base)) { | 7214 | if (g->ops.gr.get_egpc_base == NULL) { |
7201 | return -EINVAL; | 7215 | return -EINVAL; |
7202 | } | 7216 | } |
7203 | 7217 | ||
@@ -7404,7 +7418,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, | |||
7404 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, | 7418 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
7405 | "addr_type = %d, broadcast_flags: %08x", | 7419 | "addr_type = %d, broadcast_flags: %08x", |
7406 | addr_type, broadcast_flags); | 7420 | addr_type, broadcast_flags); |
7407 | if (err) { | 7421 | if (err != 0) { |
7408 | return err; | 7422 | return err; |
7409 | } | 7423 | } |
7410 | 7424 | ||
@@ -7434,10 +7448,10 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, | |||
7434 | err = gr_gk20a_find_priv_offset_in_ext_buffer(g, | 7448 | err = gr_gk20a_find_priv_offset_in_ext_buffer(g, |
7435 | addr, is_quad, quad, context_buffer, | 7449 | addr, is_quad, quad, context_buffer, |
7436 | context_buffer_size, priv_offset); | 7450 | context_buffer_size, priv_offset); |
7437 | if (!err || (err && is_quad)) { | 7451 | if ((err == 0) || ((err != 0) && is_quad)) { |
7438 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, | 7452 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
7439 | "err = %d, is_quad = %s", | 7453 | "err = %d, is_quad = %s", |
7440 | err, is_quad ? "true" : false); | 7454 | err, is_quad ? "true" : "false"); |
7441 | return err; | 7455 | return err; |
7442 | } | 7456 | } |
7443 | 7457 | ||
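This hunk carries the one behavioural fix in the file. The old log statement's ternary mixed a string literal with the integer constant false; since 0 is a null pointer constant, the else arm evaluated to a null char pointer handed to a %s conversion, which is undefined behaviour. Illustrated with printf standing in for nvgpu_log:

    printf("is_quad = %s", is_quad ? "true" : false);      /* else arm is (char *)NULL */
    printf("is_quad = %s", is_quad ? "true" : "false");    /* fixed: both arms are strings */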
@@ -7451,7 +7465,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, | |||
7451 | addr_type, addr, | 7465 | addr_type, addr, |
7452 | 0, 0, 0, 0, | 7466 | 0, 0, 0, 0, |
7453 | &offset); | 7467 | &offset); |
7454 | if (err) { | 7468 | if (err != 0) { |
7455 | return err; | 7469 | return err; |
7456 | } | 7470 | } |
7457 | 7471 | ||
@@ -7481,7 +7495,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, | |||
7481 | err = gr_gk20a_determine_ppc_configuration(g, context, | 7495 | err = gr_gk20a_determine_ppc_configuration(g, context, |
7482 | &num_ppcs, &ppc_mask, | 7496 | &num_ppcs, &ppc_mask, |
7483 | ®_list_ppc_count); | 7497 | ®_list_ppc_count); |
7484 | if (err) { | 7498 | if (err != 0) { |
7485 | nvgpu_err(g, "determine ppc configuration failed"); | 7499 | nvgpu_err(g, "determine ppc configuration failed"); |
7486 | return err; | 7500 | return err; |
7487 | } | 7501 | } |
@@ -7508,7 +7522,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, | |||
7508 | addr_type, | 7522 | addr_type, |
7509 | num_tpcs, num_ppcs, reg_list_ppc_count, | 7523 | num_tpcs, num_ppcs, reg_list_ppc_count, |
7510 | &offset_in_segment); | 7524 | &offset_in_segment); |
7511 | if (err) { | 7525 | if (err != 0) { |
7512 | return -EINVAL; | 7526 | return -EINVAL; |
7513 | } | 7527 | } |
7514 | 7528 | ||
@@ -7522,8 +7536,8 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g, | |||
7522 | i, num_tpcs, | 7536 | i, num_tpcs, |
7523 | num_ppcs, ppc_mask, | 7537 | num_ppcs, ppc_mask, |
7524 | &offset); | 7538 | &offset); |
7525 | if (err) { | 7539 | if (err != 0) { |
7526 | return -EINVAL; | 7540 | return -EINVAL; |
7527 | } | 7541 | } |
7528 | 7542 | ||
7529 | *priv_offset = offset_to_segment + offset; | 7543 | *priv_offset = offset_to_segment + offset; |
@@ -7864,7 +7878,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g) | |||
7864 | map_size = hwpm_ctxsw_reg_count_max * sizeof(*map); | 7878 | map_size = hwpm_ctxsw_reg_count_max * sizeof(*map); |
7865 | 7879 | ||
7866 | map = nvgpu_big_zalloc(g, map_size); | 7880 | map = nvgpu_big_zalloc(g, map_size); |
7867 | if (!map) { | 7881 | if (map == NULL) { |
7868 | return -ENOMEM; | 7882 | return -ENOMEM; |
7869 | } | 7883 | } |
7870 | 7884 | ||
@@ -7989,7 +8003,7 @@ static int gr_gk20a_find_priv_offset_in_pm_buffer(struct gk20a *g, | |||
7989 | /* Create map of pri address and pm offset if necessary */ | 8003 | /* Create map of pri address and pm offset if necessary */ |
7990 | if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map == NULL) { | 8004 | if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map == NULL) { |
7991 | err = gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(g); | 8005 | err = gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(g); |
7992 | if (err) { | 8006 | if (err != 0) { |
7993 | return err; | 8007 | return err; |
7994 | } | 8008 | } |
7995 | } | 8009 | } |
@@ -8025,7 +8039,7 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch) | |||
8025 | * valid bit must be checked to be absolutely certain that a | 8039 | * valid bit must be checked to be absolutely certain that a |
8026 | * valid context is currently resident. | 8040 | * valid context is currently resident. |
8027 | */ | 8041 | */ |
8028 | if (!gr_fecs_current_ctx_valid_v(curr_gr_ctx)) { | 8042 | if (gr_fecs_current_ctx_valid_v(curr_gr_ctx) == 0U) { |
8029 | return NULL; | 8043 | return NULL; |
8030 | } | 8044 | } |
8031 | 8045 | ||
@@ -8035,12 +8049,12 @@ bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch) | |||
8035 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, | 8049 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
8036 | "curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d" | 8050 | "curr_gr_chid=%d curr_tsgid=%d, ch->tsgid=%d" |
8037 | " ch->chid=%d", | 8051 | " ch->chid=%d", |
8038 | curr_ch ? curr_ch->chid : -1, | 8052 | (curr_ch != NULL) ? curr_ch->chid : -1, |
8039 | curr_gr_tsgid, | 8053 | curr_gr_tsgid, |
8040 | ch->tsgid, | 8054 | ch->tsgid, |
8041 | ch->chid); | 8055 | ch->chid); |
8042 | 8056 | ||
8043 | if (!curr_ch) { | 8057 | if (curr_ch == NULL) { |
8044 | return false; | 8058 | return false; |
8045 | } | 8059 | } |
8046 | 8060 | ||
@@ -8081,7 +8095,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
8081 | num_ctx_wr_ops, num_ctx_rd_ops); | 8095 | num_ctx_wr_ops, num_ctx_rd_ops); |
8082 | 8096 | ||
8083 | tsg = tsg_gk20a_from_ch(ch); | 8097 | tsg = tsg_gk20a_from_ch(ch); |
8084 | if (!tsg) { | 8098 | if (tsg == NULL) { |
8085 | return -EINVAL; | 8099 | return -EINVAL; |
8086 | } | 8100 | } |
8087 | 8101 | ||
@@ -8099,8 +8113,8 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
8099 | } | 8113 | } |
8100 | 8114 | ||
8101 | /* if this is a quad access, setup for special access*/ | 8115 | /* if this is a quad access, setup for special access*/ |
8102 | if (ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD) | 8116 | if ((ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD)) |
8103 | && g->ops.gr.access_smpc_reg) { | 8117 | && (g->ops.gr.access_smpc_reg != NULL)) { |
8104 | g->ops.gr.access_smpc_reg(g, | 8118 | g->ops.gr.access_smpc_reg(g, |
8105 | ctx_ops[i].quad, | 8119 | ctx_ops[i].quad, |
8106 | ctx_ops[i].offset); | 8120 | ctx_ops[i].offset); |
@@ -8155,14 +8169,14 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
8155 | 8169 | ||
8156 | /* they're the same size, so just use one alloc for both */ | 8170 | /* they're the same size, so just use one alloc for both */ |
8157 | offsets = nvgpu_kzalloc(g, 2 * sizeof(u32) * max_offsets); | 8171 | offsets = nvgpu_kzalloc(g, 2 * sizeof(u32) * max_offsets); |
8158 | if (!offsets) { | 8172 | if (offsets == NULL) { |
8159 | err = -ENOMEM; | 8173 | err = -ENOMEM; |
8160 | goto cleanup; | 8174 | goto cleanup; |
8161 | } | 8175 | } |
8162 | offset_addrs = offsets + max_offsets; | 8176 | offset_addrs = offsets + max_offsets; |
8163 | 8177 | ||
8164 | err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false); | 8178 | err = gr_gk20a_ctx_patch_write_begin(g, gr_ctx, false); |
8165 | if (err) { | 8179 | if (err != 0) { |
8166 | goto cleanup; | 8180 | goto cleanup; |
8167 | } | 8181 | } |
8168 | 8182 | ||
@@ -8191,7 +8205,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
8191 | &num_offsets, | 8205 | &num_offsets, |
8192 | ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD), | 8206 | ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD), |
8193 | ctx_ops[i].quad); | 8207 | ctx_ops[i].quad); |
8194 | if (!err) { | 8208 | if (err == 0) { |
8195 | if (!gr_ctx_ready) { | 8209 | if (!gr_ctx_ready) { |
8196 | gr_ctx_ready = true; | 8210 | gr_ctx_ready = true; |
8197 | } | 8211 | } |
@@ -8202,7 +8216,7 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
8202 | max_offsets, | 8216 | max_offsets, |
8203 | offsets, offset_addrs, | 8217 | offsets, offset_addrs, |
8204 | &num_offsets); | 8218 | &num_offsets); |
8205 | if (err) { | 8219 | if (err != 0) { |
8206 | nvgpu_log(g, gpu_dbg_gpu_dbg, | 8220 | nvgpu_log(g, gpu_dbg_gpu_dbg, |
8207 | "ctx op invalid offset: offset=0x%x", | 8221 | "ctx op invalid offset: offset=0x%x", |
8208 | ctx_ops[i].offset); | 8222 | ctx_ops[i].offset); |
@@ -8224,8 +8238,8 @@ int __gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
8224 | } | 8238 | } |
8225 | 8239 | ||
8226 | /* if this is a quad access, setup for special access*/ | 8240 | /* if this is a quad access, setup for special access*/ |
8227 | if (ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD) && | 8241 | if ((ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD)) && |
8228 | g->ops.gr.access_smpc_reg) { | 8242 | (g->ops.gr.access_smpc_reg != NULL)) { |
8229 | g->ops.gr.access_smpc_reg(g, ctx_ops[i].quad, | 8243 | g->ops.gr.access_smpc_reg(g, ctx_ops[i].quad, |
8230 | ctx_ops[i].offset); | 8244 | ctx_ops[i].offset); |
8231 | } | 8245 | } |
@@ -8313,7 +8327,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch, | |||
8313 | * determine if the context we're interested in is current. | 8327 | * determine if the context we're interested in is current. |
8314 | */ | 8328 | */ |
8315 | err = gr_gk20a_disable_ctxsw(g); | 8329 | err = gr_gk20a_disable_ctxsw(g); |
8316 | if (err) { | 8330 | if (err != 0) { |
8317 | nvgpu_err(g, "unable to stop gr ctxsw"); | 8331 | nvgpu_err(g, "unable to stop gr ctxsw"); |
8318 | /* this should probably be ctx-fatal... */ | 8332 | /* this should probably be ctx-fatal... */ |
8319 | return err; | 8333 | return err; |
@@ -8421,7 +8435,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, | |||
8421 | 8435 | ||
8422 | nvgpu_usleep_range(delay, delay * 2); | 8436 | nvgpu_usleep_range(delay, delay * 2); |
8423 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); | 8437 | delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); |
8424 | } while (!nvgpu_timeout_expired(&timeout)); | 8438 | } while (nvgpu_timeout_expired(&timeout) == 0); |
8425 | 8439 | ||
8426 | dbgr_control0 = gk20a_readl(g, | 8440 | dbgr_control0 = gk20a_readl(g, |
8427 | gr_gpc0_tpc0_sm_dbgr_control0_r() + offset); | 8441 | gr_gpc0_tpc0_sm_dbgr_control0_r() + offset); |
@@ -8475,7 +8489,7 @@ void gk20a_gr_suspend_single_sm(struct gk20a *g, | |||
8475 | 8489 | ||
8476 | err = g->ops.gr.wait_for_sm_lock_down(g, gpc, tpc, sm, | 8490 | err = g->ops.gr.wait_for_sm_lock_down(g, gpc, tpc, sm, |
8477 | global_esr_mask, check_errors); | 8491 | global_esr_mask, check_errors); |
8478 | if (err) { | 8492 | if (err != 0) { |
8479 | nvgpu_err(g, | 8493 | nvgpu_err(g, |
8480 | "SuspendSm failed"); | 8494 | "SuspendSm failed"); |
8481 | return; | 8495 | return; |
@@ -8516,7 +8530,7 @@ void gk20a_gr_suspend_all_sms(struct gk20a *g, | |||
8516 | err = g->ops.gr.wait_for_sm_lock_down(g, | 8530 | err = g->ops.gr.wait_for_sm_lock_down(g, |
8517 | gpc, tpc, sm, | 8531 | gpc, tpc, sm, |
8518 | global_esr_mask, check_errors); | 8532 | global_esr_mask, check_errors); |
8519 | if (err) { | 8533 | if (err != 0) { |
8520 | nvgpu_err(g, "SuspendAllSms failed"); | 8534 | nvgpu_err(g, "SuspendAllSms failed"); |
8521 | return; | 8535 | return; |
8522 | } | 8536 | } |
@@ -8599,14 +8613,14 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g, | |||
8599 | u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); | 8613 | u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); |
8600 | 8614 | ||
8601 | ops = nvgpu_kcalloc(g, g->gr.no_of_sm, sizeof(*ops)); | 8615 | ops = nvgpu_kcalloc(g, g->gr.no_of_sm, sizeof(*ops)); |
8602 | if (!ops) { | 8616 | if (ops == NULL) { |
8603 | return -ENOMEM; | 8617 | return -ENOMEM; |
8604 | } | 8618 | } |
8605 | for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) { | 8619 | for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) { |
8606 | int gpc, tpc; | 8620 | int gpc, tpc; |
8607 | u32 tpc_offset, gpc_offset, reg_offset, reg_mask, reg_val; | 8621 | u32 tpc_offset, gpc_offset, reg_offset, reg_mask, reg_val; |
8608 | 8622 | ||
8609 | if (!(sms & (1 << sm_id))) { | 8623 | if ((sms & BIT64(sm_id)) == 0ULL) { |
8610 | continue; | 8624 | continue; |
8611 | } | 8625 | } |
8612 | 8626 | ||
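Here the mask is 64 bits wide: sms carries one bit per SM, and the old "1 << sm_id" was a 32-bit signed shift that is undefined once sm_id reaches 31 and can never address bits 32-63 at all. BIT64() keeps the shift in an unsigned 64-bit type, matching the 0ULL on the other side of the comparison:

    if ((sms & BIT64(sm_id)) == 0ULL) {     /* SM not selected in the 64-bit mask */
            continue;
    }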
@@ -8641,7 +8655,7 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g, | |||
8641 | } | 8655 | } |
8642 | 8656 | ||
8643 | err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0, NULL); | 8657 | err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0, NULL); |
8644 | if (err) { | 8658 | if (err != 0) { |
8645 | nvgpu_err(g, "Failed to access register"); | 8659 | nvgpu_err(g, "Failed to access register"); |
8646 | } | 8660 | } |
8647 | nvgpu_kfree(g, ops); | 8661 | nvgpu_kfree(g, ops); |
@@ -8698,7 +8712,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g, | |||
8698 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); | 8712 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
8699 | 8713 | ||
8700 | err = gr_gk20a_disable_ctxsw(g); | 8714 | err = gr_gk20a_disable_ctxsw(g); |
8701 | if (err) { | 8715 | if (err != 0) { |
8702 | nvgpu_err(g, "unable to stop gr ctxsw"); | 8716 | nvgpu_err(g, "unable to stop gr ctxsw"); |
8703 | goto clean_up; | 8717 | goto clean_up; |
8704 | } | 8718 | } |
@@ -8718,7 +8732,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g, | |||
8718 | nvgpu_mutex_release(&dbg_s->ch_list_lock); | 8732 | nvgpu_mutex_release(&dbg_s->ch_list_lock); |
8719 | 8733 | ||
8720 | err = gr_gk20a_enable_ctxsw(g); | 8734 | err = gr_gk20a_enable_ctxsw(g); |
8721 | if (err) { | 8735 | if (err != 0) { |
8722 | nvgpu_err(g, "unable to restart ctxsw!"); | 8736 | nvgpu_err(g, "unable to restart ctxsw!"); |
8723 | } | 8737 | } |
8724 | 8738 | ||
@@ -8743,7 +8757,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g, | |||
8743 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); | 8757 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); |
8744 | 8758 | ||
8745 | err = gr_gk20a_disable_ctxsw(g); | 8759 | err = gr_gk20a_disable_ctxsw(g); |
8746 | if (err) { | 8760 | if (err != 0) { |
8747 | nvgpu_err(g, "unable to stop gr ctxsw"); | 8761 | nvgpu_err(g, "unable to stop gr ctxsw"); |
8748 | goto clean_up; | 8762 | goto clean_up; |
8749 | } | 8763 | } |
@@ -8759,7 +8773,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g, | |||
8759 | } | 8773 | } |
8760 | 8774 | ||
8761 | err = gr_gk20a_enable_ctxsw(g); | 8775 | err = gr_gk20a_enable_ctxsw(g); |
8762 | if (err) { | 8776 | if (err != 0) { |
8763 | nvgpu_err(g, "unable to restart ctxsw!"); | 8777 | nvgpu_err(g, "unable to restart ctxsw!"); |
8764 | } | 8778 | } |
8765 | 8779 | ||
@@ -8812,7 +8826,7 @@ int gr_gk20a_wait_for_pause(struct gk20a *g, struct nvgpu_warpstate *w_state) | |||
8812 | 8826 | ||
8813 | err = g->ops.gr.lock_down_sm(g, gpc, tpc, sm, | 8827 | err = g->ops.gr.lock_down_sm(g, gpc, tpc, sm, |
8814 | global_mask, false); | 8828 | global_mask, false); |
8815 | if (err) { | 8829 | if (err != 0) { |
8816 | nvgpu_err(g, "sm did not lock down!"); | 8830 | nvgpu_err(g, "sm did not lock down!"); |
8817 | return err; | 8831 | return err; |
8818 | } | 8832 | } |
diff --git a/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h
index 32a30d78..d832d905 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h
@@ -239,23 +239,23 @@ enum ctxsw_addr_type { | |||
239 | CTXSW_ADDR_TYPE_FBP = 10, | 239 | CTXSW_ADDR_TYPE_FBP = 10, |
240 | }; | 240 | }; |
241 | 241 | ||
242 | #define PRI_BROADCAST_FLAGS_NONE 0 | 242 | #define PRI_BROADCAST_FLAGS_NONE 0U |
243 | #define PRI_BROADCAST_FLAGS_GPC BIT(0) | 243 | #define PRI_BROADCAST_FLAGS_GPC BIT32(0) |
244 | #define PRI_BROADCAST_FLAGS_TPC BIT(1) | 244 | #define PRI_BROADCAST_FLAGS_TPC BIT32(1) |
245 | #define PRI_BROADCAST_FLAGS_BE BIT(2) | 245 | #define PRI_BROADCAST_FLAGS_BE BIT32(2) |
246 | #define PRI_BROADCAST_FLAGS_PPC BIT(3) | 246 | #define PRI_BROADCAST_FLAGS_PPC BIT32(3) |
247 | #define PRI_BROADCAST_FLAGS_LTCS BIT(4) | 247 | #define PRI_BROADCAST_FLAGS_LTCS BIT32(4) |
248 | #define PRI_BROADCAST_FLAGS_LTSS BIT(5) | 248 | #define PRI_BROADCAST_FLAGS_LTSS BIT32(5) |
249 | #define PRI_BROADCAST_FLAGS_FBPA BIT(6) | 249 | #define PRI_BROADCAST_FLAGS_FBPA BIT32(6) |
250 | #define PRI_BROADCAST_FLAGS_EGPC BIT(7) | 250 | #define PRI_BROADCAST_FLAGS_EGPC BIT32(7) |
251 | #define PRI_BROADCAST_FLAGS_ETPC BIT(8) | 251 | #define PRI_BROADCAST_FLAGS_ETPC BIT32(8) |
252 | #define PRI_BROADCAST_FLAGS_PMMGPC BIT(9) | 252 | #define PRI_BROADCAST_FLAGS_PMMGPC BIT32(9) |
253 | #define PRI_BROADCAST_FLAGS_PMM_GPCS BIT(10) | 253 | #define PRI_BROADCAST_FLAGS_PMM_GPCS BIT32(10) |
254 | #define PRI_BROADCAST_FLAGS_PMM_GPCGS_GPCTPCA BIT(11) | 254 | #define PRI_BROADCAST_FLAGS_PMM_GPCGS_GPCTPCA BIT32(11) |
255 | #define PRI_BROADCAST_FLAGS_PMM_GPCGS_GPCTPCB BIT(12) | 255 | #define PRI_BROADCAST_FLAGS_PMM_GPCGS_GPCTPCB BIT32(12) |
256 | #define PRI_BROADCAST_FLAGS_PMMFBP BIT(13) | 256 | #define PRI_BROADCAST_FLAGS_PMMFBP BIT32(13) |
257 | #define PRI_BROADCAST_FLAGS_PMM_FBPS BIT(14) | 257 | #define PRI_BROADCAST_FLAGS_PMM_FBPS BIT32(14) |
258 | #define PRI_BROADCAST_FLAGS_PMM_FBPGS_LTC BIT(15) | 258 | #define PRI_BROADCAST_FLAGS_PMM_FBPGS_LTC BIT32(15) |
259 | #define PRI_BROADCAST_FLAGS_PMM_FBPGS_ROP BIT(16) | 259 | #define PRI_BROADCAST_FLAGS_PMM_FBPGS_ROP BIT32(16) |
260 | 260 | ||
261 | #endif /* GR_PRI_GK20A_H */ | 261 | #endif /* GR_PRI_GK20A_H */ |
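The header change swaps the kernel's BIT() for BIT32() in the broadcast-flag definitions. BIT(n) expands to an unsigned long shift (1UL << n), whose width varies with the architecture, whereas BIT32() pins each flag to an unsigned 32-bit value so masks built from them have one fixed essential type. Typical usage, matching the flag tests added in gr_gk20a.c above:

    u32 flags = PRI_BROADCAST_FLAGS_GPC | PRI_BROADCAST_FLAGS_TPC;

    if ((flags & PRI_BROADCAST_FLAGS_TPC) != 0U) {
            /* TPC broadcast requested */
    }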