summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c
diff options
context:
space:
mode:
authorDeepak Goyal <dgoyal@nvidia.com>2017-09-22 02:38:10 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-10-04 05:24:30 -0400
commit0e8aee1c1a38abbc2dccf3f604a9843cf38071e0 (patch)
treed7da679255e79a3c48041af1e78bc8d7374d47d2 /drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c
parentedb116661348f1bc843849cdcc318fa47cf9724a (diff)
gpu: nvgpu: skip clk gating prog for sim/emu.
For Simulation/Emulation platforms, clock gating should be skipped as it is not supported. Added new flags "can_<x>lcg" (where <x> is s, b, or e) to check platform capability before doing SLCG, BLCG and ELCG. Bug 200314250 Change-Id: I4124d444a77a4c06df8c1d82c6038bfd457f3db0 Signed-off-by: Deepak Goyal <dgoyal@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1566049 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c')
-rw-r--r--drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c99
1 file changed, 98 insertions, 1 deletion
diff --git a/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c b/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c
index 473c97f3..944fa741 100644
--- a/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c
+++ b/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
26#define __gp10b_gating_reglist_h__ 26#define __gp10b_gating_reglist_h__
27 27
28#include "gp10b_gating_reglist.h" 28#include "gp10b_gating_reglist.h"
29#include <nvgpu/enabled.h>
29 30
30struct gating_desc { 31struct gating_desc {
31 u32 addr; 32 u32 addr;
@@ -281,6 +282,10 @@ void gp10b_slcg_bus_load_gating_prod(struct gk20a *g,
281{ 282{
282 u32 i; 283 u32 i;
283 u32 size = sizeof(gp10b_slcg_bus) / sizeof(struct gating_desc); 284 u32 size = sizeof(gp10b_slcg_bus) / sizeof(struct gating_desc);
285
286 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
287 return;
288
284 for (i = 0; i < size; i++) { 289 for (i = 0; i < size; i++) {
285 if (prod) 290 if (prod)
286 gk20a_writel(g, gp10b_slcg_bus[i].addr, 291 gk20a_writel(g, gp10b_slcg_bus[i].addr,
@@ -296,6 +301,10 @@ void gp10b_slcg_ce2_load_gating_prod(struct gk20a *g,
296{ 301{
297 u32 i; 302 u32 i;
298 u32 size = sizeof(gp10b_slcg_ce2) / sizeof(struct gating_desc); 303 u32 size = sizeof(gp10b_slcg_ce2) / sizeof(struct gating_desc);
304
305 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
306 return;
307
299 for (i = 0; i < size; i++) { 308 for (i = 0; i < size; i++) {
300 if (prod) 309 if (prod)
301 gk20a_writel(g, gp10b_slcg_ce2[i].addr, 310 gk20a_writel(g, gp10b_slcg_ce2[i].addr,
@@ -311,6 +320,10 @@ void gp10b_slcg_chiplet_load_gating_prod(struct gk20a *g,
311{ 320{
312 u32 i; 321 u32 i;
313 u32 size = sizeof(gp10b_slcg_chiplet) / sizeof(struct gating_desc); 322 u32 size = sizeof(gp10b_slcg_chiplet) / sizeof(struct gating_desc);
323
324 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
325 return;
326
314 for (i = 0; i < size; i++) { 327 for (i = 0; i < size; i++) {
315 if (prod) 328 if (prod)
316 gk20a_writel(g, gp10b_slcg_chiplet[i].addr, 329 gk20a_writel(g, gp10b_slcg_chiplet[i].addr,
@@ -331,6 +344,10 @@ void gp10b_slcg_fb_load_gating_prod(struct gk20a *g,
331{ 344{
332 u32 i; 345 u32 i;
333 u32 size = sizeof(gp10b_slcg_fb) / sizeof(struct gating_desc); 346 u32 size = sizeof(gp10b_slcg_fb) / sizeof(struct gating_desc);
347
348 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
349 return;
350
334 for (i = 0; i < size; i++) { 351 for (i = 0; i < size; i++) {
335 if (prod) 352 if (prod)
336 gk20a_writel(g, gp10b_slcg_fb[i].addr, 353 gk20a_writel(g, gp10b_slcg_fb[i].addr,
@@ -346,6 +363,10 @@ void gp10b_slcg_fifo_load_gating_prod(struct gk20a *g,
346{ 363{
347 u32 i; 364 u32 i;
348 u32 size = sizeof(gp10b_slcg_fifo) / sizeof(struct gating_desc); 365 u32 size = sizeof(gp10b_slcg_fifo) / sizeof(struct gating_desc);
366
367 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
368 return;
369
349 for (i = 0; i < size; i++) { 370 for (i = 0; i < size; i++) {
350 if (prod) 371 if (prod)
351 gk20a_writel(g, gp10b_slcg_fifo[i].addr, 372 gk20a_writel(g, gp10b_slcg_fifo[i].addr,
@@ -361,6 +382,10 @@ void gr_gp10b_slcg_gr_load_gating_prod(struct gk20a *g,
361{ 382{
362 u32 i; 383 u32 i;
363 u32 size = sizeof(gp10b_slcg_gr) / sizeof(struct gating_desc); 384 u32 size = sizeof(gp10b_slcg_gr) / sizeof(struct gating_desc);
385
386 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
387 return;
388
364 for (i = 0; i < size; i++) { 389 for (i = 0; i < size; i++) {
365 if (prod) 390 if (prod)
366 gk20a_writel(g, gp10b_slcg_gr[i].addr, 391 gk20a_writel(g, gp10b_slcg_gr[i].addr,
@@ -376,6 +401,10 @@ void ltc_gp10b_slcg_ltc_load_gating_prod(struct gk20a *g,
376{ 401{
377 u32 i; 402 u32 i;
378 u32 size = sizeof(gp10b_slcg_ltc) / sizeof(struct gating_desc); 403 u32 size = sizeof(gp10b_slcg_ltc) / sizeof(struct gating_desc);
404
405 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
406 return;
407
379 for (i = 0; i < size; i++) { 408 for (i = 0; i < size; i++) {
380 if (prod) 409 if (prod)
381 gk20a_writel(g, gp10b_slcg_ltc[i].addr, 410 gk20a_writel(g, gp10b_slcg_ltc[i].addr,
@@ -391,6 +420,10 @@ void gp10b_slcg_perf_load_gating_prod(struct gk20a *g,
391{ 420{
392 u32 i; 421 u32 i;
393 u32 size = sizeof(gp10b_slcg_perf) / sizeof(struct gating_desc); 422 u32 size = sizeof(gp10b_slcg_perf) / sizeof(struct gating_desc);
423
424 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
425 return;
426
394 for (i = 0; i < size; i++) { 427 for (i = 0; i < size; i++) {
395 if (prod) 428 if (prod)
396 gk20a_writel(g, gp10b_slcg_perf[i].addr, 429 gk20a_writel(g, gp10b_slcg_perf[i].addr,
@@ -406,6 +439,10 @@ void gp10b_slcg_priring_load_gating_prod(struct gk20a *g,
406{ 439{
407 u32 i; 440 u32 i;
408 u32 size = sizeof(gp10b_slcg_priring) / sizeof(struct gating_desc); 441 u32 size = sizeof(gp10b_slcg_priring) / sizeof(struct gating_desc);
442
443 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
444 return;
445
409 for (i = 0; i < size; i++) { 446 for (i = 0; i < size; i++) {
410 if (prod) 447 if (prod)
411 gk20a_writel(g, gp10b_slcg_priring[i].addr, 448 gk20a_writel(g, gp10b_slcg_priring[i].addr,
@@ -421,6 +458,10 @@ void gp10b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
421{ 458{
422 u32 i; 459 u32 i;
423 u32 size = sizeof(gp10b_slcg_pwr_csb) / sizeof(struct gating_desc); 460 u32 size = sizeof(gp10b_slcg_pwr_csb) / sizeof(struct gating_desc);
461
462 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
463 return;
464
424 for (i = 0; i < size; i++) { 465 for (i = 0; i < size; i++) {
425 if (prod) 466 if (prod)
426 gk20a_writel(g, gp10b_slcg_pwr_csb[i].addr, 467 gk20a_writel(g, gp10b_slcg_pwr_csb[i].addr,
@@ -436,6 +477,10 @@ void gp10b_slcg_pmu_load_gating_prod(struct gk20a *g,
436{ 477{
437 u32 i; 478 u32 i;
438 u32 size = sizeof(gp10b_slcg_pmu) / sizeof(struct gating_desc); 479 u32 size = sizeof(gp10b_slcg_pmu) / sizeof(struct gating_desc);
480
481 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
482 return;
483
439 for (i = 0; i < size; i++) { 484 for (i = 0; i < size; i++) {
440 if (prod) 485 if (prod)
441 gk20a_writel(g, gp10b_slcg_pmu[i].addr, 486 gk20a_writel(g, gp10b_slcg_pmu[i].addr,
@@ -451,6 +496,10 @@ void gp10b_slcg_therm_load_gating_prod(struct gk20a *g,
451{ 496{
452 u32 i; 497 u32 i;
453 u32 size = sizeof(gp10b_slcg_therm) / sizeof(struct gating_desc); 498 u32 size = sizeof(gp10b_slcg_therm) / sizeof(struct gating_desc);
499
500 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
501 return;
502
454 for (i = 0; i < size; i++) { 503 for (i = 0; i < size; i++) {
455 if (prod) 504 if (prod)
456 gk20a_writel(g, gp10b_slcg_therm[i].addr, 505 gk20a_writel(g, gp10b_slcg_therm[i].addr,
@@ -466,6 +515,10 @@ void gp10b_slcg_xbar_load_gating_prod(struct gk20a *g,
466{ 515{
467 u32 i; 516 u32 i;
468 u32 size = sizeof(gp10b_slcg_xbar) / sizeof(struct gating_desc); 517 u32 size = sizeof(gp10b_slcg_xbar) / sizeof(struct gating_desc);
518
519 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
520 return;
521
469 for (i = 0; i < size; i++) { 522 for (i = 0; i < size; i++) {
470 if (prod) 523 if (prod)
471 gk20a_writel(g, gp10b_slcg_xbar[i].addr, 524 gk20a_writel(g, gp10b_slcg_xbar[i].addr,
@@ -481,6 +534,10 @@ void gp10b_blcg_bus_load_gating_prod(struct gk20a *g,
481{ 534{
482 u32 i; 535 u32 i;
483 u32 size = sizeof(gp10b_blcg_bus) / sizeof(struct gating_desc); 536 u32 size = sizeof(gp10b_blcg_bus) / sizeof(struct gating_desc);
537
538 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
539 return;
540
484 for (i = 0; i < size; i++) { 541 for (i = 0; i < size; i++) {
485 if (prod) 542 if (prod)
486 gk20a_writel(g, gp10b_blcg_bus[i].addr, 543 gk20a_writel(g, gp10b_blcg_bus[i].addr,
@@ -496,6 +553,10 @@ void gp10b_blcg_ce_load_gating_prod(struct gk20a *g,
496{ 553{
497 u32 i; 554 u32 i;
498 u32 size = sizeof(gp10b_blcg_ce) / sizeof(struct gating_desc); 555 u32 size = sizeof(gp10b_blcg_ce) / sizeof(struct gating_desc);
556
557 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
558 return;
559
499 for (i = 0; i < size; i++) { 560 for (i = 0; i < size; i++) {
500 if (prod) 561 if (prod)
501 gk20a_writel(g, gp10b_blcg_ce[i].addr, 562 gk20a_writel(g, gp10b_blcg_ce[i].addr,
@@ -511,6 +572,10 @@ void gp10b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
511{ 572{
512 u32 i; 573 u32 i;
513 u32 size = sizeof(gp10b_blcg_ctxsw_prog) / sizeof(struct gating_desc); 574 u32 size = sizeof(gp10b_blcg_ctxsw_prog) / sizeof(struct gating_desc);
575
576 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
577 return;
578
514 for (i = 0; i < size; i++) { 579 for (i = 0; i < size; i++) {
515 if (prod) 580 if (prod)
516 gk20a_writel(g, gp10b_blcg_ctxsw_prog[i].addr, 581 gk20a_writel(g, gp10b_blcg_ctxsw_prog[i].addr,
@@ -526,6 +591,10 @@ void gp10b_blcg_fb_load_gating_prod(struct gk20a *g,
526{ 591{
527 u32 i; 592 u32 i;
528 u32 size = sizeof(gp10b_blcg_fb) / sizeof(struct gating_desc); 593 u32 size = sizeof(gp10b_blcg_fb) / sizeof(struct gating_desc);
594
595 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
596 return;
597
529 for (i = 0; i < size; i++) { 598 for (i = 0; i < size; i++) {
530 if (prod) 599 if (prod)
531 gk20a_writel(g, gp10b_blcg_fb[i].addr, 600 gk20a_writel(g, gp10b_blcg_fb[i].addr,
@@ -541,6 +610,10 @@ void gp10b_blcg_fifo_load_gating_prod(struct gk20a *g,
541{ 610{
542 u32 i; 611 u32 i;
543 u32 size = sizeof(gp10b_blcg_fifo) / sizeof(struct gating_desc); 612 u32 size = sizeof(gp10b_blcg_fifo) / sizeof(struct gating_desc);
613
614 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
615 return;
616
544 for (i = 0; i < size; i++) { 617 for (i = 0; i < size; i++) {
545 if (prod) 618 if (prod)
546 gk20a_writel(g, gp10b_blcg_fifo[i].addr, 619 gk20a_writel(g, gp10b_blcg_fifo[i].addr,
@@ -556,6 +629,10 @@ void gp10b_blcg_gr_load_gating_prod(struct gk20a *g,
556{ 629{
557 u32 i; 630 u32 i;
558 u32 size = sizeof(gp10b_blcg_gr) / sizeof(struct gating_desc); 631 u32 size = sizeof(gp10b_blcg_gr) / sizeof(struct gating_desc);
632
633 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
634 return;
635
559 for (i = 0; i < size; i++) { 636 for (i = 0; i < size; i++) {
560 if (prod) 637 if (prod)
561 gk20a_writel(g, gp10b_blcg_gr[i].addr, 638 gk20a_writel(g, gp10b_blcg_gr[i].addr,
@@ -571,6 +648,10 @@ void gp10b_blcg_ltc_load_gating_prod(struct gk20a *g,
571{ 648{
572 u32 i; 649 u32 i;
573 u32 size = sizeof(gp10b_blcg_ltc) / sizeof(struct gating_desc); 650 u32 size = sizeof(gp10b_blcg_ltc) / sizeof(struct gating_desc);
651
652 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
653 return;
654
574 for (i = 0; i < size; i++) { 655 for (i = 0; i < size; i++) {
575 if (prod) 656 if (prod)
576 gk20a_writel(g, gp10b_blcg_ltc[i].addr, 657 gk20a_writel(g, gp10b_blcg_ltc[i].addr,
@@ -586,6 +667,10 @@ void gp10b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
586{ 667{
587 u32 i; 668 u32 i;
588 u32 size = sizeof(gp10b_blcg_pwr_csb) / sizeof(struct gating_desc); 669 u32 size = sizeof(gp10b_blcg_pwr_csb) / sizeof(struct gating_desc);
670
671 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
672 return;
673
589 for (i = 0; i < size; i++) { 674 for (i = 0; i < size; i++) {
590 if (prod) 675 if (prod)
591 gk20a_writel(g, gp10b_blcg_pwr_csb[i].addr, 676 gk20a_writel(g, gp10b_blcg_pwr_csb[i].addr,
@@ -601,6 +686,10 @@ void gp10b_blcg_pmu_load_gating_prod(struct gk20a *g,
601{ 686{
602 u32 i; 687 u32 i;
603 u32 size = sizeof(gp10b_blcg_pmu) / sizeof(struct gating_desc); 688 u32 size = sizeof(gp10b_blcg_pmu) / sizeof(struct gating_desc);
689
690 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
691 return;
692
604 for (i = 0; i < size; i++) { 693 for (i = 0; i < size; i++) {
605 if (prod) 694 if (prod)
606 gk20a_writel(g, gp10b_blcg_pmu[i].addr, 695 gk20a_writel(g, gp10b_blcg_pmu[i].addr,
@@ -616,6 +705,10 @@ void gp10b_blcg_xbar_load_gating_prod(struct gk20a *g,
616{ 705{
617 u32 i; 706 u32 i;
618 u32 size = sizeof(gp10b_blcg_xbar) / sizeof(struct gating_desc); 707 u32 size = sizeof(gp10b_blcg_xbar) / sizeof(struct gating_desc);
708
709 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
710 return;
711
619 for (i = 0; i < size; i++) { 712 for (i = 0; i < size; i++) {
620 if (prod) 713 if (prod)
621 gk20a_writel(g, gp10b_blcg_xbar[i].addr, 714 gk20a_writel(g, gp10b_blcg_xbar[i].addr,
@@ -631,6 +724,10 @@ void gr_gp10b_pg_gr_load_gating_prod(struct gk20a *g,
631{ 724{
632 u32 i; 725 u32 i;
633 u32 size = sizeof(gp10b_pg_gr) / sizeof(struct gating_desc); 726 u32 size = sizeof(gp10b_pg_gr) / sizeof(struct gating_desc);
727
728 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
729 return;
730
634 for (i = 0; i < size; i++) { 731 for (i = 0; i < size; i++) {
635 if (prod) 732 if (prod)
636 gk20a_writel(g, gp10b_pg_gr[i].addr, 733 gk20a_writel(g, gp10b_pg_gr[i].addr,