summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c
diff options
context:
space:
mode:
author: Deepak Goyal <dgoyal@nvidia.com> 2017-09-22 06:06:36 -0400
committer: mobile promotions <svcmobile_promotions@nvidia.com> 2017-10-04 05:27:12 -0400
commit: 192afccf7c9982ea47b46fd4b7ace4114ff7b45e (patch)
tree: 755137eb73ca1b70baf7ac6cf4fa540c57c6d459 /drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c
parent: f63f96866dd3cd696e37cf7e83d419cca4f965fa (diff)
gpu: nvgpu: gv11b: skip clk gating prog for pre-si
For pre-silicon platforms, clock gating should be skipped as it is not supported. Added new flags "can_"x"lcg" to check platform capability before programming SLCG,BLCG and ELCG. Bug 200314250 Change-Id: Iec7564b00b988cdd50a02f3130662727839c5047 Signed-off-by: Deepak Goyal <dgoyal@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1566251 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c')
-rw-r--r-- drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c | 49
1 files changed, 25 insertions, 24 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c b/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c
index b9953221..ff4880c4 100644
--- a/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c
+++ b/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c
@@ -27,6 +27,7 @@
27 27
28#include <linux/types.h> 28#include <linux/types.h>
29#include "gv11b_gating_reglist.h" 29#include "gv11b_gating_reglist.h"
30#include <nvgpu/enabled.h>
30 31
31struct gating_desc { 32struct gating_desc {
32 u32 addr; 33 u32 addr;
@@ -282,7 +283,7 @@ void gv11b_slcg_bus_load_gating_prod(struct gk20a *g,
282 u32 i; 283 u32 i;
283 u32 size = sizeof(gv11b_slcg_bus) / sizeof(struct gating_desc); 284 u32 size = sizeof(gv11b_slcg_bus) / sizeof(struct gating_desc);
284 285
285 if (!g->slcg_enabled) 286 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
286 return; 287 return;
287 288
288 for (i = 0; i < size; i++) { 289 for (i = 0; i < size; i++) {
@@ -301,7 +302,7 @@ void gv11b_slcg_ce2_load_gating_prod(struct gk20a *g,
301 u32 i; 302 u32 i;
302 u32 size = sizeof(gv11b_slcg_ce2) / sizeof(struct gating_desc); 303 u32 size = sizeof(gv11b_slcg_ce2) / sizeof(struct gating_desc);
303 304
304 if (!g->slcg_enabled) 305 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
305 return; 306 return;
306 307
307 for (i = 0; i < size; i++) { 308 for (i = 0; i < size; i++) {
@@ -320,7 +321,7 @@ void gv11b_slcg_chiplet_load_gating_prod(struct gk20a *g,
320 u32 i; 321 u32 i;
321 u32 size = sizeof(gv11b_slcg_chiplet) / sizeof(struct gating_desc); 322 u32 size = sizeof(gv11b_slcg_chiplet) / sizeof(struct gating_desc);
322 323
323 if (!g->slcg_enabled) 324 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
324 return; 325 return;
325 326
326 for (i = 0; i < size; i++) { 327 for (i = 0; i < size; i++) {
@@ -344,7 +345,7 @@ void gv11b_slcg_fb_load_gating_prod(struct gk20a *g,
344 u32 i; 345 u32 i;
345 u32 size = sizeof(gv11b_slcg_fb) / sizeof(struct gating_desc); 346 u32 size = sizeof(gv11b_slcg_fb) / sizeof(struct gating_desc);
346 347
347 if (!g->slcg_enabled) 348 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
348 return; 349 return;
349 350
350 for (i = 0; i < size; i++) { 351 for (i = 0; i < size; i++) {
@@ -363,7 +364,7 @@ void gv11b_slcg_fifo_load_gating_prod(struct gk20a *g,
363 u32 i; 364 u32 i;
364 u32 size = sizeof(gv11b_slcg_fifo) / sizeof(struct gating_desc); 365 u32 size = sizeof(gv11b_slcg_fifo) / sizeof(struct gating_desc);
365 366
366 if (!g->slcg_enabled) 367 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
367 return; 368 return;
368 369
369 for (i = 0; i < size; i++) { 370 for (i = 0; i < size; i++) {
@@ -382,7 +383,7 @@ void gr_gv11b_slcg_gr_load_gating_prod(struct gk20a *g,
382 u32 i; 383 u32 i;
383 u32 size = sizeof(gv11b_slcg_gr) / sizeof(struct gating_desc); 384 u32 size = sizeof(gv11b_slcg_gr) / sizeof(struct gating_desc);
384 385
385 if (!g->slcg_enabled) 386 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
386 return; 387 return;
387 388
388 for (i = 0; i < size; i++) { 389 for (i = 0; i < size; i++) {
@@ -401,7 +402,7 @@ void ltc_gv11b_slcg_ltc_load_gating_prod(struct gk20a *g,
401 u32 i; 402 u32 i;
402 u32 size = sizeof(gv11b_slcg_ltc) / sizeof(struct gating_desc); 403 u32 size = sizeof(gv11b_slcg_ltc) / sizeof(struct gating_desc);
403 404
404 if (!g->slcg_enabled) 405 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
405 return; 406 return;
406 407
407 for (i = 0; i < size; i++) { 408 for (i = 0; i < size; i++) {
@@ -420,7 +421,7 @@ void gv11b_slcg_perf_load_gating_prod(struct gk20a *g,
420 u32 i; 421 u32 i;
421 u32 size = sizeof(gv11b_slcg_perf) / sizeof(struct gating_desc); 422 u32 size = sizeof(gv11b_slcg_perf) / sizeof(struct gating_desc);
422 423
423 if (!g->slcg_enabled) 424 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
424 return; 425 return;
425 426
426 for (i = 0; i < size; i++) { 427 for (i = 0; i < size; i++) {
@@ -439,7 +440,7 @@ void gv11b_slcg_priring_load_gating_prod(struct gk20a *g,
439 u32 i; 440 u32 i;
440 u32 size = sizeof(gv11b_slcg_priring) / sizeof(struct gating_desc); 441 u32 size = sizeof(gv11b_slcg_priring) / sizeof(struct gating_desc);
441 442
442 if (!g->slcg_enabled) 443 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
443 return; 444 return;
444 445
445 for (i = 0; i < size; i++) { 446 for (i = 0; i < size; i++) {
@@ -458,7 +459,7 @@ void gv11b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
458 u32 i; 459 u32 i;
459 u32 size = sizeof(gv11b_slcg_pwr_csb) / sizeof(struct gating_desc); 460 u32 size = sizeof(gv11b_slcg_pwr_csb) / sizeof(struct gating_desc);
460 461
461 if (!g->slcg_enabled) 462 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
462 return; 463 return;
463 464
464 for (i = 0; i < size; i++) { 465 for (i = 0; i < size; i++) {
@@ -477,7 +478,7 @@ void gv11b_slcg_pmu_load_gating_prod(struct gk20a *g,
477 u32 i; 478 u32 i;
478 u32 size = sizeof(gv11b_slcg_pmu) / sizeof(struct gating_desc); 479 u32 size = sizeof(gv11b_slcg_pmu) / sizeof(struct gating_desc);
479 480
480 if (!g->slcg_enabled) 481 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
481 return; 482 return;
482 483
483 for (i = 0; i < size; i++) { 484 for (i = 0; i < size; i++) {
@@ -496,7 +497,7 @@ void gv11b_slcg_therm_load_gating_prod(struct gk20a *g,
496 u32 i; 497 u32 i;
497 u32 size = sizeof(gv11b_slcg_therm) / sizeof(struct gating_desc); 498 u32 size = sizeof(gv11b_slcg_therm) / sizeof(struct gating_desc);
498 499
499 if (!g->slcg_enabled) 500 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
500 return; 501 return;
501 502
502 for (i = 0; i < size; i++) { 503 for (i = 0; i < size; i++) {
@@ -515,7 +516,7 @@ void gv11b_slcg_xbar_load_gating_prod(struct gk20a *g,
515 u32 i; 516 u32 i;
516 u32 size = sizeof(gv11b_slcg_xbar) / sizeof(struct gating_desc); 517 u32 size = sizeof(gv11b_slcg_xbar) / sizeof(struct gating_desc);
517 518
518 if (!g->slcg_enabled) 519 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
519 return; 520 return;
520 521
521 for (i = 0; i < size; i++) { 522 for (i = 0; i < size; i++) {
@@ -534,7 +535,7 @@ void gv11b_blcg_bus_load_gating_prod(struct gk20a *g,
534 u32 i; 535 u32 i;
535 u32 size = sizeof(gv11b_blcg_bus) / sizeof(struct gating_desc); 536 u32 size = sizeof(gv11b_blcg_bus) / sizeof(struct gating_desc);
536 537
537 if (!g->blcg_enabled) 538 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
538 return; 539 return;
539 540
540 for (i = 0; i < size; i++) { 541 for (i = 0; i < size; i++) {
@@ -553,7 +554,7 @@ void gv11b_blcg_ce_load_gating_prod(struct gk20a *g,
553 u32 i; 554 u32 i;
554 u32 size = sizeof(gv11b_blcg_ce) / sizeof(struct gating_desc); 555 u32 size = sizeof(gv11b_blcg_ce) / sizeof(struct gating_desc);
555 556
556 if (!g->blcg_enabled) 557 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
557 return; 558 return;
558 559
559 for (i = 0; i < size; i++) { 560 for (i = 0; i < size; i++) {
@@ -572,7 +573,7 @@ void gv11b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
572 u32 i; 573 u32 i;
573 u32 size = sizeof(gv11b_blcg_ctxsw_prog) / sizeof(struct gating_desc); 574 u32 size = sizeof(gv11b_blcg_ctxsw_prog) / sizeof(struct gating_desc);
574 575
575 if (!g->blcg_enabled) 576 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
576 return; 577 return;
577 578
578 for (i = 0; i < size; i++) { 579 for (i = 0; i < size; i++) {
@@ -591,7 +592,7 @@ void gv11b_blcg_fb_load_gating_prod(struct gk20a *g,
591 u32 i; 592 u32 i;
592 u32 size = sizeof(gv11b_blcg_fb) / sizeof(struct gating_desc); 593 u32 size = sizeof(gv11b_blcg_fb) / sizeof(struct gating_desc);
593 594
594 if (!g->blcg_enabled) 595 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
595 return; 596 return;
596 597
597 for (i = 0; i < size; i++) { 598 for (i = 0; i < size; i++) {
@@ -610,7 +611,7 @@ void gv11b_blcg_fifo_load_gating_prod(struct gk20a *g,
610 u32 i; 611 u32 i;
611 u32 size = sizeof(gv11b_blcg_fifo) / sizeof(struct gating_desc); 612 u32 size = sizeof(gv11b_blcg_fifo) / sizeof(struct gating_desc);
612 613
613 if (!g->blcg_enabled) 614 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
614 return; 615 return;
615 616
616 for (i = 0; i < size; i++) { 617 for (i = 0; i < size; i++) {
@@ -629,7 +630,7 @@ void gv11b_blcg_gr_load_gating_prod(struct gk20a *g,
629 u32 i; 630 u32 i;
630 u32 size = sizeof(gv11b_blcg_gr) / sizeof(struct gating_desc); 631 u32 size = sizeof(gv11b_blcg_gr) / sizeof(struct gating_desc);
631 632
632 if (!g->blcg_enabled) 633 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
633 return; 634 return;
634 635
635 for (i = 0; i < size; i++) { 636 for (i = 0; i < size; i++) {
@@ -648,7 +649,7 @@ void gv11b_blcg_ltc_load_gating_prod(struct gk20a *g,
648 u32 i; 649 u32 i;
649 u32 size = sizeof(gv11b_blcg_ltc) / sizeof(struct gating_desc); 650 u32 size = sizeof(gv11b_blcg_ltc) / sizeof(struct gating_desc);
650 651
651 if (!g->blcg_enabled) 652 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
652 return; 653 return;
653 654
654 for (i = 0; i < size; i++) { 655 for (i = 0; i < size; i++) {
@@ -667,7 +668,7 @@ void gv11b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
667 u32 i; 668 u32 i;
668 u32 size = sizeof(gv11b_blcg_pwr_csb) / sizeof(struct gating_desc); 669 u32 size = sizeof(gv11b_blcg_pwr_csb) / sizeof(struct gating_desc);
669 670
670 if (!g->blcg_enabled) 671 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
671 return; 672 return;
672 673
673 for (i = 0; i < size; i++) { 674 for (i = 0; i < size; i++) {
@@ -686,7 +687,7 @@ void gv11b_blcg_pmu_load_gating_prod(struct gk20a *g,
686 u32 i; 687 u32 i;
687 u32 size = sizeof(gv11b_blcg_pmu) / sizeof(struct gating_desc); 688 u32 size = sizeof(gv11b_blcg_pmu) / sizeof(struct gating_desc);
688 689
689 if (!g->blcg_enabled) 690 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
690 return; 691 return;
691 692
692 for (i = 0; i < size; i++) { 693 for (i = 0; i < size; i++) {
@@ -705,7 +706,7 @@ void gv11b_blcg_xbar_load_gating_prod(struct gk20a *g,
705 u32 i; 706 u32 i;
706 u32 size = sizeof(gv11b_blcg_xbar) / sizeof(struct gating_desc); 707 u32 size = sizeof(gv11b_blcg_xbar) / sizeof(struct gating_desc);
707 708
708 if (!g->blcg_enabled) 709 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
709 return; 710 return;
710 711
711 for (i = 0; i < size; i++) { 712 for (i = 0; i < size; i++) {
@@ -724,7 +725,7 @@ void gr_gv11b_pg_gr_load_gating_prod(struct gk20a *g,
724 u32 i; 725 u32 i;
725 u32 size = sizeof(gv11b_pg_gr) / sizeof(struct gating_desc); 726 u32 size = sizeof(gv11b_pg_gr) / sizeof(struct gating_desc);
726 727
727 if (!g->blcg_enabled) 728 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
728 return; 729 return;
729 730
730 for (i = 0; i < size; i++) { 731 for (i = 0; i < size; i++) {