path: root/drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c
author	Deepak Goyal <dgoyal@nvidia.com>	2017-09-22 02:38:10 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-10-04 05:24:30 -0400
commit	0e8aee1c1a38abbc2dccf3f604a9843cf38071e0 (patch)
tree	d7da679255e79a3c48041af1e78bc8d7374d47d2 /drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c
parent	edb116661348f1bc843849cdcc318fa47cf9724a (diff)
gpu: nvgpu: skip clk gating prog for sim/emu.
For Simulation/Emulation platforms, clock gating should be skipped as it is not supported. Added new flags "can_"X"lcg" to check platform capability before doing SLCG, BLCG and ELCG.

Bug 200314250

Change-Id: I4124d444a77a4c06df8c1d82c6038bfd457f3db0
Signed-off-by: Deepak Goyal <dgoyal@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1566049
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
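The change itself is mechanical: every gp106 *_load_gating_prod() helper in this file gains an early return keyed on the new capability flags, so no gating registers are written on platforms where the feature is unsupported. Below is a minimal standalone sketch of that guard pattern; struct gk20a's layout, the flag indices and the nvgpu_is_enabled() body shown here are illustrative stand-ins, not the real nvgpu definitions.

/*
 * Minimal sketch of the guard added by this patch, under stand-in
 * definitions: the real struct gk20a, flag values and nvgpu_is_enabled()
 * live elsewhere in the nvgpu tree and differ from what is shown here.
 */
#include <stdbool.h>
#include <stdio.h>

#define NVGPU_GPU_CAN_SLCG 0	/* stand-in flag index */
#define NVGPU_GPU_CAN_BLCG 1	/* stand-in flag index */

struct gk20a {
	bool enabled_flags[8];	/* stand-in for the per-GPU flag set */
};

static bool nvgpu_is_enabled(struct gk20a *g, int flag)
{
	return g->enabled_flags[flag];
}

/* Mirrors the shape of the gp106_slcg_*_load_gating_prod() helpers. */
static void slcg_load_gating_prod(struct gk20a *g, bool prod)
{
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
		return;		/* sim/emu: SLCG unsupported, skip the reg writes */

	printf("programming SLCG %s values\n", prod ? "prod" : "disable");
}

int main(void)
{
	struct gk20a sim = { .enabled_flags = { false } };
	struct gk20a silicon = { .enabled_flags = { [NVGPU_GPU_CAN_SLCG] = true } };

	slcg_load_gating_prod(&sim, true);	/* returns early, no writes */
	slcg_load_gating_prod(&silicon, true);	/* programs prod values */
	return 0;
}

The register loops in the real helpers are untouched; only the early return is new, which keeps the diff below to a four-line addition per function.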
Diffstat (limited to 'drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c')
-rw-r--r--	drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c	85
1 file changed, 85 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c b/drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c
index 5a634313..169a1fee 100644
--- a/drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c
+++ b/drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c
@@ -26,6 +26,7 @@
 #define __gp106_gating_reglist_h__
 
 #include "gp106_gating_reglist.h"
+#include <nvgpu/enabled.h>
 
 struct gating_desc {
 	u32 addr;
@@ -276,6 +277,10 @@ void gp106_slcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_bus[i].addr,
@@ -291,6 +296,10 @@ void gp106_slcg_ce2_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_ce2) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_ce2[i].addr,
@@ -306,6 +315,10 @@ void gp106_slcg_chiplet_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_chiplet) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_chiplet[i].addr,
@@ -326,6 +339,10 @@ void gp106_slcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_fb[i].addr,
@@ -341,6 +358,10 @@ void gp106_slcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_fifo[i].addr,
@@ -356,6 +377,10 @@ void gr_gp106_slcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_gr[i].addr,
@@ -371,6 +396,10 @@ void ltc_gp106_slcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_ltc[i].addr,
@@ -386,6 +415,10 @@ void gp106_slcg_perf_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_perf) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_perf[i].addr,
@@ -401,6 +434,10 @@ void gp106_slcg_priring_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_priring) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_priring[i].addr,
@@ -416,6 +453,10 @@ void gp106_slcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_pmu[i].addr,
@@ -431,6 +472,10 @@ void gp106_slcg_therm_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_therm) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_therm[i].addr,
@@ -446,6 +491,10 @@ void gp106_slcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_xbar[i].addr,
@@ -461,6 +510,10 @@ void gp106_blcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_bus[i].addr,
@@ -476,6 +529,10 @@ void gp106_blcg_ce_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_ce) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_ce[i].addr,
@@ -491,6 +548,10 @@ void gp106_blcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_fb[i].addr,
@@ -506,6 +567,10 @@ void gp106_blcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_fifo[i].addr,
@@ -521,6 +586,10 @@ void gp106_blcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_gr[i].addr,
@@ -536,6 +605,10 @@ void gp106_blcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_ltc[i].addr,
@@ -551,6 +624,10 @@ void gp106_blcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_pmu[i].addr,
@@ -566,6 +643,10 @@ void gp106_blcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_xbar[i].addr,
@@ -581,6 +662,10 @@ void gr_gp106_pg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_pg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_pg_gr[i].addr,
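For context only (this wiring is not part of the file above): the per-platform "can_"X"lcg" capabilities named in the commit message would be consulted during GPU init to populate the flag set that nvgpu_is_enabled() reads. A hedged sketch of that translation, again with stand-in names (nvgpu_platform, set_enabled() and the flag indices below are hypothetical, not the real nvgpu API):

/*
 * Hypothetical sketch: folding platform "can_*lcg" capabilities into the
 * per-GPU flag set at probe time, so the gating-reglist helpers above can
 * bail out on simulation/emulation. All names here are stand-ins.
 */
#include <stdbool.h>

enum { CAN_SLCG, CAN_BLCG, CAN_ELCG, FLAG_MAX };

struct nvgpu_platform {		/* stand-in for the platform data */
	bool can_slcg;
	bool can_blcg;
	bool can_elcg;
};

struct gpu {			/* stand-in for struct gk20a */
	bool flags[FLAG_MAX];
};

static void set_enabled(struct gpu *g, int flag, bool state)
{
	g->flags[flag] = state;
}

/*
 * On sim/emu platforms all three capabilities stay false, so every
 * *_load_gating_prod() call above returns before touching hardware.
 */
void probe_clk_gating_caps(struct gpu *g, const struct nvgpu_platform *p)
{
	set_enabled(g, CAN_SLCG, p->can_slcg);
	set_enabled(g, CAN_BLCG, p->can_blcg);
	set_enabled(g, CAN_ELCG, p->can_elcg);
}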