diff options
author | Deepak Goyal <dgoyal@nvidia.com> | 2017-09-22 02:38:10 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-10-04 05:24:30 -0400 |
commit | 0e8aee1c1a38abbc2dccf3f604a9843cf38071e0 (patch) | |
tree | d7da679255e79a3c48041af1e78bc8d7374d47d2 /drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c | |
parent | edb116661348f1bc843849cdcc318fa47cf9724a (diff) |
gpu: nvgpu: skip clk gating prog for sim/emu.
For Simulation/Emulation platforms, clock gating
should be skipped as it is not supported.
Added new flags "can_Xlcg" (X = s/b/e) to check platform
capability before doing SLCG, BLCG and ELCG.
Bug 200314250
Change-Id: I4124d444a77a4c06df8c1d82c6038bfd457f3db0
Signed-off-by: Deepak Goyal <dgoyal@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1566049
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c')
-rw-r--r-- | drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c | 95 |
1 files changed, 94 insertions, 1 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c b/drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c index ca67c80a..0ebb2d0d 100644 --- a/drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c +++ b/drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved. | 2 | * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
@@ -26,6 +26,7 @@ | |||
26 | #define __gm20b_gating_reglist_h__ | 26 | #define __gm20b_gating_reglist_h__ |
27 | 27 | ||
28 | #include "gm20b_gating_reglist.h" | 28 | #include "gm20b_gating_reglist.h" |
29 | #include <nvgpu/enabled.h> | ||
29 | 30 | ||
30 | struct gating_desc { | 31 | struct gating_desc { |
31 | u32 addr; | 32 | u32 addr; |
@@ -290,6 +291,10 @@ void gm20b_slcg_bus_load_gating_prod(struct gk20a *g, | |||
290 | { | 291 | { |
291 | u32 i; | 292 | u32 i; |
292 | u32 size = sizeof(gm20b_slcg_bus) / sizeof(struct gating_desc); | 293 | u32 size = sizeof(gm20b_slcg_bus) / sizeof(struct gating_desc); |
294 | |||
295 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
296 | return; | ||
297 | |||
293 | for (i = 0; i < size; i++) { | 298 | for (i = 0; i < size; i++) { |
294 | if (prod) | 299 | if (prod) |
295 | gk20a_writel(g, gm20b_slcg_bus[i].addr, | 300 | gk20a_writel(g, gm20b_slcg_bus[i].addr, |
@@ -305,6 +310,10 @@ void gm20b_slcg_ce2_load_gating_prod(struct gk20a *g, | |||
305 | { | 310 | { |
306 | u32 i; | 311 | u32 i; |
307 | u32 size = sizeof(gm20b_slcg_ce2) / sizeof(struct gating_desc); | 312 | u32 size = sizeof(gm20b_slcg_ce2) / sizeof(struct gating_desc); |
313 | |||
314 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
315 | return; | ||
316 | |||
308 | for (i = 0; i < size; i++) { | 317 | for (i = 0; i < size; i++) { |
309 | if (prod) | 318 | if (prod) |
310 | gk20a_writel(g, gm20b_slcg_ce2[i].addr, | 319 | gk20a_writel(g, gm20b_slcg_ce2[i].addr, |
@@ -320,6 +329,10 @@ void gm20b_slcg_chiplet_load_gating_prod(struct gk20a *g, | |||
320 | { | 329 | { |
321 | u32 i; | 330 | u32 i; |
322 | u32 size = sizeof(gm20b_slcg_chiplet) / sizeof(struct gating_desc); | 331 | u32 size = sizeof(gm20b_slcg_chiplet) / sizeof(struct gating_desc); |
332 | |||
333 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
334 | return; | ||
335 | |||
323 | for (i = 0; i < size; i++) { | 336 | for (i = 0; i < size; i++) { |
324 | if (prod) | 337 | if (prod) |
325 | gk20a_writel(g, gm20b_slcg_chiplet[i].addr, | 338 | gk20a_writel(g, gm20b_slcg_chiplet[i].addr, |
@@ -340,6 +353,10 @@ void gm20b_slcg_fb_load_gating_prod(struct gk20a *g, | |||
340 | { | 353 | { |
341 | u32 i; | 354 | u32 i; |
342 | u32 size = sizeof(gm20b_slcg_fb) / sizeof(struct gating_desc); | 355 | u32 size = sizeof(gm20b_slcg_fb) / sizeof(struct gating_desc); |
356 | |||
357 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
358 | return; | ||
359 | |||
343 | for (i = 0; i < size; i++) { | 360 | for (i = 0; i < size; i++) { |
344 | if (prod) | 361 | if (prod) |
345 | gk20a_writel(g, gm20b_slcg_fb[i].addr, | 362 | gk20a_writel(g, gm20b_slcg_fb[i].addr, |
@@ -355,6 +372,10 @@ void gm20b_slcg_fifo_load_gating_prod(struct gk20a *g, | |||
355 | { | 372 | { |
356 | u32 i; | 373 | u32 i; |
357 | u32 size = sizeof(gm20b_slcg_fifo) / sizeof(struct gating_desc); | 374 | u32 size = sizeof(gm20b_slcg_fifo) / sizeof(struct gating_desc); |
375 | |||
376 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
377 | return; | ||
378 | |||
358 | for (i = 0; i < size; i++) { | 379 | for (i = 0; i < size; i++) { |
359 | if (prod) | 380 | if (prod) |
360 | gk20a_writel(g, gm20b_slcg_fifo[i].addr, | 381 | gk20a_writel(g, gm20b_slcg_fifo[i].addr, |
@@ -370,6 +391,10 @@ void gr_gm20b_slcg_gr_load_gating_prod(struct gk20a *g, | |||
370 | { | 391 | { |
371 | u32 i; | 392 | u32 i; |
372 | u32 size = sizeof(gm20b_slcg_gr) / sizeof(struct gating_desc); | 393 | u32 size = sizeof(gm20b_slcg_gr) / sizeof(struct gating_desc); |
394 | |||
395 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
396 | return; | ||
397 | |||
373 | for (i = 0; i < size; i++) { | 398 | for (i = 0; i < size; i++) { |
374 | if (prod) | 399 | if (prod) |
375 | gk20a_writel(g, gm20b_slcg_gr[i].addr, | 400 | gk20a_writel(g, gm20b_slcg_gr[i].addr, |
@@ -385,6 +410,10 @@ void ltc_gm20b_slcg_ltc_load_gating_prod(struct gk20a *g, | |||
385 | { | 410 | { |
386 | u32 i; | 411 | u32 i; |
387 | u32 size = sizeof(gm20b_slcg_ltc) / sizeof(struct gating_desc); | 412 | u32 size = sizeof(gm20b_slcg_ltc) / sizeof(struct gating_desc); |
413 | |||
414 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
415 | return; | ||
416 | |||
388 | for (i = 0; i < size; i++) { | 417 | for (i = 0; i < size; i++) { |
389 | if (prod) | 418 | if (prod) |
390 | gk20a_writel(g, gm20b_slcg_ltc[i].addr, | 419 | gk20a_writel(g, gm20b_slcg_ltc[i].addr, |
@@ -400,6 +429,10 @@ void gm20b_slcg_perf_load_gating_prod(struct gk20a *g, | |||
400 | { | 429 | { |
401 | u32 i; | 430 | u32 i; |
402 | u32 size = sizeof(gm20b_slcg_perf) / sizeof(struct gating_desc); | 431 | u32 size = sizeof(gm20b_slcg_perf) / sizeof(struct gating_desc); |
432 | |||
433 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
434 | return; | ||
435 | |||
403 | for (i = 0; i < size; i++) { | 436 | for (i = 0; i < size; i++) { |
404 | if (prod) | 437 | if (prod) |
405 | gk20a_writel(g, gm20b_slcg_perf[i].addr, | 438 | gk20a_writel(g, gm20b_slcg_perf[i].addr, |
@@ -415,6 +448,10 @@ void gm20b_slcg_priring_load_gating_prod(struct gk20a *g, | |||
415 | { | 448 | { |
416 | u32 i; | 449 | u32 i; |
417 | u32 size = sizeof(gm20b_slcg_priring) / sizeof(struct gating_desc); | 450 | u32 size = sizeof(gm20b_slcg_priring) / sizeof(struct gating_desc); |
451 | |||
452 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
453 | return; | ||
454 | |||
418 | for (i = 0; i < size; i++) { | 455 | for (i = 0; i < size; i++) { |
419 | if (prod) | 456 | if (prod) |
420 | gk20a_writel(g, gm20b_slcg_priring[i].addr, | 457 | gk20a_writel(g, gm20b_slcg_priring[i].addr, |
@@ -430,6 +467,10 @@ void gm20b_slcg_pwr_csb_load_gating_prod(struct gk20a *g, | |||
430 | { | 467 | { |
431 | u32 i; | 468 | u32 i; |
432 | u32 size = sizeof(gm20b_slcg_pwr_csb) / sizeof(struct gating_desc); | 469 | u32 size = sizeof(gm20b_slcg_pwr_csb) / sizeof(struct gating_desc); |
470 | |||
471 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
472 | return; | ||
473 | |||
433 | for (i = 0; i < size; i++) { | 474 | for (i = 0; i < size; i++) { |
434 | if (prod) | 475 | if (prod) |
435 | gk20a_writel(g, gm20b_slcg_pwr_csb[i].addr, | 476 | gk20a_writel(g, gm20b_slcg_pwr_csb[i].addr, |
@@ -445,6 +486,10 @@ void gm20b_slcg_pmu_load_gating_prod(struct gk20a *g, | |||
445 | { | 486 | { |
446 | u32 i; | 487 | u32 i; |
447 | u32 size = sizeof(gm20b_slcg_pmu) / sizeof(struct gating_desc); | 488 | u32 size = sizeof(gm20b_slcg_pmu) / sizeof(struct gating_desc); |
489 | |||
490 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
491 | return; | ||
492 | |||
448 | for (i = 0; i < size; i++) { | 493 | for (i = 0; i < size; i++) { |
449 | if (prod) | 494 | if (prod) |
450 | gk20a_writel(g, gm20b_slcg_pmu[i].addr, | 495 | gk20a_writel(g, gm20b_slcg_pmu[i].addr, |
@@ -460,6 +505,10 @@ void gm20b_slcg_therm_load_gating_prod(struct gk20a *g, | |||
460 | { | 505 | { |
461 | u32 i; | 506 | u32 i; |
462 | u32 size = sizeof(gm20b_slcg_therm) / sizeof(struct gating_desc); | 507 | u32 size = sizeof(gm20b_slcg_therm) / sizeof(struct gating_desc); |
508 | |||
509 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
510 | return; | ||
511 | |||
463 | for (i = 0; i < size; i++) { | 512 | for (i = 0; i < size; i++) { |
464 | if (prod) | 513 | if (prod) |
465 | gk20a_writel(g, gm20b_slcg_therm[i].addr, | 514 | gk20a_writel(g, gm20b_slcg_therm[i].addr, |
@@ -475,6 +524,10 @@ void gm20b_slcg_xbar_load_gating_prod(struct gk20a *g, | |||
475 | { | 524 | { |
476 | u32 i; | 525 | u32 i; |
477 | u32 size = sizeof(gm20b_slcg_xbar) / sizeof(struct gating_desc); | 526 | u32 size = sizeof(gm20b_slcg_xbar) / sizeof(struct gating_desc); |
527 | |||
528 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) | ||
529 | return; | ||
530 | |||
478 | for (i = 0; i < size; i++) { | 531 | for (i = 0; i < size; i++) { |
479 | if (prod) | 532 | if (prod) |
480 | gk20a_writel(g, gm20b_slcg_xbar[i].addr, | 533 | gk20a_writel(g, gm20b_slcg_xbar[i].addr, |
@@ -490,6 +543,10 @@ void gm20b_blcg_bus_load_gating_prod(struct gk20a *g, | |||
490 | { | 543 | { |
491 | u32 i; | 544 | u32 i; |
492 | u32 size = sizeof(gm20b_blcg_bus) / sizeof(struct gating_desc); | 545 | u32 size = sizeof(gm20b_blcg_bus) / sizeof(struct gating_desc); |
546 | |||
547 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) | ||
548 | return; | ||
549 | |||
493 | for (i = 0; i < size; i++) { | 550 | for (i = 0; i < size; i++) { |
494 | if (prod) | 551 | if (prod) |
495 | gk20a_writel(g, gm20b_blcg_bus[i].addr, | 552 | gk20a_writel(g, gm20b_blcg_bus[i].addr, |
@@ -505,6 +562,10 @@ void gm20b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g, | |||
505 | { | 562 | { |
506 | u32 i; | 563 | u32 i; |
507 | u32 size = sizeof(gm20b_blcg_ctxsw_prog) / sizeof(struct gating_desc); | 564 | u32 size = sizeof(gm20b_blcg_ctxsw_prog) / sizeof(struct gating_desc); |
565 | |||
566 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) | ||
567 | return; | ||
568 | |||
508 | for (i = 0; i < size; i++) { | 569 | for (i = 0; i < size; i++) { |
509 | if (prod) | 570 | if (prod) |
510 | gk20a_writel(g, gm20b_blcg_ctxsw_prog[i].addr, | 571 | gk20a_writel(g, gm20b_blcg_ctxsw_prog[i].addr, |
@@ -520,6 +581,10 @@ void gm20b_blcg_fb_load_gating_prod(struct gk20a *g, | |||
520 | { | 581 | { |
521 | u32 i; | 582 | u32 i; |
522 | u32 size = sizeof(gm20b_blcg_fb) / sizeof(struct gating_desc); | 583 | u32 size = sizeof(gm20b_blcg_fb) / sizeof(struct gating_desc); |
584 | |||
585 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) | ||
586 | return; | ||
587 | |||
523 | for (i = 0; i < size; i++) { | 588 | for (i = 0; i < size; i++) { |
524 | if (prod) | 589 | if (prod) |
525 | gk20a_writel(g, gm20b_blcg_fb[i].addr, | 590 | gk20a_writel(g, gm20b_blcg_fb[i].addr, |
@@ -535,6 +600,10 @@ void gm20b_blcg_fifo_load_gating_prod(struct gk20a *g, | |||
535 | { | 600 | { |
536 | u32 i; | 601 | u32 i; |
537 | u32 size = sizeof(gm20b_blcg_fifo) / sizeof(struct gating_desc); | 602 | u32 size = sizeof(gm20b_blcg_fifo) / sizeof(struct gating_desc); |
603 | |||
604 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) | ||
605 | return; | ||
606 | |||
538 | for (i = 0; i < size; i++) { | 607 | for (i = 0; i < size; i++) { |
539 | if (prod) | 608 | if (prod) |
540 | gk20a_writel(g, gm20b_blcg_fifo[i].addr, | 609 | gk20a_writel(g, gm20b_blcg_fifo[i].addr, |
@@ -550,6 +619,10 @@ void gm20b_blcg_gr_load_gating_prod(struct gk20a *g, | |||
550 | { | 619 | { |
551 | u32 i; | 620 | u32 i; |
552 | u32 size = sizeof(gm20b_blcg_gr) / sizeof(struct gating_desc); | 621 | u32 size = sizeof(gm20b_blcg_gr) / sizeof(struct gating_desc); |
622 | |||
623 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) | ||
624 | return; | ||
625 | |||
553 | for (i = 0; i < size; i++) { | 626 | for (i = 0; i < size; i++) { |
554 | if (prod) | 627 | if (prod) |
555 | gk20a_writel(g, gm20b_blcg_gr[i].addr, | 628 | gk20a_writel(g, gm20b_blcg_gr[i].addr, |
@@ -565,6 +638,10 @@ void gm20b_blcg_ltc_load_gating_prod(struct gk20a *g, | |||
565 | { | 638 | { |
566 | u32 i; | 639 | u32 i; |
567 | u32 size = sizeof(gm20b_blcg_ltc) / sizeof(struct gating_desc); | 640 | u32 size = sizeof(gm20b_blcg_ltc) / sizeof(struct gating_desc); |
641 | |||
642 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) | ||
643 | return; | ||
644 | |||
568 | for (i = 0; i < size; i++) { | 645 | for (i = 0; i < size; i++) { |
569 | if (prod) | 646 | if (prod) |
570 | gk20a_writel(g, gm20b_blcg_ltc[i].addr, | 647 | gk20a_writel(g, gm20b_blcg_ltc[i].addr, |
@@ -580,6 +657,10 @@ void gm20b_blcg_pwr_csb_load_gating_prod(struct gk20a *g, | |||
580 | { | 657 | { |
581 | u32 i; | 658 | u32 i; |
582 | u32 size = sizeof(gm20b_blcg_pwr_csb) / sizeof(struct gating_desc); | 659 | u32 size = sizeof(gm20b_blcg_pwr_csb) / sizeof(struct gating_desc); |
660 | |||
661 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) | ||
662 | return; | ||
663 | |||
583 | for (i = 0; i < size; i++) { | 664 | for (i = 0; i < size; i++) { |
584 | if (prod) | 665 | if (prod) |
585 | gk20a_writel(g, gm20b_blcg_pwr_csb[i].addr, | 666 | gk20a_writel(g, gm20b_blcg_pwr_csb[i].addr, |
@@ -595,6 +676,10 @@ void gm20b_blcg_pmu_load_gating_prod(struct gk20a *g, | |||
595 | { | 676 | { |
596 | u32 i; | 677 | u32 i; |
597 | u32 size = sizeof(gm20b_blcg_pmu) / sizeof(struct gating_desc); | 678 | u32 size = sizeof(gm20b_blcg_pmu) / sizeof(struct gating_desc); |
679 | |||
680 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) | ||
681 | return; | ||
682 | |||
598 | for (i = 0; i < size; i++) { | 683 | for (i = 0; i < size; i++) { |
599 | if (prod) | 684 | if (prod) |
600 | gk20a_writel(g, gm20b_blcg_pmu[i].addr, | 685 | gk20a_writel(g, gm20b_blcg_pmu[i].addr, |
@@ -610,6 +695,10 @@ void gm20b_blcg_xbar_load_gating_prod(struct gk20a *g, | |||
610 | { | 695 | { |
611 | u32 i; | 696 | u32 i; |
612 | u32 size = sizeof(gm20b_blcg_xbar) / sizeof(struct gating_desc); | 697 | u32 size = sizeof(gm20b_blcg_xbar) / sizeof(struct gating_desc); |
698 | |||
699 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) | ||
700 | return; | ||
701 | |||
613 | for (i = 0; i < size; i++) { | 702 | for (i = 0; i < size; i++) { |
614 | if (prod) | 703 | if (prod) |
615 | gk20a_writel(g, gm20b_blcg_xbar[i].addr, | 704 | gk20a_writel(g, gm20b_blcg_xbar[i].addr, |
@@ -625,6 +714,10 @@ void gr_gm20b_pg_gr_load_gating_prod(struct gk20a *g, | |||
625 | { | 714 | { |
626 | u32 i; | 715 | u32 i; |
627 | u32 size = sizeof(gm20b_pg_gr) / sizeof(struct gating_desc); | 716 | u32 size = sizeof(gm20b_pg_gr) / sizeof(struct gating_desc); |
717 | |||
718 | if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) | ||
719 | return; | ||
720 | |||
628 | for (i = 0; i < size; i++) { | 721 | for (i = 0; i < size; i++) { |
629 | if (prod) | 722 | if (prod) |
630 | gk20a_writel(g, gm20b_pg_gr[i].addr, | 723 | gk20a_writel(g, gm20b_pg_gr[i].addr, |