author    Deepak Goyal <dgoyal@nvidia.com>    2017-09-22 02:38:10 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2017-10-04 05:24:30 -0400
commit    0e8aee1c1a38abbc2dccf3f604a9843cf38071e0 (patch)
tree      d7da679255e79a3c48041af1e78bc8d7374d47d2
parent    edb116661348f1bc843849cdcc318fa47cf9724a (diff)
gpu: nvgpu: skip clk gating prog for sim/emu.
For Simulation/Emulation platforms, clock gating should be skipped
as it is not supported.
Added new flags "can_"X"lcg" to check platform capability before
doing SLCG, BLCG and ELCG.

Bug 200314250

Change-Id: I4124d444a77a4c06df8c1d82c6038bfd457f3db0
Signed-off-by: Deepak Goyal <dgoyal@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1566049
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
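The capability-flag pattern this change applies across the gating reglists is small enough to sketch in isolation. Below is a minimal, self-contained C illustration of the flow; the type and helper names (struct gpu, set_enabled(), is_enabled()) are simplified stand-ins for the nvgpu equivalents (struct gk20a, __nvgpu_set_enabled(), nvgpu_is_enabled()), not the driver's actual API:

/*
 * Hypothetical stand-alone sketch of the can_*lcg flow: the platform
 * capability is latched once at probe (honoured only on silicon), and
 * each gating routine bails out before touching registers when the
 * capability is absent (e.g. on simulation/emulation).
 */
#include <stdbool.h>
#include <stdio.h>

enum gpu_flag { GPU_CAN_BLCG, GPU_CAN_SLCG, GPU_CAN_ELCG, GPU_FLAG_MAX };

struct gpu {
        bool enabled[GPU_FLAG_MAX];     /* stand-in for g->enabled_flags */
        bool is_silicon;                /* stand-in for nvgpu_platform_is_silicon() */
};

static void set_enabled(struct gpu *g, enum gpu_flag f, bool state)
{
        g->enabled[f] = state;          /* stand-in for __nvgpu_set_enabled() */
}

static bool is_enabled(struct gpu *g, enum gpu_flag f)
{
        return g->enabled[f];           /* stand-in for nvgpu_is_enabled() */
}

/* Probe time: honour the platform capability only on real silicon. */
static void init_pm_vars(struct gpu *g, bool can_slcg)
{
        set_enabled(g, GPU_CAN_SLCG, g->is_silicon ? can_slcg : false);
}

/* Gating programming: skip entirely when the capability is missing. */
static void slcg_load_gating_prod(struct gpu *g, bool prod)
{
        if (!is_enabled(g, GPU_CAN_SLCG))
                return;                 /* sim/emu: no register writes */
        printf("programming SLCG %s values\n", prod ? "prod" : "reset");
}

int main(void)
{
        struct gpu sim = { .is_silicon = false };

        init_pm_vars(&sim, true);          /* capability forced off: not silicon */
        slcg_load_gating_prod(&sim, true); /* returns early, prints nothing */
        return 0;
}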
-rw-r--r--    drivers/gpu/nvgpu/common/linux/driver_common.c         8
-rw-r--r--    drivers/gpu/nvgpu/common/linux/pci.c                  18
-rw-r--r--    drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c  3
-rw-r--r--    drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c  3
-rw-r--r--    drivers/gpu/nvgpu/gk20a/gk20a_gating_reglist.c        23
-rw-r--r--    drivers/gpu/nvgpu/gk20a/gr_gk20a.c                     6
-rw-r--r--    drivers/gpu/nvgpu/gk20a/platform_gk20a.h               9
-rw-r--r--    drivers/gpu/nvgpu/gk20a/platform_vgpu_tegra.c          3
-rw-r--r--    drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c        95
-rw-r--r--    drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c        85
-rw-r--r--    drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c        99
-rw-r--r--    drivers/gpu/nvgpu/include/nvgpu/enabled.h              3
12 files changed, 352 insertions, 3 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/driver_common.c b/drivers/gpu/nvgpu/common/linux/driver_common.c
index e8530c05..5f2961f4 100644
--- a/drivers/gpu/nvgpu/common/linux/driver_common.c
+++ b/drivers/gpu/nvgpu/common/linux/driver_common.c
@@ -116,6 +116,14 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
 		nvgpu_platform_is_silicon(g) ? platform->enable_mscg : false;
 	g->can_elpg =
 		nvgpu_platform_is_silicon(g) ? platform->can_elpg_init : false;
+
+	__nvgpu_set_enabled(g, NVGPU_GPU_CAN_ELCG,
+		nvgpu_platform_is_silicon(g) ? platform->can_elcg : false);
+	__nvgpu_set_enabled(g, NVGPU_GPU_CAN_SLCG,
+		nvgpu_platform_is_silicon(g) ? platform->can_slcg : false);
+	__nvgpu_set_enabled(g, NVGPU_GPU_CAN_BLCG,
+		nvgpu_platform_is_silicon(g) ? platform->can_blcg : false);
+
 	g->default_pri_timeout = platform->default_pri_timeout;
 	g->aggressive_sync_destroy = platform->aggressive_sync_destroy;
 	g->aggressive_sync_destroy_thresh = platform->aggressive_sync_destroy_thresh;
diff --git a/drivers/gpu/nvgpu/common/linux/pci.c b/drivers/gpu/nvgpu/common/linux/pci.c
index 1a7d1842..6e3e02e5 100644
--- a/drivers/gpu/nvgpu/common/linux/pci.c
+++ b/drivers/gpu/nvgpu/common/linux/pci.c
@@ -79,6 +79,9 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 	.enable_slcg = true,
 	.enable_blcg = true,
 	.enable_mscg = true,
+	.can_slcg = true,
+	.can_blcg = true,
+	.can_elcg = true,
 	.default_pri_timeout = 0x3ff,
 
 	.disable_aspm = true,
@@ -112,6 +115,9 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 	.enable_slcg = true,
 	.enable_blcg = true,
 	.enable_mscg = true,
+	.can_slcg = true,
+	.can_blcg = true,
+	.can_elcg = true,
 	.default_pri_timeout = 0x3ff,
 
 	.disable_aspm = true,
@@ -145,6 +151,9 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 	.enable_slcg = true,
 	.enable_blcg = true,
 	.enable_mscg = true,
+	.can_slcg = true,
+	.can_blcg = true,
+	.can_elcg = true,
 	.default_pri_timeout = 0x3ff,
 
 	.disable_aspm = true,
@@ -178,6 +187,9 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 	.enable_slcg = true,
 	.enable_blcg = true,
 	.enable_mscg = true,
+	.can_slcg = true,
+	.can_blcg = true,
+	.can_elcg = true,
 	.default_pri_timeout = 0x3ff,
 
 	.disable_aspm = true,
@@ -211,6 +223,9 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 	.enable_slcg = false,
 	.enable_blcg = false,
 	.enable_mscg = false,
+	.can_slcg = false,
+	.can_blcg = false,
+	.can_elcg = false,
 	.default_pri_timeout = 0x3ff,
 
 	.disable_aspm = true,
@@ -241,6 +256,9 @@ static struct gk20a_platform nvgpu_pci_device[] = {
 	.enable_slcg = false,
 	.enable_blcg = false,
 	.enable_mscg = false,
+	.can_slcg = false,
+	.can_blcg = false,
+	.can_elcg = false,
 	.default_pri_timeout = 0x3ff,
 
 	.disable_aspm = true,
diff --git a/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
index 2c556d8a..6379d8ef 100644
--- a/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
@@ -911,6 +911,9 @@ struct gk20a_platform gm20b_tegra_platform = {
 	.enable_slcg = true,
 	.enable_blcg = true,
 	.enable_elcg = true,
+	.can_slcg = true,
+	.can_blcg = true,
+	.can_elcg = true,
 	.enable_elpg = true,
 	.enable_aelpg = true,
 	.enable_perfmon = true,
diff --git a/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c b/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c
index bdb7fb3f..c1ff34e3 100644
--- a/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c
+++ b/drivers/gpu/nvgpu/common/linux/platform_gp10b_tegra.c
@@ -373,6 +373,9 @@ struct gk20a_platform gp10b_tegra_platform = {
 	.enable_blcg = true,
 	.enable_slcg = true,
 	.enable_elcg = true,
+	.can_slcg = true,
+	.can_blcg = true,
+	.can_elcg = true,
 	.enable_aelpg = true,
 	.enable_perfmon = true,
 
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_gating_reglist.c b/drivers/gpu/nvgpu/gk20a/gk20a_gating_reglist.c
index 45b25425..751c6a19 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_gating_reglist.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_gating_reglist.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2015, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2012-2017, NVIDIA Corporation. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
 #define __gk20a_gating_reglist_h__
 
 #include "gk20a_gating_reglist.h"
+#include <nvgpu/enabled.h>
 
 struct gating_desc {
 	u32 addr;
@@ -305,6 +306,10 @@ void gr_gk20a_slcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gk20a_slcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gk20a_slcg_gr[i].addr,
@@ -325,6 +330,10 @@ void gr_gk20a_slcg_perf_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gk20a_slcg_perf) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gk20a_slcg_perf[i].addr,
@@ -340,6 +349,10 @@ void gr_gk20a_blcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gk20a_blcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gk20a_blcg_gr[i].addr,
@@ -355,6 +368,10 @@ void gr_gk20a_pg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gk20a_pg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gk20a_pg_gr[i].addr,
@@ -370,6 +387,10 @@ void gr_gk20a_slcg_therm_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gk20a_slcg_therm) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gk20a_slcg_therm[i].addr,
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 679b8492..628b6823 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -4067,6 +4067,9 @@ void gr_gk20a_init_blcg_mode(struct gk20a *g, u32 mode, u32 engine)
 {
 	u32 gate_ctrl;
 
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));
 
 	switch (mode) {
@@ -4095,6 +4098,9 @@ void gr_gk20a_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
 
 	gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));
 
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG))
+		return;
+
 	switch (mode) {
 	case ELCG_RUN:
 		gate_ctrl = set_field(gate_ctrl,
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
index 02a5e519..c2c73b9c 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a.h
@@ -91,6 +91,15 @@ struct gk20a_platform {
 	/* Engine Level Clock Gating: true = enable flase = disable */
 	bool enable_elcg;
 
+	/* Should be populated at probe. */
+	bool can_slcg;
+
+	/* Should be populated at probe. */
+	bool can_blcg;
+
+	/* Should be populated at probe. */
+	bool can_elcg;
+
 	/* Engine Level Power Gating: true = enable flase = disable */
 	bool enable_elpg;
 
diff --git a/drivers/gpu/nvgpu/gk20a/platform_vgpu_tegra.c b/drivers/gpu/nvgpu/gk20a/platform_vgpu_tegra.c
index 43abee5d..90a37c47 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_vgpu_tegra.c
+++ b/drivers/gpu/nvgpu/gk20a/platform_vgpu_tegra.c
@@ -57,6 +57,9 @@ struct gk20a_platform vgpu_tegra_platform = {
 	.enable_elcg = false,
 	.enable_elpg = false,
 	.enable_aelpg = false,
+	.can_slcg = false,
+	.can_blcg = false,
+	.can_elcg = false,
 
 	.ch_wdt_timeout_ms = 5000,
 
diff --git a/drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c b/drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c
index ca67c80a..0ebb2d0d 100644
--- a/drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c
+++ b/drivers/gpu/nvgpu/gm20b/gm20b_gating_reglist.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
 #define __gm20b_gating_reglist_h__
 
 #include "gm20b_gating_reglist.h"
+#include <nvgpu/enabled.h>
 
 struct gating_desc {
 	u32 addr;
@@ -290,6 +291,10 @@ void gm20b_slcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_bus[i].addr,
@@ -305,6 +310,10 @@ void gm20b_slcg_ce2_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_ce2) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_ce2[i].addr,
@@ -320,6 +329,10 @@ void gm20b_slcg_chiplet_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_chiplet) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_chiplet[i].addr,
@@ -340,6 +353,10 @@ void gm20b_slcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_fb[i].addr,
@@ -355,6 +372,10 @@ void gm20b_slcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_fifo[i].addr,
@@ -370,6 +391,10 @@ void gr_gm20b_slcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_gr[i].addr,
@@ -385,6 +410,10 @@ void ltc_gm20b_slcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_ltc[i].addr,
@@ -400,6 +429,10 @@ void gm20b_slcg_perf_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_perf) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_perf[i].addr,
@@ -415,6 +448,10 @@ void gm20b_slcg_priring_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_priring) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_priring[i].addr,
@@ -430,6 +467,10 @@ void gm20b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_pwr_csb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_pwr_csb[i].addr,
@@ -445,6 +486,10 @@ void gm20b_slcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_pmu[i].addr,
@@ -460,6 +505,10 @@ void gm20b_slcg_therm_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_therm) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_therm[i].addr,
@@ -475,6 +524,10 @@ void gm20b_slcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_slcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_slcg_xbar[i].addr,
@@ -490,6 +543,10 @@ void gm20b_blcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_bus[i].addr,
@@ -505,6 +562,10 @@ void gm20b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_ctxsw_prog) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_ctxsw_prog[i].addr,
@@ -520,6 +581,10 @@ void gm20b_blcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_fb[i].addr,
@@ -535,6 +600,10 @@ void gm20b_blcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_fifo[i].addr,
@@ -550,6 +619,10 @@ void gm20b_blcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_gr[i].addr,
@@ -565,6 +638,10 @@ void gm20b_blcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_ltc[i].addr,
@@ -580,6 +657,10 @@ void gm20b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_pwr_csb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_pwr_csb[i].addr,
@@ -595,6 +676,10 @@ void gm20b_blcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_pmu[i].addr,
@@ -610,6 +695,10 @@ void gm20b_blcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_blcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_blcg_xbar[i].addr,
@@ -625,6 +714,10 @@ void gr_gm20b_pg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gm20b_pg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gm20b_pg_gr[i].addr,
diff --git a/drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c b/drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c
index 5a634313..169a1fee 100644
--- a/drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c
+++ b/drivers/gpu/nvgpu/gp106/gp106_gating_reglist.c
@@ -26,6 +26,7 @@
 #define __gp106_gating_reglist_h__
 
 #include "gp106_gating_reglist.h"
+#include <nvgpu/enabled.h>
 
 struct gating_desc {
 	u32 addr;
@@ -276,6 +277,10 @@ void gp106_slcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_bus[i].addr,
@@ -291,6 +296,10 @@ void gp106_slcg_ce2_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_ce2) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_ce2[i].addr,
@@ -306,6 +315,10 @@ void gp106_slcg_chiplet_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_chiplet) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_chiplet[i].addr,
@@ -326,6 +339,10 @@ void gp106_slcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_fb[i].addr,
@@ -341,6 +358,10 @@ void gp106_slcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_fifo[i].addr,
@@ -356,6 +377,10 @@ void gr_gp106_slcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_gr[i].addr,
@@ -371,6 +396,10 @@ void ltc_gp106_slcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_ltc[i].addr,
@@ -386,6 +415,10 @@ void gp106_slcg_perf_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_perf) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_perf[i].addr,
@@ -401,6 +434,10 @@ void gp106_slcg_priring_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_priring) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_priring[i].addr,
@@ -416,6 +453,10 @@ void gp106_slcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_pmu[i].addr,
@@ -431,6 +472,10 @@ void gp106_slcg_therm_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_therm) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_therm[i].addr,
@@ -446,6 +491,10 @@ void gp106_slcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_slcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_slcg_xbar[i].addr,
@@ -461,6 +510,10 @@ void gp106_blcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_bus[i].addr,
@@ -476,6 +529,10 @@ void gp106_blcg_ce_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_ce) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_ce[i].addr,
@@ -491,6 +548,10 @@ void gp106_blcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_fb[i].addr,
@@ -506,6 +567,10 @@ void gp106_blcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_fifo[i].addr,
@@ -521,6 +586,10 @@ void gp106_blcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_gr[i].addr,
@@ -536,6 +605,10 @@ void gp106_blcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_ltc[i].addr,
@@ -551,6 +624,10 @@ void gp106_blcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_pmu[i].addr,
@@ -566,6 +643,10 @@ void gp106_blcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_blcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_blcg_xbar[i].addr,
@@ -581,6 +662,10 @@ void gr_gp106_pg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp106_pg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp106_pg_gr[i].addr,
diff --git a/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c b/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c
index 473c97f3..944fa741 100644
--- a/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c
+++ b/drivers/gpu/nvgpu/gp10b/gp10b_gating_reglist.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,7 @@
 #define __gp10b_gating_reglist_h__
 
 #include "gp10b_gating_reglist.h"
+#include <nvgpu/enabled.h>
 
 struct gating_desc {
 	u32 addr;
@@ -281,6 +282,10 @@ void gp10b_slcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_bus[i].addr,
@@ -296,6 +301,10 @@ void gp10b_slcg_ce2_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_ce2) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_ce2[i].addr,
@@ -311,6 +320,10 @@ void gp10b_slcg_chiplet_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_chiplet) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_chiplet[i].addr,
@@ -331,6 +344,10 @@ void gp10b_slcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_fb[i].addr,
@@ -346,6 +363,10 @@ void gp10b_slcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_fifo[i].addr,
@@ -361,6 +382,10 @@ void gr_gp10b_slcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_gr[i].addr,
@@ -376,6 +401,10 @@ void ltc_gp10b_slcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_ltc[i].addr,
@@ -391,6 +420,10 @@ void gp10b_slcg_perf_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_perf) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_perf[i].addr,
@@ -406,6 +439,10 @@ void gp10b_slcg_priring_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_priring) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_priring[i].addr,
@@ -421,6 +458,10 @@ void gp10b_slcg_pwr_csb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_pwr_csb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_pwr_csb[i].addr,
@@ -436,6 +477,10 @@ void gp10b_slcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_pmu[i].addr,
@@ -451,6 +496,10 @@ void gp10b_slcg_therm_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_therm) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_therm[i].addr,
@@ -466,6 +515,10 @@ void gp10b_slcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_slcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_slcg_xbar[i].addr,
@@ -481,6 +534,10 @@ void gp10b_blcg_bus_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_bus) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_bus[i].addr,
@@ -496,6 +553,10 @@ void gp10b_blcg_ce_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_ce) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_ce[i].addr,
@@ -511,6 +572,10 @@ void gp10b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_ctxsw_prog) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_ctxsw_prog[i].addr,
@@ -526,6 +591,10 @@ void gp10b_blcg_fb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_fb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_fb[i].addr,
@@ -541,6 +610,10 @@ void gp10b_blcg_fifo_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_fifo) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_fifo[i].addr,
@@ -556,6 +629,10 @@ void gp10b_blcg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_gr[i].addr,
@@ -571,6 +648,10 @@ void gp10b_blcg_ltc_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_ltc) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_ltc[i].addr,
@@ -586,6 +667,10 @@ void gp10b_blcg_pwr_csb_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_pwr_csb) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_pwr_csb[i].addr,
@@ -601,6 +686,10 @@ void gp10b_blcg_pmu_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_pmu) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_pmu[i].addr,
@@ -616,6 +705,10 @@ void gp10b_blcg_xbar_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_blcg_xbar) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_blcg_xbar[i].addr,
@@ -631,6 +724,10 @@ void gr_gp10b_pg_gr_load_gating_prod(struct gk20a *g,
 {
 	u32 i;
 	u32 size = sizeof(gp10b_pg_gr) / sizeof(struct gating_desc);
+
+	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG))
+		return;
+
 	for (i = 0; i < size; i++) {
 		if (prod)
 			gk20a_writel(g, gp10b_pg_gr[i].addr,
diff --git a/drivers/gpu/nvgpu/include/nvgpu/enabled.h b/drivers/gpu/nvgpu/include/nvgpu/enabled.h
index 12a78af5..41758fe7 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/enabled.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/enabled.h
@@ -58,6 +58,9 @@ struct gk20a;
 #define NVGPU_PMU_PSTATE 49
 #define NVGPU_PMU_ZBC_SAVE 50
 #define NVGPU_PMU_FECS_BOOTSTRAP_DONE 51
+#define NVGPU_GPU_CAN_BLCG 52
+#define NVGPU_GPU_CAN_SLCG 53
+#define NVGPU_GPU_CAN_ELCG 54
 
 /* whether to run PREOS binary on dGPUs */
 #define NVGPU_PMU_RUN_PREOS 52