summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu
diff options
context:
space:
mode:
authorDebarshi Dutta <ddutta@nvidia.com>2019-04-30 04:24:08 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2019-05-09 17:41:30 -0400
commitc81cc032c48a1b25e095b17b77399166c9091ff3 (patch)
treeace7d238c55bbb5e96fb6fd74deb156f3c513bae /drivers/gpu/nvgpu
parentf495f52c70c6bd7b7a4e6897270e4696efa57d5c (diff)
gpu: nvgpu: add cg and pg function
Add new power/clock gating functions that can be called by other units. New clock_gating functions will reside in cg.c under common/power_features/cg unit. New power gating functions will reside in pg.c under common/power_features/pg unit. Use nvgpu_pg_elpg_disable and nvgpu_pg_elpg_enable to disable/enable elpg and also in gr_gk20a_elpg_protected macro to access gr registers. Add cg_pg_lock to make elpg_enabled, elcg_enabled, blcg_enabled and slcg_enabled thread safe. JIRA NVGPU-2014 Change-Id: I00d124c2ee16242c9a3ef82e7620fbb7f1297aff Signed-off-by: Seema Khowala <seemaj@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/2025493 Signed-off-by: Debarshi Dutta <ddutta@nvidia.com> (cherry-picked from c90585856567a547173a8b207365b3a4a3ccdd57 in dev-kernel) Reviewed-on: https://git-master.nvidia.com/r/2108406 GVS: Gerrit_Virtual_Submit Reviewed-by: Bibek Basu <bbasu@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--drivers/gpu/nvgpu/Makefile6
-rw-r--r--drivers/gpu/nvgpu/Makefile.sources3
-rw-r--r--drivers/gpu/nvgpu/common/mm/mm.c20
-rw-r--r--drivers/gpu/nvgpu/common/pmu/pmu.c14
-rw-r--r--drivers/gpu/nvgpu/common/power_features/cg/cg.c566
-rw-r--r--drivers/gpu/nvgpu/common/power_features/pg/pg.c106
-rw-r--r--drivers/gpu/nvgpu/common/power_features/power_features.c66
-rw-r--r--drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c6
-rw-r--r--drivers/gpu/nvgpu/gk20a/ce2_gk20a.c12
-rw-r--r--drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c59
-rw-r--r--drivers/gpu/nvgpu/gk20a/fifo_gk20a.c49
-rw-r--r--drivers/gpu/nvgpu/gk20a/gr_gk20a.c104
-rw-r--r--drivers/gpu/nvgpu/gk20a/gr_gk20a.h11
-rw-r--r--drivers/gpu/nvgpu/gm20b/pmu_gm20b.c2
-rw-r--r--drivers/gpu/nvgpu/gp10b/pmu_gp10b.c2
-rw-r--r--drivers/gpu/nvgpu/gv11b/fifo_gv11b.c45
-rw-r--r--drivers/gpu/nvgpu/gv11b/pmu_gv11b.c2
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/gk20a.h1
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/power_features/cg.h55
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/power_features/pg.h36
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/power_features/power_features.h34
-rw-r--r--drivers/gpu/nvgpu/os/linux/driver_common.c1
-rw-r--r--drivers/gpu/nvgpu/os/linux/sysfs.c137
-rw-r--r--drivers/gpu/nvgpu/os/linux/vgpu/vgpu_linux.c1
24 files changed, 984 insertions, 354 deletions
diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile
index fdfaf092..819be617 100644
--- a/drivers/gpu/nvgpu/Makefile
+++ b/drivers/gpu/nvgpu/Makefile
@@ -51,8 +51,10 @@ nvgpu-y += common/bus/bus_gk20a.o \
51 common/mc/mc_gp10b.o \ 51 common/mc/mc_gp10b.o \
52 common/mc/mc_gv11b.o \ 52 common/mc/mc_gv11b.o \
53 common/mc/mc_gv100.o \ 53 common/mc/mc_gv100.o \
54 common/sync/channel_sync.o 54 common/sync/channel_sync.o \
55 55 common/power_features/power_features.o \
56 common/power_features/cg/cg.o \
57 common/power_features/pg/pg.o
56# Linux specific parts of nvgpu. 58# Linux specific parts of nvgpu.
57nvgpu-y += \ 59nvgpu-y += \
58 os/linux/os_ops.o \ 60 os/linux/os_ops.o \
diff --git a/drivers/gpu/nvgpu/Makefile.sources b/drivers/gpu/nvgpu/Makefile.sources
index 8f7cdcf9..6ba8555d 100644
--- a/drivers/gpu/nvgpu/Makefile.sources
+++ b/drivers/gpu/nvgpu/Makefile.sources
@@ -116,6 +116,9 @@ srcs := os/posix/nvgpu.c \
116 common/mc/mc_gp10b.c \ 116 common/mc/mc_gp10b.c \
117 common/mc/mc_gv11b.c \ 117 common/mc/mc_gv11b.c \
118 common/mc/mc_gv100.c \ 118 common/mc/mc_gv100.c \
119 common/power_features/power_features.c \
120 common/power_features/cg/cg.c \
121 common/power_features/pg/pg.c \
119 boardobj/boardobj.c \ 122 boardobj/boardobj.c \
120 boardobj/boardobjgrp.c \ 123 boardobj/boardobjgrp.c \
121 boardobj/boardobjgrpmask.c \ 124 boardobj/boardobjgrpmask.c \
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index c9aac4af..fc7a9ae4 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -30,6 +30,7 @@
30#include <nvgpu/pramin.h> 30#include <nvgpu/pramin.h>
31#include <nvgpu/enabled.h> 31#include <nvgpu/enabled.h>
32#include <nvgpu/gk20a.h> 32#include <nvgpu/gk20a.h>
33#include <nvgpu/power_features/cg.h>
33 34
34/* 35/*
35 * Attempt to find a reserved memory area to determine PTE size for the passed 36 * Attempt to find a reserved memory area to determine PTE size for the passed
@@ -349,22 +350,9 @@ static int nvgpu_init_mm_reset_enable_hw(struct gk20a *g)
349 g->ops.mc.fb_reset(g); 350 g->ops.mc.fb_reset(g);
350 } 351 }
351 352
352 if (g->ops.clock_gating.slcg_fb_load_gating_prod) { 353 nvgpu_cg_slcg_fb_ltc_load_enable(g);
353 g->ops.clock_gating.slcg_fb_load_gating_prod(g, 354
354 g->slcg_enabled); 355 nvgpu_cg_blcg_fb_ltc_load_enable(g);
355 }
356 if (g->ops.clock_gating.slcg_ltc_load_gating_prod) {
357 g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
358 g->slcg_enabled);
359 }
360 if (g->ops.clock_gating.blcg_fb_load_gating_prod) {
361 g->ops.clock_gating.blcg_fb_load_gating_prod(g,
362 g->blcg_enabled);
363 }
364 if (g->ops.clock_gating.blcg_ltc_load_gating_prod) {
365 g->ops.clock_gating.blcg_ltc_load_gating_prod(g,
366 g->blcg_enabled);
367 }
368 356
369 if (g->ops.fb.init_fs_state) { 357 if (g->ops.fb.init_fs_state) {
370 g->ops.fb.init_fs_state(g); 358 g->ops.fb.init_fs_state(g);
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index f86dc2c2..b9cfd033 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -30,6 +30,8 @@
30#include <nvgpu/bug.h> 30#include <nvgpu/bug.h>
31#include <nvgpu/utils.h> 31#include <nvgpu/utils.h>
32#include <nvgpu/gk20a.h> 32#include <nvgpu/gk20a.h>
33#include <nvgpu/power_features/cg.h>
34
33 35
34static int nvgpu_pg_init_task(void *arg); 36static int nvgpu_pg_init_task(void *arg);
35 37
@@ -44,15 +46,9 @@ static int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable)
44 /* bring PMU falcon/engine out of reset */ 46 /* bring PMU falcon/engine out of reset */
45 g->ops.pmu.reset_engine(g, true); 47 g->ops.pmu.reset_engine(g, true);
46 48
47 if (g->ops.clock_gating.slcg_pmu_load_gating_prod) { 49 nvgpu_cg_slcg_pmu_load_enable(g);
48 g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
49 g->slcg_enabled);
50 }
51 50
52 if (g->ops.clock_gating.blcg_pmu_load_gating_prod) { 51 nvgpu_cg_blcg_pmu_load_enable(g);
53 g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
54 g->blcg_enabled);
55 }
56 52
57 if (nvgpu_flcn_mem_scrub_wait(pmu->flcn)) { 53 if (nvgpu_flcn_mem_scrub_wait(pmu->flcn)) {
58 /* keep PMU falcon/engine in reset 54 /* keep PMU falcon/engine in reset
@@ -446,7 +442,7 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g)
446 g->ops.gr.pmu_save_zbc(g, 0xf); 442 g->ops.gr.pmu_save_zbc(g, 0xf);
447 } 443 }
448 444
449 if (g->elpg_enabled) { 445 if (g->can_elpg && g->elpg_enabled) {
450 /* Init reg with prod values*/ 446 /* Init reg with prod values*/
451 if (g->ops.pmu.pmu_setup_elpg) { 447 if (g->ops.pmu.pmu_setup_elpg) {
452 g->ops.pmu.pmu_setup_elpg(g); 448 g->ops.pmu.pmu_setup_elpg(g);
diff --git a/drivers/gpu/nvgpu/common/power_features/cg/cg.c b/drivers/gpu/nvgpu/common/power_features/cg/cg.c
new file mode 100644
index 00000000..66b95226
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/power_features/cg/cg.c
@@ -0,0 +1,566 @@
1/*
2 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/gk20a.h>
24#include <nvgpu/enabled.h>
25#include <nvgpu/power_features/cg.h>
26
/*
 * Program a clock-gating mode on the active engines.
 *
 * @g           GPU driver context.
 * @cgmode      BLCG_MODE or ELCG_MODE.
 * @mode_config target config (e.g. ELCG_AUTO/ELCG_RUN, BLCG_AUTO/BLCG_RUN);
 *              cast to u32 before being handed to the therm HAL.
 *
 * Walks the active engine list. BLCG is only programmed on the GR engine,
 * and the loop stops (break) once GR has been handled. ELCG is programmed
 * on every active engine.
 *
 * NOTE(review): for BLCG_MODE, any non-GR engine encountered before GR
 * falls into the else branch and logs "invalid cg mode" — confirm this
 * logging on non-GR engines is intended rather than silently skipping.
 *
 * Caller is expected to hold g->cg_pg_lock (all callers in this file do).
 */
static void nvgpu_cg_set_mode(struct gk20a *g, int cgmode, int mode_config)
{
	u32 engine_idx;
	u32 active_engine_id = 0;
	struct fifo_engine_info_gk20a *engine_info = NULL;
	struct fifo_gk20a *f = &g->fifo;

	nvgpu_log_fn(g, " ");

	for (engine_idx = 0; engine_idx < f->num_engines; ++engine_idx) {
		active_engine_id = f->active_engines_list[engine_idx];
		engine_info = &f->engine_info[active_engine_id];

		/* gr_engine supports both BLCG and ELCG */
		if ((cgmode == BLCG_MODE) && (engine_info->engine_enum ==
				ENGINE_GR_GK20A)) {
			g->ops.therm.init_blcg_mode(g, (u32)mode_config,
						active_engine_id);
			break;
		} else if (cgmode == ELCG_MODE) {
			g->ops.therm.init_elcg_mode(g, (u32)mode_config,
						active_engine_id);
		} else {
			nvgpu_err(g, "invalid cg mode %d, config %d for "
							"act_eng_id %d",
					cgmode, mode_config, active_engine_id);
		}
	}
}
56
57void nvgpu_cg_elcg_enable(struct gk20a *g)
58{
59 nvgpu_log_fn(g, " ");
60
61 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
62 return;
63 }
64
65 nvgpu_mutex_acquire(&g->cg_pg_lock);
66 if (g->elcg_enabled) {
67 nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_AUTO);
68 }
69 nvgpu_mutex_release(&g->cg_pg_lock);
70}
71
72void nvgpu_cg_elcg_disable(struct gk20a *g)
73{
74 nvgpu_log_fn(g, " ");
75
76 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
77 return;
78 }
79
80 nvgpu_mutex_acquire(&g->cg_pg_lock);
81 if (g->elcg_enabled) {
82 nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_RUN);
83 }
84 nvgpu_mutex_release(&g->cg_pg_lock);
85
86}
87
88void nvgpu_cg_blcg_mode_enable(struct gk20a *g)
89{
90 nvgpu_log_fn(g, " ");
91
92 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
93 return;
94 }
95
96 nvgpu_mutex_acquire(&g->cg_pg_lock);
97 if (g->blcg_enabled) {
98 nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_AUTO);
99 }
100 nvgpu_mutex_release(&g->cg_pg_lock);
101
102}
103
104void nvgpu_cg_blcg_mode_disable(struct gk20a *g)
105{
106 nvgpu_log_fn(g, " ");
107
108 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
109 return;
110 }
111
112 nvgpu_mutex_acquire(&g->cg_pg_lock);
113 if (g->blcg_enabled) {
114 nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_RUN);
115 }
116 nvgpu_mutex_release(&g->cg_pg_lock);
117
118
119}
120
121void nvgpu_cg_blcg_fb_ltc_load_enable(struct gk20a *g)
122{
123 nvgpu_log_fn(g, " ");
124
125 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
126 return;
127 }
128 nvgpu_mutex_acquire(&g->cg_pg_lock);
129 if (!g->blcg_enabled) {
130 goto done;
131 }
132 if (g->ops.clock_gating.blcg_fb_load_gating_prod != NULL) {
133 g->ops.clock_gating.blcg_fb_load_gating_prod(g, true);
134 }
135 if (g->ops.clock_gating.blcg_ltc_load_gating_prod != NULL) {
136 g->ops.clock_gating.blcg_ltc_load_gating_prod(g, true);
137 }
138done:
139 nvgpu_mutex_release(&g->cg_pg_lock);
140}
141
142void nvgpu_cg_blcg_fifo_load_enable(struct gk20a *g)
143{
144 nvgpu_log_fn(g, " ");
145
146 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
147 return;
148 }
149 nvgpu_mutex_acquire(&g->cg_pg_lock);
150 if (!g->blcg_enabled) {
151 goto done;
152 }
153 if (g->ops.clock_gating.blcg_fifo_load_gating_prod != NULL) {
154 g->ops.clock_gating.blcg_fifo_load_gating_prod(g, true);
155 }
156done:
157 nvgpu_mutex_release(&g->cg_pg_lock);
158}
159
160void nvgpu_cg_blcg_pmu_load_enable(struct gk20a *g)
161{
162 nvgpu_log_fn(g, " ");
163
164 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
165 return;
166 }
167 nvgpu_mutex_acquire(&g->cg_pg_lock);
168 if (!g->blcg_enabled) {
169 goto done;
170 }
171 if (g->ops.clock_gating.blcg_pmu_load_gating_prod != NULL) {
172 g->ops.clock_gating.blcg_pmu_load_gating_prod(g, true);
173 }
174done:
175 nvgpu_mutex_release(&g->cg_pg_lock);
176}
177
178void nvgpu_cg_blcg_ce_load_enable(struct gk20a *g)
179{
180 nvgpu_log_fn(g, " ");
181
182 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
183 return;
184 }
185 nvgpu_mutex_acquire(&g->cg_pg_lock);
186 if (!g->blcg_enabled) {
187 goto done;
188 }
189 if (g->ops.clock_gating.blcg_ce_load_gating_prod != NULL) {
190 g->ops.clock_gating.blcg_ce_load_gating_prod(g, true);
191 }
192done:
193 nvgpu_mutex_release(&g->cg_pg_lock);
194}
195
196void nvgpu_cg_blcg_gr_load_enable(struct gk20a *g)
197{
198 nvgpu_log_fn(g, " ");
199
200 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
201 return;
202 }
203 nvgpu_mutex_acquire(&g->cg_pg_lock);
204 if (!g->blcg_enabled) {
205 goto done;
206 }
207 if (g->ops.clock_gating.blcg_gr_load_gating_prod != NULL) {
208 g->ops.clock_gating.blcg_gr_load_gating_prod(g, true);
209 }
210done:
211 nvgpu_mutex_release(&g->cg_pg_lock);
212}
213
214void nvgpu_cg_slcg_fb_ltc_load_enable(struct gk20a *g)
215{
216 nvgpu_log_fn(g, " ");
217
218 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
219 return;
220 }
221 nvgpu_mutex_acquire(&g->cg_pg_lock);
222 if (!g->slcg_enabled) {
223 goto done;
224 }
225 if (g->ops.clock_gating.slcg_fb_load_gating_prod != NULL) {
226 g->ops.clock_gating.slcg_fb_load_gating_prod(g, true);
227 }
228 if (g->ops.clock_gating.slcg_ltc_load_gating_prod != NULL) {
229 g->ops.clock_gating.slcg_ltc_load_gating_prod(g, true);
230 }
231done:
232 nvgpu_mutex_release(&g->cg_pg_lock);
233}
234
235void nvgpu_cg_slcg_priring_load_enable(struct gk20a *g)
236{
237 nvgpu_log_fn(g, " ");
238
239 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
240 return;
241 }
242 nvgpu_mutex_acquire(&g->cg_pg_lock);
243 if (!g->slcg_enabled) {
244 goto done;
245 }
246 if (g->ops.clock_gating.slcg_priring_load_gating_prod != NULL) {
247 g->ops.clock_gating.slcg_priring_load_gating_prod(g, true);
248 }
249done:
250 nvgpu_mutex_release(&g->cg_pg_lock);
251}
252
253void nvgpu_cg_slcg_gr_perf_ltc_load_enable(struct gk20a *g)
254{
255 nvgpu_log_fn(g, " ");
256
257 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
258 return;
259 }
260 nvgpu_mutex_acquire(&g->cg_pg_lock);
261 if (!g->slcg_enabled) {
262 goto done;
263 }
264 if (g->ops.clock_gating.slcg_ltc_load_gating_prod != NULL) {
265 g->ops.clock_gating.slcg_ltc_load_gating_prod(g, true);
266 }
267 if (g->ops.clock_gating.slcg_perf_load_gating_prod != NULL) {
268 g->ops.clock_gating.slcg_perf_load_gating_prod(g, true);
269 }
270 if (g->ops.clock_gating.slcg_gr_load_gating_prod != NULL) {
271 g->ops.clock_gating.slcg_gr_load_gating_prod(g, true);
272 }
273done:
274 nvgpu_mutex_release(&g->cg_pg_lock);
275}
276
277void nvgpu_cg_slcg_gr_perf_ltc_load_disable(struct gk20a *g)
278{
279 nvgpu_log_fn(g, " ");
280
281 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
282 return;
283 }
284 nvgpu_mutex_acquire(&g->cg_pg_lock);
285 if (!g->slcg_enabled) {
286 goto done;
287 }
288 if (g->ops.clock_gating.slcg_gr_load_gating_prod != NULL) {
289 g->ops.clock_gating.slcg_gr_load_gating_prod(g, false);
290 }
291 if (g->ops.clock_gating.slcg_perf_load_gating_prod != NULL) {
292 g->ops.clock_gating.slcg_perf_load_gating_prod(g, false);
293 }
294 if (g->ops.clock_gating.slcg_ltc_load_gating_prod != NULL) {
295 g->ops.clock_gating.slcg_ltc_load_gating_prod(g, false);
296 }
297done:
298 nvgpu_mutex_release(&g->cg_pg_lock);
299}
300
301void nvgpu_cg_slcg_fifo_load_enable(struct gk20a *g)
302{
303 nvgpu_log_fn(g, " ");
304
305 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
306 return;
307 }
308 nvgpu_mutex_acquire(&g->cg_pg_lock);
309 if (!g->slcg_enabled) {
310 goto done;
311 }
312 if (g->ops.clock_gating.slcg_fifo_load_gating_prod != NULL) {
313 g->ops.clock_gating.slcg_fifo_load_gating_prod(g, true);
314 }
315done:
316 nvgpu_mutex_release(&g->cg_pg_lock);
317}
318
319void nvgpu_cg_slcg_pmu_load_enable(struct gk20a *g)
320{
321 nvgpu_log_fn(g, " ");
322
323 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
324 return;
325 }
326 nvgpu_mutex_acquire(&g->cg_pg_lock);
327 if (!g->slcg_enabled) {
328 goto done;
329 }
330 if (g->ops.clock_gating.slcg_pmu_load_gating_prod != NULL) {
331 g->ops.clock_gating.slcg_pmu_load_gating_prod(g, true);
332 }
333done:
334 nvgpu_mutex_release(&g->cg_pg_lock);
335}
336
337void nvgpu_cg_slcg_ce2_load_enable(struct gk20a *g)
338{
339 nvgpu_log_fn(g, " ");
340
341 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
342 return;
343 }
344 nvgpu_mutex_acquire(&g->cg_pg_lock);
345 if (!g->slcg_enabled) {
346 goto done;
347 }
348 if (g->ops.clock_gating.slcg_ce2_load_gating_prod != NULL) {
349 g->ops.clock_gating.slcg_ce2_load_gating_prod(g, true);
350 }
351done:
352 nvgpu_mutex_release(&g->cg_pg_lock);
353}
354
/*
 * Load all GR-related clock/power-gating prod settings during GR init.
 *
 * Under a single cg_pg_lock hold, this programs (in order):
 *   1. SLCG prods (bus, chiplet, gr, ctxsw firmware, perf, xbar) —
 *      skipped when the platform cannot do SLCG or SLCG is disabled;
 *   2. BLCG prods (bus, gr, ctxsw firmware, xbar) — skipped when the
 *      platform cannot do BLCG or BLCG is disabled;
 *   3. the GR power-gating prod, which is loaded unconditionally.
 * The goto labels implement the two skip points without releasing the
 * lock in between.
 */
void nvgpu_cg_init_gr_load_gating_prod(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");

	nvgpu_mutex_acquire(&g->cg_pg_lock);

	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
		goto check_can_blcg;
	}
	if (!g->slcg_enabled) {
		goto check_can_blcg;
	}

	if (g->ops.clock_gating.slcg_bus_load_gating_prod != NULL) {
		g->ops.clock_gating.slcg_bus_load_gating_prod(g, true);
	}
	if (g->ops.clock_gating.slcg_chiplet_load_gating_prod != NULL) {
		g->ops.clock_gating.slcg_chiplet_load_gating_prod(g, true);
	}
	if (g->ops.clock_gating.slcg_gr_load_gating_prod != NULL) {
		g->ops.clock_gating.slcg_gr_load_gating_prod(g, true);
	}
	if (g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod != NULL) {
		g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod(g,
			true);
	}
	if (g->ops.clock_gating.slcg_perf_load_gating_prod != NULL) {
		g->ops.clock_gating.slcg_perf_load_gating_prod(g, true);
	}
	if (g->ops.clock_gating.slcg_xbar_load_gating_prod != NULL) {
		g->ops.clock_gating.slcg_xbar_load_gating_prod(g, true);
	}

check_can_blcg:
	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
		goto pg_gr_load;
	}
	if (!g->blcg_enabled) {
		goto pg_gr_load;
	}
	if (g->ops.clock_gating.blcg_bus_load_gating_prod != NULL) {
		g->ops.clock_gating.blcg_bus_load_gating_prod(g, true);
	}
	if (g->ops.clock_gating.blcg_gr_load_gating_prod != NULL) {
		g->ops.clock_gating.blcg_gr_load_gating_prod(g, true);
	}
	if (g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod != NULL) {
		g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod(g,
			true);
	}
	if (g->ops.clock_gating.blcg_xbar_load_gating_prod != NULL) {
		g->ops.clock_gating.blcg_xbar_load_gating_prod(g, true);
	}
pg_gr_load:
	/* GR power-gating prod is loaded regardless of SLCG/BLCG state. */
	if (g->ops.clock_gating.pg_gr_load_gating_prod != NULL) {
		g->ops.clock_gating.pg_gr_load_gating_prod(g, true);
	}

	nvgpu_mutex_release(&g->cg_pg_lock);
}
415
416void nvgpu_cg_elcg_set_elcg_enabled(struct gk20a *g, bool enable)
417{
418 nvgpu_log_fn(g, " ");
419
420 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
421 return;
422 }
423
424 nvgpu_mutex_release(&g->cg_pg_lock);
425 if (enable) {
426 if (!g->elcg_enabled) {
427 g->elcg_enabled = true;
428 nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_AUTO);
429 }
430 } else {
431 if (g->elcg_enabled) {
432 g->elcg_enabled = false;
433 nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_RUN);
434 }
435 }
436 nvgpu_mutex_release(&g->cg_pg_lock);
437}
438
439void nvgpu_cg_blcg_set_blcg_enabled(struct gk20a *g, bool enable)
440{
441 bool load = false;
442
443 nvgpu_log_fn(g, " ");
444
445 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
446 return;
447 }
448
449 nvgpu_mutex_acquire(&g->cg_pg_lock);
450 if (enable) {
451 if (!g->blcg_enabled) {
452 load = true;
453 g->blcg_enabled = true;
454 }
455 } else {
456 if (g->blcg_enabled) {
457 load = true;
458 g->blcg_enabled = false;
459 }
460 }
461 if (!load ) {
462 goto done;
463 }
464
465 if (g->ops.clock_gating.blcg_bus_load_gating_prod != NULL) {
466 g->ops.clock_gating.blcg_bus_load_gating_prod(g, enable);
467 }
468 if (g->ops.clock_gating.blcg_ce_load_gating_prod != NULL) {
469 g->ops.clock_gating.blcg_ce_load_gating_prod(g, enable);
470 }
471 if (g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod != NULL) {
472 g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod(g,
473 enable);
474 }
475 if (g->ops.clock_gating.blcg_fb_load_gating_prod != NULL) {
476 g->ops.clock_gating.blcg_fb_load_gating_prod(g, enable);
477 }
478 if (g->ops.clock_gating.blcg_fifo_load_gating_prod != NULL) {
479 g->ops.clock_gating.blcg_fifo_load_gating_prod(g, enable);
480 }
481 if (g->ops.clock_gating.blcg_gr_load_gating_prod != NULL) {
482 g->ops.clock_gating.blcg_gr_load_gating_prod(g, enable);
483 }
484 if (g->ops.clock_gating.blcg_ltc_load_gating_prod != NULL) {
485 g->ops.clock_gating.blcg_ltc_load_gating_prod(g, enable);
486 }
487 if (g->ops.clock_gating.blcg_pmu_load_gating_prod != NULL) {
488 g->ops.clock_gating.blcg_pmu_load_gating_prod(g, enable);
489 }
490 if (g->ops.clock_gating.blcg_xbar_load_gating_prod != NULL) {
491 g->ops.clock_gating.blcg_xbar_load_gating_prod(g, enable);
492 }
493
494done:
495 nvgpu_mutex_release(&g->cg_pg_lock);
496}
497
498void nvgpu_cg_slcg_set_slcg_enabled(struct gk20a *g, bool enable)
499{
500 bool load = false;
501
502 nvgpu_log_fn(g, " ");
503
504 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
505 return;
506 }
507
508 nvgpu_mutex_acquire(&g->cg_pg_lock);
509 if (enable) {
510 if (!g->slcg_enabled) {
511 load = true;
512 g->slcg_enabled = true;
513 }
514 } else {
515 if (g->slcg_enabled) {
516 load = true;
517 g->slcg_enabled = false;
518 }
519 }
520 if (!load ) {
521 goto done;
522 }
523
524 if (g->ops.clock_gating.slcg_bus_load_gating_prod != NULL) {
525 g->ops.clock_gating.slcg_bus_load_gating_prod(g, enable);
526 }
527 if (g->ops.clock_gating.slcg_ce2_load_gating_prod != NULL) {
528 g->ops.clock_gating.slcg_ce2_load_gating_prod(g, enable);
529 }
530 if (g->ops.clock_gating.slcg_chiplet_load_gating_prod != NULL) {
531 g->ops.clock_gating.slcg_chiplet_load_gating_prod(g, enable);
532 }
533 if (g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod !=
534 NULL) {
535 g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod(g,
536 enable);
537 }
538 if (g->ops.clock_gating.slcg_fb_load_gating_prod != NULL) {
539 g->ops.clock_gating.slcg_fb_load_gating_prod(g, enable);
540 }
541 if (g->ops.clock_gating.slcg_fifo_load_gating_prod != NULL) {
542 g->ops.clock_gating.slcg_fifo_load_gating_prod(g, enable);
543 }
544 if (g->ops.clock_gating.slcg_gr_load_gating_prod != NULL) {
545 g->ops.clock_gating.slcg_gr_load_gating_prod(g, enable);
546 }
547 if (g->ops.clock_gating.slcg_ltc_load_gating_prod != NULL) {
548 g->ops.clock_gating.slcg_ltc_load_gating_prod(g, enable);
549 }
550 if (g->ops.clock_gating.slcg_perf_load_gating_prod != NULL) {
551 g->ops.clock_gating.slcg_perf_load_gating_prod(g, enable);
552 }
553 if (g->ops.clock_gating.slcg_priring_load_gating_prod != NULL) {
554 g->ops.clock_gating.slcg_priring_load_gating_prod(g,
555 enable);
556 }
557 if (g->ops.clock_gating.slcg_pmu_load_gating_prod != NULL) {
558 g->ops.clock_gating.slcg_pmu_load_gating_prod(g, enable);
559 }
560 if (g->ops.clock_gating.slcg_xbar_load_gating_prod != NULL) {
561 g->ops.clock_gating.slcg_xbar_load_gating_prod(g, enable);
562 }
563
564done:
565 nvgpu_mutex_release(&g->cg_pg_lock);
566}
diff --git a/drivers/gpu/nvgpu/common/power_features/pg/pg.c b/drivers/gpu/nvgpu/common/power_features/pg/pg.c
new file mode 100644
index 00000000..fa31f4e3
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/power_features/pg/pg.c
@@ -0,0 +1,106 @@
1/*
2 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/gk20a.h>
24#include <nvgpu/pmu.h>
25#include <nvgpu/power_features/pg.h>
26
27bool nvgpu_pg_elpg_is_enabled(struct gk20a *g)
28{
29 bool elpg_enabled;
30
31 nvgpu_log_fn(g, " ");
32
33 nvgpu_mutex_acquire(&g->cg_pg_lock);
34 elpg_enabled = g->elpg_enabled;
35 nvgpu_mutex_release(&g->cg_pg_lock);
36 return elpg_enabled;
37}
38
39int nvgpu_pg_elpg_enable(struct gk20a *g)
40{
41 int err = 0;
42
43 nvgpu_log_fn(g, " ");
44
45 if (!g->can_elpg) {
46 return 0;
47 }
48
49 nvgpu_mutex_acquire(&g->cg_pg_lock);
50 if (g->elpg_enabled) {
51 err = nvgpu_pmu_pg_global_enable(g, true);
52 }
53 nvgpu_mutex_release(&g->cg_pg_lock);
54 return err;
55}
56
57int nvgpu_pg_elpg_disable(struct gk20a *g)
58{
59 int err = 0;
60
61 nvgpu_log_fn(g, " ");
62
63 if (!g->can_elpg) {
64 return 0;
65 }
66
67 nvgpu_mutex_acquire(&g->cg_pg_lock);
68 if (g->elpg_enabled) {
69 err = nvgpu_pmu_pg_global_enable(g, false);
70 }
71 nvgpu_mutex_release(&g->cg_pg_lock);
72 return err;
73}
74
75int nvgpu_pg_elpg_set_elpg_enabled(struct gk20a *g, bool enable)
76{
77 int err = 0;
78 bool change_mode = false;
79
80 nvgpu_log_fn(g, " ");
81
82 if (!g->can_elpg) {
83 return 0;
84 }
85
86 nvgpu_mutex_acquire(&g->cg_pg_lock);
87 if (enable) {
88 if (!g->elpg_enabled) {
89 change_mode = true;
90 g->elpg_enabled = true;
91 }
92 } else {
93 if (g->elpg_enabled) {
94 change_mode = true;
95 g->elpg_enabled = false;
96 }
97 }
98 if (!change_mode) {
99 goto done;
100 }
101
102 err = nvgpu_pmu_pg_global_enable(g, enable);
103done:
104 nvgpu_mutex_release(&g->cg_pg_lock);
105 return err;
106}
diff --git a/drivers/gpu/nvgpu/common/power_features/power_features.c b/drivers/gpu/nvgpu/common/power_features/power_features.c
new file mode 100644
index 00000000..792fdc01
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/power_features/power_features.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/gk20a.h>
24#include <nvgpu/power_features/cg.h>
25#include <nvgpu/power_features/pg.h>
26#include <nvgpu/power_features/power_features.h>
27
/*
 * Disable power gating and clock gating together: ELPG first, then
 * SLCG (gr/perf/ltc), BLCG and ELCG. Clock-gating helpers report no
 * status, so the return value is the ELPG disable result; an ELPG
 * failure is logged but does not stop the clock-gating teardown.
 */
int nvgpu_cg_pg_disable(struct gk20a *g)
{
	int ret;

	nvgpu_log_fn(g, " ");

	/* disable elpg before clock gating */
	ret = nvgpu_pg_elpg_disable(g);
	if (ret != 0) {
		nvgpu_err(g, "failed to set disable elpg");
	}

	nvgpu_cg_slcg_gr_perf_ltc_load_disable(g);
	nvgpu_cg_blcg_mode_disable(g);
	nvgpu_cg_elcg_disable(g);

	return ret;
}
47
/*
 * Re-enable clock gating and power gating in the reverse order of
 * nvgpu_cg_pg_disable(): ELCG, BLCG, SLCG, then ELPG last. Returns the
 * ELPG enable result; a failure is logged before returning.
 */
int nvgpu_cg_pg_enable(struct gk20a *g)
{
	int ret;

	nvgpu_log_fn(g, " ");

	nvgpu_cg_elcg_enable(g);
	nvgpu_cg_blcg_mode_enable(g);
	nvgpu_cg_slcg_gr_perf_ltc_load_enable(g);

	ret = nvgpu_pg_elpg_enable(g);
	if (ret != 0) {
		nvgpu_err(g, "failed to set enable elpg");
	}

	return ret;
}
diff --git a/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c b/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c
index e30d94f9..8c9b23fa 100644
--- a/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c
+++ b/drivers/gpu/nvgpu/common/priv_ring/priv_ring_gm20b.c
@@ -26,6 +26,7 @@
26#include <nvgpu/enabled.h> 26#include <nvgpu/enabled.h>
27#include <nvgpu/io.h> 27#include <nvgpu/io.h>
28#include <nvgpu/utils.h> 28#include <nvgpu/utils.h>
29#include <nvgpu/power_features/cg.h>
29 30
30#include "priv_ring_gm20b.h" 31#include "priv_ring_gm20b.h"
31 32
@@ -41,10 +42,7 @@ void gm20b_priv_ring_enable(struct gk20a *g)
41 42
42 nvgpu_log(g, gpu_dbg_info, "enabling priv ring"); 43 nvgpu_log(g, gpu_dbg_info, "enabling priv ring");
43 44
44 if (g->ops.clock_gating.slcg_priring_load_gating_prod) { 45 nvgpu_cg_slcg_priring_load_enable(g);
45 g->ops.clock_gating.slcg_priring_load_gating_prod(g,
46 g->slcg_enabled);
47 }
48 46
49 gk20a_writel(g,pri_ringmaster_command_r(), 47 gk20a_writel(g,pri_ringmaster_command_r(),
50 0x4); 48 0x4);
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index 6df8f6e4..5052fc35 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -30,6 +30,7 @@
30#include <nvgpu/io.h> 30#include <nvgpu/io.h>
31#include <nvgpu/utils.h> 31#include <nvgpu/utils.h>
32#include <nvgpu/channel.h> 32#include <nvgpu/channel.h>
33#include <nvgpu/power_features/cg.h>
33 34
34#include "gk20a.h" 35#include "gk20a.h"
35#include "gk20a/fence_gk20a.h" 36#include "gk20a/fence_gk20a.h"
@@ -339,14 +340,9 @@ int gk20a_init_ce_support(struct gk20a *g)
339 340
340 g->ops.mc.reset(g, ce_reset_mask); 341 g->ops.mc.reset(g, ce_reset_mask);
341 342
342 if (g->ops.clock_gating.slcg_ce2_load_gating_prod) { 343 nvgpu_cg_slcg_ce2_load_enable(g);
343 g->ops.clock_gating.slcg_ce2_load_gating_prod(g, 344
344 g->slcg_enabled); 345 nvgpu_cg_blcg_ce_load_enable(g);
345 }
346 if (g->ops.clock_gating.blcg_ce_load_gating_prod) {
347 g->ops.clock_gating.blcg_ce_load_gating_prod(g,
348 g->blcg_enabled);
349 }
350 346
351 if (ce_app->initialised) { 347 if (ce_app->initialised) {
352 /* assume this happen during poweron/poweroff GPU sequence */ 348 /* assume this happen during poweron/poweroff GPU sequence */
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index adc13c3d..1686d01e 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Tegra GK20A GPU Debugger/Profiler Driver 2 * Tegra GK20A GPU Debugger/Profiler Driver
3 * 3 *
4 * Copyright (c) 2013-2018, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the "Software"),
@@ -32,6 +32,7 @@
32#include <nvgpu/utils.h> 32#include <nvgpu/utils.h>
33#include <nvgpu/channel.h> 33#include <nvgpu/channel.h>
34#include <nvgpu/unit.h> 34#include <nvgpu/unit.h>
35#include <nvgpu/power_features/power_features.h>
35 36
36#include "gk20a.h" 37#include "gk20a.h"
37#include "gr_gk20a.h" 38#include "gr_gk20a.h"
@@ -234,60 +235,28 @@ int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, bool disable_powergate)
234 return err; 235 return err;
235 } 236 }
236 237
237 /*do elpg disable before clock gating */ 238 err = nvgpu_cg_pg_disable(g);
238 nvgpu_pmu_pg_global_enable(g, false);
239 239
240 if (g->ops.clock_gating.slcg_gr_load_gating_prod) { 240 if (err == 0) {
241 g->ops.clock_gating.slcg_gr_load_gating_prod(g, 241 dbg_s->is_pg_disabled = true;
242 false); 242 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn,
243 "pg disabled");
243 } 244 }
244 if (g->ops.clock_gating.slcg_perf_load_gating_prod) {
245 g->ops.clock_gating.slcg_perf_load_gating_prod(g,
246 false);
247 }
248 if (g->ops.clock_gating.slcg_ltc_load_gating_prod) {
249 g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
250 false);
251 }
252
253 gr_gk20a_init_cg_mode(g, BLCG_MODE, BLCG_RUN);
254 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
255
256 dbg_s->is_pg_disabled = true;
257 } else { 245 } else {
258 /* restore (can) powergate, clk state */ 246 /* restore (can) powergate, clk state */
259 /* release pending exceptions to fault/be handled as usual */ 247 /* release pending exceptions to fault/be handled as usual */
260 /*TBD: ordering of these? */ 248 /*TBD: ordering of these? */
261 249
262 if (g->elcg_enabled) { 250 err = nvgpu_cg_pg_enable(g);
263 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_AUTO);
264 }
265 251
266 if (g->blcg_enabled) { 252 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
267 gr_gk20a_init_cg_mode(g, BLCG_MODE, BLCG_AUTO);
268 }
269
270 if (g->slcg_enabled) {
271 if (g->ops.clock_gating.slcg_ltc_load_gating_prod) {
272 g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
273 g->slcg_enabled);
274 }
275 if (g->ops.clock_gating.slcg_perf_load_gating_prod) {
276 g->ops.clock_gating.slcg_perf_load_gating_prod(g,
277 g->slcg_enabled);
278 }
279 if (g->ops.clock_gating.slcg_gr_load_gating_prod) {
280 g->ops.clock_gating.slcg_gr_load_gating_prod(g,
281 g->slcg_enabled);
282 }
283 }
284 nvgpu_pmu_pg_global_enable(g, true);
285
286 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn,
287 "module idle");
288 gk20a_idle(g); 253 gk20a_idle(g);
289 254
290 dbg_s->is_pg_disabled = false; 255 if (err == 0) {
256 dbg_s->is_pg_disabled = false;
257 nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn,
258 "pg enabled");
259 }
291 } 260 }
292 261
293 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s powergate mode = %s done", 262 nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s powergate mode = %s done",
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 78f777ae..6d89940a 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -45,6 +45,8 @@
45#include <nvgpu/utils.h> 45#include <nvgpu/utils.h>
46#include <nvgpu/channel.h> 46#include <nvgpu/channel.h>
47#include <nvgpu/unit.h> 47#include <nvgpu/unit.h>
48#include <nvgpu/power_features/power_features.h>
49#include <nvgpu/power_features/cg.h>
48 50
49#include "gk20a.h" 51#include "gk20a.h"
50#include "mm_gk20a.h" 52#include "mm_gk20a.h"
@@ -824,14 +826,9 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
824 /* enable pmc pfifo */ 826 /* enable pmc pfifo */
825 g->ops.mc.reset(g, g->ops.mc.reset_mask(g, NVGPU_UNIT_FIFO)); 827 g->ops.mc.reset(g, g->ops.mc.reset_mask(g, NVGPU_UNIT_FIFO));
826 828
827 if (g->ops.clock_gating.slcg_fifo_load_gating_prod) { 829 nvgpu_cg_slcg_fifo_load_enable(g);
828 g->ops.clock_gating.slcg_fifo_load_gating_prod(g, 830
829 g->slcg_enabled); 831 nvgpu_cg_blcg_fifo_load_enable(g);
830 }
831 if (g->ops.clock_gating.blcg_fifo_load_gating_prod) {
832 g->ops.clock_gating.blcg_fifo_load_gating_prod(g,
833 g->blcg_enabled);
834 }
835 832
836 timeout = gk20a_readl(g, fifo_fb_timeout_r()); 833 timeout = gk20a_readl(g, fifo_fb_timeout_r());
837 timeout = set_field(timeout, fifo_fb_timeout_period_m(), 834 timeout = set_field(timeout, fifo_fb_timeout_period_m(),
@@ -1361,8 +1358,8 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
1361 } 1358 }
1362 1359
1363 if (engine_enum == ENGINE_GR_GK20A) { 1360 if (engine_enum == ENGINE_GR_GK20A) {
1364 if (g->support_pmu && g->can_elpg) { 1361 if (g->support_pmu) {
1365 if (nvgpu_pmu_disable_elpg(g)) { 1362 if (nvgpu_pg_elpg_disable(g) != 0 ) {
1366 nvgpu_err(g, "failed to set disable elpg"); 1363 nvgpu_err(g, "failed to set disable elpg");
1367 } 1364 }
1368 } 1365 }
@@ -1391,8 +1388,10 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
1391 "HALT gr pipe not supported and " 1388 "HALT gr pipe not supported and "
1392 "gr cannot be reset without halting gr pipe"); 1389 "gr cannot be reset without halting gr pipe");
1393 } 1390 }
1394 if (g->support_pmu && g->can_elpg) { 1391 if (g->support_pmu) {
1395 nvgpu_pmu_enable_elpg(g); 1392 if (nvgpu_pg_elpg_enable(g) != 0 ) {
1393 nvgpu_err(g, "failed to set enable elpg");
1394 }
1396 } 1395 }
1397 } 1396 }
1398 if ((engine_enum == ENGINE_GRCE_GK20A) || 1397 if ((engine_enum == ENGINE_GRCE_GK20A) ||
@@ -1638,25 +1637,11 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
1638 g->fifo.deferred_reset_pending = false; 1637 g->fifo.deferred_reset_pending = false;
1639 1638
1640 /* Disable power management */ 1639 /* Disable power management */
1641 if (g->support_pmu && g->can_elpg) { 1640 if (g->support_pmu) {
1642 if (nvgpu_pmu_disable_elpg(g)) { 1641 if (nvgpu_cg_pg_disable(g) != 0) {
1643 nvgpu_err(g, "failed to set disable elpg"); 1642 nvgpu_warn(g, "fail to disable power mgmt");
1644 } 1643 }
1645 } 1644 }
1646 if (g->ops.clock_gating.slcg_gr_load_gating_prod) {
1647 g->ops.clock_gating.slcg_gr_load_gating_prod(g,
1648 false);
1649 }
1650 if (g->ops.clock_gating.slcg_perf_load_gating_prod) {
1651 g->ops.clock_gating.slcg_perf_load_gating_prod(g,
1652 false);
1653 }
1654 if (g->ops.clock_gating.slcg_ltc_load_gating_prod) {
1655 g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
1656 false);
1657 }
1658
1659 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
1660 1645
1661 /* Disable fifo access */ 1646 /* Disable fifo access */
1662 grfifo_ctl = gk20a_readl(g, gr_gpfifo_ctl_r()); 1647 grfifo_ctl = gk20a_readl(g, gr_gpfifo_ctl_r());
@@ -1842,8 +1827,10 @@ static bool gk20a_fifo_handle_mmu_fault_locked(
1842 gr_gpfifo_ctl_semaphore_access_enabled_f()); 1827 gr_gpfifo_ctl_semaphore_access_enabled_f());
1843 1828
1844 /* It is safe to enable ELPG again. */ 1829 /* It is safe to enable ELPG again. */
1845 if (g->support_pmu && g->can_elpg) { 1830 if (g->support_pmu) {
1846 nvgpu_pmu_enable_elpg(g); 1831 if (nvgpu_cg_pg_enable(g) != 0) {
1832 nvgpu_warn(g, "fail to enable power mgmt");
1833 }
1847 } 1834 }
1848 1835
1849 return verbose; 1836 return verbose;
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 9e4d3c37..a4c1ce58 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -43,6 +43,8 @@
43#include <nvgpu/utils.h> 43#include <nvgpu/utils.h>
44#include <nvgpu/channel.h> 44#include <nvgpu/channel.h>
45#include <nvgpu/unit.h> 45#include <nvgpu/unit.h>
46#include <nvgpu/power_features/pg.h>
47#include <nvgpu/power_features/cg.h>
46 48
47#include "gk20a.h" 49#include "gk20a.h"
48#include "gr_gk20a.h" 50#include "gr_gk20a.h"
@@ -91,8 +93,6 @@ static void gr_gk20a_free_channel_patch_ctx(struct gk20a *g,
91/* golden ctx image */ 93/* golden ctx image */
92static int gr_gk20a_init_golden_ctx_image(struct gk20a *g, 94static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
93 struct channel_gk20a *c); 95 struct channel_gk20a *c);
94/*elcg init */
95static void gr_gk20a_enable_elcg(struct gk20a *g);
96 96
97int gr_gk20a_get_ctx_id(struct gk20a *g, 97int gr_gk20a_get_ctx_id(struct gk20a *g,
98 struct channel_gk20a *c, 98 struct channel_gk20a *c,
@@ -4227,33 +4227,6 @@ int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
4227 gr_gk20a_add_zbc(g, gr, zbc_val)); 4227 gr_gk20a_add_zbc(g, gr, zbc_val));
4228} 4228}
4229 4229
4230void gr_gk20a_init_cg_mode(struct gk20a *g, u32 cgmode, u32 mode_config)
4231{
4232 u32 engine_idx;
4233 u32 active_engine_id = 0;
4234 struct fifo_engine_info_gk20a *engine_info = NULL;
4235 struct fifo_gk20a *f = &g->fifo;
4236
4237 for (engine_idx = 0; engine_idx < f->num_engines; ++engine_idx) {
4238 active_engine_id = f->active_engines_list[engine_idx];
4239 engine_info = &f->engine_info[active_engine_id];
4240
4241 /* gr_engine supports both BLCG and ELCG */
4242 if ((cgmode == BLCG_MODE) &&
4243 (engine_info->engine_enum == ENGINE_GR_GK20A)) {
4244 g->ops.therm.init_blcg_mode(g, mode_config, active_engine_id);
4245 break;
4246 } else if (cgmode == ELCG_MODE) {
4247 g->ops.therm.init_elcg_mode(g, mode_config,
4248 active_engine_id);
4249 } else {
4250 nvgpu_err(g, "invalid cg mode %d, config %d for "
4251 "act_eng_id %d",
4252 cgmode, mode_config, active_engine_id);
4253 }
4254 }
4255}
4256
4257void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, 4230void gr_gk20a_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries,
4258 u32 *zcull_map_tiles) 4231 u32 *zcull_map_tiles)
4259{ 4232{
@@ -4655,60 +4628,6 @@ out:
4655 return err; 4628 return err;
4656} 4629}
4657 4630
4658static void gr_gk20a_load_gating_prod(struct gk20a *g)
4659{
4660 nvgpu_log_fn(g, " ");
4661
4662 /* slcg prod values */
4663 if (g->ops.clock_gating.slcg_bus_load_gating_prod) {
4664 g->ops.clock_gating.slcg_bus_load_gating_prod(g,
4665 g->slcg_enabled);
4666 }
4667 if (g->ops.clock_gating.slcg_chiplet_load_gating_prod) {
4668 g->ops.clock_gating.slcg_chiplet_load_gating_prod(g,
4669 g->slcg_enabled);
4670 }
4671 if (g->ops.clock_gating.slcg_gr_load_gating_prod) {
4672 g->ops.clock_gating.slcg_gr_load_gating_prod(g,
4673 g->slcg_enabled);
4674 }
4675 if (g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod) {
4676 g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod(g,
4677 g->slcg_enabled);
4678 }
4679 if (g->ops.clock_gating.slcg_perf_load_gating_prod) {
4680 g->ops.clock_gating.slcg_perf_load_gating_prod(g,
4681 g->slcg_enabled);
4682 }
4683 if (g->ops.clock_gating.slcg_xbar_load_gating_prod) {
4684 g->ops.clock_gating.slcg_xbar_load_gating_prod(g,
4685 g->slcg_enabled);
4686 }
4687
4688 /* blcg prod values */
4689 if (g->ops.clock_gating.blcg_bus_load_gating_prod) {
4690 g->ops.clock_gating.blcg_bus_load_gating_prod(g,
4691 g->blcg_enabled);
4692 }
4693 if (g->ops.clock_gating.blcg_gr_load_gating_prod) {
4694 g->ops.clock_gating.blcg_gr_load_gating_prod(g,
4695 g->blcg_enabled);
4696 }
4697 if (g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod) {
4698 g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod(g,
4699 g->blcg_enabled);
4700 }
4701 if (g->ops.clock_gating.blcg_xbar_load_gating_prod) {
4702 g->ops.clock_gating.blcg_xbar_load_gating_prod(g,
4703 g->blcg_enabled);
4704 }
4705 if (g->ops.clock_gating.pg_gr_load_gating_prod) {
4706 g->ops.clock_gating.pg_gr_load_gating_prod(g, true);
4707 }
4708
4709 nvgpu_log_fn(g, "done");
4710}
4711
4712static int gk20a_init_gr_prepare(struct gk20a *g) 4631static int gk20a_init_gr_prepare(struct gk20a *g)
4713{ 4632{
4714 u32 err = 0; 4633 u32 err = 0;
@@ -4718,10 +4637,10 @@ static int gk20a_init_gr_prepare(struct gk20a *g)
4718 g->ops.mc.reset_mask(g, NVGPU_UNIT_BLG) | 4637 g->ops.mc.reset_mask(g, NVGPU_UNIT_BLG) |
4719 g->ops.mc.reset_mask(g, NVGPU_UNIT_PERFMON)); 4638 g->ops.mc.reset_mask(g, NVGPU_UNIT_PERFMON));
4720 4639
4721 gr_gk20a_load_gating_prod(g); 4640 nvgpu_cg_init_gr_load_gating_prod(g);
4722 4641
4723 /* Disable elcg until it gets enabled later in the init*/ 4642 /* Disable elcg until it gets enabled later in the init*/
4724 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN); 4643 nvgpu_cg_elcg_disable(g);
4725 4644
4726 /* enable fifo access */ 4645 /* enable fifo access */
4727 gk20a_writel(g, gr_gpfifo_ctl_r(), 4646 gk20a_writel(g, gr_gpfifo_ctl_r(),
@@ -5041,7 +4960,7 @@ int gk20a_init_gr_support(struct gk20a *g)
5041 } 4960 }
5042 } 4961 }
5043 4962
5044 gr_gk20a_enable_elcg(g); 4963 nvgpu_cg_elcg_enable(g);
5045 /* GR is inialized, signal possible waiters */ 4964 /* GR is inialized, signal possible waiters */
5046 g->gr.initialized = true; 4965 g->gr.initialized = true;
5047 nvgpu_cond_signal(&g->gr.init_wq); 4966 nvgpu_cond_signal(&g->gr.init_wq);
@@ -5128,15 +5047,6 @@ int gk20a_enable_gr_hw(struct gk20a *g)
5128 return 0; 5047 return 0;
5129} 5048}
5130 5049
5131static void gr_gk20a_enable_elcg(struct gk20a *g)
5132{
5133 if (g->elcg_enabled) {
5134 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_AUTO);
5135 } else {
5136 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
5137 }
5138}
5139
5140int gk20a_gr_reset(struct gk20a *g) 5050int gk20a_gr_reset(struct gk20a *g)
5141{ 5051{
5142 int err; 5052 int err;
@@ -5193,8 +5103,8 @@ int gk20a_gr_reset(struct gk20a *g)
5193 return err; 5103 return err;
5194 } 5104 }
5195 5105
5196 gr_gk20a_load_gating_prod(g); 5106 nvgpu_cg_init_gr_load_gating_prod(g);
5197 gr_gk20a_enable_elcg(g); 5107 nvgpu_cg_elcg_enable(g);
5198 5108
5199 return err; 5109 return err;
5200} 5110}
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index d795a3fc..8ff2cfd4 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -28,6 +28,7 @@
28 28
29#include "gr_ctx_gk20a.h" 29#include "gr_ctx_gk20a.h"
30#include "mm_gk20a.h" 30#include "mm_gk20a.h"
31#include <nvgpu/power_features/pg.h>
31 32
32#include <nvgpu/comptags.h> 33#include <nvgpu/comptags.h>
33#include <nvgpu/cond.h> 34#include <nvgpu/cond.h>
@@ -598,16 +599,16 @@ u32 gk20a_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g);
598#define gr_gk20a_elpg_protected_call(g, func) \ 599#define gr_gk20a_elpg_protected_call(g, func) \
599 ({ \ 600 ({ \
600 int err = 0; \ 601 int err = 0; \
601 if ((g->support_pmu) && (g->elpg_enabled)) {\ 602 if (g->support_pmu) {\
602 err = nvgpu_pmu_disable_elpg(g); \ 603 err = nvgpu_pg_elpg_disable(g);\
603 if (err != 0) {\ 604 if (err != 0) {\
604 nvgpu_pmu_enable_elpg(g); \ 605 err = nvgpu_pg_elpg_enable(g); \
605 } \ 606 } \
606 } \ 607 } \
607 if (err == 0) { \ 608 if (err == 0) { \
608 err = func; \ 609 err = func; \
609 if ((g->support_pmu) && (g->elpg_enabled)) {\ 610 if (g->support_pmu) {\
610 nvgpu_pmu_enable_elpg(g); \ 611 (void)nvgpu_pg_elpg_enable(g); \
611 } \ 612 } \
612 } \ 613 } \
613 err; \ 614 err; \
diff --git a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
index df0ae58d..30fbf70b 100644
--- a/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/pmu_gm20b.c
@@ -106,7 +106,7 @@ int gm20b_pmu_setup_elpg(struct gk20a *g)
106 106
107 nvgpu_log_fn(g, " "); 107 nvgpu_log_fn(g, " ");
108 108
109 if (g->elpg_enabled) { 109 if (g->can_elpg && g->elpg_enabled) {
110 reg_writes = ((sizeof(_pginitseq_gm20b) / 110 reg_writes = ((sizeof(_pginitseq_gm20b) /
111 sizeof((_pginitseq_gm20b)[0]))); 111 sizeof((_pginitseq_gm20b)[0])));
112 /* Initialize registers with production values*/ 112 /* Initialize registers with production values*/
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
index d268ab88..6f12cf9f 100644
--- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
@@ -282,7 +282,7 @@ int gp10b_pmu_setup_elpg(struct gk20a *g)
282 282
283 nvgpu_log_fn(g, " "); 283 nvgpu_log_fn(g, " ");
284 284
285 if (g->elpg_enabled) { 285 if (g->can_elpg && g->elpg_enabled) {
286 reg_writes = ((sizeof(_pginitseq_gp10b) / 286 reg_writes = ((sizeof(_pginitseq_gp10b) /
287 sizeof((_pginitseq_gp10b)[0]))); 287 sizeof((_pginitseq_gp10b)[0])));
288 /* Initialize registers with production values*/ 288 /* Initialize registers with production values*/
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 5b84df47..b3c59f84 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -43,6 +43,8 @@
43#include <nvgpu/gk20a.h> 43#include <nvgpu/gk20a.h>
44#include <nvgpu/channel.h> 44#include <nvgpu/channel.h>
45#include <nvgpu/unit.h> 45#include <nvgpu/unit.h>
46#include <nvgpu/power_features/cg.h>
47#include <nvgpu/power_features/power_features.h>
46 48
47#include "gk20a/fifo_gk20a.h" 49#include "gk20a/fifo_gk20a.h"
48 50
@@ -1095,25 +1097,11 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
1095 g->fifo.deferred_reset_pending = false; 1097 g->fifo.deferred_reset_pending = false;
1096 1098
1097 /* Disable power management */ 1099 /* Disable power management */
1098 if (g->support_pmu && g->elpg_enabled) { 1100 if (g->support_pmu) {
1099 if (nvgpu_pmu_disable_elpg(g)) { 1101 if (nvgpu_cg_pg_disable(g) != 0) {
1100 nvgpu_err(g, "failed to set disable elpg"); 1102 nvgpu_warn(g, "fail to disable power mgmt");
1101 } 1103 }
1102 } 1104 }
1103 if (g->ops.clock_gating.slcg_gr_load_gating_prod) {
1104 g->ops.clock_gating.slcg_gr_load_gating_prod(g,
1105 false);
1106 }
1107 if (g->ops.clock_gating.slcg_perf_load_gating_prod) {
1108 g->ops.clock_gating.slcg_perf_load_gating_prod(g,
1109 false);
1110 }
1111 if (g->ops.clock_gating.slcg_ltc_load_gating_prod) {
1112 g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
1113 false);
1114 }
1115
1116 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
1117 1105
1118 if (rc_type == RC_TYPE_MMU_FAULT) { 1106 if (rc_type == RC_TYPE_MMU_FAULT) {
1119 gk20a_debug_dump(g); 1107 gk20a_debug_dump(g);
@@ -1220,8 +1208,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
1220 gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_ENABLED); 1208 gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_ENABLED);
1221 1209
1222 /* It is safe to enable ELPG again. */ 1210 /* It is safe to enable ELPG again. */
1223 if (g->support_pmu && g->elpg_enabled) { 1211 if (g->support_pmu) {
1224 nvgpu_pmu_enable_elpg(g); 1212 if (nvgpu_cg_pg_enable(g) != 0) {
1213 nvgpu_warn(g, "fail to enable power mgmt");
1214 }
1225 } 1215 }
1226 1216
1227 g->ops.fifo.teardown_unmask_intr(g); 1217 g->ops.fifo.teardown_unmask_intr(g);
@@ -1312,18 +1302,11 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
1312 /* enable pmc pfifo */ 1302 /* enable pmc pfifo */
1313 g->ops.mc.reset(g, g->ops.mc.reset_mask(g, NVGPU_UNIT_FIFO)); 1303 g->ops.mc.reset(g, g->ops.mc.reset_mask(g, NVGPU_UNIT_FIFO));
1314 1304
1315 if (g->ops.clock_gating.slcg_ce2_load_gating_prod) { 1305 nvgpu_cg_slcg_ce2_load_enable(g);
1316 g->ops.clock_gating.slcg_ce2_load_gating_prod(g, 1306
1317 g->slcg_enabled); 1307 nvgpu_cg_slcg_fifo_load_enable(g);
1318 } 1308
1319 if (g->ops.clock_gating.slcg_fifo_load_gating_prod) { 1309 nvgpu_cg_blcg_fifo_load_enable(g);
1320 g->ops.clock_gating.slcg_fifo_load_gating_prod(g,
1321 g->slcg_enabled);
1322 }
1323 if (g->ops.clock_gating.blcg_fifo_load_gating_prod) {
1324 g->ops.clock_gating.blcg_fifo_load_gating_prod(g,
1325 g->blcg_enabled);
1326 }
1327 1310
1328 timeout = gk20a_readl(g, fifo_fb_timeout_r()); 1311 timeout = gk20a_readl(g, fifo_fb_timeout_r());
1329 nvgpu_log_info(g, "fifo_fb_timeout reg val = 0x%08x", timeout); 1312 nvgpu_log_info(g, "fifo_fb_timeout reg val = 0x%08x", timeout);
diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
index a9f183b1..1001ba16 100644
--- a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
@@ -124,7 +124,7 @@ int gv11b_pmu_setup_elpg(struct gk20a *g)
124 124
125 nvgpu_log_fn(g, " "); 125 nvgpu_log_fn(g, " ");
126 126
127 if (g->elpg_enabled) { 127 if (g->can_elpg && g->elpg_enabled) {
128 reg_writes = ((sizeof(_pginitseq_gv11b) / 128 reg_writes = ((sizeof(_pginitseq_gv11b) /
129 sizeof((_pginitseq_gv11b)[0]))); 129 sizeof((_pginitseq_gv11b)[0])));
130 /* Initialize registers with production values*/ 130 /* Initialize registers with production values*/
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
index 81a4e7b8..f393e799 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/gk20a.h
@@ -1490,6 +1490,7 @@ struct gk20a {
1490 u32 max_timeslice_us; 1490 u32 max_timeslice_us;
1491 bool runlist_interleave; 1491 bool runlist_interleave;
1492 1492
1493 struct nvgpu_mutex cg_pg_lock;
1493 bool slcg_enabled; 1494 bool slcg_enabled;
1494 bool blcg_enabled; 1495 bool blcg_enabled;
1495 bool elcg_enabled; 1496 bool elcg_enabled;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/power_features/cg.h b/drivers/gpu/nvgpu/include/nvgpu/power_features/cg.h
new file mode 100644
index 00000000..3bb86267
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/power_features/cg.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23
24#ifndef NVGPU_POWER_FEATURES_CG_H
25#define NVGPU_POWER_FEATURES_CG_H
26
27#include <nvgpu/types.h>
28
29struct gk20a;
30struct fifo_gk20a;
31
32void nvgpu_cg_init_gr_load_gating_prod(struct gk20a *g);
33void nvgpu_cg_elcg_enable(struct gk20a *g);
34void nvgpu_cg_elcg_disable(struct gk20a *g);
35void nvgpu_cg_elcg_set_elcg_enabled(struct gk20a *g, bool enable);
36
37void nvgpu_cg_blcg_mode_enable(struct gk20a *g);
38void nvgpu_cg_blcg_mode_disable(struct gk20a *g);
39void nvgpu_cg_blcg_fb_ltc_load_enable(struct gk20a *g);
40void nvgpu_cg_blcg_fifo_load_enable(struct gk20a *g);
41void nvgpu_cg_blcg_pmu_load_enable(struct gk20a *g);
42void nvgpu_cg_blcg_ce_load_enable(struct gk20a *g);
43void nvgpu_cg_blcg_gr_load_enable(struct gk20a *g);
44void nvgpu_cg_blcg_set_blcg_enabled(struct gk20a *g, bool enable);
45
46void nvgpu_cg_slcg_gr_perf_ltc_load_enable(struct gk20a *g);
47void nvgpu_cg_slcg_gr_perf_ltc_load_disable(struct gk20a *g);
48void nvgpu_cg_slcg_fb_ltc_load_enable(struct gk20a *g);
49void nvgpu_cg_slcg_priring_load_enable(struct gk20a *g);
50void nvgpu_cg_slcg_fifo_load_enable(struct gk20a *g);
51void nvgpu_cg_slcg_pmu_load_enable(struct gk20a *g);
52void nvgpu_cg_slcg_ce2_load_enable(struct gk20a *g);
53void nvgpu_cg_slcg_set_slcg_enabled(struct gk20a *g, bool enable);
54
55#endif /*NVGPU_POWER_FEATURES_CG_H*/
diff --git a/drivers/gpu/nvgpu/include/nvgpu/power_features/pg.h b/drivers/gpu/nvgpu/include/nvgpu/power_features/pg.h
new file mode 100644
index 00000000..d7357807
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/power_features/pg.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23
24#ifndef NVGPU_POWER_FEATURES_PG_H
25#define NVGPU_POWER_FEATURES_PG_H
26
27#include <nvgpu/types.h>
28
29struct gk20a;
30
31int nvgpu_pg_elpg_disable(struct gk20a *g);
32int nvgpu_pg_elpg_enable(struct gk20a *g);
33bool nvgpu_pg_elpg_is_enabled(struct gk20a *g);
34int nvgpu_pg_elpg_set_elpg_enabled(struct gk20a *g, bool enable);
35
36#endif /*NVGPU_POWER_FEATURES_PG_H*/
diff --git a/drivers/gpu/nvgpu/include/nvgpu/power_features/power_features.h b/drivers/gpu/nvgpu/include/nvgpu/power_features/power_features.h
new file mode 100644
index 00000000..f6ffccf1
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/power_features/power_features.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23
24#ifndef NVGPU_POWER_FEATURES_H
25#define NVGPU_POWER_FEATURES_H
26
27#include <nvgpu/types.h>
28
29struct gk20a;
30
31int nvgpu_cg_pg_disable(struct gk20a *g);
32int nvgpu_cg_pg_enable(struct gk20a *g);
33
34#endif /*NVGPU_POWER_FEATURES_H*/
diff --git a/drivers/gpu/nvgpu/os/linux/driver_common.c b/drivers/gpu/nvgpu/os/linux/driver_common.c
index cf7877e2..ea4dadac 100644
--- a/drivers/gpu/nvgpu/os/linux/driver_common.c
+++ b/drivers/gpu/nvgpu/os/linux/driver_common.c
@@ -65,6 +65,7 @@ static void nvgpu_init_vars(struct gk20a *g)
65 nvgpu_mutex_init(&g->ctxsw_disable_lock); 65 nvgpu_mutex_init(&g->ctxsw_disable_lock);
66 nvgpu_mutex_init(&g->tpc_pg_lock); 66 nvgpu_mutex_init(&g->tpc_pg_lock);
67 nvgpu_mutex_init(&g->clk_arb_enable_lock); 67 nvgpu_mutex_init(&g->clk_arb_enable_lock);
68 nvgpu_mutex_init(&g->cg_pg_lock);
68 69
69 /* Init the clock req count to 0 */ 70 /* Init the clock req count to 0 */
70 nvgpu_atomic_set(&g->clk_arb_global_nr, 0); 71 nvgpu_atomic_set(&g->clk_arb_global_nr, 0);
diff --git a/drivers/gpu/nvgpu/os/linux/sysfs.c b/drivers/gpu/nvgpu/os/linux/sysfs.c
index 1ffb6539..759c12e8 100644
--- a/drivers/gpu/nvgpu/os/linux/sysfs.c
+++ b/drivers/gpu/nvgpu/os/linux/sysfs.c
@@ -21,6 +21,8 @@
21#include <nvgpu/kmem.h> 21#include <nvgpu/kmem.h>
22#include <nvgpu/nvhost.h> 22#include <nvgpu/nvhost.h>
23#include <nvgpu/ptimer.h> 23#include <nvgpu/ptimer.h>
24#include <nvgpu/power_features/cg.h>
25#include <nvgpu/power_features/pg.h>
24 26
25#include "os_linux.h" 27#include "os_linux.h"
26#include "sysfs.h" 28#include "sysfs.h"
@@ -49,16 +51,14 @@ static ssize_t elcg_enable_store(struct device *dev,
49 return err; 51 return err;
50 52
51 if (val) { 53 if (val) {
52 g->elcg_enabled = true; 54 nvgpu_cg_elcg_set_elcg_enabled(g, true);
53 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_AUTO);
54 } else { 55 } else {
55 g->elcg_enabled = false; 56 nvgpu_cg_elcg_set_elcg_enabled(g, false);
56 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
57 } 57 }
58 58
59 gk20a_idle(g); 59 gk20a_idle(g);
60 60
61 nvgpu_info(g, "ELCG is %s.", g->elcg_enabled ? "enabled" : 61 nvgpu_info(g, "ELCG is %s.", val ? "enabled" :
62 "disabled"); 62 "disabled");
63 63
64 return count; 64 return count;
@@ -84,45 +84,19 @@ static ssize_t blcg_enable_store(struct device *dev,
84 if (kstrtoul(buf, 10, &val) < 0) 84 if (kstrtoul(buf, 10, &val) < 0)
85 return -EINVAL; 85 return -EINVAL;
86 86
87 if (val)
88 g->blcg_enabled = true;
89 else
90 g->blcg_enabled = false;
91
92 err = gk20a_busy(g); 87 err = gk20a_busy(g);
93 if (err) 88 if (err)
94 return err; 89 return err;
95 90
96 if (g->ops.clock_gating.blcg_bus_load_gating_prod) 91 if (val) {
97 g->ops.clock_gating.blcg_bus_load_gating_prod(g, 92 nvgpu_cg_blcg_set_blcg_enabled(g, true);
98 g->blcg_enabled); 93 } else {
99 if (g->ops.clock_gating.blcg_ce_load_gating_prod) 94 nvgpu_cg_blcg_set_blcg_enabled(g, false);
100 g->ops.clock_gating.blcg_ce_load_gating_prod(g, 95 }
101 g->blcg_enabled); 96
102 if (g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod)
103 g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod(g,
104 g->blcg_enabled);
105 if (g->ops.clock_gating.blcg_fb_load_gating_prod)
106 g->ops.clock_gating.blcg_fb_load_gating_prod(g,
107 g->blcg_enabled);
108 if (g->ops.clock_gating.blcg_fifo_load_gating_prod)
109 g->ops.clock_gating.blcg_fifo_load_gating_prod(g,
110 g->blcg_enabled);
111 if (g->ops.clock_gating.blcg_gr_load_gating_prod)
112 g->ops.clock_gating.blcg_gr_load_gating_prod(g,
113 g->blcg_enabled);
114 if (g->ops.clock_gating.blcg_ltc_load_gating_prod)
115 g->ops.clock_gating.blcg_ltc_load_gating_prod(g,
116 g->blcg_enabled);
117 if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
118 g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
119 g->blcg_enabled);
120 if (g->ops.clock_gating.blcg_xbar_load_gating_prod)
121 g->ops.clock_gating.blcg_xbar_load_gating_prod(g,
122 g->blcg_enabled);
123 gk20a_idle(g); 97 gk20a_idle(g);
124 98
125 nvgpu_info(g, "BLCG is %s.", g->blcg_enabled ? "enabled" : 99 nvgpu_info(g, "BLCG is %s.", val ? "enabled" :
126 "disabled"); 100 "disabled");
127 101
128 return count; 102 return count;
@@ -149,59 +123,25 @@ static ssize_t slcg_enable_store(struct device *dev,
149 if (kstrtoul(buf, 10, &val) < 0) 123 if (kstrtoul(buf, 10, &val) < 0)
150 return -EINVAL; 124 return -EINVAL;
151 125
152 if (val) 126 err = gk20a_busy(g);
153 g->slcg_enabled = true; 127 if (err) {
154 else 128 return err;
155 g->slcg_enabled = false; 129 }
130
131 if (val) {
132 nvgpu_cg_slcg_set_slcg_enabled(g, true);
133 } else {
134 nvgpu_cg_slcg_set_slcg_enabled(g, false);
135 }
156 136
157 /* 137 /*
158 * TODO: slcg_therm_load_gating is not enabled anywhere during 138 * TODO: slcg_therm_load_gating is not enabled anywhere during
159 * init. Therefore, it would be incongruous to add it here. Once 139 * init. Therefore, it would be incongruous to add it here. Once
160 * it is added to init, we should add it here too. 140 * it is added to init, we should add it here too.
161 */ 141 */
162 err = gk20a_busy(g);
163 if (err)
164 return err;
165
166 if (g->ops.clock_gating.slcg_bus_load_gating_prod)
167 g->ops.clock_gating.slcg_bus_load_gating_prod(g,
168 g->slcg_enabled);
169 if (g->ops.clock_gating.slcg_ce2_load_gating_prod)
170 g->ops.clock_gating.slcg_ce2_load_gating_prod(g,
171 g->slcg_enabled);
172 if (g->ops.clock_gating.slcg_chiplet_load_gating_prod)
173 g->ops.clock_gating.slcg_chiplet_load_gating_prod(g,
174 g->slcg_enabled);
175 if (g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod)
176 g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod(g,
177 g->slcg_enabled);
178 if (g->ops.clock_gating.slcg_fb_load_gating_prod)
179 g->ops.clock_gating.slcg_fb_load_gating_prod(g,
180 g->slcg_enabled);
181 if (g->ops.clock_gating.slcg_fifo_load_gating_prod)
182 g->ops.clock_gating.slcg_fifo_load_gating_prod(g,
183 g->slcg_enabled);
184 if (g->ops.clock_gating.slcg_gr_load_gating_prod)
185 g->ops.clock_gating.slcg_gr_load_gating_prod(g,
186 g->slcg_enabled);
187 if (g->ops.clock_gating.slcg_ltc_load_gating_prod)
188 g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
189 g->slcg_enabled);
190 if (g->ops.clock_gating.slcg_perf_load_gating_prod)
191 g->ops.clock_gating.slcg_perf_load_gating_prod(g,
192 g->slcg_enabled);
193 if (g->ops.clock_gating.slcg_priring_load_gating_prod)
194 g->ops.clock_gating.slcg_priring_load_gating_prod(g,
195 g->slcg_enabled);
196 if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
197 g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
198 g->slcg_enabled);
199 if (g->ops.clock_gating.slcg_xbar_load_gating_prod)
200 g->ops.clock_gating.slcg_xbar_load_gating_prod(g,
201 g->slcg_enabled);
202 gk20a_idle(g); 142 gk20a_idle(g);
203 143
204 nvgpu_info(g, "SLCG is %s.", g->slcg_enabled ? "enabled" : 144 nvgpu_info(g, "SLCG is %s.", val ? "enabled" :
205 "disabled"); 145 "disabled");
206 146
207 return count; 147 return count;
@@ -474,7 +414,7 @@ static ssize_t elpg_enable_store(struct device *dev,
474 return -EINVAL; 414 return -EINVAL;
475 415
476 if (!g->power_on) { 416 if (!g->power_on) {
477 g->elpg_enabled = val ? true : false; 417 return -EINVAL;
478 } else { 418 } else {
479 err = gk20a_busy(g); 419 err = gk20a_busy(g);
480 if (err) 420 if (err)
@@ -483,25 +423,14 @@ static ssize_t elpg_enable_store(struct device *dev,
483 * Since elpg is refcounted, we should not unnecessarily call 423 * Since elpg is refcounted, we should not unnecessarily call
484 * enable/disable if it is already so. 424 * enable/disable if it is already so.
485 */ 425 */
486 if (val && !g->elpg_enabled) { 426 if (val != 0) {
487 g->elpg_enabled = true; 427 nvgpu_pg_elpg_set_elpg_enabled(g, true);
488 nvgpu_pmu_pg_global_enable(g, true); 428 } else {
489 429 nvgpu_pg_elpg_set_elpg_enabled(g, false);
490 } else if (!val && g->elpg_enabled) {
491 if (g->ops.pmu.pmu_pg_engines_feature_list &&
492 g->ops.pmu.pmu_pg_engines_feature_list(g,
493 PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
494 NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
495 nvgpu_pmu_pg_global_enable(g, false);
496 g->elpg_enabled = false;
497 } else {
498 g->elpg_enabled = false;
499 nvgpu_pmu_pg_global_enable(g, false);
500 }
501 } 430 }
502 gk20a_idle(g); 431 gk20a_idle(g);
503 } 432 }
504 nvgpu_info(g, "ELPG is %s.", g->elpg_enabled ? "enabled" : 433 nvgpu_info(g, "ELPG is %s.", val ? "enabled" :
505 "disabled"); 434 "disabled");
506 435
507 return count; 436 return count;
@@ -512,7 +441,8 @@ static ssize_t elpg_enable_read(struct device *dev,
512{ 441{
513 struct gk20a *g = get_gk20a(dev); 442 struct gk20a *g = get_gk20a(dev);
514 443
515 return snprintf(buf, PAGE_SIZE, "%d\n", g->elpg_enabled ? 1 : 0); 444 return snprintf(buf, PAGE_SIZE, "%d\n",
445 nvgpu_pg_elpg_is_enabled(g) ? 1 : 0);
516} 446}
517 447
518static DEVICE_ATTR(elpg_enable, ROOTRW, elpg_enable_read, elpg_enable_store); 448static DEVICE_ATTR(elpg_enable, ROOTRW, elpg_enable_read, elpg_enable_store);
@@ -610,8 +540,9 @@ static ssize_t mscg_enable_store(struct device *dev,
610 /* make status visible */ 540 /* make status visible */
611 smp_mb(); 541 smp_mb();
612 g->mscg_enabled = false; 542 g->mscg_enabled = false;
613 if (g->elpg_enabled) 543 if (nvgpu_pg_elpg_is_enabled(g)) {
614 nvgpu_pmu_pg_global_enable(g, true); 544 nvgpu_pg_elpg_enable(g);
545 }
615 } 546 }
616 g->mscg_enabled = false; 547 g->mscg_enabled = false;
617 } 548 }
diff --git a/drivers/gpu/nvgpu/os/linux/vgpu/vgpu_linux.c b/drivers/gpu/nvgpu/os/linux/vgpu/vgpu_linux.c
index 91e94696..522f1b86 100644
--- a/drivers/gpu/nvgpu/os/linux/vgpu/vgpu_linux.c
+++ b/drivers/gpu/nvgpu/os/linux/vgpu/vgpu_linux.c
@@ -69,6 +69,7 @@ static void vgpu_init_vars(struct gk20a *g, struct gk20a_platform *platform)
69 nvgpu_mutex_init(&g->power_lock); 69 nvgpu_mutex_init(&g->power_lock);
70 nvgpu_mutex_init(&g->ctxsw_disable_lock); 70 nvgpu_mutex_init(&g->ctxsw_disable_lock);
71 nvgpu_mutex_init(&g->clk_arb_enable_lock); 71 nvgpu_mutex_init(&g->clk_arb_enable_lock);
72 nvgpu_mutex_init(&g->cg_pg_lock);
72 73
73 nvgpu_mutex_init(&priv->vgpu_clk_get_freq_lock); 74 nvgpu_mutex_init(&priv->vgpu_clk_get_freq_lock);
74 75