summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/common/power_features
diff options
context:
space:
mode:
authorDebarshi Dutta <ddutta@nvidia.com>2019-04-30 04:24:08 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2019-05-09 17:41:30 -0400
commitc81cc032c48a1b25e095b17b77399166c9091ff3 (patch)
treeace7d238c55bbb5e96fb6fd74deb156f3c513bae /drivers/gpu/nvgpu/common/power_features
parentf495f52c70c6bd7b7a4e6897270e4696efa57d5c (diff)
gpu: nvgpu: add cg and pg function
Add new power/clock gating functions that can be called by other units. New clock_gating functions will reside in cg.c under common/power_features/cg unit. New power gating functions will reside in pg.c under common/power_features/pg unit. Use nvgpu_pg_elpg_disable and nvgpu_pg_elpg_enable to disable/enable elpg and also in gr_gk20a_elpg_protected macro to access gr registers. Add cg_pg_lock to make elpg_enabled, elcg_enabled, blcg_enabled and slcg_enabled thread safe. JIRA NVGPU-2014 Change-Id: I00d124c2ee16242c9a3ef82e7620fbb7f1297aff Signed-off-by: Seema Khowala <seemaj@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/2025493 Signed-off-by: Debarshi Dutta <ddutta@nvidia.com> (cherry-picked from c90585856567a547173a8b207365b3a4a3ccdd57 in dev-kernel) Reviewed-on: https://git-master.nvidia.com/r/2108406 GVS: Gerrit_Virtual_Submit Reviewed-by: Bibek Basu <bbasu@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/power_features')
-rw-r--r--drivers/gpu/nvgpu/common/power_features/cg/cg.c566
-rw-r--r--drivers/gpu/nvgpu/common/power_features/pg/pg.c106
-rw-r--r--drivers/gpu/nvgpu/common/power_features/power_features.c66
3 files changed, 738 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/power_features/cg/cg.c b/drivers/gpu/nvgpu/common/power_features/cg/cg.c
new file mode 100644
index 00000000..66b95226
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/power_features/cg/cg.c
@@ -0,0 +1,566 @@
1/*
2 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/gk20a.h>
24#include <nvgpu/enabled.h>
25#include <nvgpu/power_features/cg.h>
26
27static void nvgpu_cg_set_mode(struct gk20a *g, int cgmode, int mode_config)
28{
29 u32 engine_idx;
30 u32 active_engine_id = 0;
31 struct fifo_engine_info_gk20a *engine_info = NULL;
32 struct fifo_gk20a *f = &g->fifo;
33
34 nvgpu_log_fn(g, " ");
35
36 for (engine_idx = 0; engine_idx < f->num_engines; ++engine_idx) {
37 active_engine_id = f->active_engines_list[engine_idx];
38 engine_info = &f->engine_info[active_engine_id];
39
40 /* gr_engine supports both BLCG and ELCG */
41 if ((cgmode == BLCG_MODE) && (engine_info->engine_enum ==
42 ENGINE_GR_GK20A)) {
43 g->ops.therm.init_blcg_mode(g, (u32)mode_config,
44 active_engine_id);
45 break;
46 } else if (cgmode == ELCG_MODE) {
47 g->ops.therm.init_elcg_mode(g, (u32)mode_config,
48 active_engine_id);
49 } else {
50 nvgpu_err(g, "invalid cg mode %d, config %d for "
51 "act_eng_id %d",
52 cgmode, mode_config, active_engine_id);
53 }
54 }
55}
56
/*
 * Re-engage ELCG (engine-level clock gating) in automatic mode.
 *
 * No-op when the GPU cannot do ELCG or when ELCG is disabled in SW
 * state (g->elcg_enabled). Holds cg_pg_lock so the enabled flag and
 * the HW mode update are consistent with other cg/pg paths.
 */
void nvgpu_cg_elcg_enable(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");

	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
		return;
	}

	nvgpu_mutex_acquire(&g->cg_pg_lock);
	if (g->elcg_enabled) {
		nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_AUTO);
	}
	nvgpu_mutex_release(&g->cg_pg_lock);
}
71
/*
 * Force engines out of ELCG by putting them in RUN (always-clocked)
 * mode. The SW enable flag is left untouched so a later
 * nvgpu_cg_elcg_enable() restores AUTO mode. No-op when ELCG is not
 * supported or is disabled in SW state.
 */
void nvgpu_cg_elcg_disable(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");

	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
		return;
	}

	nvgpu_mutex_acquire(&g->cg_pg_lock);
	if (g->elcg_enabled) {
		nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_RUN);
	}
	nvgpu_mutex_release(&g->cg_pg_lock);
}
87
/*
 * Put BLCG (block-level clock gating) into automatic mode on the GR
 * engine. No-op when BLCG is not supported or is disabled in SW state.
 * Serialized against other cg/pg updates by cg_pg_lock.
 */
void nvgpu_cg_blcg_mode_enable(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");

	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
		return;
	}

	nvgpu_mutex_acquire(&g->cg_pg_lock);
	if (g->blcg_enabled) {
		nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_AUTO);
	}
	nvgpu_mutex_release(&g->cg_pg_lock);
}
103
/*
 * Force BLCG into RUN (ungated) mode on the GR engine. The SW enable
 * flag is left untouched so nvgpu_cg_blcg_mode_enable() can restore
 * AUTO mode later. No-op when BLCG is not supported or is disabled in
 * SW state.
 */
void nvgpu_cg_blcg_mode_disable(struct gk20a *g)
{
	nvgpu_log_fn(g, " ");

	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
		return;
	}

	nvgpu_mutex_acquire(&g->cg_pg_lock);
	if (g->blcg_enabled) {
		nvgpu_cg_set_mode(g, BLCG_MODE, BLCG_RUN);
	}
	nvgpu_mutex_release(&g->cg_pg_lock);
}
120
121void nvgpu_cg_blcg_fb_ltc_load_enable(struct gk20a *g)
122{
123 nvgpu_log_fn(g, " ");
124
125 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
126 return;
127 }
128 nvgpu_mutex_acquire(&g->cg_pg_lock);
129 if (!g->blcg_enabled) {
130 goto done;
131 }
132 if (g->ops.clock_gating.blcg_fb_load_gating_prod != NULL) {
133 g->ops.clock_gating.blcg_fb_load_gating_prod(g, true);
134 }
135 if (g->ops.clock_gating.blcg_ltc_load_gating_prod != NULL) {
136 g->ops.clock_gating.blcg_ltc_load_gating_prod(g, true);
137 }
138done:
139 nvgpu_mutex_release(&g->cg_pg_lock);
140}
141
142void nvgpu_cg_blcg_fifo_load_enable(struct gk20a *g)
143{
144 nvgpu_log_fn(g, " ");
145
146 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
147 return;
148 }
149 nvgpu_mutex_acquire(&g->cg_pg_lock);
150 if (!g->blcg_enabled) {
151 goto done;
152 }
153 if (g->ops.clock_gating.blcg_fifo_load_gating_prod != NULL) {
154 g->ops.clock_gating.blcg_fifo_load_gating_prod(g, true);
155 }
156done:
157 nvgpu_mutex_release(&g->cg_pg_lock);
158}
159
160void nvgpu_cg_blcg_pmu_load_enable(struct gk20a *g)
161{
162 nvgpu_log_fn(g, " ");
163
164 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
165 return;
166 }
167 nvgpu_mutex_acquire(&g->cg_pg_lock);
168 if (!g->blcg_enabled) {
169 goto done;
170 }
171 if (g->ops.clock_gating.blcg_pmu_load_gating_prod != NULL) {
172 g->ops.clock_gating.blcg_pmu_load_gating_prod(g, true);
173 }
174done:
175 nvgpu_mutex_release(&g->cg_pg_lock);
176}
177
178void nvgpu_cg_blcg_ce_load_enable(struct gk20a *g)
179{
180 nvgpu_log_fn(g, " ");
181
182 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
183 return;
184 }
185 nvgpu_mutex_acquire(&g->cg_pg_lock);
186 if (!g->blcg_enabled) {
187 goto done;
188 }
189 if (g->ops.clock_gating.blcg_ce_load_gating_prod != NULL) {
190 g->ops.clock_gating.blcg_ce_load_gating_prod(g, true);
191 }
192done:
193 nvgpu_mutex_release(&g->cg_pg_lock);
194}
195
196void nvgpu_cg_blcg_gr_load_enable(struct gk20a *g)
197{
198 nvgpu_log_fn(g, " ");
199
200 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
201 return;
202 }
203 nvgpu_mutex_acquire(&g->cg_pg_lock);
204 if (!g->blcg_enabled) {
205 goto done;
206 }
207 if (g->ops.clock_gating.blcg_gr_load_gating_prod != NULL) {
208 g->ops.clock_gating.blcg_gr_load_gating_prod(g, true);
209 }
210done:
211 nvgpu_mutex_release(&g->cg_pg_lock);
212}
213
214void nvgpu_cg_slcg_fb_ltc_load_enable(struct gk20a *g)
215{
216 nvgpu_log_fn(g, " ");
217
218 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
219 return;
220 }
221 nvgpu_mutex_acquire(&g->cg_pg_lock);
222 if (!g->slcg_enabled) {
223 goto done;
224 }
225 if (g->ops.clock_gating.slcg_fb_load_gating_prod != NULL) {
226 g->ops.clock_gating.slcg_fb_load_gating_prod(g, true);
227 }
228 if (g->ops.clock_gating.slcg_ltc_load_gating_prod != NULL) {
229 g->ops.clock_gating.slcg_ltc_load_gating_prod(g, true);
230 }
231done:
232 nvgpu_mutex_release(&g->cg_pg_lock);
233}
234
235void nvgpu_cg_slcg_priring_load_enable(struct gk20a *g)
236{
237 nvgpu_log_fn(g, " ");
238
239 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
240 return;
241 }
242 nvgpu_mutex_acquire(&g->cg_pg_lock);
243 if (!g->slcg_enabled) {
244 goto done;
245 }
246 if (g->ops.clock_gating.slcg_priring_load_gating_prod != NULL) {
247 g->ops.clock_gating.slcg_priring_load_gating_prod(g, true);
248 }
249done:
250 nvgpu_mutex_release(&g->cg_pg_lock);
251}
252
253void nvgpu_cg_slcg_gr_perf_ltc_load_enable(struct gk20a *g)
254{
255 nvgpu_log_fn(g, " ");
256
257 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
258 return;
259 }
260 nvgpu_mutex_acquire(&g->cg_pg_lock);
261 if (!g->slcg_enabled) {
262 goto done;
263 }
264 if (g->ops.clock_gating.slcg_ltc_load_gating_prod != NULL) {
265 g->ops.clock_gating.slcg_ltc_load_gating_prod(g, true);
266 }
267 if (g->ops.clock_gating.slcg_perf_load_gating_prod != NULL) {
268 g->ops.clock_gating.slcg_perf_load_gating_prod(g, true);
269 }
270 if (g->ops.clock_gating.slcg_gr_load_gating_prod != NULL) {
271 g->ops.clock_gating.slcg_gr_load_gating_prod(g, true);
272 }
273done:
274 nvgpu_mutex_release(&g->cg_pg_lock);
275}
276
277void nvgpu_cg_slcg_gr_perf_ltc_load_disable(struct gk20a *g)
278{
279 nvgpu_log_fn(g, " ");
280
281 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
282 return;
283 }
284 nvgpu_mutex_acquire(&g->cg_pg_lock);
285 if (!g->slcg_enabled) {
286 goto done;
287 }
288 if (g->ops.clock_gating.slcg_gr_load_gating_prod != NULL) {
289 g->ops.clock_gating.slcg_gr_load_gating_prod(g, false);
290 }
291 if (g->ops.clock_gating.slcg_perf_load_gating_prod != NULL) {
292 g->ops.clock_gating.slcg_perf_load_gating_prod(g, false);
293 }
294 if (g->ops.clock_gating.slcg_ltc_load_gating_prod != NULL) {
295 g->ops.clock_gating.slcg_ltc_load_gating_prod(g, false);
296 }
297done:
298 nvgpu_mutex_release(&g->cg_pg_lock);
299}
300
301void nvgpu_cg_slcg_fifo_load_enable(struct gk20a *g)
302{
303 nvgpu_log_fn(g, " ");
304
305 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
306 return;
307 }
308 nvgpu_mutex_acquire(&g->cg_pg_lock);
309 if (!g->slcg_enabled) {
310 goto done;
311 }
312 if (g->ops.clock_gating.slcg_fifo_load_gating_prod != NULL) {
313 g->ops.clock_gating.slcg_fifo_load_gating_prod(g, true);
314 }
315done:
316 nvgpu_mutex_release(&g->cg_pg_lock);
317}
318
319void nvgpu_cg_slcg_pmu_load_enable(struct gk20a *g)
320{
321 nvgpu_log_fn(g, " ");
322
323 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
324 return;
325 }
326 nvgpu_mutex_acquire(&g->cg_pg_lock);
327 if (!g->slcg_enabled) {
328 goto done;
329 }
330 if (g->ops.clock_gating.slcg_pmu_load_gating_prod != NULL) {
331 g->ops.clock_gating.slcg_pmu_load_gating_prod(g, true);
332 }
333done:
334 nvgpu_mutex_release(&g->cg_pg_lock);
335}
336
337void nvgpu_cg_slcg_ce2_load_enable(struct gk20a *g)
338{
339 nvgpu_log_fn(g, " ");
340
341 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
342 return;
343 }
344 nvgpu_mutex_acquire(&g->cg_pg_lock);
345 if (!g->slcg_enabled) {
346 goto done;
347 }
348 if (g->ops.clock_gating.slcg_ce2_load_gating_prod != NULL) {
349 g->ops.clock_gating.slcg_ce2_load_gating_prod(g, true);
350 }
351done:
352 nvgpu_mutex_release(&g->cg_pg_lock);
353}
354
355void nvgpu_cg_init_gr_load_gating_prod(struct gk20a *g)
356{
357 nvgpu_log_fn(g, " ");
358
359 nvgpu_mutex_acquire(&g->cg_pg_lock);
360
361 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
362 goto check_can_blcg;
363 }
364 if (!g->slcg_enabled) {
365 goto check_can_blcg;
366 }
367
368 if (g->ops.clock_gating.slcg_bus_load_gating_prod != NULL) {
369 g->ops.clock_gating.slcg_bus_load_gating_prod(g, true);
370 }
371 if (g->ops.clock_gating.slcg_chiplet_load_gating_prod != NULL) {
372 g->ops.clock_gating.slcg_chiplet_load_gating_prod(g, true);
373 }
374 if (g->ops.clock_gating.slcg_gr_load_gating_prod != NULL) {
375 g->ops.clock_gating.slcg_gr_load_gating_prod(g, true);
376 }
377 if (g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod != NULL) {
378 g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod(g,
379 true);
380 }
381 if (g->ops.clock_gating.slcg_perf_load_gating_prod != NULL) {
382 g->ops.clock_gating.slcg_perf_load_gating_prod(g, true);
383 }
384 if (g->ops.clock_gating.slcg_xbar_load_gating_prod != NULL) {
385 g->ops.clock_gating.slcg_xbar_load_gating_prod(g, true);
386 }
387
388check_can_blcg:
389 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
390 goto pg_gr_load;
391 }
392 if (!g->blcg_enabled) {
393 goto pg_gr_load;
394 }
395 if (g->ops.clock_gating.blcg_bus_load_gating_prod != NULL) {
396 g->ops.clock_gating.blcg_bus_load_gating_prod(g, true);
397 }
398 if (g->ops.clock_gating.blcg_gr_load_gating_prod != NULL) {
399 g->ops.clock_gating.blcg_gr_load_gating_prod(g, true);
400 }
401 if (g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod != NULL) {
402 g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod(g,
403 true);
404 }
405 if (g->ops.clock_gating.blcg_xbar_load_gating_prod != NULL) {
406 g->ops.clock_gating.blcg_xbar_load_gating_prod(g, true);
407 }
408pg_gr_load:
409 if (g->ops.clock_gating.pg_gr_load_gating_prod != NULL) {
410 g->ops.clock_gating.pg_gr_load_gating_prod(g, true);
411 }
412
413 nvgpu_mutex_release(&g->cg_pg_lock);
414}
415
416void nvgpu_cg_elcg_set_elcg_enabled(struct gk20a *g, bool enable)
417{
418 nvgpu_log_fn(g, " ");
419
420 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG)) {
421 return;
422 }
423
424 nvgpu_mutex_release(&g->cg_pg_lock);
425 if (enable) {
426 if (!g->elcg_enabled) {
427 g->elcg_enabled = true;
428 nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_AUTO);
429 }
430 } else {
431 if (g->elcg_enabled) {
432 g->elcg_enabled = false;
433 nvgpu_cg_set_mode(g, ELCG_MODE, ELCG_RUN);
434 }
435 }
436 nvgpu_mutex_release(&g->cg_pg_lock);
437}
438
439void nvgpu_cg_blcg_set_blcg_enabled(struct gk20a *g, bool enable)
440{
441 bool load = false;
442
443 nvgpu_log_fn(g, " ");
444
445 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) {
446 return;
447 }
448
449 nvgpu_mutex_acquire(&g->cg_pg_lock);
450 if (enable) {
451 if (!g->blcg_enabled) {
452 load = true;
453 g->blcg_enabled = true;
454 }
455 } else {
456 if (g->blcg_enabled) {
457 load = true;
458 g->blcg_enabled = false;
459 }
460 }
461 if (!load ) {
462 goto done;
463 }
464
465 if (g->ops.clock_gating.blcg_bus_load_gating_prod != NULL) {
466 g->ops.clock_gating.blcg_bus_load_gating_prod(g, enable);
467 }
468 if (g->ops.clock_gating.blcg_ce_load_gating_prod != NULL) {
469 g->ops.clock_gating.blcg_ce_load_gating_prod(g, enable);
470 }
471 if (g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod != NULL) {
472 g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod(g,
473 enable);
474 }
475 if (g->ops.clock_gating.blcg_fb_load_gating_prod != NULL) {
476 g->ops.clock_gating.blcg_fb_load_gating_prod(g, enable);
477 }
478 if (g->ops.clock_gating.blcg_fifo_load_gating_prod != NULL) {
479 g->ops.clock_gating.blcg_fifo_load_gating_prod(g, enable);
480 }
481 if (g->ops.clock_gating.blcg_gr_load_gating_prod != NULL) {
482 g->ops.clock_gating.blcg_gr_load_gating_prod(g, enable);
483 }
484 if (g->ops.clock_gating.blcg_ltc_load_gating_prod != NULL) {
485 g->ops.clock_gating.blcg_ltc_load_gating_prod(g, enable);
486 }
487 if (g->ops.clock_gating.blcg_pmu_load_gating_prod != NULL) {
488 g->ops.clock_gating.blcg_pmu_load_gating_prod(g, enable);
489 }
490 if (g->ops.clock_gating.blcg_xbar_load_gating_prod != NULL) {
491 g->ops.clock_gating.blcg_xbar_load_gating_prod(g, enable);
492 }
493
494done:
495 nvgpu_mutex_release(&g->cg_pg_lock);
496}
497
498void nvgpu_cg_slcg_set_slcg_enabled(struct gk20a *g, bool enable)
499{
500 bool load = false;
501
502 nvgpu_log_fn(g, " ");
503
504 if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) {
505 return;
506 }
507
508 nvgpu_mutex_acquire(&g->cg_pg_lock);
509 if (enable) {
510 if (!g->slcg_enabled) {
511 load = true;
512 g->slcg_enabled = true;
513 }
514 } else {
515 if (g->slcg_enabled) {
516 load = true;
517 g->slcg_enabled = false;
518 }
519 }
520 if (!load ) {
521 goto done;
522 }
523
524 if (g->ops.clock_gating.slcg_bus_load_gating_prod != NULL) {
525 g->ops.clock_gating.slcg_bus_load_gating_prod(g, enable);
526 }
527 if (g->ops.clock_gating.slcg_ce2_load_gating_prod != NULL) {
528 g->ops.clock_gating.slcg_ce2_load_gating_prod(g, enable);
529 }
530 if (g->ops.clock_gating.slcg_chiplet_load_gating_prod != NULL) {
531 g->ops.clock_gating.slcg_chiplet_load_gating_prod(g, enable);
532 }
533 if (g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod !=
534 NULL) {
535 g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod(g,
536 enable);
537 }
538 if (g->ops.clock_gating.slcg_fb_load_gating_prod != NULL) {
539 g->ops.clock_gating.slcg_fb_load_gating_prod(g, enable);
540 }
541 if (g->ops.clock_gating.slcg_fifo_load_gating_prod != NULL) {
542 g->ops.clock_gating.slcg_fifo_load_gating_prod(g, enable);
543 }
544 if (g->ops.clock_gating.slcg_gr_load_gating_prod != NULL) {
545 g->ops.clock_gating.slcg_gr_load_gating_prod(g, enable);
546 }
547 if (g->ops.clock_gating.slcg_ltc_load_gating_prod != NULL) {
548 g->ops.clock_gating.slcg_ltc_load_gating_prod(g, enable);
549 }
550 if (g->ops.clock_gating.slcg_perf_load_gating_prod != NULL) {
551 g->ops.clock_gating.slcg_perf_load_gating_prod(g, enable);
552 }
553 if (g->ops.clock_gating.slcg_priring_load_gating_prod != NULL) {
554 g->ops.clock_gating.slcg_priring_load_gating_prod(g,
555 enable);
556 }
557 if (g->ops.clock_gating.slcg_pmu_load_gating_prod != NULL) {
558 g->ops.clock_gating.slcg_pmu_load_gating_prod(g, enable);
559 }
560 if (g->ops.clock_gating.slcg_xbar_load_gating_prod != NULL) {
561 g->ops.clock_gating.slcg_xbar_load_gating_prod(g, enable);
562 }
563
564done:
565 nvgpu_mutex_release(&g->cg_pg_lock);
566}
diff --git a/drivers/gpu/nvgpu/common/power_features/pg/pg.c b/drivers/gpu/nvgpu/common/power_features/pg/pg.c
new file mode 100644
index 00000000..fa31f4e3
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/power_features/pg/pg.c
@@ -0,0 +1,106 @@
1/*
2 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/gk20a.h>
24#include <nvgpu/pmu.h>
25#include <nvgpu/power_features/pg.h>
26
27bool nvgpu_pg_elpg_is_enabled(struct gk20a *g)
28{
29 bool elpg_enabled;
30
31 nvgpu_log_fn(g, " ");
32
33 nvgpu_mutex_acquire(&g->cg_pg_lock);
34 elpg_enabled = g->elpg_enabled;
35 nvgpu_mutex_release(&g->cg_pg_lock);
36 return elpg_enabled;
37}
38
39int nvgpu_pg_elpg_enable(struct gk20a *g)
40{
41 int err = 0;
42
43 nvgpu_log_fn(g, " ");
44
45 if (!g->can_elpg) {
46 return 0;
47 }
48
49 nvgpu_mutex_acquire(&g->cg_pg_lock);
50 if (g->elpg_enabled) {
51 err = nvgpu_pmu_pg_global_enable(g, true);
52 }
53 nvgpu_mutex_release(&g->cg_pg_lock);
54 return err;
55}
56
57int nvgpu_pg_elpg_disable(struct gk20a *g)
58{
59 int err = 0;
60
61 nvgpu_log_fn(g, " ");
62
63 if (!g->can_elpg) {
64 return 0;
65 }
66
67 nvgpu_mutex_acquire(&g->cg_pg_lock);
68 if (g->elpg_enabled) {
69 err = nvgpu_pmu_pg_global_enable(g, false);
70 }
71 nvgpu_mutex_release(&g->cg_pg_lock);
72 return err;
73}
74
75int nvgpu_pg_elpg_set_elpg_enabled(struct gk20a *g, bool enable)
76{
77 int err = 0;
78 bool change_mode = false;
79
80 nvgpu_log_fn(g, " ");
81
82 if (!g->can_elpg) {
83 return 0;
84 }
85
86 nvgpu_mutex_acquire(&g->cg_pg_lock);
87 if (enable) {
88 if (!g->elpg_enabled) {
89 change_mode = true;
90 g->elpg_enabled = true;
91 }
92 } else {
93 if (g->elpg_enabled) {
94 change_mode = true;
95 g->elpg_enabled = false;
96 }
97 }
98 if (!change_mode) {
99 goto done;
100 }
101
102 err = nvgpu_pmu_pg_global_enable(g, enable);
103done:
104 nvgpu_mutex_release(&g->cg_pg_lock);
105 return err;
106}
diff --git a/drivers/gpu/nvgpu/common/power_features/power_features.c b/drivers/gpu/nvgpu/common/power_features/power_features.c
new file mode 100644
index 00000000..792fdc01
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/power_features/power_features.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/gk20a.h>
24#include <nvgpu/power_features/cg.h>
25#include <nvgpu/power_features/pg.h>
26#include <nvgpu/power_features/power_features.h>
27
/*
 * Disable all power/clock-gating features in dependency order:
 * ELPG first (power gating must be off before touching clock gating),
 * then SLCG prod values, BLCG mode, and finally ELCG mode.
 *
 * Returns the ELPG-disable error code (0 on success); the clock-gating
 * steps proceed even if ELPG disable failed.
 */
int nvgpu_cg_pg_disable(struct gk20a *g)
{
	int err = 0;

	nvgpu_log_fn(g, " ");

	/* disable elpg before clock gating */
	err = nvgpu_pg_elpg_disable(g);
	if (err != 0) {
		nvgpu_err(g, "failed to set disable elpg");
	}
	nvgpu_cg_slcg_gr_perf_ltc_load_disable(g);

	nvgpu_cg_blcg_mode_disable(g);

	nvgpu_cg_elcg_disable(g);

	return err;
}
47
/*
 * Re-enable all power/clock-gating features in the reverse order of
 * nvgpu_cg_pg_disable(): ELCG, BLCG, SLCG first, and ELPG last (power
 * gating only after clock gating is back on).
 *
 * Returns the ELPG-enable error code (0 on success).
 */
int nvgpu_cg_pg_enable(struct gk20a *g)
{
	int err = 0;

	nvgpu_log_fn(g, " ");

	nvgpu_cg_elcg_enable(g);

	nvgpu_cg_blcg_mode_enable(g);

	nvgpu_cg_slcg_gr_perf_ltc_load_enable(g);

	err = nvgpu_pg_elpg_enable(g);
	if (err != 0) {
		nvgpu_err(g, "failed to set enable elpg");
	}

	return err;
}