summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp106
diff options
context:
space:
mode:
authorDebarshi Dutta <ddutta@nvidia.com>2018-06-26 06:11:12 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-09-04 10:25:41 -0400
commit16ad9f537979c5f3717fc5781b1c2fad22a76f96 (patch)
tree2a150c50983180051fa5ecc942764e081961d787 /drivers/gpu/nvgpu/gp106
parentf125d1b681c324d5d58abcc42fac1301e1faa921 (diff)
gpu: nvgpu: move gp106 specific clk_arbiter code into HAL
Currently, clock arbiter code is extensively using dgpu specific implementation. This patch restructures the clk_arbiter code and moves gp106 specific code into HAL. Following changes are made in this patch 1) clk_domain_get_f_points is now invoked via HAL for gp106 i.e. g->ops.clk.clk_domain_get_f_points. 2) moved nvgpu_clk_arb_change_vf_point and other related static functions to clk_arb_gp106.c. 3) Instead of only checking if get_arbiter_clk_domain is empty, a check for support_clk_freq_controller is also added. This is to enable the clk_arbiter based on support from both the OS and the chips. Bug 2061372 Change-Id: I65b0a4e02145a86fbbfb420ed591b1fa3c86f6dc Signed-off-by: Debarshi Dutta <ddutta@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1774279 Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Deepak Nibade <dnibade@nvidia.com> Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gp106')
-rw-r--r--drivers/gpu/nvgpu/gp106/clk_arb_gp106.c649
-rw-r--r--drivers/gpu/nvgpu/gp106/clk_arb_gp106.h8
-rw-r--r--drivers/gpu/nvgpu/gp106/clk_gp106.c33
-rw-r--r--drivers/gpu/nvgpu/gp106/clk_gp106.h7
-rw-r--r--drivers/gpu/nvgpu/gp106/hal_gp106.c5
5 files changed, 699 insertions, 3 deletions
diff --git a/drivers/gpu/nvgpu/gp106/clk_arb_gp106.c b/drivers/gpu/nvgpu/gp106/clk_arb_gp106.c
index ca8015d6..860344d0 100644
--- a/drivers/gpu/nvgpu/gp106/clk_arb_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/clk_arb_gp106.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include "gk20a/gk20a.h" 23#include "gk20a/gk20a.h"
24#include <nvgpu/clk_arb.h>
24 25
25#include "clk_arb_gp106.h" 26#include "clk_arb_gp106.h"
26 27
@@ -109,3 +110,649 @@ int gp106_get_arbiter_clk_default(struct gk20a *g, u32 api_domain,
109 110
110 return 0; 111 return 0;
111} 112}
113
114int gp106_init_clk_arbiter(struct gk20a *g)
115{
116 struct nvgpu_clk_arb *arb;
117 u16 default_mhz;
118 int err;
119 int index;
120 struct nvgpu_clk_vf_table *table;
121
122 clk_arb_dbg(g, " ");
123
124 arb = nvgpu_kzalloc(g, sizeof(struct nvgpu_clk_arb));
125 if (!arb)
126 return -ENOMEM;
127
128 err = nvgpu_mutex_init(&arb->pstate_lock);
129 if (err)
130 goto mutex_fail;
131 nvgpu_spinlock_init(&arb->sessions_lock);
132 nvgpu_spinlock_init(&arb->users_lock);
133 nvgpu_spinlock_init(&arb->requests_lock);
134
135 arb->mclk_f_points = nvgpu_kcalloc(g, MAX_F_POINTS, sizeof(u16));
136 if (!arb->mclk_f_points) {
137 err = -ENOMEM;
138 goto init_fail;
139 }
140
141 arb->gpc2clk_f_points = nvgpu_kcalloc(g, MAX_F_POINTS, sizeof(u16));
142 if (!arb->gpc2clk_f_points) {
143 err = -ENOMEM;
144 goto init_fail;
145 }
146
147 for (index = 0; index < 2; index++) {
148 table = &arb->vf_table_pool[index];
149 table->gpc2clk_num_points = MAX_F_POINTS;
150 table->mclk_num_points = MAX_F_POINTS;
151
152 table->gpc2clk_points = nvgpu_kcalloc(g, MAX_F_POINTS,
153 sizeof(struct nvgpu_clk_vf_point));
154 if (!table->gpc2clk_points) {
155 err = -ENOMEM;
156 goto init_fail;
157 }
158
159
160 table->mclk_points = nvgpu_kcalloc(g, MAX_F_POINTS,
161 sizeof(struct nvgpu_clk_vf_point));
162 if (!table->mclk_points) {
163 err = -ENOMEM;
164 goto init_fail;
165 }
166 }
167
168 g->clk_arb = arb;
169 arb->g = g;
170
171 err = g->ops.clk_arb.get_arbiter_clk_default(g,
172 CTRL_CLK_DOMAIN_MCLK, &default_mhz);
173 if (err < 0) {
174 err = -EINVAL;
175 goto init_fail;
176 }
177
178 arb->mclk_default_mhz = default_mhz;
179
180 err = g->ops.clk_arb.get_arbiter_clk_default(g,
181 CTRL_CLK_DOMAIN_GPC2CLK, &default_mhz);
182 if (err < 0) {
183 err = -EINVAL;
184 goto init_fail;
185 }
186
187 arb->gpc2clk_default_mhz = default_mhz;
188
189 arb->actual = &arb->actual_pool[0];
190
191 nvgpu_atomic_set(&arb->req_nr, 0);
192
193 nvgpu_atomic64_set(&arb->alarm_mask, 0);
194 err = nvgpu_clk_notification_queue_alloc(g, &arb->notification_queue,
195 DEFAULT_EVENT_NUMBER);
196 if (err < 0)
197 goto init_fail;
198
199 nvgpu_init_list_node(&arb->users);
200 nvgpu_init_list_node(&arb->sessions);
201 nvgpu_init_list_node(&arb->requests);
202
203 nvgpu_cond_init(&arb->request_wq);
204
205 nvgpu_init_list_node(&arb->update_vf_table_work_item.worker_item);
206 nvgpu_init_list_node(&arb->update_arb_work_item.worker_item);
207 arb->update_vf_table_work_item.arb = arb;
208 arb->update_arb_work_item.arb = arb;
209 arb->update_vf_table_work_item.item_type = CLK_ARB_WORK_UPDATE_VF_TABLE;
210 arb->update_arb_work_item.item_type = CLK_ARB_WORK_UPDATE_ARB;
211
212 err = nvgpu_clk_arb_worker_init(g);
213 if (err < 0)
214 goto init_fail;
215
216#ifdef CONFIG_DEBUG_FS
217 arb->debug = &arb->debug_pool[0];
218
219 if (!arb->debugfs_set) {
220 if (nvgpu_clk_arb_debugfs_init(g))
221 arb->debugfs_set = true;
222 }
223#endif
224 err = clk_vf_point_cache(g);
225 if (err < 0)
226 goto init_fail;
227
228 err = nvgpu_clk_arb_update_vf_table(arb);
229 if (err < 0)
230 goto init_fail;
231 do {
232 /* Check that first run is completed */
233 nvgpu_smp_mb();
234 NVGPU_COND_WAIT_INTERRUPTIBLE(&arb->request_wq,
235 nvgpu_atomic_read(&arb->req_nr), 0);
236 } while (!nvgpu_atomic_read(&arb->req_nr));
237
238
239 return arb->status;
240
241init_fail:
242 nvgpu_kfree(g, arb->gpc2clk_f_points);
243 nvgpu_kfree(g, arb->mclk_f_points);
244
245 for (index = 0; index < 2; index++) {
246 nvgpu_kfree(g, arb->vf_table_pool[index].gpc2clk_points);
247 nvgpu_kfree(g, arb->vf_table_pool[index].mclk_points);
248 }
249
250 nvgpu_mutex_destroy(&arb->pstate_lock);
251
252mutex_fail:
253 nvgpu_kfree(g, arb);
254
255 return err;
256}
257
258static u8 nvgpu_clk_arb_find_vf_point(struct nvgpu_clk_arb *arb,
259 u16 *gpc2clk, u16 *sys2clk, u16 *xbar2clk, u16 *mclk,
260 u32 *voltuv, u32 *voltuv_sram, u32 *nuvmin, u32 *nuvmin_sram)
261{
262 u16 gpc2clk_target, mclk_target;
263 u32 gpc2clk_voltuv, gpc2clk_voltuv_sram;
264 u32 mclk_voltuv, mclk_voltuv_sram;
265 u32 pstate = VF_POINT_INVALID_PSTATE;
266 struct nvgpu_clk_vf_table *table;
267 u32 index, index_mclk;
268 struct nvgpu_clk_vf_point *mclk_vf = NULL;
269
270 do {
271 gpc2clk_target = *gpc2clk;
272 mclk_target = *mclk;
273 gpc2clk_voltuv = 0;
274 gpc2clk_voltuv_sram = 0;
275 mclk_voltuv = 0;
276 mclk_voltuv_sram = 0;
277
278 table = NV_ACCESS_ONCE(arb->current_vf_table);
279 /* pointer to table can be updated by callback */
280 nvgpu_smp_rmb();
281
282 if (!table)
283 continue;
284 if ((!table->gpc2clk_num_points) || (!table->mclk_num_points)) {
285 nvgpu_err(arb->g, "found empty table");
286 goto find_exit;
287 }
 288 /* First we check MCLK to find out which PSTATE we 288 /* First we check MCLK to find out which PSTATE we
 289 * are requesting, and from there try to find the minimum 289 * are requesting, and from there try to find the minimum
290 * GPC2CLK on the same PSTATE that satisfies the request.
291 * If no GPC2CLK can be found, then we need to up the PSTATE
292 */
293
294recalculate_vf_point:
295 for (index = 0; index < table->mclk_num_points; index++) {
296 if (table->mclk_points[index].mem_mhz >= mclk_target) {
297 mclk_vf = &table->mclk_points[index];
298 break;
299 }
300 }
301 if (index == table->mclk_num_points) {
302 mclk_vf = &table->mclk_points[index-1];
303 index = table->mclk_num_points - 1;
304 }
305 index_mclk = index;
306
307 /* round up the freq requests */
308 for (index = 0; index < table->gpc2clk_num_points; index++) {
309 pstate = VF_POINT_COMMON_PSTATE(
310 &table->gpc2clk_points[index], mclk_vf);
311
312 if ((table->gpc2clk_points[index].gpc_mhz >=
313 gpc2clk_target) &&
314 (pstate != VF_POINT_INVALID_PSTATE)) {
315 gpc2clk_target =
316 table->gpc2clk_points[index].gpc_mhz;
317 *sys2clk =
318 table->gpc2clk_points[index].sys_mhz;
319 *xbar2clk =
320 table->gpc2clk_points[index].xbar_mhz;
321
322 gpc2clk_voltuv =
323 table->gpc2clk_points[index].uvolt;
324 gpc2clk_voltuv_sram =
325 table->gpc2clk_points[index].uvolt_sram;
326 break;
327 }
328 }
329
330 if (index == table->gpc2clk_num_points) {
331 pstate = VF_POINT_COMMON_PSTATE(
332 &table->gpc2clk_points[index-1], mclk_vf);
333 if (pstate != VF_POINT_INVALID_PSTATE) {
334 gpc2clk_target =
335 table->gpc2clk_points[index-1].gpc_mhz;
336 *sys2clk =
337 table->gpc2clk_points[index-1].sys_mhz;
338 *xbar2clk =
339 table->gpc2clk_points[index-1].xbar_mhz;
340
341 gpc2clk_voltuv =
342 table->gpc2clk_points[index-1].uvolt;
343 gpc2clk_voltuv_sram =
344 table->gpc2clk_points[index-1].
345 uvolt_sram;
346 } else if (index_mclk >= table->mclk_num_points - 1) {
347 /* There is no available combination of MCLK
348 * and GPC2CLK, we need to fail this
349 */
350 gpc2clk_target = 0;
351 mclk_target = 0;
352 pstate = VF_POINT_INVALID_PSTATE;
353 goto find_exit;
354 } else {
355 /* recalculate with higher PSTATE */
356 gpc2clk_target = *gpc2clk;
357 mclk_target = table->mclk_points[index_mclk+1].
358 mem_mhz;
359 goto recalculate_vf_point;
360 }
361 }
362
363 mclk_target = mclk_vf->mem_mhz;
364 mclk_voltuv = mclk_vf->uvolt;
365 mclk_voltuv_sram = mclk_vf->uvolt_sram;
366
367 } while (!table ||
368 (NV_ACCESS_ONCE(arb->current_vf_table) != table));
369
370find_exit:
371 *voltuv = gpc2clk_voltuv > mclk_voltuv ? gpc2clk_voltuv : mclk_voltuv;
372 *voltuv_sram = gpc2clk_voltuv_sram > mclk_voltuv_sram ?
373 gpc2clk_voltuv_sram : mclk_voltuv_sram;
374 /* noise unaware vmin */
375 *nuvmin = mclk_voltuv;
376 *nuvmin_sram = mclk_voltuv_sram;
377 *gpc2clk = gpc2clk_target < *gpc2clk ? gpc2clk_target : *gpc2clk;
378 *mclk = mclk_target;
379 return pstate;
380}
381
382static int nvgpu_clk_arb_change_vf_point(struct gk20a *g, u16 gpc2clk_target,
383 u16 sys2clk_target, u16 xbar2clk_target, u16 mclk_target, u32 voltuv,
384 u32 voltuv_sram)
385{
386 struct set_fll_clk fllclk;
387 struct nvgpu_clk_arb *arb = g->clk_arb;
388 int status;
389
390 fllclk.gpc2clkmhz = gpc2clk_target;
391 fllclk.sys2clkmhz = sys2clk_target;
392 fllclk.xbar2clkmhz = xbar2clk_target;
393
394 fllclk.voltuv = voltuv;
395
396 /* if voltage ascends we do:
397 * (1) FLL change
398 * (2) Voltage change
399 * (3) MCLK change
400 * If it goes down
401 * (1) MCLK change
402 * (2) Voltage change
403 * (3) FLL change
404 */
405
406 /* descending */
407 if (voltuv < arb->voltuv_actual) {
408 status = g->ops.clk.mclk_change(g, mclk_target);
409 if (status < 0)
410 return status;
411
412 status = volt_set_voltage(g, voltuv, voltuv_sram);
413 if (status < 0)
414 return status;
415
416 status = clk_set_fll_clks(g, &fllclk);
417 if (status < 0)
418 return status;
419 } else {
420 status = clk_set_fll_clks(g, &fllclk);
421 if (status < 0)
422 return status;
423
424 status = volt_set_voltage(g, voltuv, voltuv_sram);
425 if (status < 0)
426 return status;
427
428 status = g->ops.clk.mclk_change(g, mclk_target);
429 if (status < 0)
430 return status;
431 }
432
433 return 0;
434}
435
436void gp106_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
437{
438 struct nvgpu_clk_session *session;
439 struct nvgpu_clk_dev *dev;
440 struct nvgpu_clk_dev *tmp;
441 struct nvgpu_clk_arb_target *target, *actual;
442 struct gk20a *g = arb->g;
443
444 u32 pstate = VF_POINT_INVALID_PSTATE;
445 u32 voltuv, voltuv_sram;
446 bool mclk_set, gpc2clk_set;
447 u32 nuvmin, nuvmin_sram;
448
449 u32 alarms_notified = 0;
450 u32 current_alarm;
451 int status = 0;
452
453 /* Temporary variables for checking target frequency */
454 u16 gpc2clk_target, sys2clk_target, xbar2clk_target, mclk_target;
455 u16 gpc2clk_session_target, mclk_session_target;
456
457#ifdef CONFIG_DEBUG_FS
458 u64 t0, t1;
459 struct nvgpu_clk_arb_debug *debug;
460
461#endif
462
463 clk_arb_dbg(g, " ");
464
465 /* bail out if gpu is down */
466 if (nvgpu_atomic64_read(&arb->alarm_mask) & EVENT(ALARM_GPU_LOST))
467 goto exit_arb;
468
469#ifdef CONFIG_DEBUG_FS
470 g->ops.ptimer.read_ptimer(g, &t0);
471#endif
472
473 /* Only one arbiter should be running */
474 gpc2clk_target = 0;
475 mclk_target = 0;
476
477 nvgpu_spinlock_acquire(&arb->sessions_lock);
478 nvgpu_list_for_each_entry(session, &arb->sessions,
479 nvgpu_clk_session, link) {
480 if (!session->zombie) {
481 mclk_set = false;
482 gpc2clk_set = false;
483 target = (session->target == &session->target_pool[0] ?
484 &session->target_pool[1] :
485 &session->target_pool[0]);
486 nvgpu_spinlock_acquire(&session->session_lock);
487 if (!nvgpu_list_empty(&session->targets)) {
488 /* Copy over state */
489 target->mclk = session->target->mclk;
490 target->gpc2clk = session->target->gpc2clk;
491 /* Query the latest committed request */
492 nvgpu_list_for_each_entry_safe(dev, tmp,
493 &session->targets, nvgpu_clk_dev, node) {
494 if (!mclk_set && dev->mclk_target_mhz) {
495 target->mclk =
496 dev->mclk_target_mhz;
497 mclk_set = true;
498 }
499 if (!gpc2clk_set &&
500 dev->gpc2clk_target_mhz) {
501 target->gpc2clk =
502 dev->gpc2clk_target_mhz;
503 gpc2clk_set = true;
504 }
505 nvgpu_ref_get(&dev->refcount);
506 nvgpu_list_del(&dev->node);
507 nvgpu_spinlock_acquire(
508 &arb->requests_lock);
509 nvgpu_list_add(
510 &dev->node, &arb->requests);
511 nvgpu_spinlock_release(&arb->requests_lock);
512 }
513 session->target = target;
514 }
515 nvgpu_spinlock_release(
516 &session->session_lock);
517
518 mclk_target = mclk_target > session->target->mclk ?
519 mclk_target : session->target->mclk;
520
521 gpc2clk_target =
522 gpc2clk_target > session->target->gpc2clk ?
523 gpc2clk_target : session->target->gpc2clk;
524 }
525 }
526 nvgpu_spinlock_release(&arb->sessions_lock);
527
528 gpc2clk_target = (gpc2clk_target > 0) ? gpc2clk_target :
529 arb->gpc2clk_default_mhz;
530
531 if (gpc2clk_target < arb->gpc2clk_min)
532 gpc2clk_target = arb->gpc2clk_min;
533
534 if (gpc2clk_target > arb->gpc2clk_max)
535 gpc2clk_target = arb->gpc2clk_max;
536
537 mclk_target = (mclk_target > 0) ? mclk_target :
538 arb->mclk_default_mhz;
539
540 if (mclk_target < arb->mclk_min)
541 mclk_target = arb->mclk_min;
542
543 if (mclk_target > arb->mclk_max)
544 mclk_target = arb->mclk_max;
545
546 sys2clk_target = 0;
547 xbar2clk_target = 0;
548
549 gpc2clk_session_target = gpc2clk_target;
550 mclk_session_target = mclk_target;
551
552 /* Query the table for the closest vf point to program */
553 pstate = nvgpu_clk_arb_find_vf_point(arb, &gpc2clk_target,
554 &sys2clk_target, &xbar2clk_target, &mclk_target, &voltuv,
555 &voltuv_sram, &nuvmin, &nuvmin_sram);
556
557 if (pstate == VF_POINT_INVALID_PSTATE) {
558 arb->status = -EINVAL;
559 /* make status visible */
560 nvgpu_smp_mb();
561 goto exit_arb;
562 }
563
564 if ((gpc2clk_target < gpc2clk_session_target) ||
565 (mclk_target < mclk_session_target))
566 nvgpu_clk_arb_set_global_alarm(g,
567 EVENT(ALARM_TARGET_VF_NOT_POSSIBLE));
568
569 if ((arb->actual->gpc2clk == gpc2clk_target) &&
570 (arb->actual->mclk == mclk_target) &&
571 (arb->voltuv_actual == voltuv)) {
572 goto exit_arb;
573 }
574
575 /* Program clocks */
 576 /* A change in either mclk or gpc2clk may require a change in voltage */
577
578 nvgpu_mutex_acquire(&arb->pstate_lock);
579 status = nvgpu_lpwr_disable_pg(g, false);
580
581 status = clk_pmu_freq_controller_load(g, false,
582 CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL);
583 if (status < 0) {
584 arb->status = status;
585 nvgpu_mutex_release(&arb->pstate_lock);
586
587 /* make status visible */
588 nvgpu_smp_mb();
589 goto exit_arb;
590 }
591 status = volt_set_noiseaware_vmin(g, nuvmin, nuvmin_sram);
592 if (status < 0) {
593 arb->status = status;
594 nvgpu_mutex_release(&arb->pstate_lock);
595
596 /* make status visible */
597 nvgpu_smp_mb();
598 goto exit_arb;
599 }
600
601 status = nvgpu_clk_arb_change_vf_point(g, gpc2clk_target,
602 sys2clk_target, xbar2clk_target, mclk_target, voltuv,
603 voltuv_sram);
604 if (status < 0) {
605 arb->status = status;
606 nvgpu_mutex_release(&arb->pstate_lock);
607
608 /* make status visible */
609 nvgpu_smp_mb();
610 goto exit_arb;
611 }
612
613 status = clk_pmu_freq_controller_load(g, true,
614 CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL);
615 if (status < 0) {
616 arb->status = status;
617 nvgpu_mutex_release(&arb->pstate_lock);
618
619 /* make status visible */
620 nvgpu_smp_mb();
621 goto exit_arb;
622 }
623
624 status = nvgpu_lwpr_mclk_change(g, pstate);
625 if (status < 0) {
626 arb->status = status;
627 nvgpu_mutex_release(&arb->pstate_lock);
628
629 /* make status visible */
630 nvgpu_smp_mb();
631 goto exit_arb;
632 }
633
634 actual = NV_ACCESS_ONCE(arb->actual) == &arb->actual_pool[0] ?
635 &arb->actual_pool[1] : &arb->actual_pool[0];
636
637 /* do not reorder this pointer */
638 nvgpu_smp_rmb();
639 actual->gpc2clk = gpc2clk_target;
640 actual->mclk = mclk_target;
641 arb->voltuv_actual = voltuv;
642 actual->pstate = pstate;
643 arb->status = status;
644
645 /* Make changes visible to other threads */
646 nvgpu_smp_wmb();
647 arb->actual = actual;
648
649 status = nvgpu_lpwr_enable_pg(g, false);
650 if (status < 0) {
651 arb->status = status;
652 nvgpu_mutex_release(&arb->pstate_lock);
653
654 /* make status visible */
655 nvgpu_smp_mb();
656 goto exit_arb;
657 }
658
659 /* status must be visible before atomic inc */
660 nvgpu_smp_wmb();
661 nvgpu_atomic_inc(&arb->req_nr);
662
663 /* Unlock pstate change for PG */
664 nvgpu_mutex_release(&arb->pstate_lock);
665
666 /* VF Update complete */
667 nvgpu_clk_arb_set_global_alarm(g, EVENT(VF_UPDATE));
668
669 nvgpu_cond_signal_interruptible(&arb->request_wq);
670
671#ifdef CONFIG_DEBUG_FS
672 g->ops.ptimer.read_ptimer(g, &t1);
673
674 debug = arb->debug == &arb->debug_pool[0] ?
675 &arb->debug_pool[1] : &arb->debug_pool[0];
676
677 memcpy(debug, arb->debug, sizeof(arb->debug_pool[0]));
678 debug->switch_num++;
679
680 if (debug->switch_num == 1) {
681 debug->switch_max = debug->switch_min =
682 debug->switch_avg = (t1-t0)/1000;
683 debug->switch_std = 0;
684 } else {
685 s64 prev_avg;
686 s64 curr = (t1-t0)/1000;
687
688 debug->switch_max = curr > debug->switch_max ?
689 curr : debug->switch_max;
690 debug->switch_min = debug->switch_min ?
691 (curr < debug->switch_min ?
692 curr : debug->switch_min) : curr;
693 prev_avg = debug->switch_avg;
694 debug->switch_avg = (curr +
695 (debug->switch_avg * (debug->switch_num-1))) /
696 debug->switch_num;
697 debug->switch_std +=
698 (curr - debug->switch_avg) * (curr - prev_avg);
699 }
700 /* commit changes before exchanging debug pointer */
701 nvgpu_smp_wmb();
702 arb->debug = debug;
703#endif
704
705exit_arb:
706 if (status < 0) {
707 nvgpu_err(g, "Error in arbiter update");
708 nvgpu_clk_arb_set_global_alarm(g,
709 EVENT(ALARM_CLOCK_ARBITER_FAILED));
710 }
711
712 current_alarm = (u32) nvgpu_atomic64_read(&arb->alarm_mask);
713 /* notify completion for all requests */
714 nvgpu_spinlock_acquire(&arb->requests_lock);
715 nvgpu_list_for_each_entry_safe(dev, tmp, &arb->requests,
716 nvgpu_clk_dev, node) {
717 nvgpu_atomic_set(&dev->poll_mask,
718 NVGPU_POLLIN | NVGPU_POLLRDNORM);
719 nvgpu_clk_arb_event_post_event(dev);
720 nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
721 nvgpu_list_del(&dev->node);
722 }
723 nvgpu_spinlock_release(&arb->requests_lock);
724
725 nvgpu_atomic_set(&arb->notification_queue.head,
726 nvgpu_atomic_read(&arb->notification_queue.tail));
727 /* notify event for all users */
728 nvgpu_spinlock_acquire(&arb->users_lock);
729 nvgpu_list_for_each_entry(dev, &arb->users, nvgpu_clk_dev, link) {
730 alarms_notified |=
731 nvgpu_clk_arb_notify(dev, arb->actual, current_alarm);
732 }
733 nvgpu_spinlock_release(&arb->users_lock);
734
735 /* clear alarms */
736 nvgpu_clk_arb_clear_global_alarm(g, alarms_notified &
737 ~EVENT(ALARM_GPU_LOST));
738}
739
740void gp106_clk_arb_cleanup(struct nvgpu_clk_arb *arb)
741{
742 struct gk20a *g = arb->g;
743 int index;
744
745 nvgpu_kfree(g, arb->gpc2clk_f_points);
746 nvgpu_kfree(g, arb->mclk_f_points);
747
748 for (index = 0; index < 2; index++) {
749 nvgpu_kfree(g,
750 arb->vf_table_pool[index].gpc2clk_points);
751 nvgpu_kfree(g, arb->vf_table_pool[index].mclk_points);
752 }
753
754 nvgpu_mutex_destroy(&g->clk_arb->pstate_lock);
755 nvgpu_kfree(g, g->clk_arb);
756
757 g->clk_arb = NULL;
758} \ No newline at end of file
diff --git a/drivers/gpu/nvgpu/gp106/clk_arb_gp106.h b/drivers/gpu/nvgpu/gp106/clk_arb_gp106.h
index fc4657f5..e2b2834c 100644
--- a/drivers/gpu/nvgpu/gp106/clk_arb_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/clk_arb_gp106.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -22,10 +22,16 @@
22#ifndef CLK_ARB_GP106_H 22#ifndef CLK_ARB_GP106_H
23#define CLK_ARB_GP106_H 23#define CLK_ARB_GP106_H
24 24
25struct nvgpu_clk_session;
26struct nvgpu_clk_arb;
27
25u32 gp106_get_arbiter_clk_domains(struct gk20a *g); 28u32 gp106_get_arbiter_clk_domains(struct gk20a *g);
26int gp106_get_arbiter_clk_range(struct gk20a *g, u32 api_domain, 29int gp106_get_arbiter_clk_range(struct gk20a *g, u32 api_domain,
27 u16 *min_mhz, u16 *max_mhz); 30 u16 *min_mhz, u16 *max_mhz);
28int gp106_get_arbiter_clk_default(struct gk20a *g, u32 api_domain, 31int gp106_get_arbiter_clk_default(struct gk20a *g, u32 api_domain,
29 u16 *default_mhz); 32 u16 *default_mhz);
33int gp106_init_clk_arbiter(struct gk20a *g);
34void gp106_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb);
35void gp106_clk_arb_cleanup(struct nvgpu_clk_arb *arb);
30 36
31#endif /* CLK_ARB_GP106_H */ 37#endif /* CLK_ARB_GP106_H */
diff --git a/drivers/gpu/nvgpu/gp106/clk_gp106.c b/drivers/gpu/nvgpu/gp106/clk_gp106.c
index 24b07112..dd7a2dd6 100644
--- a/drivers/gpu/nvgpu/gp106/clk_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/clk_gp106.c
@@ -32,7 +32,9 @@
32#include <nvgpu/list.h> 32#include <nvgpu/list.h>
33#include <nvgpu/clk_arb.h> 33#include <nvgpu/clk_arb.h>
34#include <nvgpu/timers.h> 34#include <nvgpu/timers.h>
35#include <nvgpu/pmu.h>
35 36
37#include "clk/clk.h"
36#include "gk20a/gk20a.h" 38#include "gk20a/gk20a.h"
37#include "gp106/mclk_gp106.h" 39#include "gp106/mclk_gp106.h"
38 40
@@ -243,6 +245,37 @@ read_err:
243 245
244} 246}
245 247
248int gp106_clk_domain_get_f_points(
249 struct gk20a *g,
250 u32 clkapidomain,
251 u32 *pfpointscount,
252 u16 *pfreqpointsinmhz)
253{
254 int status = -EINVAL;
255 struct clk_domain *pdomain;
256 u8 i;
257 struct clk_pmupstate *pclk = &g->clk_pmu;
258
259 if (pfpointscount == NULL)
260 return -EINVAL;
261
262 if ((pfreqpointsinmhz == NULL) && (*pfpointscount != 0))
263 return -EINVAL;
264
265 BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
266 struct clk_domain *, pdomain, i) {
267 if (pdomain->api_domain == clkapidomain) {
268 status = pdomain->clkdomainclkgetfpoints(g, pclk,
269 pdomain, pfpointscount,
270 pfreqpointsinmhz,
271 CLK_PROG_VFE_ENTRY_LOGIC);
272 return status;
273 }
274 }
275 return status;
276}
277
278
246#ifdef CONFIG_DEBUG_FS 279#ifdef CONFIG_DEBUG_FS
247static int gp106_get_rate_show(void *data , u64 *val) { 280static int gp106_get_rate_show(void *data , u64 *val) {
248 struct namemap_cfg *c = (struct namemap_cfg *) data; 281 struct namemap_cfg *c = (struct namemap_cfg *) data;
diff --git a/drivers/gpu/nvgpu/gp106/clk_gp106.h b/drivers/gpu/nvgpu/gp106/clk_gp106.h
index 97baa224..b7ab3164 100644
--- a/drivers/gpu/nvgpu/gp106/clk_gp106.h
+++ b/drivers/gpu/nvgpu/gp106/clk_gp106.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -56,5 +56,10 @@ int gp106_init_clk_support(struct gk20a *g);
56u32 gp106_crystal_clk_hz(struct gk20a *g); 56u32 gp106_crystal_clk_hz(struct gk20a *g);
57unsigned long gp106_clk_measure_freq(struct gk20a *g, u32 api_domain); 57unsigned long gp106_clk_measure_freq(struct gk20a *g, u32 api_domain);
58int gp106_suspend_clk_support(struct gk20a *g); 58int gp106_suspend_clk_support(struct gk20a *g);
59int gp106_clk_domain_get_f_points(
60 struct gk20a *g,
61 u32 clkapidomain,
62 u32 *pfpointscount,
63 u16 *pfreqpointsinmhz);
59 64
60#endif /* CLK_GP106_H */ 65#endif /* CLK_GP106_H */
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index 78a3ea63..167bfaac 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -675,6 +675,7 @@ static const struct gpu_ops gp106_ops = {
675 .get_crystal_clk_hz = gp106_crystal_clk_hz, 675 .get_crystal_clk_hz = gp106_crystal_clk_hz,
676 .measure_freq = gp106_clk_measure_freq, 676 .measure_freq = gp106_clk_measure_freq,
677 .suspend_clk_support = gp106_suspend_clk_support, 677 .suspend_clk_support = gp106_suspend_clk_support,
678 .clk_domain_get_f_points = gp106_clk_domain_get_f_points,
678 .mclk_init = gp106_mclk_init, 679 .mclk_init = gp106_mclk_init,
679 .mclk_change = gp106_mclk_change, 680 .mclk_change = gp106_mclk_change,
680 .mclk_deinit = gp106_mclk_deinit, 681 .mclk_deinit = gp106_mclk_deinit,
@@ -684,6 +685,9 @@ static const struct gpu_ops gp106_ops = {
684 .get_arbiter_clk_range = gp106_get_arbiter_clk_range, 685 .get_arbiter_clk_range = gp106_get_arbiter_clk_range,
685 .get_arbiter_clk_default = gp106_get_arbiter_clk_default, 686 .get_arbiter_clk_default = gp106_get_arbiter_clk_default,
686 .get_current_pstate = nvgpu_clk_arb_get_current_pstate, 687 .get_current_pstate = nvgpu_clk_arb_get_current_pstate,
688 .arbiter_clk_init = gp106_init_clk_arbiter,
689 .clk_arb_run_arbiter_cb = gp106_clk_arb_run_arbiter_cb,
690 .clk_arb_cleanup = gp106_clk_arb_cleanup,
687 }, 691 },
688 .regops = { 692 .regops = {
689 .exec_regops = exec_regops_gk20a, 693 .exec_regops = exec_regops_gk20a,
@@ -849,6 +853,7 @@ int gp106_init_hal(struct gk20a *g)
849 gops->clk.mclk_init = gp106_ops.clk.mclk_init; 853 gops->clk.mclk_init = gp106_ops.clk.mclk_init;
850 gops->clk.mclk_change = gp106_ops.clk.mclk_change; 854 gops->clk.mclk_change = gp106_ops.clk.mclk_change;
851 gops->clk.mclk_deinit = gp106_ops.clk.mclk_deinit; 855 gops->clk.mclk_deinit = gp106_ops.clk.mclk_deinit;
856 gops->clk.clk_domain_get_f_points = gp106_ops.clk.clk_domain_get_f_points;
852 857
853 gops->clk_arb = gp106_ops.clk_arb; 858 gops->clk_arb = gp106_ops.clk_arb;
854 gops->regops = gp106_ops.regops; 859 gops->regops = gp106_ops.regops;