author:    Debarshi Dutta <ddutta@nvidia.com>                      2018-09-04 01:25:33 -0400
committer: mobile promotions <svcmobile_promotions@nvidia.com>     2018-09-20 13:50:12 -0400
commit:    519948a9c664020fd0b37118749faad2dfd73d97 (patch)
tree:      1eb57dc393075670331ca89cf366a093d75d3f47 /drivers/gpu/nvgpu/gp10b
parent:    1c7258411da89aea5279e9a8d117479928f8bf64 (diff)
gpu: nvgpu: add igpu support for clk_arbiter.
This patch constructs clk_arbiter specific code for gp10b as well as gv11b and does the necessary plumbing in the clk_arbiter code. The changes made are as follows:

1) Construct the clk_arb_gp10b.* files, which add support for the clk_arb related HALs, including nvgpu_clk_arb_init and nvgpu_clk_arb_cb. These do not yet support debugfs or the VFUpdateEvent, and consequently there is no support for arb->notifications.

2) Add gpcclk specific variables corresponding to every gpc2clk variable in the clk_arb related structs (the 2x GPC2CLK/GPCCLK relationship is sketched below).

3) Set the Linux-specific support_clk_freq_controller flag to true in platform_gp10b.c and platform_gv11b.c.

4) Increment the clk_arb_worker.put atomic during worker deinit so that the worker thread can be stopped.

5) Add the clk_arb_events_supported flag to struct nvgpu_clk_arb. This flag selectively accounts for the extra refcount taken in OS specific code, i.e. nvgpu_clk_arb_commit_request_fd. For igpus, the extra refcount is dropped during nvgpu_clk_arb_release_completion_dev.

Bug 2061372

Change-Id: Id00acb106db2b46e55aa0324034a16a73723c078
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1774281
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
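For reference, the 2x relationship between the API-facing GPC2CLK domain and the igpu GPCCLK domain that points 1) and 2) rely on can be summarized in a small, self-contained C sketch. This is illustrative only; the helper names below are invented and are not part of nvgpu:

    /* Illustrative only: the 2x GPC2CLK <-> GPCCLK conversion. */
    #include <stdint.h>
    #include <stdio.h>

    /* API-facing GPC2CLK target (MHz) -> igpu GPCCLK request (Hz). */
    static unsigned long gpc2clk_mhz_to_gpcclk_hz(uint16_t gpc2clk_mhz)
    {
            return ((unsigned long)gpc2clk_mhz / 2UL) * 1000000UL;
    }

    /* igpu GPCCLK value (MHz) -> API-facing GPC2CLK value (MHz). */
    static uint16_t gpcclk_mhz_to_gpc2clk_mhz(uint16_t gpcclk_mhz)
    {
            return (uint16_t)(gpcclk_mhz * 2U);
    }

    int main(void)
    {
            uint16_t api_target_mhz = 2000U;  /* GPC2CLK as seen by the API */

            printf("GPCCLK request: %lu Hz\n",
                   gpc2clk_mhz_to_gpcclk_hz(api_target_mhz));
            printf("GPCCLK 1000 MHz reported as GPC2CLK: %u MHz\n",
                   (unsigned)gpcclk_mhz_to_gpc2clk_mhz(1000U));
            return 0;
    }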
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b')
-rw-r--r--  drivers/gpu/nvgpu/gp10b/clk_arb_gp10b.c  417
-rw-r--r--  drivers/gpu/nvgpu/gp10b/clk_arb_gp10b.h   39
-rw-r--r--  drivers/gpu/nvgpu/gp10b/hal_gp10b.c       11
3 files changed, 467 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/clk_arb_gp10b.c b/drivers/gpu/nvgpu/gp10b/clk_arb_gp10b.c
new file mode 100644
index 00000000..4dcc3ca5
--- /dev/null
+++ b/drivers/gpu/nvgpu/gp10b/clk_arb_gp10b.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "gk20a/gk20a.h"
+#include <nvgpu/clk_arb.h>
+
+#include "clk_arb_gp10b.h"
+
+u32 gp10b_get_arbiter_clk_domains(struct gk20a *g)
+{
+	(void)g;
+	clk_arb_dbg(g, " ");
+	return CTRL_CLK_DOMAIN_GPC2CLK;
+}
+
+int gp10b_get_arbiter_f_points(struct gk20a *g, u32 api_domain,
+		u32 *num_points, u16 *freqs_in_mhz)
+{
+	int ret = 0;
+	u32 i;
+	bool is_freq_list_available = false;
+
+	if (*num_points != 0U) {
+		is_freq_list_available = true;
+	}
+
+	clk_arb_dbg(g, " ");
+
+	switch (api_domain) {
+	case CTRL_CLK_DOMAIN_GPC2CLK:
+		ret = g->ops.clk.clk_domain_get_f_points(g, CTRL_CLK_DOMAIN_GPCCLK,
+				num_points, freqs_in_mhz);
+
+		/* multiply by 2 for GPC2CLK */
+		if (ret == 0 && is_freq_list_available) {
+			for (i = 0U; i < *num_points; i++) {
+				freqs_in_mhz[i] *= 2U;
+			}
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+int gp10b_get_arbiter_clk_range(struct gk20a *g, u32 api_domain,
+		u16 *min_mhz, u16 *max_mhz)
+{
+	int ret = 0;
+
+	clk_arb_dbg(g, " ");
+
+	switch (api_domain) {
+	case CTRL_CLK_DOMAIN_GPC2CLK:
+		ret = g->ops.clk.get_clk_range(g, CTRL_CLK_DOMAIN_GPCCLK,
+				min_mhz, max_mhz);
+
+		if (ret == 0) {
+			*min_mhz *= 2U;
+			*max_mhz *= 2U;
+		}
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+int gp10b_get_arbiter_clk_default(struct gk20a *g, u32 api_domain,
+		u16 *default_mhz)
+{
+	int ret = 0;
+	u16 min_mhz, max_mhz;
+
+	clk_arb_dbg(g, " ");
+
+	switch (api_domain) {
+	case CTRL_CLK_DOMAIN_GPC2CLK:
+		ret = gp10b_get_arbiter_clk_range(g, api_domain,
+				&min_mhz, &max_mhz);
+
+		if (ret == 0) {
+			*default_mhz = min_mhz;
+		}
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+int gp10b_init_clk_arbiter(struct gk20a *g)
+{
+	struct nvgpu_clk_arb *arb = NULL;
+	u16 default_mhz;
+	int err;
+	int index;
+	struct nvgpu_clk_vf_table *table;
+
+	clk_arb_dbg(g, " ");
+
+	if (g->clk_arb != NULL) {
+		return 0;
+	}
+
+	arb = nvgpu_kzalloc(g, sizeof(struct nvgpu_clk_arb));
+	if (arb == NULL) {
+		return -ENOMEM;
+	}
+
+	arb->clk_arb_events_supported = false;
+
+	err = nvgpu_mutex_init(&arb->pstate_lock);
+	if (err != 0) {
+		goto mutex_fail;
+	}
+
+	nvgpu_spinlock_init(&arb->sessions_lock);
+	nvgpu_spinlock_init(&arb->users_lock);
+	nvgpu_spinlock_init(&arb->requests_lock);
+
+	arb->gpc2clk_f_points = nvgpu_kcalloc(g, MAX_F_POINTS, sizeof(u16));
+	if (arb->gpc2clk_f_points == NULL) {
+		err = -ENOMEM;
+		goto init_fail;
+	}
+
+	for (index = 0; index < 2; index++) {
+		table = &arb->vf_table_pool[index];
+		table->gpc2clk_num_points = MAX_F_POINTS;
+
+		table->gpc2clk_points = (struct nvgpu_clk_vf_point *)
+			nvgpu_kcalloc(g, MAX_F_POINTS,
+				sizeof(struct nvgpu_clk_vf_point));
+		if (table->gpc2clk_points == NULL) {
+			err = -ENOMEM;
+			goto init_fail;
+		}
+	}
+
+	g->clk_arb = arb;
+	arb->g = g;
+
+	err = g->ops.clk_arb.get_arbiter_clk_default(g,
+			CTRL_CLK_DOMAIN_GPC2CLK, &default_mhz);
+	if (err < 0) {
+		err = -EINVAL;
+		goto init_fail;
+	}
+
+	arb->gpc2clk_default_mhz = default_mhz;
+
+	err = g->ops.clk_arb.get_arbiter_clk_range(g, CTRL_CLK_DOMAIN_GPC2CLK,
+			&arb->gpc2clk_min, &arb->gpc2clk_max);
+
+	if (err < 0) {
+		err = -EINVAL;
+		goto init_fail;
+	}
+
+	arb->actual = &arb->actual_pool[0];
+
+	nvgpu_atomic_set(&arb->req_nr, 0);
+
+	nvgpu_atomic64_set(&arb->alarm_mask, 0);
+	err = nvgpu_clk_notification_queue_alloc(g, &arb->notification_queue,
+			DEFAULT_EVENT_NUMBER);
+	if (err < 0) {
+		goto init_fail;
+	}
+
+	nvgpu_init_list_node(&arb->users);
+	nvgpu_init_list_node(&arb->sessions);
+	nvgpu_init_list_node(&arb->requests);
+
+	err = nvgpu_cond_init(&arb->request_wq);
+	if (err < 0) {
+		goto init_fail;
+	}
+
+	nvgpu_init_list_node(&arb->update_arb_work_item.worker_item);
+	arb->update_arb_work_item.arb = arb;
+	arb->update_arb_work_item.item_type = CLK_ARB_WORK_UPDATE_ARB;
+
+	err = nvgpu_clk_arb_worker_init(g);
+	if (err < 0) {
+		goto init_fail;
+	}
+
+	nvgpu_clk_arb_worker_enqueue(g, &arb->update_arb_work_item);
+
+	do {
+		/* Check that the first run has completed */
+		nvgpu_smp_mb();
+		NVGPU_COND_WAIT_INTERRUPTIBLE(&arb->request_wq,
+			nvgpu_atomic_read(&arb->req_nr) != 0, 0);
+	} while (nvgpu_atomic_read(&arb->req_nr) == 0);
+
+
+	return arb->status;
+
+init_fail:
+	nvgpu_kfree(g, arb->gpc2clk_f_points);
+
+	for (index = 0; index < 2; index++) {
+		nvgpu_kfree(g, arb->vf_table_pool[index].gpc2clk_points);
+	}
+
+	nvgpu_mutex_destroy(&arb->pstate_lock);
+
+mutex_fail:
+	nvgpu_kfree(g, arb);
+
+	return err;
+}
+
+void gp10b_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb)
+{
+	struct nvgpu_clk_session *session;
+	struct nvgpu_clk_dev *dev;
+	struct nvgpu_clk_dev *tmp;
+	struct nvgpu_clk_arb_target *target, *actual;
+	struct gk20a *g = arb->g;
+
+	bool gpc2clk_set;
+
+	int status = 0;
+	unsigned long rounded_rate = 0;
+
+	u16 gpc2clk_target, gpc2clk_session_target;
+
+	clk_arb_dbg(g, " ");
+
+	/* Only one arbiter should be running */
+	gpc2clk_target = 0;
+
+	nvgpu_spinlock_acquire(&arb->sessions_lock);
+	nvgpu_list_for_each_entry(session, &arb->sessions,
+			nvgpu_clk_session, link) {
+		if (session->zombie) {
+			continue;
+		}
+		gpc2clk_set = false;
+		target = (session->target == &session->target_pool[0] ?
+				&session->target_pool[1] :
+				&session->target_pool[0]);
+		nvgpu_spinlock_acquire(&session->session_lock);
+		if (nvgpu_list_empty(&session->targets) == 0) {
+			/* Copy over state */
+			target->gpc2clk = session->target->gpc2clk;
+			/* Query the latest committed request */
+			nvgpu_list_for_each_entry_safe(dev, tmp, &session->targets,
+					nvgpu_clk_dev, node) {
+				if (!gpc2clk_set &&
+					dev->gpc2clk_target_mhz != (u16)0) {
+					target->gpc2clk =
+						dev->gpc2clk_target_mhz;
+					gpc2clk_set = true;
+				}
+				nvgpu_ref_get(&dev->refcount);
+				nvgpu_list_del(&dev->node);
+				nvgpu_spinlock_acquire(&arb->requests_lock);
+				nvgpu_list_add(&dev->node, &arb->requests);
+				nvgpu_spinlock_release(&arb->requests_lock);
+			}
+			session->target = target;
+		}
+		nvgpu_spinlock_release(&session->session_lock);
+
+		gpc2clk_target =
+			gpc2clk_target > session->target->gpc2clk ?
+			gpc2clk_target : session->target->gpc2clk;
+	}
+	nvgpu_spinlock_release(&arb->sessions_lock);
+
+	gpc2clk_target = (gpc2clk_target > (u16)0) ? gpc2clk_target :
+			arb->gpc2clk_default_mhz;
+
+	if (gpc2clk_target < arb->gpc2clk_min) {
+		gpc2clk_target = arb->gpc2clk_min;
+	}
+
+	if (gpc2clk_target > arb->gpc2clk_max) {
+		gpc2clk_target = arb->gpc2clk_max;
+	}
+
+	gpc2clk_session_target = gpc2clk_target;
+
+	if (arb->actual->gpc2clk == gpc2clk_target) {
+		nvgpu_atomic_inc(&arb->req_nr);
+		nvgpu_cond_signal_interruptible(&arb->request_wq);
+		goto exit_arb;
+	}
+
+	nvgpu_mutex_acquire(&arb->pstate_lock);
+
+	/* Get the rounded rate in Hz for the igpu;
+	 * pass (gpcclk) freq = (gpc2clk) freq / 2.
+	 */
+	status = g->ops.clk.clk_get_round_rate(g, CTRL_CLK_DOMAIN_GPCCLK,
+			(gpc2clk_session_target / 2) * 1000000UL, &rounded_rate);
+
+	clk_arb_dbg(g, "rounded_rate: %lu\n",
+			rounded_rate);
+
+	if (status < 0) {
+		arb->status = status;
+		nvgpu_mutex_release(&arb->pstate_lock);
+
+		/* make status visible */
+		nvgpu_smp_mb();
+		nvgpu_atomic_inc(&arb->req_nr);
+		nvgpu_cond_signal_interruptible(&arb->request_wq);
+		goto exit_arb;
+	}
+
+	/* the igpu set_rate accepts freq in Hz */
+	status = g->ops.clk.set_rate(g, CTRL_CLK_DOMAIN_GPCCLK, rounded_rate);
+
+	if (status < 0) {
+		arb->status = status;
+		nvgpu_mutex_release(&arb->pstate_lock);
+
+		/* make status visible */
+		nvgpu_smp_mb();
+		nvgpu_atomic_inc(&arb->req_nr);
+		nvgpu_cond_signal_interruptible(&arb->request_wq);
+		goto exit_arb;
+	}
+
+	actual = ((NV_ACCESS_ONCE(arb->actual)) == &arb->actual_pool[0] ?
+			&arb->actual_pool[1] : &arb->actual_pool[0]);
+
+	/* do not reorder this pointer */
+	nvgpu_smp_rmb();
+	actual->gpc2clk = gpc2clk_target;
+	arb->status = 0;
+
+	/* Make changes visible to other threads */
+	nvgpu_smp_wmb();
+	arb->actual = actual;
+
+	/* status must be visible before atomic inc */
+	nvgpu_smp_wmb();
+	nvgpu_atomic_inc(&arb->req_nr);
+
+	/* Unlock pstate change for PG */
+	nvgpu_mutex_release(&arb->pstate_lock);
+
+	nvgpu_cond_signal_interruptible(&arb->request_wq);
+
+exit_arb:
+	if (status < 0) {
+		nvgpu_err(g, "Error in arbiter update");
+	}
+
+	/* notify completion for all requests */
+	nvgpu_spinlock_acquire(&arb->requests_lock);
+	nvgpu_list_for_each_entry_safe(dev, tmp, &arb->requests,
+			nvgpu_clk_dev, node) {
+		nvgpu_atomic_set(&dev->poll_mask, NVGPU_POLLIN | NVGPU_POLLRDNORM);
+		nvgpu_clk_arb_event_post_event(dev);
+		nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
+		nvgpu_list_del(&dev->node);
+	}
+	nvgpu_spinlock_release(&arb->requests_lock);
+
+	clk_arb_dbg(g, "done");
+}
+
+void gp10b_clk_arb_cleanup(struct nvgpu_clk_arb *arb)
+{
+	struct gk20a *g = arb->g;
+	int index;
+
+	nvgpu_kfree(g, arb->gpc2clk_f_points);
+	nvgpu_kfree(g, arb->mclk_f_points);
+
+	for (index = 0; index < 2; index++) {
+		nvgpu_kfree(g,
+			arb->vf_table_pool[index].gpc2clk_points);
+		nvgpu_kfree(g, arb->vf_table_pool[index].mclk_points);
+	}
+
+	nvgpu_mutex_destroy(&g->clk_arb->pstate_lock);
+	nvgpu_kfree(g, g->clk_arb);
+
+	g->clk_arb = NULL;
+}
\ No newline at end of file
diff --git a/drivers/gpu/nvgpu/gp10b/clk_arb_gp10b.h b/drivers/gpu/nvgpu/gp10b/clk_arb_gp10b.h
new file mode 100644
index 00000000..6b9966c5
--- /dev/null
+++ b/drivers/gpu/nvgpu/gp10b/clk_arb_gp10b.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef CLK_ARB_GP10B_H
+#define CLK_ARB_GP10B_H
+
+struct nvgpu_clk_session;
+struct nvgpu_clk_arb;
+
+u32 gp10b_get_arbiter_clk_domains(struct gk20a *g);
+int gp10b_get_arbiter_f_points(struct gk20a *g, u32 api_domain,
+		u32 *num_points, u16 *freqs_in_mhz);
+int gp10b_get_arbiter_clk_range(struct gk20a *g, u32 api_domain,
+		u16 *min_mhz, u16 *max_mhz);
+int gp10b_get_arbiter_clk_default(struct gk20a *g, u32 api_domain,
+		u16 *default_mhz);
+int gp10b_init_clk_arbiter(struct gk20a *g);
+void gp10b_clk_arb_run_arbiter_cb(struct nvgpu_clk_arb *arb);
+void gp10b_clk_arb_cleanup(struct nvgpu_clk_arb *arb);
+
+#endif /* CLK_ARB_GP10B_H */
diff --git a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
index 769cab74..1f9e84d3 100644
--- a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
@@ -68,6 +68,7 @@
 #include "gp10b/fifo_gp10b.h"
 #include "gp10b/regops_gp10b.h"
 #include "gp10b/ecc_gp10b.h"
+#include "gp10b/clk_arb_gp10b.h"
 
 #include "gm20b/gr_gm20b.h"
 #include "gm20b/fifo_gm20b.h"
@@ -611,6 +612,15 @@ static const struct gpu_ops gp10b_ops = {
 		.get_irqdest = gk20a_pmu_get_irqdest,
 		.is_debug_mode_enabled = gm20b_pmu_is_debug_mode_en,
 	},
+	.clk_arb = {
+		.get_arbiter_clk_domains = gp10b_get_arbiter_clk_domains,
+		.get_arbiter_f_points = gp10b_get_arbiter_f_points,
+		.get_arbiter_clk_range = gp10b_get_arbiter_clk_range,
+		.get_arbiter_clk_default = gp10b_get_arbiter_clk_default,
+		.arbiter_clk_init = gp10b_init_clk_arbiter,
+		.clk_arb_run_arbiter_cb = gp10b_clk_arb_run_arbiter_cb,
+		.clk_arb_cleanup = gp10b_clk_arb_cleanup,
+	},
 	.regops = {
 		.exec_regops = exec_regops_gk20a,
 		.get_global_whitelist_ranges =
@@ -735,6 +745,7 @@ int gp10b_init_hal(struct gk20a *g)
 	gops->pramin = gp10b_ops.pramin;
 	gops->therm = gp10b_ops.therm;
 	gops->pmu = gp10b_ops.pmu;
+	gops->clk_arb = gp10b_ops.clk_arb;
 	gops->regops = gp10b_ops.regops;
 	gops->mc = gp10b_ops.mc;
 	gops->debug = gp10b_ops.debug;
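For context, the hunks above wire the new gp10b functions into the per-chip ops table, and common code then only calls through those pointers. Below is a minimal, self-contained illustration of that dispatch pattern; the types and names are simplified stand-ins invented for this sketch, not the real gk20a/gpu_ops definitions:

    /* Sketch of HAL-style dispatch through a per-chip ops table. */
    #include <stdint.h>
    #include <stdio.h>

    struct chip;                            /* stand-in for struct gk20a */

    struct clk_arb_ops {
            uint32_t (*get_arbiter_clk_domains)(struct chip *c);
            int (*arbiter_clk_init)(struct chip *c);
    };

    struct chip {
            struct clk_arb_ops clk_arb;
            int clk_arb_initialized;
    };

    /* gp10b-flavoured implementations (illustrative only). */
    static uint32_t sketch_gp10b_domains(struct chip *c)
    {
            (void)c;
            return 0x1U;                    /* pretend GPC2CLK bit */
    }

    static int sketch_gp10b_arb_init(struct chip *c)
    {
            c->clk_arb_initialized = 1;
            return 0;
    }

    int main(void)
    {
            struct chip gp10b = {
                    .clk_arb = {
                            .get_arbiter_clk_domains = sketch_gp10b_domains,
                            .arbiter_clk_init = sketch_gp10b_arb_init,
                    },
            };

            /* Common code: dispatch without knowing which chip this is. */
            if (gp10b.clk_arb.get_arbiter_clk_domains(&gp10b) != 0U) {
                    gp10b.clk_arb.arbiter_clk_init(&gp10b);
            }

            printf("arbiter initialized: %d\n", gp10b.clk_arb_initialized);
            return 0;
    }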