diff options
author | Joshua Bakita <bakitajoshua@gmail.com> | 2024-09-25 16:09:09 -0400 |
---|---|---|
committer | Joshua Bakita <bakitajoshua@gmail.com> | 2024-09-25 16:09:09 -0400 |
commit | f347fde22f1297e4f022600d201780d5ead78114 (patch) | |
tree | 76be305d6187003a1e0486ff6e91efb1062ae118 /include/os/linux/ioctl_clk_arb.c | |
parent | 8340d234d78a7d0f46c11a584de538148b78b7cb (diff) |
Delete no-longer-needed nvgpu headers (refs: HEAD, master, jbakita-wip)
The dependency on these was removed in commit 8340d234.
Diffstat (limited to 'include/os/linux/ioctl_clk_arb.c')
-rw-r--r-- | include/os/linux/ioctl_clk_arb.c | 583 |
1 files changed, 0 insertions, 583 deletions
diff --git a/include/os/linux/ioctl_clk_arb.c b/include/os/linux/ioctl_clk_arb.c deleted file mode 100644 index 9f32102..0000000 --- a/include/os/linux/ioctl_clk_arb.c +++ /dev/null | |||
@@ -1,583 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * This software is licensed under the terms of the GNU General Public | ||
5 | * License version 2, as published by the Free Software Foundation, and | ||
6 | * may be copied, distributed, and modified under those terms. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | */ | ||
16 | |||
17 | #include <linux/cdev.h> | ||
18 | #include <linux/file.h> | ||
19 | #include <linux/anon_inodes.h> | ||
20 | #include <linux/uaccess.h> | ||
21 | #include <linux/poll.h> | ||
22 | #ifdef CONFIG_DEBUG_FS | ||
23 | #include <linux/debugfs.h> | ||
24 | #endif | ||
25 | #include <uapi/linux/nvgpu.h> | ||
26 | |||
27 | #include <nvgpu/bitops.h> | ||
28 | #include <nvgpu/lock.h> | ||
29 | #include <nvgpu/kmem.h> | ||
30 | #include <nvgpu/atomic.h> | ||
31 | #include <nvgpu/bug.h> | ||
32 | #include <nvgpu/kref.h> | ||
33 | #include <nvgpu/log.h> | ||
34 | #include <nvgpu/barrier.h> | ||
35 | #include <nvgpu/cond.h> | ||
36 | #include <nvgpu/list.h> | ||
37 | #include <nvgpu/clk_arb.h> | ||
38 | #include <nvgpu/gk20a.h> | ||
39 | |||
40 | #include "clk/clk.h" | ||
41 | #include "pstate/pstate.h" | ||
42 | #include "lpwr/lpwr.h" | ||
43 | #include "volt/volt.h" | ||
44 | |||
45 | #ifdef CONFIG_DEBUG_FS | ||
46 | #include "os_linux.h" | ||
47 | #endif | ||
48 | |||
/*
 * Release callback for a completion ("request") fd.
 *
 * Unlinks the request node from its session's target list under both the
 * session lock and the arbiter's requests lock (acquired in that order,
 * released in reverse), then drops the references taken when the fd was
 * installed/committed.
 */
static int nvgpu_clk_arb_release_completion_dev(struct inode *inode,
		struct file *filp)
{
	struct nvgpu_clk_dev *dev = filp->private_data;
	struct nvgpu_clk_session *session = dev->session;
	struct gk20a *g = session->g;
	struct nvgpu_clk_arb *arb = g->clk_arb;

	clk_arb_dbg(g, " ");

	nvgpu_spinlock_acquire(&session->session_lock);
	nvgpu_spinlock_acquire(&arb->requests_lock);

	nvgpu_list_del(&dev->node);

	nvgpu_spinlock_release(&arb->requests_lock);
	nvgpu_spinlock_release(&session->session_lock);

	/* This is done to account for the extra refcount taken in
	 * nvgpu_clk_arb_commit_request_fd without events support in iGPU
	 */
	if (!arb->clk_arb_events_supported) {
		nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
	}

	/* Drop the install-time references on the dev and its session. */
	nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
	nvgpu_ref_put(&session->refcount, nvgpu_clk_arb_free_session);
	return 0;
}
78 | |||
79 | static inline unsigned int nvgpu_convert_poll_mask(unsigned int nvgpu_poll_mask) | ||
80 | { | ||
81 | unsigned int poll_mask = 0; | ||
82 | |||
83 | if (nvgpu_poll_mask & NVGPU_POLLIN) | ||
84 | poll_mask |= POLLIN; | ||
85 | if (nvgpu_poll_mask & NVGPU_POLLPRI) | ||
86 | poll_mask |= POLLPRI; | ||
87 | if (nvgpu_poll_mask & NVGPU_POLLOUT) | ||
88 | poll_mask |= POLLOUT; | ||
89 | if (nvgpu_poll_mask & NVGPU_POLLRDNORM) | ||
90 | poll_mask |= POLLRDNORM; | ||
91 | if (nvgpu_poll_mask & NVGPU_POLLHUP) | ||
92 | poll_mask |= POLLHUP; | ||
93 | |||
94 | return poll_mask; | ||
95 | } | ||
96 | |||
/*
 * poll() callback shared by completion and event fds.
 *
 * Registers the caller on the dev's readout wait queue, then atomically
 * consumes (exchanges to 0) the pending poll mask so each event is
 * reported to poll() at most once.
 */
static unsigned int nvgpu_clk_arb_poll_dev(struct file *filp, poll_table *wait)
{
	struct nvgpu_clk_dev *dev = filp->private_data;

	clk_arb_dbg(dev->session->g, " ");

	poll_wait(filp, &dev->readout_wq.wq, wait);
	return nvgpu_convert_poll_mask(nvgpu_atomic_xchg(&dev->poll_mask, 0));
}
106 | |||
/* Wake any readers/pollers blocked on this dev's readout wait queue. */
void nvgpu_clk_arb_event_post_event(struct nvgpu_clk_dev *dev)
{
	nvgpu_cond_broadcast_interruptible(&dev->readout_wq);
}
111 | |||
/*
 * Release callback for an event fd.
 *
 * If the arbiter still exists, removes the dev from the arbiter's user
 * list under users_lock and frees its notification queue.  Finally drops
 * the install-time references on the session and the dev.
 */
static int nvgpu_clk_arb_release_event_dev(struct inode *inode,
		struct file *filp)
{
	struct nvgpu_clk_dev *dev = filp->private_data;
	struct nvgpu_clk_session *session = dev->session;
	struct nvgpu_clk_arb *arb;

	arb = session->g->clk_arb;

	clk_arb_dbg(session->g, " ");

	/* arb may already be gone (e.g. during teardown) — guard it. */
	if (arb) {
		nvgpu_spinlock_acquire(&arb->users_lock);
		nvgpu_list_del(&dev->link);
		nvgpu_spinlock_release(&arb->users_lock);
		nvgpu_clk_notification_queue_free(arb->g, &dev->queue);
	}

	nvgpu_ref_put(&session->refcount, nvgpu_clk_arb_free_session);
	nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);

	return 0;
}
135 | |||
136 | static inline u32 nvgpu_convert_gpu_event(u32 nvgpu_event) | ||
137 | { | ||
138 | u32 nvgpu_gpu_event; | ||
139 | |||
140 | switch (nvgpu_event) { | ||
141 | case NVGPU_EVENT_VF_UPDATE: | ||
142 | nvgpu_gpu_event = NVGPU_GPU_EVENT_VF_UPDATE; | ||
143 | break; | ||
144 | case NVGPU_EVENT_ALARM_TARGET_VF_NOT_POSSIBLE: | ||
145 | nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_TARGET_VF_NOT_POSSIBLE; | ||
146 | break; | ||
147 | case NVGPU_EVENT_ALARM_LOCAL_TARGET_VF_NOT_POSSIBLE: | ||
148 | nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_LOCAL_TARGET_VF_NOT_POSSIBLE; | ||
149 | break; | ||
150 | case NVGPU_EVENT_ALARM_CLOCK_ARBITER_FAILED: | ||
151 | nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_CLOCK_ARBITER_FAILED; | ||
152 | break; | ||
153 | case NVGPU_EVENT_ALARM_VF_TABLE_UPDATE_FAILED: | ||
154 | nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_VF_TABLE_UPDATE_FAILED; | ||
155 | break; | ||
156 | case NVGPU_EVENT_ALARM_THERMAL_ABOVE_THRESHOLD: | ||
157 | nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_THERMAL_ABOVE_THRESHOLD; | ||
158 | break; | ||
159 | case NVGPU_EVENT_ALARM_POWER_ABOVE_THRESHOLD: | ||
160 | nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_POWER_ABOVE_THRESHOLD; | ||
161 | break; | ||
162 | case NVGPU_EVENT_ALARM_GPU_LOST: | ||
163 | nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_GPU_LOST; | ||
164 | break; | ||
165 | default: | ||
166 | /* Control shouldn't come here */ | ||
167 | nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_GPU_LOST + 1; | ||
168 | break; | ||
169 | } | ||
170 | return nvgpu_gpu_event; | ||
171 | } | ||
172 | |||
/*
 * Pop the oldest pending notification from the dev's lock-free ring
 * buffer, if any.
 *
 * Returns a bitmask of UAPI event codes (0 if nothing is pending).  When
 * an event is consumed and @info is non-NULL, fills info->event_id and
 * info->timestamp and advances the queue head.
 *
 * NOTE(review): the head/tail counters wrap; the clamp below drops
 * entries the writer has already overwritten when the reader falls more
 * than queue.size behind.  _WRAPGTEQ is presumably a wrap-safe ">="
 * comparison — defined elsewhere, not visible here.
 */
static inline u32 __pending_event(struct nvgpu_clk_dev *dev,
				  struct nvgpu_gpu_event_info *info) {

	u32 tail, head;
	u32 events = 0;
	struct nvgpu_clk_notification *p_notif;

	tail = nvgpu_atomic_read(&dev->queue.tail);
	head = nvgpu_atomic_read(&dev->queue.head);

	/* Skip forward if the writer has lapped us by a full queue. */
	head = (tail - head) < dev->queue.size ? head : tail - dev->queue.size;

	if (_WRAPGTEQ(tail, head) && info) {
		head++;
		p_notif = &dev->queue.notifications[head % dev->queue.size];
		events |= nvgpu_convert_gpu_event(p_notif->notification);
		info->event_id = ffs(events) - 1;
		info->timestamp = p_notif->timestamp;
		nvgpu_atomic_set(&dev->queue.head, head);
	}

	return events;
}
196 | |||
/*
 * read() callback for an event fd.
 *
 * Copies exactly one struct nvgpu_gpu_event_info to userspace.  Returns 0
 * if the remaining buffer is too small, -EAGAIN for a non-blocking fd
 * with no pending event, or a signal/wait error.  Otherwise blocks
 * (interruptibly) until an event is queued.
 */
static ssize_t nvgpu_clk_arb_read_event_dev(struct file *filp, char __user *buf,
					size_t size, loff_t *off)
{
	struct nvgpu_clk_dev *dev = filp->private_data;
	struct nvgpu_gpu_event_info info;
	ssize_t err;

	clk_arb_dbg(dev->session->g,
			"filp=%p, buf=%p, size=%zu", filp, buf, size);

	if ((size - *off) < sizeof(info))
		return 0;

	memset(&info, 0, sizeof(info));
	/* Get the oldest event from the queue */
	while (!__pending_event(dev, &info)) {
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		/* The wait condition may consume the event itself; if it
		 * filled info (timestamp set), we are done waiting.
		 */
		err = NVGPU_COND_WAIT_INTERRUPTIBLE(&dev->readout_wq,
				__pending_event(dev, &info), 0);
		if (err)
			return err;
		if (info.timestamp)
			break;
	}

	if (copy_to_user(buf + *off, &info, sizeof(info)))
		return -EFAULT;

	return sizeof(info);
}
228 | |||
229 | static int nvgpu_clk_arb_set_event_filter(struct nvgpu_clk_dev *dev, | ||
230 | struct nvgpu_gpu_set_event_filter_args *args) | ||
231 | { | ||
232 | struct gk20a *g = dev->session->g; | ||
233 | u32 mask; | ||
234 | |||
235 | nvgpu_log(g, gpu_dbg_fn, " "); | ||
236 | |||
237 | if (args->flags) | ||
238 | return -EINVAL; | ||
239 | |||
240 | if (args->size != 1) | ||
241 | return -EINVAL; | ||
242 | |||
243 | if (copy_from_user(&mask, (void __user *) args->buffer, | ||
244 | args->size * sizeof(u32))) | ||
245 | return -EFAULT; | ||
246 | |||
247 | /* update alarm mask */ | ||
248 | nvgpu_atomic_set(&dev->enabled_mask, mask); | ||
249 | |||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | static long nvgpu_clk_arb_ioctl_event_dev(struct file *filp, unsigned int cmd, | ||
254 | unsigned long arg) | ||
255 | { | ||
256 | struct nvgpu_clk_dev *dev = filp->private_data; | ||
257 | struct gk20a *g = dev->session->g; | ||
258 | u8 buf[NVGPU_EVENT_IOCTL_MAX_ARG_SIZE]; | ||
259 | int err = 0; | ||
260 | |||
261 | nvgpu_log(g, gpu_dbg_fn, "nr=%d", _IOC_NR(cmd)); | ||
262 | |||
263 | if ((_IOC_TYPE(cmd) != NVGPU_EVENT_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0) | ||
264 | || (_IOC_NR(cmd) > NVGPU_EVENT_IOCTL_LAST)) | ||
265 | return -EINVAL; | ||
266 | |||
267 | BUG_ON(_IOC_SIZE(cmd) > NVGPU_EVENT_IOCTL_MAX_ARG_SIZE); | ||
268 | |||
269 | memset(buf, 0, sizeof(buf)); | ||
270 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | ||
271 | if (copy_from_user(buf, (void __user *) arg, _IOC_SIZE(cmd))) | ||
272 | return -EFAULT; | ||
273 | } | ||
274 | |||
275 | switch (cmd) { | ||
276 | case NVGPU_EVENT_IOCTL_SET_FILTER: | ||
277 | err = nvgpu_clk_arb_set_event_filter(dev, | ||
278 | (struct nvgpu_gpu_set_event_filter_args *)buf); | ||
279 | break; | ||
280 | default: | ||
281 | nvgpu_warn(g, "unrecognized event ioctl cmd: 0x%x", cmd); | ||
282 | err = -ENOTTY; | ||
283 | } | ||
284 | |||
285 | if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) | ||
286 | err = copy_to_user((void __user *) arg, buf, _IOC_SIZE(cmd)); | ||
287 | |||
288 | return err; | ||
289 | } | ||
290 | |||
/* fops for completion ("request") fds: release + poll only, no read. */
static const struct file_operations completion_dev_ops = {
	.owner = THIS_MODULE,
	.release = nvgpu_clk_arb_release_completion_dev,
	.poll = nvgpu_clk_arb_poll_dev,
};
296 | |||
/*
 * fops for event fds: adds read and ioctl on top of release/poll.  The
 * ioctl argument layout is compat-safe, so the same handler serves both
 * native and compat paths.
 */
static const struct file_operations event_dev_ops = {
	.owner = THIS_MODULE,
	.release = nvgpu_clk_arb_release_event_dev,
	.poll = nvgpu_clk_arb_poll_dev,
	.read = nvgpu_clk_arb_read_event_dev,
#ifdef CONFIG_COMPAT
	.compat_ioctl = nvgpu_clk_arb_ioctl_event_dev,
#endif
	.unlocked_ioctl = nvgpu_clk_arb_ioctl_event_dev,
};
307 | |||
308 | static int nvgpu_clk_arb_install_fd(struct gk20a *g, | ||
309 | struct nvgpu_clk_session *session, | ||
310 | const struct file_operations *fops, | ||
311 | struct nvgpu_clk_dev **_dev) | ||
312 | { | ||
313 | struct file *file; | ||
314 | int fd; | ||
315 | int err; | ||
316 | int status; | ||
317 | char name[64]; | ||
318 | struct nvgpu_clk_dev *dev; | ||
319 | |||
320 | clk_arb_dbg(g, " "); | ||
321 | |||
322 | dev = nvgpu_kzalloc(g, sizeof(*dev)); | ||
323 | if (!dev) | ||
324 | return -ENOMEM; | ||
325 | |||
326 | status = nvgpu_clk_notification_queue_alloc(g, &dev->queue, | ||
327 | DEFAULT_EVENT_NUMBER); | ||
328 | if (status < 0) { | ||
329 | err = status; | ||
330 | goto fail; | ||
331 | } | ||
332 | |||
333 | fd = get_unused_fd_flags(O_RDWR); | ||
334 | if (fd < 0) { | ||
335 | err = fd; | ||
336 | goto fail; | ||
337 | } | ||
338 | |||
339 | snprintf(name, sizeof(name), "%s-clk-fd%d", g->name, fd); | ||
340 | file = anon_inode_getfile(name, fops, dev, O_RDWR); | ||
341 | if (IS_ERR(file)) { | ||
342 | err = PTR_ERR(file); | ||
343 | goto fail_fd; | ||
344 | } | ||
345 | |||
346 | fd_install(fd, file); | ||
347 | |||
348 | nvgpu_cond_init(&dev->readout_wq); | ||
349 | |||
350 | nvgpu_atomic_set(&dev->poll_mask, 0); | ||
351 | |||
352 | dev->session = session; | ||
353 | nvgpu_ref_init(&dev->refcount); | ||
354 | |||
355 | nvgpu_ref_get(&session->refcount); | ||
356 | |||
357 | *_dev = dev; | ||
358 | |||
359 | return fd; | ||
360 | |||
361 | fail_fd: | ||
362 | put_unused_fd(fd); | ||
363 | fail: | ||
364 | nvgpu_kfree(g, dev); | ||
365 | |||
366 | return err; | ||
367 | } | ||
368 | |||
369 | int nvgpu_clk_arb_install_event_fd(struct gk20a *g, | ||
370 | struct nvgpu_clk_session *session, int *event_fd, u32 alarm_mask) | ||
371 | { | ||
372 | struct nvgpu_clk_arb *arb = g->clk_arb; | ||
373 | struct nvgpu_clk_dev *dev; | ||
374 | int fd; | ||
375 | |||
376 | clk_arb_dbg(g, " "); | ||
377 | |||
378 | fd = nvgpu_clk_arb_install_fd(g, session, &event_dev_ops, &dev); | ||
379 | if (fd < 0) | ||
380 | return fd; | ||
381 | |||
382 | /* TODO: alarm mask needs to be set to default value to prevent | ||
383 | * failures of legacy tests. This will be removed when sanity is | ||
384 | * updated | ||
385 | */ | ||
386 | if (alarm_mask) | ||
387 | nvgpu_atomic_set(&dev->enabled_mask, alarm_mask); | ||
388 | else | ||
389 | nvgpu_atomic_set(&dev->enabled_mask, EVENT(VF_UPDATE)); | ||
390 | |||
391 | dev->arb_queue_head = nvgpu_atomic_read(&arb->notification_queue.head); | ||
392 | |||
393 | nvgpu_spinlock_acquire(&arb->users_lock); | ||
394 | nvgpu_list_add_tail(&dev->link, &arb->users); | ||
395 | nvgpu_spinlock_release(&arb->users_lock); | ||
396 | |||
397 | *event_fd = fd; | ||
398 | |||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | int nvgpu_clk_arb_install_request_fd(struct gk20a *g, | ||
403 | struct nvgpu_clk_session *session, int *request_fd) | ||
404 | { | ||
405 | struct nvgpu_clk_dev *dev; | ||
406 | int fd; | ||
407 | |||
408 | clk_arb_dbg(g, " "); | ||
409 | |||
410 | fd = nvgpu_clk_arb_install_fd(g, session, &completion_dev_ops, &dev); | ||
411 | if (fd < 0) | ||
412 | return fd; | ||
413 | |||
414 | *request_fd = fd; | ||
415 | |||
416 | return 0; | ||
417 | } | ||
418 | |||
/*
 * Commit the frequency targets staged on @request_fd: validate the fd
 * belongs to this session, add it to the session's target list, and kick
 * the arbiter worker to apply the request.
 *
 * Takes an extra reference on the dev (dropped later — see the comment in
 * nvgpu_clk_arb_release_completion_dev about the no-events case).
 */
int nvgpu_clk_arb_commit_request_fd(struct gk20a *g,
	struct nvgpu_clk_session *session, int request_fd)
{
	struct nvgpu_clk_arb *arb = g->clk_arb;
	struct nvgpu_clk_dev *dev;
	struct fd fd;
	int err = 0;

	clk_arb_dbg(g, " ");

	fd = fdget(request_fd);
	if (!fd.file)
		return -EINVAL;

	/* Reject fds that are not our completion devices. */
	if (fd.file->f_op != &completion_dev_ops) {
		err = -EINVAL;
		goto fdput_fd;
	}

	dev = (struct nvgpu_clk_dev *) fd.file->private_data;

	if (!dev || dev->session != session) {
		err = -EINVAL;
		goto fdput_fd;
	}

	clk_arb_dbg(g, "requested target = %u\n",
			(u32)dev->gpc2clk_target_mhz);

	nvgpu_atomic_inc(&g->clk_arb_global_nr);
	nvgpu_ref_get(&dev->refcount);
	nvgpu_spinlock_acquire(&session->session_lock);
	nvgpu_list_add(&dev->node, &session->targets);
	nvgpu_spinlock_release(&session->session_lock);
	nvgpu_clk_arb_worker_enqueue(g, &arb->update_arb_work_item);

fdput_fd:
	fdput(fd);
	return err;
}
459 | |||
/*
 * Stage a target frequency on @request_fd for one clock domain.  The
 * value takes effect only when the fd is later committed.
 *
 * Returns -EINVAL for an invalid fd, a foreign fd, or an unknown domain.
 *
 * NOTE(review): GPCCLK targets are stored doubled (GPC2CLK runs at 2x);
 * the 2ULL multiply is then truncated to the width of
 * dev->gpc2clk_target_mhz (declared elsewhere) — presumably wide enough
 * for all valid targets; confirm against the struct definition.
 */
int nvgpu_clk_arb_set_session_target_mhz(struct nvgpu_clk_session *session,
		int request_fd, u32 api_domain, u16 target_mhz)
{
	struct nvgpu_clk_dev *dev;
	struct fd fd;
	int err = 0;

	clk_arb_dbg(session->g,
			"domain=0x%08x target_mhz=%u", api_domain, target_mhz);

	fd = fdget(request_fd);
	if (!fd.file)
		return -EINVAL;

	/* Only our own completion fds may carry targets. */
	if (fd.file->f_op != &completion_dev_ops) {
		err = -EINVAL;
		goto fdput_fd;
	}

	dev = fd.file->private_data;
	if (!dev || dev->session != session) {
		err = -EINVAL;
		goto fdput_fd;
	}

	switch (api_domain) {
	case NVGPU_CLK_DOMAIN_MCLK:
		dev->mclk_target_mhz = target_mhz;
		break;

	case NVGPU_CLK_DOMAIN_GPCCLK:
		dev->gpc2clk_target_mhz = target_mhz * 2ULL;
		break;

	default:
		err = -EINVAL;
	}

fdput_fd:
	fdput(fd);
	return err;
}
502 | |||
503 | u32 nvgpu_clk_arb_get_arbiter_clk_domains(struct gk20a *g) | ||
504 | { | ||
505 | u32 clk_domains = g->ops.clk_arb.get_arbiter_clk_domains(g); | ||
506 | u32 api_domains = 0; | ||
507 | |||
508 | if (clk_domains & CTRL_CLK_DOMAIN_GPC2CLK) | ||
509 | api_domains |= BIT(NVGPU_GPU_CLK_DOMAIN_GPCCLK); | ||
510 | |||
511 | if (clk_domains & CTRL_CLK_DOMAIN_MCLK) | ||
512 | api_domains |= BIT(NVGPU_GPU_CLK_DOMAIN_MCLK); | ||
513 | |||
514 | return api_domains; | ||
515 | } | ||
516 | |||
517 | #ifdef CONFIG_DEBUG_FS | ||
518 | static int nvgpu_clk_arb_stats_show(struct seq_file *s, void *unused) | ||
519 | { | ||
520 | struct gk20a *g = s->private; | ||
521 | struct nvgpu_clk_arb *arb = g->clk_arb; | ||
522 | struct nvgpu_clk_arb_debug *debug; | ||
523 | |||
524 | u64 num; | ||
525 | s64 tmp, avg, std, max, min; | ||
526 | |||
527 | debug = NV_ACCESS_ONCE(arb->debug); | ||
528 | /* Make copy of structure and ensure no reordering */ | ||
529 | nvgpu_smp_rmb(); | ||
530 | if (!debug) | ||
531 | return -EINVAL; | ||
532 | |||
533 | std = debug->switch_std; | ||
534 | avg = debug->switch_avg; | ||
535 | max = debug->switch_max; | ||
536 | min = debug->switch_min; | ||
537 | num = debug->switch_num; | ||
538 | |||
539 | tmp = std; | ||
540 | do_div(tmp, num); | ||
541 | seq_printf(s, "Number of transitions: %lld\n", | ||
542 | num); | ||
543 | seq_printf(s, "max / min : %lld / %lld usec\n", | ||
544 | max, min); | ||
545 | seq_printf(s, "avg / std : %lld / %ld usec\n", | ||
546 | avg, int_sqrt(tmp)); | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
/* Bind the stats show callback to the gk20a stashed in i_private. */
static int nvgpu_clk_arb_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nvgpu_clk_arb_stats_show, inode->i_private);
}
555 | |||
/* Standard single_open()-based seq_file fops for the debugfs stats node. */
static const struct file_operations nvgpu_clk_arb_stats_fops = {
	.open		= nvgpu_clk_arb_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
562 | |||
563 | |||
564 | int nvgpu_clk_arb_debugfs_init(struct gk20a *g) | ||
565 | { | ||
566 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
567 | struct dentry *gpu_root = l->debugfs; | ||
568 | struct dentry *d; | ||
569 | |||
570 | nvgpu_log(g, gpu_dbg_info, "g=%p", g); | ||
571 | |||
572 | d = debugfs_create_file( | ||
573 | "arb_stats", | ||
574 | S_IRUGO, | ||
575 | gpu_root, | ||
576 | g, | ||
577 | &nvgpu_clk_arb_stats_fops); | ||
578 | if (!d) | ||
579 | return -ENOMEM; | ||
580 | |||
581 | return 0; | ||
582 | } | ||
583 | #endif | ||