author    Terje Bergstrom <tbergstrom@nvidia.com>  2018-04-18 15:59:00 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2018-06-15 20:47:31 -0400
commit    2a2c16af5f9f1ccfc93a13e820d5381e5c881e92 (patch)
tree      2e5d7b042270a649978e5bb540857012c85fb5b5 /drivers/gpu/nvgpu/os/linux/ioctl_clk_arb.c
parent    98d996f4ffb0137d119b5849cae46d7b7e5693e1 (diff)
gpu: nvgpu: Move Linux files away from common
Move all Linux source code files to drivers/gpu/nvgpu/os/linux from
drivers/gpu/nvgpu/common/linux. This changes the meaning of common to
be OS independent.

JIRA NVGPU-598
JIRA NVGPU-601

Change-Id: Ib7f2a43d3688bb0d0b7dcc48469a6783fd988ce9
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1747714
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/ioctl_clk_arb.c')
-rw-r--r--  drivers/gpu/nvgpu/os/linux/ioctl_clk_arb.c  562
1 file changed, 562 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_clk_arb.c b/drivers/gpu/nvgpu/os/linux/ioctl_clk_arb.c
new file mode 100644
index 00000000..501b5f93
--- /dev/null
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_clk_arb.c
@@ -0,0 +1,562 @@
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#include <uapi/linux/nvgpu.h>

#include <nvgpu/bitops.h>
#include <nvgpu/lock.h>
#include <nvgpu/kmem.h>
#include <nvgpu/atomic.h>
#include <nvgpu/bug.h>
#include <nvgpu/kref.h>
#include <nvgpu/log.h>
#include <nvgpu/barrier.h>
#include <nvgpu/cond.h>
#include <nvgpu/list.h>
#include <nvgpu/clk_arb.h>

#include "gk20a/gk20a.h"
#include "clk/clk.h"
#include "pstate/pstate.h"
#include "lpwr/lpwr.h"
#include "volt/volt.h"

#ifdef CONFIG_DEBUG_FS
#include "os_linux.h"
#endif

static int nvgpu_clk_arb_release_completion_dev(struct inode *inode,
		struct file *filp)
{
	struct nvgpu_clk_dev *dev = filp->private_data;
	struct nvgpu_clk_session *session = dev->session;

	clk_arb_dbg(session->g, " ");

	nvgpu_ref_put(&session->refcount, nvgpu_clk_arb_free_session);
	nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
	return 0;
}

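/*
 * The NVGPU_POLL* bits come from the nvgpu uapi and are not guaranteed
 * to share values with the kernel's POLL* constants, so they are
 * translated bit by bit rather than passed through.
 */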
static inline unsigned int nvgpu_convert_poll_mask(unsigned int nvgpu_poll_mask)
{
	unsigned int poll_mask = 0;

	if (nvgpu_poll_mask & NVGPU_POLLIN)
		poll_mask |= POLLIN;
	if (nvgpu_poll_mask & NVGPU_POLLPRI)
		poll_mask |= POLLPRI;
	if (nvgpu_poll_mask & NVGPU_POLLOUT)
		poll_mask |= POLLOUT;
	if (nvgpu_poll_mask & NVGPU_POLLRDNORM)
		poll_mask |= POLLRDNORM;
	if (nvgpu_poll_mask & NVGPU_POLLHUP)
		poll_mask |= POLLHUP;

	return poll_mask;
}

static unsigned int nvgpu_clk_arb_poll_dev(struct file *filp, poll_table *wait)
{
	struct nvgpu_clk_dev *dev = filp->private_data;

	clk_arb_dbg(dev->session->g, " ");

	poll_wait(filp, &dev->readout_wq.wq, wait);
	return nvgpu_convert_poll_mask(nvgpu_atomic_xchg(&dev->poll_mask, 0));
}

void nvgpu_clk_arb_event_post_event(struct nvgpu_clk_dev *dev)
{
	nvgpu_cond_broadcast_interruptible(&dev->readout_wq);
}

static int nvgpu_clk_arb_release_event_dev(struct inode *inode,
		struct file *filp)
{
	struct nvgpu_clk_dev *dev = filp->private_data;
	struct nvgpu_clk_session *session = dev->session;
	struct nvgpu_clk_arb *arb;

	arb = session->g->clk_arb;

	clk_arb_dbg(session->g, " ");

	if (arb) {
		nvgpu_spinlock_acquire(&arb->users_lock);
		nvgpu_list_del(&dev->link);
		nvgpu_spinlock_release(&arb->users_lock);
		nvgpu_clk_notification_queue_free(arb->g, &dev->queue);
	}

	nvgpu_ref_put(&session->refcount, nvgpu_clk_arb_free_session);
	nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);

	return 0;
}

static inline u32 nvgpu_convert_gpu_event(u32 nvgpu_event)
{
	u32 nvgpu_gpu_event;

	switch (nvgpu_event) {
	case NVGPU_EVENT_VF_UPDATE:
		nvgpu_gpu_event = NVGPU_GPU_EVENT_VF_UPDATE;
		break;
	case NVGPU_EVENT_ALARM_TARGET_VF_NOT_POSSIBLE:
		nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_TARGET_VF_NOT_POSSIBLE;
		break;
	case NVGPU_EVENT_ALARM_LOCAL_TARGET_VF_NOT_POSSIBLE:
		nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_LOCAL_TARGET_VF_NOT_POSSIBLE;
		break;
	case NVGPU_EVENT_ALARM_CLOCK_ARBITER_FAILED:
		nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_CLOCK_ARBITER_FAILED;
		break;
	case NVGPU_EVENT_ALARM_VF_TABLE_UPDATE_FAILED:
		nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_VF_TABLE_UPDATE_FAILED;
		break;
	case NVGPU_EVENT_ALARM_THERMAL_ABOVE_THRESHOLD:
		nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_THERMAL_ABOVE_THRESHOLD;
		break;
	case NVGPU_EVENT_ALARM_POWER_ABOVE_THRESHOLD:
		nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_POWER_ABOVE_THRESHOLD;
		break;
	case NVGPU_EVENT_ALARM_GPU_LOST:
		nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_GPU_LOST;
		break;
	default:
		/* Control shouldn't come here */
		nvgpu_gpu_event = NVGPU_GPU_EVENT_ALARM_GPU_LOST + 1;
		break;
	}
	return nvgpu_gpu_event;
}

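/*
 * The notification queue is a ring buffer indexed by free-running
 * head/tail counters. If the reader has fallen more than queue.size
 * entries behind, head is snapped forward so the oldest overwritten
 * events are silently dropped; _WRAPGTEQ is presumably the wrap-safe
 * "tail ahead of head" comparison.
 */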
static inline u32 __pending_event(struct nvgpu_clk_dev *dev,
		struct nvgpu_gpu_event_info *info)
{
	u32 tail, head;
	u32 events = 0;
	struct nvgpu_clk_notification *p_notif;

	tail = nvgpu_atomic_read(&dev->queue.tail);
	head = nvgpu_atomic_read(&dev->queue.head);

	head = (tail - head) < dev->queue.size ? head : tail - dev->queue.size;

	if (_WRAPGTEQ(tail, head) && info) {
		head++;
		p_notif = &dev->queue.notifications[head % dev->queue.size];
		events |= nvgpu_convert_gpu_event(p_notif->notification);
		info->event_id = ffs(events) - 1;
		info->timestamp = p_notif->timestamp;
		nvgpu_atomic_set(&dev->queue.head, head);
	}

	return events;
}

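/*
 * read() hands out one struct nvgpu_gpu_event_info per call: with
 * O_NONBLOCK it fails with -EAGAIN when no event is pending, otherwise
 * it sleeps interruptibly until an event arrives.
 */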
static ssize_t nvgpu_clk_arb_read_event_dev(struct file *filp, char __user *buf,
					size_t size, loff_t *off)
{
	struct nvgpu_clk_dev *dev = filp->private_data;
	struct nvgpu_gpu_event_info info;
	ssize_t err;

	clk_arb_dbg(dev->session->g,
			"filp=%p, buf=%p, size=%zu", filp, buf, size);

	if ((size - *off) < sizeof(info))
		return 0;

	memset(&info, 0, sizeof(info));
	/* Get the oldest event from the queue */
	while (!__pending_event(dev, &info)) {
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		err = NVGPU_COND_WAIT_INTERRUPTIBLE(&dev->readout_wq,
				__pending_event(dev, &info), 0);
		if (err)
			return err;
		if (info.timestamp)
			break;
	}

	if (copy_to_user(buf + *off, &info, sizeof(info)))
		return -EFAULT;

	return sizeof(info);
}

static int nvgpu_clk_arb_set_event_filter(struct nvgpu_clk_dev *dev,
		struct nvgpu_gpu_set_event_filter_args *args)
{
	struct gk20a *g = dev->session->g;
	u32 mask;

	nvgpu_log(g, gpu_dbg_fn, " ");

	if (args->flags)
		return -EINVAL;

	if (args->size != 1)
		return -EINVAL;

	if (copy_from_user(&mask, (void __user *) args->buffer,
			args->size * sizeof(u32)))
		return -EFAULT;

	/* update alarm mask */
	nvgpu_atomic_set(&dev->enabled_mask, mask);

	return 0;
}

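/*
 * Generic ioctl marshaling: copy the argument into a local buffer for
 * _IOC_WRITE directions, dispatch on the command, then copy results
 * back out for _IOC_READ directions.
 */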
static long nvgpu_clk_arb_ioctl_event_dev(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct nvgpu_clk_dev *dev = filp->private_data;
	struct gk20a *g = dev->session->g;
	u8 buf[NVGPU_EVENT_IOCTL_MAX_ARG_SIZE];
	int err = 0;

	nvgpu_log(g, gpu_dbg_fn, "nr=%d", _IOC_NR(cmd));

	if ((_IOC_TYPE(cmd) != NVGPU_EVENT_IOCTL_MAGIC) || (_IOC_NR(cmd) == 0)
		|| (_IOC_NR(cmd) > NVGPU_EVENT_IOCTL_LAST))
		return -EINVAL;

	BUG_ON(_IOC_SIZE(cmd) > NVGPU_EVENT_IOCTL_MAX_ARG_SIZE);

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *) arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	switch (cmd) {
	case NVGPU_EVENT_IOCTL_SET_FILTER:
		err = nvgpu_clk_arb_set_event_filter(dev,
				(struct nvgpu_gpu_set_event_filter_args *)buf);
		break;
	default:
		nvgpu_warn(g, "unrecognized event ioctl cmd: 0x%x", cmd);
		err = -ENOTTY;
	}

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
		/* copy_to_user returns a byte count; map failure to -EFAULT */
		if (copy_to_user((void __user *) arg, buf, _IOC_SIZE(cmd)))
			err = -EFAULT;
	}

	return err;
}

static const struct file_operations completion_dev_ops = {
	.owner = THIS_MODULE,
	.release = nvgpu_clk_arb_release_completion_dev,
	.poll = nvgpu_clk_arb_poll_dev,
};

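/*
 * The same handler serves unlocked_ioctl and compat_ioctl; this
 * relies on the event ioctl arguments using only fixed-size fields
 * with identical 32- and 64-bit layouts.
 */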
static const struct file_operations event_dev_ops = {
	.owner = THIS_MODULE,
	.release = nvgpu_clk_arb_release_event_dev,
	.poll = nvgpu_clk_arb_poll_dev,
	.read = nvgpu_clk_arb_read_event_dev,
#ifdef CONFIG_COMPAT
	.compat_ioctl = nvgpu_clk_arb_ioctl_event_dev,
#endif
	.unlocked_ioctl = nvgpu_clk_arb_ioctl_event_dev,
};

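/*
 * Back a new nvgpu_clk_dev with an anonymous inode and install it in
 * the caller's fd table. The fd pins both the dev and the session via
 * refcounts, which are dropped by the release handlers above.
 */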
static int nvgpu_clk_arb_install_fd(struct gk20a *g,
		struct nvgpu_clk_session *session,
		const struct file_operations *fops,
		struct nvgpu_clk_dev **_dev)
{
	struct file *file;
	int fd;
	int err;
	int status;
	char name[64];
	struct nvgpu_clk_dev *dev;

	clk_arb_dbg(g, " ");

	dev = nvgpu_kzalloc(g, sizeof(*dev));
	if (!dev)
		return -ENOMEM;

	status = nvgpu_clk_notification_queue_alloc(g, &dev->queue,
		DEFAULT_EVENT_NUMBER);
	if (status < 0) {
		err = status;
		goto fail;
	}

	fd = get_unused_fd_flags(O_RDWR);
	if (fd < 0) {
		err = fd;
		goto fail_queue;
	}

	snprintf(name, sizeof(name), "%s-clk-fd%d", g->name, fd);
	file = anon_inode_getfile(name, fops, dev, O_RDWR);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto fail_fd;
	}

	fd_install(fd, file);

	nvgpu_cond_init(&dev->readout_wq);

	nvgpu_atomic_set(&dev->poll_mask, 0);

	dev->session = session;
	nvgpu_ref_init(&dev->refcount);

	nvgpu_ref_get(&session->refcount);

	*_dev = dev;

	return fd;

fail_fd:
	put_unused_fd(fd);
fail_queue:
	/* unwind the notification queue allocated above */
	nvgpu_clk_notification_queue_free(g, &dev->queue);
fail:
	nvgpu_kfree(g, dev);

	return err;
}

int nvgpu_clk_arb_install_event_fd(struct gk20a *g,
	struct nvgpu_clk_session *session, int *event_fd, u32 alarm_mask)
{
	struct nvgpu_clk_arb *arb = g->clk_arb;
	struct nvgpu_clk_dev *dev;
	int fd;

	clk_arb_dbg(g, " ");

	fd = nvgpu_clk_arb_install_fd(g, session, &event_dev_ops, &dev);
	if (fd < 0)
		return fd;

	/* TODO: alarm mask needs to be set to default value to prevent
	 * failures of legacy tests. This will be removed when sanity is
	 * updated
	 */
	if (alarm_mask)
		nvgpu_atomic_set(&dev->enabled_mask, alarm_mask);
	else
		nvgpu_atomic_set(&dev->enabled_mask, EVENT(VF_UPDATE));

	dev->arb_queue_head = nvgpu_atomic_read(&arb->notification_queue.head);

	nvgpu_spinlock_acquire(&arb->users_lock);
	nvgpu_list_add_tail(&dev->link, &arb->users);
	nvgpu_spinlock_release(&arb->users_lock);

	*event_fd = fd;

	return 0;
}

int nvgpu_clk_arb_install_request_fd(struct gk20a *g,
	struct nvgpu_clk_session *session, int *request_fd)
{
	struct nvgpu_clk_dev *dev;
	int fd;

	clk_arb_dbg(g, " ");

	fd = nvgpu_clk_arb_install_fd(g, session, &completion_dev_ops, &dev);
	if (fd < 0)
		return fd;

	*request_fd = fd;

	return 0;
}

int nvgpu_clk_arb_commit_request_fd(struct gk20a *g,
	struct nvgpu_clk_session *session, int request_fd)
{
	struct nvgpu_clk_arb *arb = g->clk_arb;
	struct nvgpu_clk_dev *dev;
	struct fd fd;
	int err = 0;

	clk_arb_dbg(g, " ");

	fd = fdget(request_fd);
	if (!fd.file)
		return -EINVAL;

	if (fd.file->f_op != &completion_dev_ops) {
		err = -EINVAL;
		goto fdput_fd;
	}

	dev = (struct nvgpu_clk_dev *) fd.file->private_data;

	if (!dev || dev->session != session) {
		err = -EINVAL;
		goto fdput_fd;
	}
	nvgpu_ref_get(&dev->refcount);
	nvgpu_spinlock_acquire(&session->session_lock);
	nvgpu_list_add(&dev->node, &session->targets);
	nvgpu_spinlock_release(&session->session_lock);
	nvgpu_clk_arb_worker_enqueue(g, &arb->update_arb_work_item);

fdput_fd:
	fdput(fd);
	return err;
}

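/*
 * Note: the uapi exposes GPCCLK while the arbiter internally tracks
 * GPC2CLK (twice the GPCCLK rate), hence the doubling below.
 */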
int nvgpu_clk_arb_set_session_target_mhz(struct nvgpu_clk_session *session,
		int request_fd, u32 api_domain, u16 target_mhz)
{
	struct nvgpu_clk_dev *dev;
	struct fd fd;
	int err = 0;

	clk_arb_dbg(session->g,
			"domain=0x%08x target_mhz=%u", api_domain, target_mhz);

	fd = fdget(request_fd);
	if (!fd.file)
		return -EINVAL;

	if (fd.file->f_op != &completion_dev_ops) {
		err = -EINVAL;
		goto fdput_fd;
	}

	dev = fd.file->private_data;
	if (!dev || dev->session != session) {
		err = -EINVAL;
		goto fdput_fd;
	}

	switch (api_domain) {
	case NVGPU_CLK_DOMAIN_MCLK:
		dev->mclk_target_mhz = target_mhz;
		break;

	case NVGPU_CLK_DOMAIN_GPCCLK:
		dev->gpc2clk_target_mhz = target_mhz * 2ULL;
		break;

	default:
		err = -EINVAL;
	}

fdput_fd:
	fdput(fd);
	return err;
}

u32 nvgpu_clk_arb_get_arbiter_clk_domains(struct gk20a *g)
{
	u32 clk_domains = g->ops.clk_arb.get_arbiter_clk_domains(g);
	u32 api_domains = 0;

	if (clk_domains & CTRL_CLK_DOMAIN_GPC2CLK)
		api_domains |= BIT(NVGPU_GPU_CLK_DOMAIN_GPCCLK);

	if (clk_domains & CTRL_CLK_DOMAIN_MCLK)
		api_domains |= BIT(NVGPU_GPU_CLK_DOMAIN_MCLK);

	return api_domains;
}

#ifdef CONFIG_DEBUG_FS
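/*
 * switch_std appears to hold an accumulated sum of squared deviations,
 * so the standard deviation reported below is int_sqrt(std / num).
 */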
static int nvgpu_clk_arb_stats_show(struct seq_file *s, void *unused)
{
	struct gk20a *g = s->private;
	struct nvgpu_clk_arb *arb = g->clk_arb;
	struct nvgpu_clk_arb_debug *debug;

	u64 num;
	s64 tmp, avg, std, max, min;

	debug = NV_ACCESS_ONCE(arb->debug);
	/* Make copy of structure and ensure no reordering */
	nvgpu_smp_rmb();
	if (!debug)
		return -EINVAL;

	std = debug->switch_std;
	avg = debug->switch_avg;
	max = debug->switch_max;
	min = debug->switch_min;
	num = debug->switch_num;

	tmp = std;
	do_div(tmp, num);
	seq_printf(s, "Number of transitions: %llu\n",
		num);
	seq_printf(s, "max / min : %lld / %lld usec\n",
		max, min);
	seq_printf(s, "avg / std : %lld / %lu usec\n",
		avg, int_sqrt(tmp));

	return 0;
}

static int nvgpu_clk_arb_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nvgpu_clk_arb_stats_show, inode->i_private);
}

static const struct file_operations nvgpu_clk_arb_stats_fops = {
	.open = nvgpu_clk_arb_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

int nvgpu_clk_arb_debugfs_init(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct dentry *gpu_root = l->debugfs;
	struct dentry *d;

	nvgpu_log(g, gpu_dbg_info, "g=%p", g);

	d = debugfs_create_file(
		"arb_stats",
		S_IRUGO,
		gpu_root,
		g,
		&nvgpu_clk_arb_stats_fops);
	if (!d)
		return -ENOMEM;

	return 0;
}
#endif
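
For context, a minimal userspace sketch of how a client might consume the
event fd that nvgpu_clk_arb_install_event_fd() installs. It assumes the fd
has already been obtained through the nvgpu ctrl-device ioctl that wraps
that call (not shown here); struct nvgpu_gpu_event_info and its
event_id/timestamp fields come from the installed uapi header, matching
what nvgpu_clk_arb_read_event_dev() copies out above.

#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/nvgpu.h>	/* installed nvgpu uapi header */

/* Block until one clock-arbiter event is pending on event_fd, then
 * read and print it. The driver returns exactly one
 * struct nvgpu_gpu_event_info per successful read().
 */
static int wait_one_clk_event(int event_fd)
{
	struct nvgpu_gpu_event_info info;
	struct pollfd pfd = { .fd = event_fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) < 0)
		return -1;

	if (read(event_fd, &info, sizeof(info)) != (ssize_t)sizeof(info))
		return -1;

	printf("event_id=%u timestamp=%llu\n",
	       (unsigned)info.event_id,
	       (unsigned long long)info.timestamp);
	return 0;
}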