-rw-r--r--  drivers/Kconfig                1
-rw-r--r--  drivers/Makefile               1
-rw-r--r--  drivers/nvpps/Kconfig          6
-rw-r--r--  drivers/nvpps/Makefile         6
-rw-r--r--  drivers/nvpps/nvpps_main.c   773
-rw-r--r--  include/linux/nvpps_ioctl.h   69
6 files changed, 856 insertions, 0 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index e326c6cec..f9aeaba02 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -5,6 +5,7 @@ source "drivers/misc/tegra186-ahc/Kconfig"
 source "drivers/nvpmodel/Kconfig"
 source "drivers/nvlink/Kconfig"
 source "drivers/iommu/Kconfig"
+source "drivers/nvpps/Kconfig"
 endif
 
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 6ea96ec16..69927e062 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -13,3 +13,4 @@ subdir-ccflags-y += -Werror
 
 obj-$(CONFIG_NVPMODEL_EMC) += nvpmodel/
 obj-$(CONFIG_TEGRA_NVLINK) += nvlink/
+obj-$(CONFIG_NVPPS) += nvpps/
diff --git a/drivers/nvpps/Kconfig b/drivers/nvpps/Kconfig
new file mode 100644
index 000000000..00657c3fe
--- /dev/null
+++ b/drivers/nvpps/Kconfig
@@ -0,0 +1,6 @@
+config NVPPS
+	tristate "Tegra NVPPS Support"
+	depends on ARCH_TEGRA_19x_SOC
+	help
+	  This config enables the Tegra NVPPS driver. NVPPS is the PPS
+	  source driver for the Tegra SOC.
diff --git a/drivers/nvpps/Makefile b/drivers/nvpps/Makefile
new file mode 100644
index 000000000..1a585ab12
--- /dev/null
+++ b/drivers/nvpps/Makefile
@@ -0,0 +1,6 @@
+#
+# NVPPS driver.
+#
+ccflags-y += -Werror -O2
+obj-$(CONFIG_NVPPS) := nvpps.o
+nvpps-y = nvpps_main.o
diff --git a/drivers/nvpps/nvpps_main.c b/drivers/nvpps/nvpps_main.c
new file mode 100644
index 000000000..1850f4ed5
--- /dev/null
+++ b/drivers/nvpps/nvpps_main.c
@@ -0,0 +1,773 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <asm/arch_timer.h>
+#include <linux/platform/tegra/ptp-notifier.h>
+#include <linux/nvpps_ioctl.h>
+
+
+//#define NVPPS_MAP_EQOS_REGS
+//#define NVPPS_ARM_COUNTER_PROFILING
+//#define NVPPS_EQOS_REG_PROFILING
+
+
+
+#define MAX_NVPPS_SOURCES 1
+#define NVPPS_DEF_MODE NVPPS_MODE_GPIO
+
+/* statics */
+static struct class *s_nvpps_class;
+static dev_t s_nvpps_devt;
+static DEFINE_MUTEX(s_nvpps_lock);
+static DEFINE_IDR(s_nvpps_idr);
+
+
+
+/* platform device instance data */
+struct nvpps_device_data {
+	struct platform_device *pdev;
+	struct cdev cdev;
+	struct device *dev;
+	unsigned int id;
+	unsigned int gpio_pin;
+	int irq;
+	bool irq_registered;
+
+	bool pps_event_id_valid;
+	unsigned int pps_event_id;
+	u64 tsc;
+	u64 phc;
+	u64 irq_latency;
+	u64 tsc_res_ns;
+	raw_spinlock_t lock;
+
+	u32 evt_mode;
+	u32 tsc_mode;
+
+	struct timer_list timer;
+	volatile bool timer_inited;
+
+	wait_queue_head_t pps_event_queue;
+	struct fasync_struct *pps_event_async_queue;
+
+#ifdef NVPPS_MAP_EQOS_REGS
+	u64 eqos_base_addr;
+#endif /*NVPPS_MAP_EQOS_REGS*/
+};
+
+
+/* file instance data */
+struct nvpps_file_data {
+	struct nvpps_device_data *pdev_data;
+	unsigned int pps_event_id_rd;
+};
+
+
+
+#ifdef NVPPS_MAP_EQOS_REGS
+
+#define EQOS_BASE_ADDR 0x2490000
+#define BASE_ADDRESS pdev_data->eqos_base_addr
+#define MAC_STNSR_TSSS_LPOS 0
+#define MAC_STNSR_TSSS_HPOS 30
+
+#define GET_VALUE(data, lbit, hbit) ((data >> lbit) & (~(~0<<(hbit-lbit+1))))
+#define MAC_STNSR_OFFSET ((volatile u32 *)(BASE_ADDRESS + 0xb0c))
+#define MAC_STNSR_RD(data) (data) = ioread32((void *)MAC_STNSR_OFFSET);
+#define MAC_STSR_OFFSET ((volatile u32 *)(BASE_ADDRESS + 0xb08))
+#define MAC_STSR_RD(data) (data) = ioread32((void *)MAC_STSR_OFFSET);
+
+#endif /*NVPPS_MAP_EQOS_REGS*/
+
+
+
+static inline u64 __arch_counter_get_cntvct(void)
+{
+	u64 cval;
+
+	asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
+
+	return cval;
+}
+
+
+#ifdef NVPPS_MAP_EQOS_REGS
+static inline u64 get_systime(struct nvpps_device_data *pdev_data, u64 *tsc)
+{
+	u64 ns1, ns2, ns;
+	u32 varmac_stnsr1, varmac_stnsr2;
+	u32 varmac_stsr;
+
+	/* read the PHC */
+	MAC_STNSR_RD(varmac_stnsr1);
+	MAC_STSR_RD(varmac_stsr);
+	/* read the TSC */
+	*tsc = __arch_counter_get_cntvct();
+
+	/* read the nsec part of the PHC one more time */
+	MAC_STNSR_RD(varmac_stnsr2);
+
+	ns1 = GET_VALUE(varmac_stnsr1, MAC_STNSR_TSSS_LPOS, MAC_STNSR_TSSS_HPOS);
+	ns2 = GET_VALUE(varmac_stnsr2, MAC_STNSR_TSSS_LPOS, MAC_STNSR_TSSS_HPOS);
+
+	/* if ns1 is greater than ns2, it means nsec counter rollover
+	 * happened. In that case read the updated sec counter again
+	 */
+	if (ns1 > ns2) {
+		/* let's read the TSC again */
+		*tsc = __arch_counter_get_cntvct();
+		/* read the second portion of the PHC */
+		MAC_STSR_RD(varmac_stsr);
+		/* convert sec/high time value to nanosecond */
+		ns = ns2 + (varmac_stsr * 1000000000ull);
+	} else {
+		/* convert sec/high time value to nanosecond */
+		ns = ns1 + (varmac_stsr * 1000000000ull);
+	}
+
+	return ns;
+}
+#endif /*NVPPS_MAP_EQOS_REGS*/
+
+
+
+/*
+ * Report the PPS event
+ */
+__attribute__((optimize("align-functions=64")))
+static void nvpps_get_ts(struct nvpps_device_data *pdev_data, bool in_isr)
+{
+	u64 tsc;
+	u64 irq_tsc = 0;
+	u64 phc = 0;
+	u64 irq_latency = 0;
+	unsigned long flags;
+
+	/* get the gpio interrupt timestamp */
+	if (in_isr) {
+		irq_tsc = __arch_counter_get_cntvct();
+	} else {
+		irq_tsc = __arch_counter_get_cntvct();
+	}
+
+#ifdef NVPPS_MAP_EQOS_REGS
+	/* get the PTP timestamp */
+	if (pdev_data->eqos_base_addr) {
+		/* get both the phc and tsc */
+		phc = get_systime(pdev_data, &tsc);
+	} else {
+#endif /*NVPPS_MAP_EQOS_REGS*/
+	/* get the phc from eqos driver */
+	get_ptp_hwtime(&phc);
+	/* get the current TSC time */
+	tsc = __arch_counter_get_cntvct();
+#ifdef NVPPS_MAP_EQOS_REGS
+	}
+#endif /*NVPPS_MAP_EQOS_REGS*/
+
+#ifdef NVPPS_ARM_COUNTER_PROFILING
+	{
+		u64 tmp;
+		int i;
+		irq_tsc = __arch_counter_get_cntvct();
+		for (i = 0; i < 98; i++) {
+			tmp = __arch_counter_get_cntvct();
+		}
+		tsc = __arch_counter_get_cntvct();
+	}
+#endif /*NVPPS_ARM_COUNTER_PROFILING*/
+
+#ifdef NVPPS_EQOS_REG_PROFILING
+	{
+		u32 varmac_stnsr;
+		u32 varmac_stsr;
+		int i;
+		irq_tsc = __arch_counter_get_cntvct();
+		for (i = 0; i < 100; i++) {
+			MAC_STNSR_RD(varmac_stnsr);
+			MAC_STSR_RD(varmac_stsr)
+		}
+		tsc = __arch_counter_get_cntvct();
+	}
+#endif /*NVPPS_EQOS_REG_PROFILING*/
+
+	/* get the interrupt latency */
+	if (irq_tsc) {
+		irq_latency = (tsc - irq_tsc) * pdev_data->tsc_res_ns;
+	}
+
+	raw_spin_lock_irqsave(&pdev_data->lock, flags);
+	pdev_data->pps_event_id_valid = true;
+	pdev_data->pps_event_id++;
+	pdev_data->tsc = irq_tsc ? irq_tsc : tsc;
+	/* adjust the ptp time for the interrupt latency */
+#if defined (NVPPS_ARM_COUNTER_PROFILING) || defined (NVPPS_EQOS_REG_PROFILING)
+	pdev_data->phc = phc;
+#else /* !NVPPS_ARM_COUNTER_PROFILING && !NVPPS_EQOS_REG_PROFILING */
+	pdev_data->phc = phc ? phc - irq_latency : phc;
+#endif /* NVPPS_ARM_COUNTER_PROFILING || NVPPS_EQOS_REG_PROFILING */
+	pdev_data->irq_latency = irq_latency;
+	raw_spin_unlock_irqrestore(&pdev_data->lock, flags);
+
+	/*dev_info(pdev_data->dev, "evt(%d) tsc(%llu) phc(%llu)\n", pdev_data->pps_event_id, pdev_data->tsc, pdev_data->phc);*/
+
+	/* event notification */
+	wake_up_interruptible(&pdev_data->pps_event_queue);
+	kill_fasync(&pdev_data->pps_event_async_queue, SIGIO, POLL_IN);
+}
+
+
+__attribute__((optimize("align-functions=64")))
+static irqreturn_t nvpps_gpio_isr(int irq, void *data)
+{
+	struct nvpps_device_data *pdev_data = (struct nvpps_device_data *)data;
+
+	/* get timestamps for this event */
+	nvpps_get_ts(pdev_data, true);
+
+	return IRQ_HANDLED;
+}
+
+
+__attribute__((optimize("align-functions=64")))
+static void nvpps_timer_callback(unsigned long data)
+{
+	struct nvpps_device_data *pdev_data = (struct nvpps_device_data *)data;
+
+	/* get timestamps for this event */
+	nvpps_get_ts(pdev_data, false);
+
+	/* set the next expire time */
+	if (pdev_data->timer_inited) {
+		mod_timer(&pdev_data->timer, jiffies + msecs_to_jiffies(1000));
+	}
+}
+
+
+
+static int set_mode(struct nvpps_device_data *pdev_data, u32 mode)
+{
+	int err = 0;
+	if (mode != pdev_data->evt_mode) {
+		switch (mode) {
+		case NVPPS_MODE_GPIO:
+			if (pdev_data->timer_inited) {
+				pdev_data->timer_inited = false;
+				del_timer_sync(&pdev_data->timer);
+			}
+			if (!pdev_data->irq_registered) {
+				/* register IRQ handler */
+				err = request_irq(pdev_data->irq, nvpps_gpio_isr,
+					IRQF_TRIGGER_RISING | IRQF_NO_THREAD, "nvpps_isr", pdev_data);
+				if (err) {
+					dev_err(pdev_data->dev, "failed to acquire IRQ %d\n", pdev_data->irq);
+				} else {
+					pdev_data->irq_registered = true;
+					dev_info(pdev_data->dev, "Registered IRQ %d for nvpps\n", pdev_data->irq);
+				}
+			}
+			break;
+
+		case NVPPS_MODE_TIMER:
+			if (pdev_data->irq_registered) {
+				/* unregister IRQ handler */
+				devm_free_irq(&pdev_data->pdev->dev, pdev_data->irq, pdev_data);
+				pdev_data->irq_registered = false;
+			}
+			if (!pdev_data->timer_inited) {
+				setup_timer(&pdev_data->timer, nvpps_timer_callback, (unsigned long)pdev_data);
+				pdev_data->timer_inited = true;
+				/* setup timer interval to 1000 msecs */
+				mod_timer(&pdev_data->timer, jiffies + msecs_to_jiffies(1000));
+			}
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	}
+	return err;
+}
+
+
+
+/* Character device stuff */
+static unsigned int nvpps_poll(struct file *file, poll_table *wait)
+{
+	struct nvpps_file_data *pfile_data = (struct nvpps_file_data *)file->private_data;
+	struct nvpps_device_data *pdev_data = pfile_data->pdev_data;
+
+	poll_wait(file, &pdev_data->pps_event_queue, wait);
+	if (pdev_data->pps_event_id_valid &&
+		(pfile_data->pps_event_id_rd != pdev_data->pps_event_id)) {
+		return POLLIN | POLLRDNORM;
+	} else {
+		return 0;
+	}
+}
+
+
+static int nvpps_fasync(int fd, struct file *file, int on)
+{
+	struct nvpps_file_data *pfile_data = (struct nvpps_file_data *)file->private_data;
+	struct nvpps_device_data *pdev_data = pfile_data->pdev_data;
+
+	return fasync_helper(fd, file, on, &pdev_data->pps_event_async_queue);
+}
+
+
+static long nvpps_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct nvpps_file_data *pfile_data = (struct nvpps_file_data *)file->private_data;
+	struct nvpps_device_data *pdev_data = pfile_data->pdev_data;
+	struct nvpps_params params;
+	void __user *uarg = (void __user *)arg;
+	int err;
+
+	switch (cmd) {
+	case NVPPS_GETVERSION: {
+		struct nvpps_version version;
+
+		dev_dbg(pdev_data->dev, "NVPPS_GETVERSION\n");
+
+		/* return the driver and API version */
+		version.version.major = NVPPS_VERSION_MAJOR;
+		version.version.minor = NVPPS_VERSION_MINOR;
+		version.api.major = NVPPS_API_MAJOR;
+		version.api.minor = NVPPS_API_MINOR;
+
+		err = copy_to_user(uarg, &version, sizeof(struct nvpps_version));
+		if (err) {
+			return -EFAULT;
+		}
+		break;
+	}
+
+	case NVPPS_GETPARAMS:
+		dev_dbg(pdev_data->dev, "NVPPS_GETPARAMS\n");
+
+		/* Get the current parameters */
+		params.evt_mode = pdev_data->evt_mode;
+		params.tsc_mode = pdev_data->tsc_mode;
+
+		err = copy_to_user(uarg, &params, sizeof(struct nvpps_params));
+		if (err) {
+			return -EFAULT;
+		}
+		break;
+
+	case NVPPS_SETPARAMS:
+		dev_dbg(pdev_data->dev, "NVPPS_SETPARAMS\n");
+
+		err = copy_from_user(&params, uarg, sizeof(struct nvpps_params));
+		if (err) {
+			return -EFAULT;
+		}
+		err = set_mode(pdev_data, params.evt_mode);
+		if (err) {
+			dev_dbg(pdev_data->dev, "switch_mode to %d failed err(%d)\n", params.evt_mode, err);
+			return err;
+		}
+		pdev_data->evt_mode = params.evt_mode;
+		pdev_data->tsc_mode = params.tsc_mode;
+		break;
+
+	case NVPPS_GETEVENT: {
+		struct nvpps_timeevent time_event;
+		unsigned long flags;
+
+		dev_dbg(pdev_data->dev, "NVPPS_GETEVENT\n");
+
+		/* Return the fetched timestamp */
+		raw_spin_lock_irqsave(&pdev_data->lock, flags);
+		pfile_data->pps_event_id_rd = pdev_data->pps_event_id;
+		time_event.evt_nb = pdev_data->pps_event_id;
+		time_event.tsc = pdev_data->tsc;
+		time_event.ptp = pdev_data->phc;
+		time_event.irq_latency = pdev_data->irq_latency;
+		raw_spin_unlock_irqrestore(&pdev_data->lock, flags);
+		if (NVPPS_TSC_NSEC == pdev_data->tsc_mode) {
+			time_event.tsc *= pdev_data->tsc_res_ns;
+		}
+		time_event.tsc_res_ns = pdev_data->tsc_res_ns;
+		time_event.evt_mode = pdev_data->evt_mode;
+		time_event.tsc_mode = pdev_data->tsc_mode;
+
+		err = copy_to_user(uarg, &time_event, sizeof(struct nvpps_timeevent));
+		if (err) {
+			return -EFAULT;
+		}
+
+		break;
+	}
+
+	default:
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+
+
+static int nvpps_open(struct inode *inode, struct file *file)
+{
+	struct nvpps_device_data *pdev_data = container_of(inode->i_cdev, struct nvpps_device_data, cdev);
+	struct nvpps_file_data *pfile_data;
+
+	pfile_data = kzalloc(sizeof(struct nvpps_file_data), GFP_KERNEL);
+	if (!pfile_data) {
+		dev_err(&pdev_data->pdev->dev, "nvpps_open kzalloc() failed\n");
+		return -ENOMEM;
+	}
+
+	pfile_data->pdev_data = pdev_data;
+	pfile_data->pps_event_id_rd = (unsigned int)-1;
+
+	file->private_data = pfile_data;
+	kobject_get(&pdev_data->dev->kobj);
+	return 0;
+}
+
+
+
+static int nvpps_close(struct inode *inode, struct file *file)
+{
+	struct nvpps_device_data *pdev_data = container_of(inode->i_cdev, struct nvpps_device_data, cdev);
+
+	if (file->private_data) {
+		kfree(file->private_data);
+	}
+	kobject_put(&pdev_data->dev->kobj);
+	return 0;
+}
+
+
+
+static const struct file_operations nvpps_fops = {
+	.owner = THIS_MODULE,
+	.poll = nvpps_poll,
+	.fasync = nvpps_fasync,
+	.unlocked_ioctl = nvpps_ioctl,
+	.open = nvpps_open,
+	.release = nvpps_close,
+};
+
+
+
+static void nvpps_dev_release(struct device *dev)
+{
+	struct nvpps_device_data *pdev_data = dev_get_drvdata(dev);
+
+	cdev_del(&pdev_data->cdev);
+
+	mutex_lock(&s_nvpps_lock);
+	idr_remove(&s_nvpps_idr, pdev_data->id);
+	mutex_unlock(&s_nvpps_lock);
+
+	kfree(dev);
+	kfree(pdev_data);
+}
+
+
+
+static int nvpps_probe(struct platform_device *pdev)
+{
+	struct nvpps_device_data *pdev_data;
+	struct device_node *np = pdev->dev.of_node;
+	dev_t devt;
+	int err;
+
+	dev_info(&pdev->dev, "%s\n", __FUNCTION__);
+
+	if (!np) {
+		dev_err(&pdev->dev, "no valid device node, probe failed\n");
+		return -EINVAL;
+	}
+
+	pdev_data = kzalloc(sizeof(struct nvpps_device_data), GFP_KERNEL);
+	if (!pdev_data) {
+		return -ENOMEM;
+	}
+
+	err = of_get_gpio(np, 0);
+	if (err < 0) {
+		dev_err(&pdev->dev, "unable to get GPIO from device tree\n");
+		return err;
+	} else {
+		pdev_data->gpio_pin = (unsigned int)err;
+		dev_info(&pdev->dev, "gpio_pin(%d)\n", pdev_data->gpio_pin);
+	}
+
+	/* GPIO setup */
+	if (gpio_is_valid(pdev_data->gpio_pin)) {
+		err = devm_gpio_request(&pdev->dev, pdev_data->gpio_pin, "gpio_pps");
+		if (err) {
+			dev_err(&pdev->dev, "failed to request GPIO %u\n",
+				pdev_data->gpio_pin);
+			return err;
+		}
+
+		err = gpio_direction_input(pdev_data->gpio_pin);
+		if (err) {
+			dev_err(&pdev->dev, "failed to set pin direction\n");
+			return -EINVAL;
+		}
+
+		/* IRQ setup */
+		err = gpio_to_irq(pdev_data->gpio_pin);
+		if (err < 0) {
+			dev_err(&pdev->dev, "failed to map GPIO to IRQ: %d\n", err);
+			return -EINVAL;
+		}
+		pdev_data->irq = err;
+		dev_info(&pdev->dev, "gpio_to_irq(%d)\n", pdev_data->irq);
+	}
+
+#ifdef NVPPS_MAP_EQOS_REGS
+	/* remap base address for eqos */
+	pdev_data->eqos_base_addr = (u64)devm_ioremap_nocache(&pdev->dev,
+			EQOS_BASE_ADDR, 4096);
+	dev_info(&pdev->dev, "map EQOS to (%p)\n", (void *)pdev_data->eqos_base_addr);
+#endif /*NVPPS_MAP_EQOS_REGS*/
+
+	init_waitqueue_head(&pdev_data->pps_event_queue);
+	raw_spin_lock_init(&pdev_data->lock);
+	pdev_data->pdev = pdev;
+	pdev_data->evt_mode = 0; /*NVPPS_MODE_GPIO*/
+	pdev_data->tsc_mode = NVPPS_TSC_NSEC;
+	#define _PICO_SECS (1000000000000ULL)
+	pdev_data->tsc_res_ns = (_PICO_SECS / (u64)arch_timer_get_cntfrq()) / 1000;
+	#undef _PICO_SECS
+	dev_info(&pdev->dev, "tsc_res_ns(%llu)\n", pdev_data->tsc_res_ns);
+
+	/* character device setup */
+#ifndef NVPPS_NO_DT
+	s_nvpps_class = class_create(THIS_MODULE, "nvpps");
+	if (IS_ERR(s_nvpps_class)) {
+		dev_err(&pdev->dev, "failed to allocate class\n");
+		return PTR_ERR(s_nvpps_class);
+	}
+
+	err = alloc_chrdev_region(&s_nvpps_devt, 0, MAX_NVPPS_SOURCES, "nvpps");
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to allocate char device region\n");
+		class_destroy(s_nvpps_class);
+		return err;
+	}
+#endif /* !NVPPS_NO_DT */
+
+	/* get an idr for the device */
+	mutex_lock(&s_nvpps_lock);
+	err = idr_alloc(&s_nvpps_idr, pdev_data, 0, MAX_NVPPS_SOURCES, GFP_KERNEL);
+	if (err < 0) {
+		if (err == -ENOSPC) {
+			dev_err(&pdev->dev, "nvpps: out of idr \n");
+			err = -EBUSY;
+		}
+		mutex_unlock(&s_nvpps_lock);
+		return err;
+	}
+	pdev_data->id = err;
+	mutex_unlock(&s_nvpps_lock);
+
+	/* associate the cdev with the file operations */
+	cdev_init(&pdev_data->cdev, &nvpps_fops);
+
+	/* build up the device number */
+	devt = MKDEV(MAJOR(s_nvpps_devt), pdev_data->id);
+	pdev_data->cdev.owner = THIS_MODULE;
+
+	/* create the device node */
+	pdev_data->dev = device_create(s_nvpps_class, NULL, devt, pdev_data, "nvpps%d", pdev_data->id);
+	if (IS_ERR(pdev_data->dev)) {
+		err = PTR_ERR(pdev_data->dev);
+		goto error_ret;
+	}
+
+	pdev_data->dev->release = nvpps_dev_release;
+
+	err = cdev_add(&pdev_data->cdev, devt, 1);
+	if (err) {
+		dev_err(&pdev->dev, "nvpps: failed to add char device %d:%d\n", MAJOR(s_nvpps_devt), pdev_data->id);
+		device_destroy(s_nvpps_class, pdev_data->dev->devt);
+		return err;
+	}
+
+	dev_info(&pdev->dev, "nvpps cdev(%d:%d)\n", MAJOR(s_nvpps_devt), pdev_data->id);
+	platform_set_drvdata(pdev, pdev_data);
+
+	/* set up the PPS event handler */
+	err = set_mode(pdev_data, NVPPS_DEF_MODE);
+	if (err) {
+		dev_err(&pdev->dev, "set_mode failed err = %d\n", err);
+		device_destroy(s_nvpps_class, pdev_data->dev->devt);
+		return err;
+	}
+	pdev_data->evt_mode = NVPPS_DEF_MODE;
+
+	return 0;
+
+error_ret:
+	cdev_del(&pdev_data->cdev);
+	mutex_lock(&s_nvpps_lock);
+	idr_remove(&s_nvpps_idr, pdev_data->id);
+	mutex_unlock(&s_nvpps_lock);
+	return err;
+}
+
+
+static int nvpps_remove(struct platform_device *pdev)
+{
+	struct nvpps_device_data *pdev_data = platform_get_drvdata(pdev);
+
+	printk("%s\n", __FUNCTION__);
+
+	if (pdev_data) {
+		if (pdev_data->irq_registered) {
+			/* unregister IRQ handler */
+			free_irq(pdev_data->irq, pdev_data);
+			pdev_data->irq_registered = false;
+			dev_info(&pdev->dev, "removed IRQ %d for nvpps\n", pdev_data->irq);
+		}
+		if (pdev_data->timer_inited) {
+			pdev_data->timer_inited = false;
+			del_timer_sync(&pdev_data->timer);
+		}
+#ifdef NVPPS_MAP_EQOS_REGS
+		if (pdev_data->eqos_base_addr) {
+			devm_iounmap(&pdev->dev, (void *)pdev_data->eqos_base_addr);
+			dev_info(&pdev->dev, "unmap EQOS reg space %p for nvpps\n", (void *)pdev_data->eqos_base_addr);
+		}
+#endif /*NVPPS_MAP_EQOS_REGS*/
+		device_destroy(s_nvpps_class, pdev_data->dev->devt);
+	}
+
+#ifndef NVPPS_NO_DT
+	class_unregister(s_nvpps_class);
+	class_destroy(s_nvpps_class);
+	unregister_chrdev_region(s_nvpps_devt, MAX_NVPPS_SOURCES);
+#endif /* !NVPPS_NO_DT */
+	return 0;
+}
+
+
+#ifdef CONFIG_PM
+static int nvpps_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	/*struct nvpps_device_data *pdev_data = platform_get_drvdata(pdev);*/
+
+	return 0;
+}
+
+static int nvpps_resume(struct platform_device *pdev)
+{
+	/*struct nvpps_device_data *pdev_data = platform_get_drvdata(pdev);*/
+
+	return 0;
+}
+#endif /*CONFIG_PM*/
+
+
+#ifndef NVPPS_NO_DT
+static const struct of_device_id nvpps_of_table[] = {
+	{ .compatible = "nvidia,tegra194-nvpps", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nvpps_of_table);
+#endif /*!NVPPS_NO_DT*/
+
+
+static struct platform_driver nvpps_plat_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+#ifndef NVPPS_NO_DT
+		.of_match_table = of_match_ptr(nvpps_of_table),
+#endif /*!NVPPS_NO_DT*/
+	},
+	.probe = nvpps_probe,
+	.remove = nvpps_remove,
+#ifdef CONFIG_PM
+	.suspend = nvpps_suspend,
+	.resume = nvpps_resume,
+#endif /*CONFIG_PM*/
+};
+
+
+#ifdef NVPPS_NO_DT
+/* module init
+*/
+static int __init nvpps_init(void)
+{
+	int err;
+
+	printk("%s\n", __FUNCTION__);
+
+	s_nvpps_class = class_create(THIS_MODULE, "nvpps");
+	if (IS_ERR(s_nvpps_class)) {
+		printk("nvpps: failed to allocate class\n");
+		return PTR_ERR(s_nvpps_class);
+	}
+
+	err = alloc_chrdev_region(&s_nvpps_devt, 0, MAX_NVPPS_SOURCES, "nvpps");
+	if (err < 0) {
+		printk("nvpps: failed to allocate char device region\n");
+		class_destroy(s_nvpps_class);
+		return err;
+	}
+
+	printk("nvpps registered\n");
+
+	return platform_driver_register(&nvpps_plat_driver);
+}
+
+
+/* module fini
+*/
+static void __exit nvpps_exit(void)
+{
+	printk("%s\n", __FUNCTION__);
+	platform_driver_unregister(&nvpps_plat_driver);
+
+	class_unregister(s_nvpps_class);
+	class_destroy(s_nvpps_class);
+	unregister_chrdev_region(s_nvpps_devt, MAX_NVPPS_SOURCES);
+}
+
+#endif /* NVPPS_NO_DT */
+
+
+#ifdef NVPPS_NO_DT
+module_init(nvpps_init);
+module_exit(nvpps_exit);
+#else /* !NVPPS_NO_DT */
+module_platform_driver(nvpps_plat_driver);
+#endif /* NVPPS_NO_DT */
+
+MODULE_DESCRIPTION("NVidia Tegra PPS Driver");
+MODULE_AUTHOR("David Tao tehyut@nvidia.com");
+MODULE_LICENSE("GPL");
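
For reference, the timestamp math above reduces to a single multiply: nvpps_probe() precomputes tsc_res_ns = (10^12 / cntfrq) / 1000 nanoseconds per TSC tick, and NVPPS_GETEVENT multiplies the stored tick count by it when tsc_mode is NVPPS_TSC_NSEC. The following is a minimal standalone sketch of that conversion, not part of the patch; the 31.25 MHz frequency is only an illustrative value.

/* illustrative only: mirrors the tsc_res_ns math in nvpps_probe() */
#include <stdio.h>
#include <stdint.h>

static uint64_t tsc_ticks_to_ns(uint64_t ticks, uint64_t cntfrq_hz)
{
	/* picoseconds per tick, truncated to whole nanoseconds */
	uint64_t tsc_res_ns = (1000000000000ULL / cntfrq_hz) / 1000;

	return ticks * tsc_res_ns;
}

int main(void)
{
	/* e.g. a 31.25 MHz counter gives 32 ns per tick */
	printf("%llu ns per tick\n",
	       (unsigned long long)tsc_ticks_to_ns(1, 31250000ULL));
	return 0;
}
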
diff --git a/include/linux/nvpps_ioctl.h b/include/linux/nvpps_ioctl.h
new file mode 100644
index 000000000..23c782b1d
--- /dev/null
+++ b/include/linux/nvpps_ioctl.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __NVPPS_IOCTL_H__
+#define __NVPPS_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+
+struct nvpps_version {
+	struct _version {
+		__u32 major;
+		__u32 minor;
+	} version;
+	struct _api {
+		__u32 major;
+		__u32 minor;
+	} api;
+};
+
+#define NVPPS_VERSION_MAJOR 0
+#define NVPPS_VERSION_MINOR 1
+#define NVPPS_API_MAJOR 0
+#define NVPPS_API_MINOR 1
+
+struct nvpps_params {
+	__u32 evt_mode;
+	__u32 tsc_mode;
+};
+
+
+/* evt_mode */
+#define NVPPS_MODE_GPIO 0x01
+#define NVPPS_MODE_TIMER 0x02
+
+/* tsc_mode */
+#define NVPPS_TSC_NSEC 0
+#define NVPPS_TSC_COUNTER 1
+
+
+struct nvpps_timeevent {
+	__u32 evt_nb;
+	__u64 tsc;
+	__u64 ptp;
+	__u64 tsc_res_ns;
+	__u32 evt_mode;
+	__u32 tsc_mode;
+	__u64 irq_latency;
+};
+
+
+#define NVPPS_GETVERSION _IOR('p', 0x1, struct nvpps_version *)
+#define NVPPS_GETPARAMS _IOR('p', 0x2, struct nvpps_params *)
+#define NVPPS_SETPARAMS _IOW('p', 0x3, struct nvpps_params *)
+#define NVPPS_GETEVENT _IOR('p', 0x4, struct nvpps_timeevent *)
+
+#endif /* !__NVPPS_IOCTL_H__ */
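
The ioctl interface above can be exercised from userspace once the device node exists. Below is a minimal consumer sketch, not part of the patch; it assumes the nvpps%d node created in nvpps_probe() shows up as /dev/nvpps0 (e.g. via udev) and that nvpps_ioctl.h is visible to the userspace build.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <linux/nvpps_ioctl.h>

int main(void)
{
	/* request timer-driven events, with the TSC reported in nanoseconds */
	struct nvpps_params params = {
		.evt_mode = NVPPS_MODE_TIMER,
		.tsc_mode = NVPPS_TSC_NSEC,
	};
	struct nvpps_timeevent evt;
	struct pollfd pfd;
	int fd, i;

	fd = open("/dev/nvpps0", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/nvpps0");
		return 1;
	}

	if (ioctl(fd, NVPPS_SETPARAMS, &params)) {
		perror("NVPPS_SETPARAMS");
		close(fd);
		return 1;
	}

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* wait for a few 1 Hz events and print the paired timestamps */
	for (i = 0; i < 5; i++) {
		if (poll(&pfd, 1, 2000) <= 0)
			break;
		if (ioctl(fd, NVPPS_GETEVENT, &evt))
			break;
		printf("evt %u: tsc %llu ns, ptp %llu ns, irq latency %llu ns\n",
		       evt.evt_nb,
		       (unsigned long long)evt.tsc,
		       (unsigned long long)evt.ptp,
		       (unsigned long long)evt.irq_latency);
	}

	close(fd);
	return 0;
}

Each NVPPS_GETEVENT call also records the caller's last-seen event id, so poll() only reports POLLIN again after the next PPS edge or timer tick.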