path: root/drivers/media/video/tegra/tegra_dtv.c
Diffstat (limited to 'drivers/media/video/tegra/tegra_dtv.c')
-rw-r--r--   drivers/media/video/tegra/tegra_dtv.c   1089
1 file changed, 1089 insertions(+), 0 deletions(-)
diff --git a/drivers/media/video/tegra/tegra_dtv.c b/drivers/media/video/tegra/tegra_dtv.c
new file mode 100644
index 00000000000..95270c4c3be
--- /dev/null
+++ b/drivers/media/video/tegra/tegra_dtv.c
@@ -0,0 +1,1089 @@
1/*
2 * tegra_dtv.c - Tegra DTV interface driver
3 *
4 * Author: Adam Jiang <chaoj@nvidia.com>
5 * Copyright (c) 2011, NVIDIA Corporation.
6 * Copyright (c) 2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/io.h>
26#include <linux/clk.h>
27#include <linux/fs.h>
28#include <linux/completion.h>
29#include <linux/dma-mapping.h>
30#include <linux/vmalloc.h>
31#include <linux/slab.h>
32#include <linux/spinlock.h>
33#include <linux/mutex.h>
34#include <linux/workqueue.h>
35#include <linux/wakelock.h>
36#include <linux/platform_device.h>
37#include <linux/miscdevice.h>
38#include <linux/debugfs.h>
39#include <linux/seq_file.h>
40#include <linux/delay.h>
41
42#include <media/tegra_dtv.h>
43
44#include <linux/uaccess.h>
45#include <mach/iomap.h>
46#include <mach/dma.h>
47
48/* offsets from TEGRA_DTV_BASE */
49#define DTV_SPI_CONTROL 0x40
50#define DTV_MODE 0x44
51#define DTV_CTRL 0x48
52#define DTV_PACKET_COUNT 0x4c
53#define DTV_ERROR_COUNT 0x50
54#define DTV_INTERRUPT_STATUS 0x54
55#define DTV_STATUS 0x58
56#define DTV_RX_FIFO 0x5c
57
58/* DTV_SPI_CONTROL */
59#define DTV_SPI_CONTROL_ENABLE_DTV 1
60
61/* DTV_MODE_0 */
62#define DTV_MODE_BYTE_SWIZZLE_SHIFT 6
63#define DTV_MODE_BYTE_SWIZZLE (1 << DTV_MODE_BYTE_SWIZZLE_SHIFT)
64#define DTV_MODE_BIT_SWIZZLE_SHIFT 5
65#define DTV_MODE_BIT_SWIZZLE (1 << DTV_MODE_BIT_SWIZZLE_SHIFT)
66#define DTV_MODE_CLK_EDGE_SHIFT 4
67#define DTV_MODE_CLK_EDGE_MASK 1
68#define DTV_MODE_CLK_EDGE_NEG (1 << DTV_MODE_CLK_EDGE_SHIFT)
69#define DTV_MODE_PRTL_SEL_SHIFT 2
70#define DTV_MODE_PRTL_SEL_MASK (0x3 << DTV_MODE_PRTL_SEL_SHIFT)
71#define DTV_MODE_CLK_MODE_SHIFT 1
72#define DTV_MODE_CLK_MODE_MASK (0x1 << DTV_MODE_CLK_MODE_SHIFT)
73#define DTV_MODE_PRTL_ENABLE 1
74
75/* DTV_CONTROL_0 */
76#define DTV_CTRL_FEC_SIZE_SHIFT 24
77#define DTV_CTRL_FEC_SIZE_MASK (0x7F << DTV_CTRL_FEC_SIZE_SHIFT)
78#define DTV_CTRL_BODY_SIZE_SHIFT 16
79#define DTV_CTRL_BODY_SIZE_MASK (0xFF << DTV_CTRL_BODY_SIZE_SHIFT)
80#define DTV_CTRL_FIFO_ATTN_LEVEL_SHIFT 8
81#define DTV_CTRL_FIFO_ATTN_LEVEL_MASK (0x1F << DTV_CTRL_FIFO_ATTN_LEVEL_SHIFT)
82#define DTV_CTRL_FIFO_ATTN_ONE_WORD (0 << DTV_CTRL_FIFO_ATTN_LEVEL_SHIFT)
83#define DTV_CTRL_FIFO_ATTN_TWO_WORD (1 << DTV_CTRL_FIFO_ATTN_LEVEL_SHIFT)
84#define DTV_CTRL_FIFO_ATTN_THREE_WORD (2 << DTV_CTRL_FIFO_ATTN_LEVEL_SHIFT)
85#define DTV_CTRL_FIFO_ATTN_FOUR_WORD (3 << DTV_CTRL_FIFO_ATTN_LEVEL_SHIFT)
86#define DTV_CTRL_BODY_VALID_SEL_SHIFT 6
87#define DTV_CTRL_BODY_VALID_SEL_MASK (1 << DTV_CTRL_BODY_VALID_SEL_SHIFT)
88#define DTV_CTRL_START_SEL_SHIFT 4
89#define DTV_CTRL_START_SEL_MASK (1 << DTV_CTRL_START_SEL_SHIFT)
90#define DTV_CTRL_ERROR_POLARITY_SHIFT 2
91#define DTV_CTRL_ERROR_POLARITY_MASK (1 << DTV_CTRL_ERROR_POLARITY_SHIFT)
92#define DTV_CTRL_PSYNC_POLARITY_SHIFT 1
93#define DTV_CTRL_PSYNC_POLARITY_MASK (1 << DTV_CTRL_PSYNC_POLARITY_SHIFT)
94#define DTV_CTRL_VALID_POLARITY_SHIFT 0
95#define DTV_CTRL_VALID_POLARITY_MASK (1 << DTV_CTRL_VALID_POLARITY_SHIFT)
96
97/* DTV_INTERRUPT_STATUS_0 */
98#define DTV_INTERRUPT_PACKET_UNDERRUN_ERR 8
99#define DTV_INTERRUPT_BODY_OVERRUN_ERR 4
100#define DTV_INTERRUPT_BODY_UNDERRUN_ERR 2
101#define DTV_INTERRUPT_UPSTREAM_ERR 1
102
103/* DTV_STATUS_0 */
104#define DTV_STATUS_RXF_UNDERRUN 4
105#define DTV_STATUS_RXF_EMPTY 2
106#define DTV_STATUS_RXF_FULL 1
107
108#define TEGRA_DTV_NAME "tegra_dtv"
109
110/* default sw config */
111#define DTV_BUF_SIZE_ORDER PAGE_SHIFT
112#define DTV_MAX_NUM_BUFS 4
113
114#define DTV_FIFO_ATN_LVL_LOW_GEAR 0
115#define DTV_FIFO_ATN_LVL_SECOND_GEAR 1
116#define DTV_FIFO_ATN_LVL_THIRD_GEAR 2
117#define DTV_FIFO_ATN_LVL_TOP_GEAR 3
118
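/*
 * Buffering model (a summary of the code below): the stream keeps a ring
 * of DTV_MAX_NUM_BUFS buffers, each 1 << DTV_BUF_SIZE_ORDER bytes, with
 * one completion and one tegra_dma_req per buffer.  The DMA channel
 * continuously refills the buffers from the RX FIFO, while read() waits
 * for the oldest buffer to complete, copies it to user space and then
 * re-queues its DMA request.
 */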
119struct dtv_stream {
120 struct mutex mtx;
121
122 bool xferring; /* is DMA in progress */
123 unsigned num_bufs;
124 void *buffer[DTV_MAX_NUM_BUFS];
125 dma_addr_t buf_phy[DTV_MAX_NUM_BUFS];
126 struct completion comp[DTV_MAX_NUM_BUFS];
127 struct tegra_dma_req dma_req[DTV_MAX_NUM_BUFS];
128 int last_queued;
129
130 int fifo_atn_level;
131
132 struct tegra_dma_channel *dma_chan;
133 bool stopped;
134 struct completion stop_completion;
135 spinlock_t dma_req_lock;
136 size_t buf_size;
137
138 struct work_struct work;
139 struct wake_lock wake_lock;
140 char wake_lock_name[16];
141};
142
143struct tegra_dtv_context {
144 struct tegra_dtv_hw_config config;
145 struct clk *clk;
146 int clk_enabled;
147
148 phys_addr_t phys;
149	void __iomem *base;
150 unsigned long dma_req_sel;
151
152 struct dtv_stream stream;
153 /* debugfs */
154 struct dentry *d;
155 /* for refer back */
156 struct platform_device *pdev;
157 struct miscdevice miscdev;
158};
159
160static inline struct tegra_dtv_context *to_ctx(struct dtv_stream *s)
161{
162 return container_of(s, struct tegra_dtv_context, stream);
163}
164
165/* access control */
166static atomic_t tegra_dtv_instance_nr = ATOMIC_INIT(1);
167
168static inline u32 tegra_dtv_readl(struct tegra_dtv_context *dtv,
169 unsigned long reg)
170{
171 BUG_ON(!dtv->clk_enabled);
172 return readl(dtv->base + reg);
173}
174
175static inline void tegra_dtv_writel(struct tegra_dtv_context *dtv,
176 u32 val, unsigned long reg)
177{
178 BUG_ON(!dtv->clk_enabled);
179 writel(val, dtv->base + reg);
180}
181
182/* process */
183static inline void prevent_suspend(struct dtv_stream *s)
184{
185 pr_debug("%s called.\n", __func__);
186 cancel_work_sync(&s->work);
187 wake_lock(&s->wake_lock);
188}
189
190static void tegra_dtv_worker(struct work_struct *w)
191{
192 struct dtv_stream *s = container_of(w, struct dtv_stream, work);
193 pr_debug("%s called.\n", __func__);
194 wake_unlock(&s->wake_lock);
195}
196
197static inline void wakeup_suspend(struct dtv_stream *s)
198{
199 schedule_work(&s->work);
200}
201
202static inline bool wait_till_stopped(struct dtv_stream *s)
203{
204 int ret;
205
206 pr_debug("%s: wait for completion\n", __func__);
207
208 ret = wait_for_completion_timeout(
209 &s->stop_completion, HZ);
210 if (!ret)
211		pr_err("%s: wait timed out\n", __func__);
212 if (ret < 0)
213 pr_err("%s: wait error %d\n", __func__, ret);
214
215 wakeup_suspend(s);
216
217 pr_debug("%s: done: %d\n", __func__, ret);
218
219 return true;
220}
221
222/* dma transfer */
223static inline bool are_xfers_pending(struct dtv_stream *s)
224{
225 int i;
226
227 pr_debug("%s called\n", __func__);
228
229 for (i = 0; i < s->num_bufs; i++)
230 if (!completion_done(&s->comp[i]))
231 return true;
232 return false;
233}
234
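/*
 * Called by the DMA driver once a request has filled one of the stream
 * buffers: complete that buffer's completion so a blocked read() can
 * consume it.  If no request is left pending, every buffer is already
 * full, which is reported as an overflow.
 */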
235static void tegra_dtv_rx_dma_complete(struct tegra_dma_req *req)
236{
237 unsigned long flags;
238 unsigned req_num;
239 struct dtv_stream *s = req->dev;
240
241 spin_lock_irqsave(&s->dma_req_lock, flags);
242
243 pr_debug("%s called.\n", __func__);
244
245 req_num = req - s->dma_req;
246 pr_debug("%s: complete buffer %d size %d bytes\n",
247 __func__, req_num, req->bytes_transferred);
248 BUG_ON(req_num >= s->num_bufs);
249
250 complete(&s->comp[req_num]);
251
252 if (!are_xfers_pending(s))
253 pr_debug("%s: overflow.\n", __func__);
254
255 spin_unlock_irqrestore(&s->dma_req_lock, flags);
256}
257
258/* hw */
259static inline void _dtv_enable_protocol(struct tegra_dtv_context *dtv_ctx)
260{
261 u32 val;
262
263 val = tegra_dtv_readl(dtv_ctx, DTV_MODE);
264 val &= ~0x01;
265 val |= DTV_MODE_PRTL_ENABLE;
266 tegra_dtv_writel(dtv_ctx, val, DTV_MODE);
267}
268
269static inline void _dtv_disable_protocol(struct tegra_dtv_context *dtv_ctx)
270{
271 u32 val;
272
273 val = tegra_dtv_readl(dtv_ctx, DTV_MODE);
274 val &= ~DTV_MODE_PRTL_ENABLE;
275 tegra_dtv_writel(dtv_ctx, val, DTV_MODE);
276}
277
278static inline u32 _dtv_get_status(struct tegra_dtv_context *dtv_ctx)
279{
280 return tegra_dtv_readl(dtv_ctx, DTV_STATUS);
281}
282
283static inline void _dtv_set_attn_level(struct tegra_dtv_context *dtv_ctx)
284{
285	/* TODO: consider setting this to match the corresponding transfer request */
286 u32 val;
287
288 val = tegra_dtv_readl(dtv_ctx, DTV_CTRL);
289 val &= ~DTV_CTRL_FIFO_ATTN_LEVEL_MASK;
290 val |= DTV_CTRL_FIFO_ATTN_FOUR_WORD;
291 tegra_dtv_writel(dtv_ctx, val, DTV_CTRL);
292}
293
294/* ioctl */
295static inline void _dtv_set_hw_params(struct tegra_dtv_context *dtv_ctx)
296{
297 u32 val = 0;
298 u32 reg;
299 struct tegra_dtv_hw_config *cfg = &dtv_ctx->config;
300
301 val = (cfg->clk_edge << DTV_MODE_CLK_EDGE_SHIFT) |
302 (cfg->protocol_sel << DTV_MODE_PRTL_SEL_SHIFT) |
303 (cfg->clk_mode << DTV_MODE_CLK_MODE_SHIFT);
304 reg = tegra_dtv_readl(dtv_ctx, DTV_MODE);
305 reg &= ~(DTV_MODE_CLK_EDGE_MASK |
306 DTV_MODE_PRTL_SEL_MASK |
307 DTV_MODE_CLK_MODE_MASK);
308 reg |= val;
309 tegra_dtv_writel(dtv_ctx, reg, DTV_MODE);
310
311 val = 0;
312 reg = 0;
313 val = (cfg->fec_size << DTV_CTRL_FEC_SIZE_SHIFT) |
314 (cfg->body_size << DTV_CTRL_BODY_SIZE_SHIFT) |
315 (cfg->body_valid_sel << DTV_CTRL_BODY_VALID_SEL_SHIFT) |
316 (cfg->start_sel << DTV_CTRL_START_SEL_SHIFT) |
317 (cfg->err_pol << DTV_CTRL_ERROR_POLARITY_SHIFT) |
318 (cfg->psync_pol << DTV_CTRL_PSYNC_POLARITY_SHIFT) |
319 (cfg->valid_pol << DTV_CTRL_VALID_POLARITY_SHIFT);
320 reg = tegra_dtv_readl(dtv_ctx, DTV_CTRL);
321 reg &= ~(DTV_CTRL_FEC_SIZE_MASK |
322 DTV_CTRL_BODY_SIZE_MASK |
323 DTV_CTRL_BODY_VALID_SEL_MASK |
324 DTV_CTRL_START_SEL_MASK |
325 DTV_CTRL_ERROR_POLARITY_MASK |
326 DTV_CTRL_PSYNC_POLARITY_MASK |
327 DTV_CTRL_VALID_POLARITY_MASK);
328 reg |= val;
329 tegra_dtv_writel(dtv_ctx, reg, DTV_CTRL);
330}
331
332#define DTV_GET_REG_VAL(x, reg, seg) \
333	(((x) & reg##_##seg##_MASK) >> reg##_##seg##_SHIFT)
334
335static inline void _dtv_get_hw_params(struct tegra_dtv_context *dtv_ctx,
336 struct tegra_dtv_hw_config *cfg)
337{
338 u32 reg;
339
340 reg = tegra_dtv_readl(dtv_ctx, DTV_MODE);
341 cfg->clk_edge = DTV_GET_REG_VAL(reg, DTV_MODE, CLK_EDGE);
342 cfg->protocol_sel = DTV_GET_REG_VAL(reg, DTV_MODE, PRTL_SEL);
343 cfg->clk_mode = DTV_GET_REG_VAL(reg, DTV_MODE, CLK_MODE);
344
345 reg = tegra_dtv_readl(dtv_ctx, DTV_CTRL);
346 cfg->fec_size = DTV_GET_REG_VAL(reg, DTV_CTRL, FEC_SIZE);
347 cfg->body_size = DTV_GET_REG_VAL(reg, DTV_CTRL, BODY_SIZE);
348 cfg->body_valid_sel = DTV_GET_REG_VAL(reg, DTV_CTRL, BODY_VALID_SEL);
349 cfg->start_sel = DTV_GET_REG_VAL(reg, DTV_CTRL, START_SEL);
350 cfg->err_pol = DTV_GET_REG_VAL(reg, DTV_CTRL, ERROR_POLARITY);
351 cfg->psync_pol = DTV_GET_REG_VAL(reg, DTV_CTRL, PSYNC_POLARITY);
352 cfg->valid_pol = DTV_GET_REG_VAL(reg, DTV_CTRL, VALID_POLARITY);
353}
354
355/* must call with stream->dma_req_lock held. */
356static int stop_xfer_unsafe(struct dtv_stream *s)
357{
358 int spin = 0;
359 struct tegra_dtv_context *dtv_ctx = to_ctx(s);
360
361 pr_debug("%s called\n", __func__);
362 tegra_dma_cancel(s->dma_chan);
363 _dtv_disable_protocol(dtv_ctx);
364 while ((_dtv_get_status(dtv_ctx) & DTV_STATUS_RXF_FULL) &&
365 spin < 100) {
366 udelay(10);
367 if (spin++ > 50)
368 pr_info("%s : spin %d\n", __func__, spin);
369 }
370 if (spin == 100)
371		pr_warn("%s : RX FIFO still full\n", __func__);
372
373 return 0;
374}
375
376/* must call with stream->mtx held */
377static void __force_xfer_stop(struct dtv_stream *s)
378{
379 int i;
380
381 pr_debug("%s called.\n", __func__);
382
383 if (!s->stopped) {
384 s->stopped = true;
385 if (are_xfers_pending(s))
386 wait_till_stopped(s);
387 for (i = 0; i < s->num_bufs; i++) {
388 init_completion(&s->comp[i]);
389 complete(&s->comp[i]);
390 }
391 }
392
393 /* just in case. dma should be cancelled before this */
394 if (!tegra_dma_is_empty(s->dma_chan))
395 pr_err("%s: DMA channel is not empty!\n", __func__);
396 tegra_dma_cancel(s->dma_chan);
397 s->xferring = false;
398
399 pr_debug("%s: done\n", __func__);
400}
401
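/*
 * ioctl interface.  A rough sketch of the userspace sequence this driver
 * expects; the device node path below is an assumption based on the misc
 * device name and may differ depending on udev configuration:
 *
 *	int fd = open("/dev/tegra_dtv", O_RDONLY);
 *	struct tegra_dtv_hw_config cfg = { ... };
 *	ioctl(fd, TEGRA_DTV_IOCTL_SET_HW_CONFIG, &cfg);
 *	ioctl(fd, TEGRA_DTV_IOCTL_START);
 *	read(fd, buf, 4096);	(repeat; the size must be a multiple of 4
 *				 and no larger than the stream buffer size)
 *	ioctl(fd, TEGRA_DTV_IOCTL_STOP);
 *	close(fd);
 */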
402static long tegra_dtv_ioctl(struct file *file, unsigned int cmd,
403 unsigned long arg)
404{
405 int ret = 0;
406 struct tegra_dtv_context *dtv_ctx;
407 struct dtv_stream *s;
408
409 dtv_ctx = (struct tegra_dtv_context *) file->private_data;
410 s = &dtv_ctx->stream;
411
412 /* process may sleep on this */
413 mutex_lock(&s->mtx);
414
415 switch (cmd) {
416 case TEGRA_DTV_IOCTL_START:
417 pr_debug("%s: run serial ts handling.\n", __func__);
418 s->stopped = false;
419 break;
420 case TEGRA_DTV_IOCTL_STOP:
421 pr_debug("%s: stop serial ts handling.\n", __func__);
422 if (s->xferring) {
423 stop_xfer_unsafe(s);
424 complete(&s->stop_completion);
425 __force_xfer_stop(s);
426 s->stopped = true;
427 }
428 break;
429 case TEGRA_DTV_IOCTL_SET_HW_CONFIG:
430 {
431 struct tegra_dtv_hw_config cfg;
432
433 if (s->xferring) {
434			pr_err("%s: transferring is in progress.\n", __func__);
435 ret = -EBUSY;
436 break;
437 }
438
439 if (copy_from_user(&cfg, (const void __user *) arg,
440 sizeof(cfg))) {
441 ret = -EFAULT;
442 break;
443 }
444
445 dtv_ctx->config = cfg;
446 _dtv_set_hw_params(dtv_ctx);
447 break;
448 }
449 case TEGRA_DTV_IOCTL_GET_HW_CONFIG:
450 {
451 struct tegra_dtv_hw_config cfg;
452
453 _dtv_get_hw_params(dtv_ctx, &cfg);
454
455 if (copy_to_user((void __user *)arg, &cfg,
456 sizeof(cfg)))
457 ret = -EFAULT;
458 break;
459 }
460 default:
461 ret = -EINVAL;
462 }
463
464 mutex_unlock(&s->mtx);
465
466 return ret;
467}
468
469/* must call with stream->dma_req_lock held. */
470static int start_xfer_unsafe(struct dtv_stream *s, size_t size)
471{
472 int i;
473 u32 reg;
474 struct tegra_dtv_context *dtv_ctx = to_ctx(s);
475
476 BUG_ON(are_xfers_pending(s));
477
478 pr_debug("%s called.\n", __func__);
479
480 for (i = 0; i < s->num_bufs; i++) {
481 init_completion(&s->comp[i]);
482 s->dma_req[i].dest_addr = s->buf_phy[i];
483 s->dma_req[i].size = size;
484 tegra_dma_enqueue_req(s->dma_chan, &s->dma_req[i]);
485 }
486
487 s->last_queued = s->num_bufs - 1;
488
489 /* too late ? */
490 _dtv_set_attn_level(dtv_ctx);
491 _dtv_enable_protocol(dtv_ctx);
492
493 reg = tegra_dtv_readl(dtv_ctx, DTV_MODE);
494 pr_debug("DTV_MODE = 0x%08x\n", reg);
495
496 return 0;
497}
498
499static int try_start_fill_buf(struct dtv_stream *s, size_t size)
500{
501 int ret = 0;
502 unsigned long flags;
503
504 pr_debug("%s called\n", __func__);
505
506 prevent_suspend(s);
507
508 spin_lock_irqsave(&s->dma_req_lock, flags);
509 if (!s->stopped && !are_xfers_pending(s)) {
510 ret = start_xfer_unsafe(s, size);
511 if (ret) {
512			pr_err("%s: start transfer failed.\n", __func__);
513			/* don't leave the caller waiting forever */
514 wakeup_suspend(s);
515 }
516 }
517 spin_unlock_irqrestore(&s->dma_req_lock, flags);
518
519 return ret;
520}
521
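/*
 * read() hands out one DMA buffer per call: it (re)starts the transfer if
 * necessary, waits for the buffer following last_queued to complete,
 * syncs it for the CPU, copies it to user space and immediately re-queues
 * it on the DMA channel.  Callers should keep reading to avoid overruns.
 */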
522static ssize_t tegra_dtv_read(struct file *file, char __user *buf,
523 size_t size, loff_t *off)
524{
525 ssize_t ret;
526 ssize_t xfer_size = 0;
527 int buf_no;
528 struct tegra_dma_req *req;
529 struct tegra_dtv_context *dtv_ctx;
530
531 dtv_ctx = (struct tegra_dtv_context *) file->private_data;
532
533 mutex_lock(&dtv_ctx->stream.mtx);
534
535 if (!IS_ALIGNED(size, 4) || size < 4 ||
536 size > dtv_ctx->stream.buf_size) {
537		pr_err("%s: invalid user size %zu\n", __func__, size);
538 ret = -EINVAL;
539 mutex_unlock(&dtv_ctx->stream.mtx);
540 return ret;
541 }
542
543	pr_debug("%s: read %zu bytes.\n", __func__, size);
544
545 if (dtv_ctx->stream.stopped) {
546 pr_debug("%s: tegra dtv transferring is stopped.\n",
547 __func__);
548 ret = 0;
549 mutex_unlock(&dtv_ctx->stream.mtx);
550 return ret;
551 }
552
553 /* start dma transfer */
554 ret = try_start_fill_buf(&dtv_ctx->stream, size);
555 if (ret < 0 && ret != -EALREADY) {
556 pr_err("%s: could not start recording.\n", __func__);
557 mutex_unlock(&dtv_ctx->stream.mtx);
558 return ret;
559 }
560 dtv_ctx->stream.xferring = true;
561
562 buf_no = (dtv_ctx->stream.last_queued + 1) % dtv_ctx->stream.num_bufs;
563 pr_debug("%s: buf_no = %d\n", __func__, buf_no);
564
565	/* Wait for the buffers to be filled up. The maximum timeout
566	 * value should be calculated dynamically, based on
567	 * dtv_ctx->stream.buf_size. An ISDB-T 1seg signal has a bit
568	 * rate of 300 - 456 kbps, so with buf_size = 4096 bytes it
569	 * takes roughly 77 ms to fill one buffer.
570	 */
571 ret = wait_for_completion_interruptible_timeout(
572 &dtv_ctx->stream.comp[buf_no], HZ);
573 if (!ret) {
574		pr_err("%s: timeout\n", __func__);
575 ret = -ETIMEDOUT;
576 mutex_unlock(&dtv_ctx->stream.mtx);
577 return ret;
578 } else if (ret < 0) {
579		pr_err("%s: wait error %zd\n", __func__, ret);
580 mutex_unlock(&dtv_ctx->stream.mtx);
581 return ret;
582 }
583
584 req = &dtv_ctx->stream.dma_req[buf_no];
585
586 /* xfer cannot exceed buffer size */
587 xfer_size = size > req->size ? req->size : size;
588 req->size = size;
589 dma_sync_single_for_cpu(NULL,
590 dtv_ctx->stream.dma_req[buf_no].dest_addr,
591 dtv_ctx->stream.dma_req[buf_no].size,
592 DMA_FROM_DEVICE);
593 ret = copy_to_user(buf, dtv_ctx->stream.buffer[buf_no], xfer_size);
594 if (ret) {
595 ret = -EFAULT;
596 mutex_unlock(&dtv_ctx->stream.mtx);
597 return ret;
598 }
599
600	/* not stopped; reinitialize the stop completion */
601 init_completion(&dtv_ctx->stream.stop_completion);
602
603 dtv_ctx->stream.last_queued = buf_no;
604
605 /* refill copied buffer */
606 ret = tegra_dma_enqueue_req(dtv_ctx->stream.dma_chan, req);
607 BUG_ON(ret);
608
609 ret = xfer_size;
610 *off += xfer_size;
611
612 mutex_unlock(&dtv_ctx->stream.mtx);
613
614	pr_debug("%s : done with ret = %zd\n", __func__, ret);
615
616 return ret;
617}
618
619static int tegra_dtv_open(struct inode *inode, struct file *file)
620{
621 int i;
622 struct miscdevice *miscdev = file->private_data;
623 struct tegra_dtv_context *dtv_ctx =
624 container_of(miscdev, struct tegra_dtv_context, miscdev);
625 file->private_data = dtv_ctx;
626
627 dtv_ctx = (struct tegra_dtv_context *) file->private_data;
628
629 pr_debug("%s called\n", __func__);
630
631	/* allow only one opener at a time */
632	if (!atomic_dec_and_test(&tegra_dtv_instance_nr)) {
633		atomic_inc(&tegra_dtv_instance_nr);
634		pr_err("tegra_dtv device is already open.\n");
635 return -EBUSY;
636 }
637
638 mutex_lock(&dtv_ctx->stream.mtx);
639
640 dtv_ctx->stream.stopped = false;
641
642 /* cleanup completion */
643 for (i = 0; i < dtv_ctx->stream.num_bufs; i++) {
644 init_completion(&dtv_ctx->stream.comp[i]);
645 /* complete all */
646 complete(&dtv_ctx->stream.comp[i]);
647 }
648
649 mutex_unlock(&dtv_ctx->stream.mtx);
650
651 return 0;
652}
653
654static int tegra_dtv_release(struct inode *inode, struct file *file)
655{
656 struct tegra_dtv_context *dtv_ctx =
657 (struct tegra_dtv_context *) file->private_data;
658
659 pr_debug("%s called\n", __func__);
660
661 atomic_inc(&tegra_dtv_instance_nr);
662
663 mutex_lock(&dtv_ctx->stream.mtx);
664 if (dtv_ctx->stream.xferring) {
665 stop_xfer_unsafe(&dtv_ctx->stream);
666 /* clean up stop condition */
667 complete(&dtv_ctx->stream.stop_completion);
668 __force_xfer_stop(&dtv_ctx->stream);
669 }
670 /* wakeup any pending process */
671 wakeup_suspend(&dtv_ctx->stream);
672 mutex_unlock(&dtv_ctx->stream.mtx);
673
674 pr_debug("%s : done\n", __func__);
675
676 return 0;
677}
678
679static const struct file_operations tegra_dtv_fops = {
680 .owner = THIS_MODULE,
681 .open = tegra_dtv_open,
682 .read = tegra_dtv_read,
683 .unlocked_ioctl = tegra_dtv_ioctl,
684 .release = tegra_dtv_release,
685};
686
687#ifdef CONFIG_DEBUG_FS
688static int dtv_reg_show(struct seq_file *s, void *unused)
689{
690 struct tegra_dtv_context *dtv_ctx = s->private;
691
692 seq_printf(s, "tegra_dtv register list\n");
693 seq_printf(s, "-------------------------------\n");
694 seq_printf(s, "DTV_SPI_CONTROL_0: 0x%08x\n",
695 tegra_dtv_readl(dtv_ctx, DTV_SPI_CONTROL));
696 seq_printf(s, "DTV_MODE_0: 0x%08x\n",
697 tegra_dtv_readl(dtv_ctx, DTV_MODE));
698 seq_printf(s, "DTV_CONTROL: 0x%08x\n",
699 tegra_dtv_readl(dtv_ctx, DTV_CTRL));
700 seq_printf(s, "DTV_FIFO: 0x%08x\n",
701 tegra_dtv_readl(dtv_ctx, DTV_RX_FIFO));
702
703 return 0;
704
705}
706
707static int dtv_debugfs_open(struct inode *inode, struct file *file)
708{
709 return single_open(file, dtv_reg_show, inode->i_private);
710}
711
712static const struct file_operations dtv_debugfs_fops = {
713 .open = dtv_debugfs_open,
714 .read = seq_read,
715 .llseek = seq_lseek,
716 .release = single_release,
717};
718
719static int dtv_debugfs_init(struct tegra_dtv_context *dtv_ctx)
720{
721 struct dentry *d;
722
723 d = debugfs_create_file("tegra_dtv", S_IRUGO, NULL, dtv_ctx,
724 &dtv_debugfs_fops);
725 if (!d)
726 return -ENOMEM;
727
728 dtv_ctx->d = d;
729
730 return 0;
731}
732
733static void dtv_debugfs_exit(struct tegra_dtv_context *dtv_ctx)
734{
735 debugfs_remove(dtv_ctx->d);
736}
737#else
738static int dtv_debugfs_init(struct tegra_dtv_context *dtv_ctx) { return 0; }
739static void dtv_debugfs_exit(struct tegra_dtv_context *dtv_ctx) {}
740#endif
741
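/*
 * Build a receive DMA request: the source is the fixed DTV_RX_FIFO
 * register (source_wrap = 4 keeps the source address pointed at the
 * 32-bit FIFO), and the destination is one of the stream buffers mapped
 * in setup_dma() below.
 */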
742static void setup_dma_rx_request(struct tegra_dma_req *req,
743 struct dtv_stream *s)
744{
745 struct tegra_dtv_context *dtv_ctx;
746
747 pr_debug("%s before to_ctx\n", __func__);
748 dtv_ctx = to_ctx(s);
749
750 pr_debug("%s called\n", __func__);
751
752 memset(req, 0, sizeof(*req));
753
754 req->complete = tegra_dtv_rx_dma_complete;
755 req->dev = s;
756 req->to_memory = true;
757 req->req_sel = TEGRA_DMA_REQ_SEL_DTV;
758
759 req->source_addr = dtv_ctx->phys + DTV_RX_FIFO;
760 req->source_wrap = 4;
761 req->source_bus_width = 32;
762 req->fixed_burst_size = 1;
763
764 req->dest_wrap = 0;
765 req->dest_bus_width = 32;
766}
767
768static int setup_dma(struct tegra_dtv_context *dtv_ctx)
769{
770 int ret = 0;
771 int i;
772
773 pr_debug("%s called\n", __func__);
774
775 for (i = 0; i < dtv_ctx->stream.num_bufs; i++) {
776 dtv_ctx->stream.buf_phy[i] = dma_map_single(
777 &dtv_ctx->pdev->dev,
778 dtv_ctx->stream.buffer[i],
779 dtv_ctx->stream.buf_size,
780 DMA_FROM_DEVICE);
781 BUG_ON(!dtv_ctx->stream.buf_phy[i]);
782 setup_dma_rx_request(&dtv_ctx->stream.dma_req[i],
783 &dtv_ctx->stream);
784 dtv_ctx->stream.dma_req[i].dest_addr =
785 dtv_ctx->stream.buf_phy[i];
786 }
787 dtv_ctx->stream.dma_chan = tegra_dma_allocate_channel(
788 TEGRA_DMA_MODE_CONTINUOUS_SINGLE,
789 "tegra_dtv_rx", dtv_ctx->dma_req_sel);
790 if (!dtv_ctx->stream.dma_chan) {
791		pr_err("%s : cannot allocate input DMA channel\n",
792		       __func__);
793 ret = -ENODEV;
794 /* release */
795 for (i = 0; i < dtv_ctx->stream.num_bufs; i++) {
796 dma_unmap_single(&dtv_ctx->pdev->dev,
797 dtv_ctx->stream.buf_phy[i],
798 1 << DTV_BUF_SIZE_ORDER,
799 DMA_FROM_DEVICE);
800 dtv_ctx->stream.buf_phy[i] = 0;
801 }
802		/* no channel was allocated, so nothing to free here */
803		dtv_ctx->stream.dma_chan = 0;
804
805 return ret;
806 }
807
808 return ret;
809}
810
811static void tear_down_dma(struct tegra_dtv_context *dtv_ctx)
812{
813 int i;
814
815 pr_debug("%s called\n", __func__);
816
817 for (i = 0; i < dtv_ctx->stream.num_bufs; i++) {
818 dma_unmap_single(&dtv_ctx->pdev->dev,
819 dtv_ctx->stream.buf_phy[i],
820 1 << DTV_BUF_SIZE_ORDER,
821 DMA_FROM_DEVICE);
822 dtv_ctx->stream.buf_phy[i] = 0;
823 }
824 tegra_dma_free_channel(dtv_ctx->stream.dma_chan);
825 dtv_ctx->stream.dma_chan = 0;
826}
827
828static int init_stream_buffer(struct dtv_stream *s, unsigned num)
829{
830 int ret;
831 int i, j;
832
833 pr_debug("%s (num %d)\n", __func__, num);
834
835 for (i = 0; i < num; i++) {
836 kfree(s->buffer[i]);
837 s->buffer[i] = kmalloc((1 << DTV_BUF_SIZE_ORDER),
838 GFP_KERNEL | GFP_DMA);
839 if (!s->buffer[i]) {
840 pr_err("%s : cannot allocate buffer.\n", __func__);
841 for (j = i - 1; j >= 0; j--) {
842 kfree(s->buffer[j]);
843 s->buffer[j] = 0;
844 }
845 ret = -ENOMEM;
846 return ret;
847 }
848 }
849 return 0;
850}
851
852static void release_stream_buffer(struct dtv_stream *s, unsigned num)
853{
854 int i;
855
856 pr_debug("%s (num %d)\n", __func__, num);
857
858 for (i = 0; i < num; i++) {
859 kfree(s->buffer[i]);
860 s->buffer[i] = 0;
861 }
862}
863
864static int setup_stream(struct dtv_stream *stream)
865{
866 int ret = 0;
867 int i;
868
869 pr_debug("%s called\n", __func__);
870
871 stream->xferring = false;
872 mutex_init(&stream->mtx);
873 init_completion(&stream->stop_completion);
874 spin_lock_init(&stream->dma_req_lock);
875 stream->dma_chan = NULL;
876 stream->fifo_atn_level = DTV_FIFO_ATN_LVL_TOP_GEAR;
877 stream->buf_size = 1 << DTV_BUF_SIZE_ORDER;
878 stream->num_bufs = DTV_MAX_NUM_BUFS;
879 /* init each buffer */
880 for (i = 0; i < stream->num_bufs; i++) {
881 init_completion(&stream->comp[i]);
882 /* complete all at this moment */
883 complete(&stream->comp[i]);
884 stream->buffer[i] = 0;
885 stream->buf_phy[i] = 0;
886 }
887 stream->last_queued = 0;
888 ret = init_stream_buffer(stream, stream->num_bufs);
889 if (ret < 0)
890 return ret;
891
892 INIT_WORK(&stream->work, tegra_dtv_worker);
893 wake_lock_init(&stream->wake_lock, WAKE_LOCK_SUSPEND, "tegra_dtv");
894
895 return ret;
896}
897
898static int tegra_dtv_probe(struct platform_device *pdev)
899{
900 int ret;
901 struct tegra_dtv_context *dtv_ctx;
902 struct clk *clk;
903 struct resource *res;
904
905 pr_info("%s: probing dtv.\n", __func__);
906
907 dtv_ctx = devm_kzalloc(&pdev->dev, sizeof(struct tegra_dtv_context),
908 GFP_KERNEL);
909 if (!dtv_ctx) {
910 pr_err("%s: Failed to allocate memory for dtv context.\n",
911 __func__);
912 ret = -ENOMEM;
913 return ret;
914 }
915 platform_set_drvdata(pdev, dtv_ctx);
916
917 /* for refer back */
918 dtv_ctx->pdev = pdev;
919
920 /* enable clk for dtv */
921	dtv_ctx->clk = clk = clk_get(&pdev->dev, NULL);
922 if (!clk) {
923 dev_err(&pdev->dev, "cannot get clock for tegra_dtv.\n");
924 ret = -EIO;
925 goto fail_no_clk;
926 }
927 ret = clk_enable(clk);
928 if (ret < 0) {
929 dev_err(&pdev->dev, "cannot enable clk for tegra_dtv.\n");
930 goto fail_clk_enable;
931 }
932 dtv_ctx->clk_enabled = 1;
933
934 /* get resource */
935 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
936 if (unlikely(!res)) {
937 pr_err("%s: Failed to get resource for dtv.\n",
938 __func__);
939 ret = -ENODEV;
940 goto fail_no_res;
941 }
942
943 if (!devm_request_mem_region(&pdev->dev, res->start,
944 resource_size(res), dev_name(&pdev->dev))) {
945 ret = -EBUSY;
946		goto fail_no_res;
947 }
948 dtv_ctx->phys = res->start;
949 dtv_ctx->base = devm_ioremap(&pdev->dev, res->start,
950 resource_size(res));
951 if (!dtv_ctx->base) {
952 dev_err(&pdev->dev, "cannot ioremap iomem.\n");
953 ret = -ENOMEM;
954		goto fail_no_res;
955 }
956
957 ret = setup_stream(&dtv_ctx->stream);
958 if (ret < 0)
959 goto fail_setup_stream;
960
961 ret = setup_dma(dtv_ctx);
962 if (ret < 0)
963 goto fail_setup_dma;
964
965 /* register as a misc device */
966 dtv_ctx->miscdev.minor = MISC_DYNAMIC_MINOR;
967 dtv_ctx->miscdev.name = TEGRA_DTV_NAME;
968 dtv_ctx->miscdev.fops = &tegra_dtv_fops;
969 ret = misc_register(&dtv_ctx->miscdev);
970 if (ret) {
971 pr_err("%s: Unable to register misc device.\n",
972 __func__);
973 ret = -ENODEV;
974 goto fail_misc_reg;
975 }
976
977 ret = dtv_debugfs_init(dtv_ctx);
978 if (ret) {
979 pr_err("%s: Unable to register debugfs entry.\n",
980 __func__);
981 goto fail_debugfs_reg;
982 }
983
984 return 0;
985
986fail_debugfs_reg:
987	misc_deregister(&dtv_ctx->miscdev);
988fail_misc_reg:
989	tear_down_dma(dtv_ctx);
990fail_setup_dma:
991	release_stream_buffer(&dtv_ctx->stream, dtv_ctx->stream.num_bufs);
992fail_setup_stream:
993fail_no_res:
994fail_clk_enable:
995fail_no_clk:
996 if (clk)
997 clk_put(clk);
998
999 return ret;
1000}
1001
1002static int __devexit tegra_dtv_remove(struct platform_device *pdev)
1003{
1004 struct tegra_dtv_context *dtv_ctx;
1005
1006 pr_info("%s: remove dtv.\n", __func__);
1007
1008 dtv_ctx = platform_get_drvdata(pdev);
1009
1010 dtv_debugfs_exit(dtv_ctx);
1011 tear_down_dma(dtv_ctx);
1012 release_stream_buffer(&dtv_ctx->stream, dtv_ctx->stream.num_bufs);
1013
1014 clk_put(dtv_ctx->clk);
1015
1016 misc_deregister(&dtv_ctx->miscdev);
1017
1018 return 0;
1019}
1020
1021#ifdef CONFIG_PM
1022static int tegra_dtv_suspend(struct platform_device *pdev, pm_message_t state)
1023{
1024 struct tegra_dtv_context *dtv_ctx;
1025
1026 pr_info("%s: suspend dtv.\n", __func__);
1027
1028 dtv_ctx = platform_get_drvdata(pdev);
1029
1030 /* stop xferring */
1031 mutex_lock(&dtv_ctx->stream.mtx);
1032 if (dtv_ctx->stream.xferring) {
1033 stop_xfer_unsafe(&dtv_ctx->stream);
1034 /* clean up stop condition */
1035 complete(&dtv_ctx->stream.stop_completion);
1036 __force_xfer_stop(&dtv_ctx->stream);
1037 }
1038 /* wakeup any pending process */
1039 wakeup_suspend(&dtv_ctx->stream);
1040 mutex_unlock(&dtv_ctx->stream.mtx);
1041
1042 clk_disable(dtv_ctx->clk);
1043
1044 return 0;
1045}
1046
1047static int tegra_dtv_resume(struct platform_device *pdev)
1048{
1049 struct tegra_dtv_context *dtv_ctx;
1050
1051 pr_info("%s: resume dtv.\n", __func__);
1052
1053 dtv_ctx = platform_get_drvdata(pdev);
1054 clk_enable(dtv_ctx->clk);
1055
1056 return 0;
1057}
1058#endif /* CONFIG_PM */
1059
1060static struct platform_driver tegra_dtv_driver = {
1061 .driver = {
1062 .name = TEGRA_DTV_NAME,
1063 .owner = THIS_MODULE,
1064 },
1065 .probe = tegra_dtv_probe,
1066 .remove = __devexit_p(tegra_dtv_remove),
1067#ifdef CONFIG_PM
1068 .suspend = tegra_dtv_suspend,
1069 .resume = tegra_dtv_resume,
1070#endif
1071};
1072
1073static int __init tegra_dtv_init(void)
1074{
1075 return platform_driver_register(&tegra_dtv_driver);
1076}
1077
1078static void __exit tegra_dtv_exit(void)
1079{
1080 platform_driver_unregister(&tegra_dtv_driver);
1081}
1082
1083module_init(tegra_dtv_init);
1084module_exit(tegra_dtv_exit);
1085
1086MODULE_AUTHOR("Adam Jiang <chaoj@nvidia.com>");
1087MODULE_DESCRIPTION("Tegra DTV interface driver");
1088MODULE_LICENSE("GPL");
1089MODULE_ALIAS("platform:" TEGRA_DTV_NAME);