Diffstat (limited to 'drivers/media/video/tegra/nvavp')
-rw-r--r--  drivers/media/video/tegra/nvavp/Kconfig         10
-rw-r--r--  drivers/media/video/tegra/nvavp/Makefile         3
-rw-r--r--  drivers/media/video/tegra/nvavp/nvavp_dev.c   1441
-rw-r--r--  drivers/media/video/tegra/nvavp/nvavp_os.h     103
4 files changed, 1557 insertions, 0 deletions
diff --git a/drivers/media/video/tegra/nvavp/Kconfig b/drivers/media/video/tegra/nvavp/Kconfig
new file mode 100644
index 00000000000..2d3af3f79fb
--- /dev/null
+++ b/drivers/media/video/tegra/nvavp/Kconfig
@@ -0,0 +1,10 @@
1config TEGRA_NVAVP
2 bool "Enable support for Tegra NVAVP driver"
3 depends on ARCH_TEGRA && TEGRA_GRHOST
4 default n
5 help
6 Enables support for the push-buffer based driver for the Tegra
7 multimedia framework. It exports the Tegra nvavp interface on the
8 device node /dev/tegra_avpchannel.
9
10 If unsure, say N.
diff --git a/drivers/media/video/tegra/nvavp/Makefile b/drivers/media/video/tegra/nvavp/Makefile
new file mode 100644
index 00000000000..82b4238fd08
--- /dev/null
+++ b/drivers/media/video/tegra/nvavp/Makefile
@@ -0,0 +1,3 @@
1GCOV_PROFILE := y
2obj-$(CONFIG_TEGRA_NVAVP) += nvavp_dev.o
3obj-$(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) += ../avp/headavp.o
diff --git a/drivers/media/video/tegra/nvavp/nvavp_dev.c b/drivers/media/video/tegra/nvavp/nvavp_dev.c
new file mode 100644
index 00000000000..407e35b40c4
--- /dev/null
+++ b/drivers/media/video/tegra/nvavp/nvavp_dev.c
@@ -0,0 +1,1441 @@
1/*
2 * drivers/media/video/tegra/nvavp/nvavp_dev.c
3 *
4 * Copyright (C) 2011-2012 NVIDIA Corp.
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 */
10
11#include <linux/uaccess.h>
12#include <linux/clk.h>
13#include <linux/completion.h>
14#include <linux/delay.h>
15#include <linux/dma-mapping.h>
16#include <linux/err.h>
17#include <linux/firmware.h>
18#include <linux/fs.h>
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/ioctl.h>
22#include <linux/irq.h>
23#include <linux/kref.h>
24#include <linux/list.h>
25#include <linux/miscdevice.h>
26#include <linux/mutex.h>
27#include <linux/nvhost.h>
28#include <linux/platform_device.h>
29#include <linux/rbtree.h>
30#include <linux/seq_file.h>
31#include <linux/slab.h>
32#include <linux/string.h>
33#include <linux/tegra_nvavp.h>
34#include <linux/types.h>
35#include <linux/vmalloc.h>
36#include <linux/workqueue.h>
37
38#include <mach/clk.h>
39#include <mach/hardware.h>
40#include <mach/io.h>
41#include <mach/iomap.h>
42#include <mach/legacy_irq.h>
43#include <mach/nvmap.h>
44
45#include "../../../../video/tegra/nvmap/nvmap.h"
46#include "../../../../video/tegra/host/host1x/host1x_syncpt.h"
47#include "../../../../video/tegra/host/dev.h"
48#include "../../../../video/tegra/host/nvhost_acm.h"
49
50#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
51#include "../avp/headavp.h"
52#endif
53#include "nvavp_os.h"
54
55#define TEGRA_NVAVP_NAME "nvavp"
56
57#define NVAVP_PUSHBUFFER_SIZE 4096
58
59#define NVAVP_PUSHBUFFER_MIN_UPDATE_SPACE (sizeof(u32) * 3)
60
61#define TEGRA_NVAVP_RESET_VECTOR_ADDR \
62 (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x200)
63
64#define FLOW_CTRL_HALT_COP_EVENTS IO_ADDRESS(TEGRA_FLOW_CTRL_BASE + 0x4)
65#define FLOW_MODE_STOP (0x2 << 29)
66#define FLOW_MODE_NONE 0x0
67
68#define NVAVP_OS_INBOX IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x10)
69#define NVAVP_OS_OUTBOX IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x20)
70
71#define NVAVP_INBOX_VALID (1 << 29)
72
73/* AVP behavior params */
74#define NVAVP_OS_IDLE_TIMEOUT 100 /* milli-seconds */
75
76struct nvavp_info {
77 u32 clk_enabled;
78 struct clk *bsev_clk;
79 struct clk *vde_clk;
80 struct clk *cop_clk;
81
82 /* used for dvfs */
83 struct clk *sclk;
84 struct clk *emc_clk;
85 unsigned long sclk_rate;
86 unsigned long emc_clk_rate;
87
88 int mbox_from_avp_pend_irq;
89
90 struct mutex open_lock;
91 int refcount;
92 int initialized;
93
94 struct work_struct clock_disable_work;
95
96 /* os information */
97 struct nvavp_os_info os_info;
98
99 /* ucode information */
100 struct nvavp_ucode_info ucode_info;
101
102 /* client for driver allocations, persistent */
103 struct nvmap_client *nvmap;
104
105 struct mutex pushbuffer_lock;
106 struct nvmap_handle_ref *pushbuf_handle;
107 unsigned long pushbuf_phys;
108 u8 *pushbuf_data;
109 u32 pushbuf_index;
110 u32 pushbuf_fence;
111
112 struct nv_e276_control *os_control;
113
114 struct nvhost_syncpt *nvhost_syncpt;
115 u32 syncpt_id;
116 u32 syncpt_value;
117
118 struct nvhost_device *nvhost_dev;
119 struct miscdevice misc_dev;
120};
121
122struct nvavp_clientctx {
123 struct nvmap_client *nvmap;
124 struct nvavp_pushbuffer_submit_hdr submit_hdr;
125 struct nvavp_reloc relocs[NVAVP_MAX_RELOCATION_COUNT];
126 struct nvmap_handle_ref *gather_mem;
127 int num_relocs;
128 struct nvavp_info *nvavp;
129};
130
131static struct clk *nvavp_clk_get(struct nvavp_info *nvavp, int id)
132{
133 if (!nvavp)
134 return NULL;
135
136 if (id == NVAVP_MODULE_ID_AVP)
137 return nvavp->sclk;
138 if (id == NVAVP_MODULE_ID_VDE)
139 return nvavp->vde_clk;
140 if (id == NVAVP_MODULE_ID_EMC)
141 return nvavp->emc_clk;
142
143 return NULL;
144}
145
146static void nvavp_clk_ctrl(struct nvavp_info *nvavp, u32 clk_en)
147{
148 if (clk_en && !nvavp->clk_enabled) {
149 nvhost_module_busy(nvhost_get_host(nvavp->nvhost_dev)->dev);
150 clk_enable(nvavp->bsev_clk);
151 clk_enable(nvavp->vde_clk);
152 clk_set_rate(nvavp->emc_clk, nvavp->emc_clk_rate);
153 clk_set_rate(nvavp->sclk, nvavp->sclk_rate);
154 nvavp->clk_enabled = 1;
155 dev_dbg(&nvavp->nvhost_dev->dev, "%s: setting sclk to %lu\n",
156 __func__, nvavp->sclk_rate);
157 dev_dbg(&nvavp->nvhost_dev->dev, "%s: setting emc_clk to %lu\n",
158 __func__, nvavp->emc_clk_rate);
159 } else if (!clk_en && nvavp->clk_enabled) {
160 clk_disable(nvavp->bsev_clk);
161 clk_disable(nvavp->vde_clk);
162 clk_set_rate(nvavp->emc_clk, 0);
163 clk_set_rate(nvavp->sclk, 0);
164 nvhost_module_idle(nvhost_get_host(nvavp->nvhost_dev)->dev);
165 nvavp->clk_enabled = 0;
166 dev_dbg(&nvavp->nvhost_dev->dev, "%s: resetting emc_clk "
167 "and sclk\n", __func__);
168 }
169}
170
171static u32 nvavp_check_idle(struct nvavp_info *nvavp)
172{
173 struct nv_e276_control *control = nvavp->os_control;
174 return (control->put == control->get) ? 1 : 0;
175}
176
177static void clock_disable_handler(struct work_struct *work)
178{
179 struct nvavp_info *nvavp;
180
181 nvavp = container_of(work, struct nvavp_info,
182 clock_disable_work);
183
184 mutex_lock(&nvavp->pushbuffer_lock);
185 nvavp_clk_ctrl(nvavp, !nvavp_check_idle(nvavp));
186 mutex_unlock(&nvavp->pushbuffer_lock);
187}
188
189static int nvavp_service(struct nvavp_info *nvavp)
190{
191 struct nvavp_os_info *os = &nvavp->os_info;
192 u8 *debug_print;
193 u32 inbox;
194
195 inbox = readl(NVAVP_OS_INBOX);
196 if (!(inbox & NVAVP_INBOX_VALID))
197 inbox = 0x00000000;
198
199 writel(0x00000000, NVAVP_OS_INBOX);
200
201 if (inbox & NVE276_OS_INTERRUPT_VIDEO_IDLE)
202 schedule_work(&nvavp->clock_disable_work);
203
204 if (inbox & NVE276_OS_INTERRUPT_DEBUG_STRING) {
205 /* Should only occur with debug AVP OS builds */
206 debug_print = os->data;
207 debug_print += os->debug_offset;
208 dev_info(&nvavp->nvhost_dev->dev, "%s\n", debug_print);
209 }
210 if (inbox & (NVE276_OS_INTERRUPT_SEMAPHORE_AWAKEN |
211 NVE276_OS_INTERRUPT_EXECUTE_AWAKEN)) {
212 dev_info(&nvavp->nvhost_dev->dev,
213 "AVP awaken event (0x%x)\n", inbox);
214 }
215 if (inbox & NVE276_OS_INTERRUPT_AVP_FATAL_ERROR) {
216 dev_err(&nvavp->nvhost_dev->dev,
217 "fatal AVP error (0x%08X)\n", inbox);
218 }
219 if (inbox & NVE276_OS_INTERRUPT_AVP_BREAKPOINT)
220 dev_err(&nvavp->nvhost_dev->dev, "AVP breakpoint hit\n");
221 if (inbox & NVE276_OS_INTERRUPT_TIMEOUT)
222 dev_err(&nvavp->nvhost_dev->dev, "AVP timeout\n");
223
224 return 0;
225}
226
227static irqreturn_t nvavp_mbox_pending_isr(int irq, void *data)
228{
229 struct nvavp_info *nvavp = data;
230
231 nvavp_service(nvavp);
232
233 return IRQ_HANDLED;
234}
235
236static void nvavp_halt_avp(struct nvavp_info *nvavp)
237{
238 /* ensure the AVP is halted */
239 writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
240 tegra_periph_reset_assert(nvavp->cop_clk);
241
242 writel(0, NVAVP_OS_OUTBOX);
243 writel(0, NVAVP_OS_INBOX);
244}
245
246static int nvavp_reset_avp(struct nvavp_info *nvavp, unsigned long reset_addr)
247{
248#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
249 unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
250 dma_addr_t stub_data_phys;
251
252 _tegra_avp_boot_stub_data.map_phys_addr = avp->kernel_phys;
253 _tegra_avp_boot_stub_data.jump_addr = reset_addr;
254 wmb();
255 stub_data_phys = dma_map_single(NULL, &_tegra_avp_boot_stub_data,
256 sizeof(_tegra_avp_boot_stub_data),
257 DMA_TO_DEVICE);
258 rmb();
259 reset_addr = (unsigned long)stub_data_phys;
260#endif
261 writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
262
263 writel(reset_addr, TEGRA_NVAVP_RESET_VECTOR_ADDR);
264
265 clk_enable(nvavp->sclk);
266 clk_enable(nvavp->emc_clk);
267
268 /* If sclk_rate and emc_clk_rate are not set by user space,
269 * the max clock in the dvfs table will be used for best performance.
270 */
271 nvavp->sclk_rate = ULONG_MAX;
272 nvavp->emc_clk_rate = ULONG_MAX;
273
274 tegra_periph_reset_assert(nvavp->cop_clk);
275 udelay(2);
276 tegra_periph_reset_deassert(nvavp->cop_clk);
277
278 writel(FLOW_MODE_NONE, FLOW_CTRL_HALT_COP_EVENTS);
279
280#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
281 dma_unmap_single(NULL, stub_data_phys,
282 sizeof(_tegra_avp_boot_stub_data),
283 DMA_TO_DEVICE);
284#endif
285 return 0;
286}
287
288static void nvavp_halt_vde(struct nvavp_info *nvavp)
289{
290 if (nvavp->clk_enabled) {
291 tegra_periph_reset_assert(nvavp->bsev_clk);
292 clk_disable(nvavp->bsev_clk);
293 tegra_periph_reset_assert(nvavp->vde_clk);
294 clk_disable(nvavp->vde_clk);
295 nvhost_module_idle(nvhost_get_host(nvavp->nvhost_dev)->dev);
296 nvavp->clk_enabled = 0;
297 }
298}
299
300static int nvavp_reset_vde(struct nvavp_info *nvavp)
301{
302 if (!nvavp->clk_enabled)
303 nvhost_module_busy(nvhost_get_host(nvavp->nvhost_dev)->dev);
304
305 clk_enable(nvavp->bsev_clk);
306 tegra_periph_reset_assert(nvavp->bsev_clk);
307 udelay(2);
308 tegra_periph_reset_deassert(nvavp->bsev_clk);
309
310 clk_enable(nvavp->vde_clk);
311 tegra_periph_reset_assert(nvavp->vde_clk);
312 udelay(2);
313 tegra_periph_reset_deassert(nvavp->vde_clk);
314
315 /*
316 * VDE clock is set to max freq by default.
317 * VDE clock can be set to different freq if needed
318 * through ioctl.
319 */
320 clk_set_rate(nvavp->vde_clk, ULONG_MAX);
321
322 nvavp->clk_enabled = 1;
323 return 0;
324}
325
326static int nvavp_pushbuffer_alloc(struct nvavp_info *nvavp)
327{
328 int ret = 0;
329
330 nvavp->pushbuf_handle = nvmap_alloc(nvavp->nvmap, NVAVP_PUSHBUFFER_SIZE,
331 SZ_1M, NVMAP_HANDLE_UNCACHEABLE, 0);
332 if (IS_ERR(nvavp->pushbuf_handle)) {
333 dev_err(&nvavp->nvhost_dev->dev,
334 "cannot create pushbuffer handle\n");
335 ret = PTR_ERR(nvavp->pushbuf_handle);
336 goto err_pushbuf_alloc;
337 }
338 nvavp->pushbuf_data = (u8 *)nvmap_mmap(nvavp->pushbuf_handle);
339 if (!nvavp->pushbuf_data) {
340 dev_err(&nvavp->nvhost_dev->dev,
341 "cannot map pushbuffer handle\n");
342 ret = -ENOMEM;
343 goto err_pushbuf_mmap;
344 }
345 nvavp->pushbuf_phys = nvmap_pin(nvavp->nvmap, nvavp->pushbuf_handle);
346 if (IS_ERR((void *)nvavp->pushbuf_phys)) {
347 dev_err(&nvavp->nvhost_dev->dev,
348 "cannot pin pushbuffer handle\n");
349 ret = PTR_ERR((void *)nvavp->pushbuf_phys);
350 goto err_pushbuf_pin;
351 }
352
353 memset(nvavp->pushbuf_data, 0, NVAVP_PUSHBUFFER_SIZE);
354
355 return 0;
356
357err_pushbuf_pin:
358 nvmap_munmap(nvavp->pushbuf_handle, nvavp->pushbuf_data);
359err_pushbuf_mmap:
360 nvmap_free(nvavp->nvmap, nvavp->pushbuf_handle);
361err_pushbuf_alloc:
362 return ret;
363}
364
365static void nvavp_pushbuffer_free(struct nvavp_info *nvavp)
366{
367 nvmap_unpin(nvavp->nvmap, nvavp->pushbuf_handle);
368 nvmap_munmap(nvavp->pushbuf_handle, nvavp->pushbuf_data);
369 nvmap_free(nvavp->nvmap, nvavp->pushbuf_handle);
370}
371
372static int nvavp_pushbuffer_init(struct nvavp_info *nvavp)
373{
374 void *ptr;
375 struct nvavp_os_info *os = &nvavp->os_info;
376 struct nv_e276_control *control;
377 u32 temp;
378 int ret;
379
380 ret = nvavp_pushbuffer_alloc(nvavp);
381 if (ret) {
382 dev_err(&nvavp->nvhost_dev->dev,
383 "unable to alloc pushbuffer\n");
384 return ret;
385 }
386
387 ptr = os->data;
388 ptr += os->control_offset;
389 nvavp->os_control = (struct nv_e276_control *)ptr;
390
391 control = nvavp->os_control;
392 memset(control, 0, sizeof(struct nvavp_os_info));
393
394 /* init get and put pointers */
395 writel(0x0, &control->put);
396 writel(0x0, &control->get);
397
398 /* enable avp VDE clock control and disable iram clock gating */
399 writel(0x0, &control->idle_clk_enable);
400 writel(0x0, &control->iram_clk_gating);
401
402 /* enable avp idle timeout interrupt */
403 writel(0x1, &control->idle_notify_enable);
404 writel(NVAVP_OS_IDLE_TIMEOUT, &control->idle_notify_delay);
405
406 /* init dma start and end pointers */
407 writel(nvavp->pushbuf_phys, &control->dma_start);
408 writel((nvavp->pushbuf_phys + NVAVP_PUSHBUFFER_SIZE),
409 &control->dma_end);
410
411 writel(0x00, &nvavp->pushbuf_index);
412 temp = NVAVP_PUSHBUFFER_SIZE - NVAVP_PUSHBUFFER_MIN_UPDATE_SPACE;
413 writel(temp, &nvavp->pushbuf_fence);
414
415 nvavp->syncpt_id = NVSYNCPT_AVP_0;
416 nvavp->syncpt_value = nvhost_syncpt_read(nvavp->nvhost_syncpt,
417 nvavp->syncpt_id);
418
419 return 0;
420}
421
422static void nvavp_pushbuffer_deinit(struct nvavp_info *nvavp)
423{
424 nvavp_pushbuffer_free(nvavp);
425}
426
427static int nvavp_pushbuffer_update(struct nvavp_info *nvavp, u32 phys_addr,
428 u32 gather_count, struct nvavp_syncpt *syncpt,
429 u32 ext_ucode_flag)
430{
431 struct nv_e276_control *control = nvavp->os_control;
432 u32 gather_cmd, setucode_cmd, sync = 0;
433 u32 wordcount = 0;
434 u32 index, value = -1;
435
436 mutex_lock(&nvavp->pushbuffer_lock);
437
438 /* check for pushbuffer wrapping */
439 if (nvavp->pushbuf_index >= nvavp->pushbuf_fence)
440 nvavp->pushbuf_index = 0;
441
442 if (!ext_ucode_flag) {
443 setucode_cmd =
444 NVE26E_CH_OPCODE_INCR(NVE276_SET_MICROCODE_A, 3);
445
446 index = wordcount + nvavp->pushbuf_index;
447 writel(setucode_cmd, (nvavp->pushbuf_data + index));
448 wordcount += sizeof(u32);
449
450 index = wordcount + nvavp->pushbuf_index;
451 writel(0, (nvavp->pushbuf_data + index));
452 wordcount += sizeof(u32);
453
454 index = wordcount + nvavp->pushbuf_index;
455 writel(nvavp->ucode_info.phys, (nvavp->pushbuf_data + index));
456 wordcount += sizeof(u32);
457
458 index = wordcount + nvavp->pushbuf_index;
459 writel(nvavp->ucode_info.size, (nvavp->pushbuf_data + index));
460 wordcount += sizeof(u32);
461 }
462
463 gather_cmd = NVE26E_CH_OPCODE_GATHER(0, 0, 0, gather_count);
464
465 if (syncpt) {
466 value = ++nvavp->syncpt_value;
467 /* XXX: NvSchedValueWrappingComparison */
468 sync = NVE26E_CH_OPCODE_IMM(NVE26E_HOST1X_INCR_SYNCPT,
469 (NVE26E_HOST1X_INCR_SYNCPT_COND_OP_DONE << 8) |
470 (nvavp->syncpt_id & 0xFF));
471 }
472
473 /* write commands out */
474 index = wordcount + nvavp->pushbuf_index;
475 writel(gather_cmd, (nvavp->pushbuf_data + index));
476 wordcount += sizeof(u32);
477
478 index = wordcount + nvavp->pushbuf_index;
479 writel(phys_addr, (nvavp->pushbuf_data + index));
480 wordcount += sizeof(u32);
481
482 if (syncpt) {
483 index = wordcount + nvavp->pushbuf_index;
484 writel(sync, (nvavp->pushbuf_data + index));
485 wordcount += sizeof(u32);
486 }
487
488 /* enable clocks to VDE/BSEV */
489 nvavp_clk_ctrl(nvavp, 1);
490
491 /* update put pointer */
492 nvavp->pushbuf_index = (nvavp->pushbuf_index + wordcount) &
493 (NVAVP_PUSHBUFFER_SIZE - 1);
494 writel(nvavp->pushbuf_index, &control->put);
495 wmb();
496
497 /* wake up avp */
498 writel(0xA0000001, NVAVP_OS_OUTBOX);
499
500 /* Fill out fence struct */
501 if (syncpt) {
502 syncpt->id = nvavp->syncpt_id;
503 syncpt->value = value;
504 }
505
506 mutex_unlock(&nvavp->pushbuffer_lock);
507
508 return 0;
509}
510
511static void nvavp_unload_ucode(struct nvavp_info *nvavp)
512{
513 nvmap_unpin(nvavp->nvmap, nvavp->ucode_info.handle);
514 nvmap_munmap(nvavp->ucode_info.handle, nvavp->ucode_info.data);
515 nvmap_free(nvavp->nvmap, nvavp->ucode_info.handle);
516 kfree(nvavp->ucode_info.ucode_bin);
517}
518
519static int nvavp_load_ucode(struct nvavp_info *nvavp)
520{
521 struct nvavp_ucode_info *ucode_info = &nvavp->ucode_info;
522 const struct firmware *nvavp_ucode_fw;
523 char fw_ucode_file[32];
524 void *ptr;
525 int ret = 0;
526
527 if (!ucode_info->ucode_bin) {
528 sprintf(fw_ucode_file, "nvavp_vid_ucode.bin");
529
530 ret = request_firmware(&nvavp_ucode_fw, fw_ucode_file,
531 nvavp->misc_dev.this_device);
532 if (ret) {
533 /* Try alternative version */
534 sprintf(fw_ucode_file, "nvavp_vid_ucode_alt.bin");
535
536 ret = request_firmware(&nvavp_ucode_fw,
537 fw_ucode_file,
538 nvavp->misc_dev.this_device);
539
540 if (ret) {
541 dev_err(&nvavp->nvhost_dev->dev,
542 "cannot read ucode firmware '%s'\n",
543 fw_ucode_file);
544 goto err_req_ucode;
545 }
546 }
547
548 dev_info(&nvavp->nvhost_dev->dev,
549 "read ucode firmware from '%s' (%d bytes)\n",
550 fw_ucode_file, nvavp_ucode_fw->size);
551
552 ptr = (void *)nvavp_ucode_fw->data;
553
554 if (strncmp((const char *)ptr, "NVAVPAPP", 8)) {
555 dev_info(&nvavp->nvhost_dev->dev,
556 "ucode hdr string mismatch\n");
557 ret = -EINVAL;
558 goto err_req_ucode;
559 }
560 ptr += 8;
561 ucode_info->size = nvavp_ucode_fw->size - 8;
562
563 ucode_info->ucode_bin = kzalloc(ucode_info->size,
564 GFP_KERNEL);
565 if (!ucode_info->ucode_bin) {
566 dev_err(&nvavp->nvhost_dev->dev,
567 "cannot allocate ucode bin\n");
568 ret = -ENOMEM;
569 goto err_ubin_alloc;
570 }
571
572 ucode_info->handle = nvmap_alloc(nvavp->nvmap,
573 nvavp->ucode_info.size,
574 SZ_1M, NVMAP_HANDLE_UNCACHEABLE, 0);
575 if (IS_ERR(ucode_info->handle)) {
576 dev_err(&nvavp->nvhost_dev->dev,
577 "cannot create ucode handle\n");
578 ret = PTR_ERR(ucode_info->handle);
579 goto err_ucode_alloc;
580 }
581 ucode_info->data = (u8 *)nvmap_mmap(ucode_info->handle);
582 if (!ucode_info->data) {
583 dev_err(&nvavp->nvhost_dev->dev,
584 "cannot map ucode handle\n");
585 ret = -ENOMEM;
586 goto err_ucode_mmap;
587 }
588 ucode_info->phys = nvmap_pin(nvavp->nvmap, ucode_info->handle);
589 if (IS_ERR((void *)ucode_info->phys)) {
590 dev_err(&nvavp->nvhost_dev->dev,
591 "cannot pin ucode handle\n");
592 ret = PTR_ERR((void *)ucode_info->phys);
593 goto err_ucode_pin;
594 }
595 memcpy(ucode_info->ucode_bin, ptr, ucode_info->size);
596 release_firmware(nvavp_ucode_fw);
597 }
598
599 memcpy(ucode_info->data, ucode_info->ucode_bin, ucode_info->size);
600 return 0;
601
602err_ucode_pin:
603 nvmap_munmap(ucode_info->handle, ucode_info->data);
604err_ucode_mmap:
605 nvmap_free(nvavp->nvmap, ucode_info->handle);
606err_ucode_alloc:
607 kfree(nvavp->ucode_info.ucode_bin);
608err_ubin_alloc:
609 release_firmware(nvavp_ucode_fw);
610err_req_ucode:
611 return ret;
612}
613
614static void nvavp_unload_os(struct nvavp_info *nvavp)
615{
616 nvmap_unpin(nvavp->nvmap, nvavp->os_info.handle);
617 nvmap_munmap(nvavp->os_info.handle, nvavp->os_info.data);
618#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
619 nvmap_free(nvavp->nvmap, nvavp->os_info.handle);
620#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU)
621 nvmap_free_iovm(nvavp->nvmap, nvavp->os_info.handle);
622#endif
623 kfree(nvavp->os_info.os_bin);
624}
625
626static int nvavp_load_os(struct nvavp_info *nvavp, char *fw_os_file)
627{
628 struct nvavp_os_info *os_info = &nvavp->os_info;
629 const struct firmware *nvavp_os_fw;
630 void *ptr;
631 u32 size;
632 int ret = 0;
633
634 if (!os_info->os_bin) {
635 ret = request_firmware(&nvavp_os_fw, fw_os_file,
636 nvavp->misc_dev.this_device);
637 if (ret) {
638 dev_err(&nvavp->nvhost_dev->dev,
639 "cannot read os firmware '%s'\n", fw_os_file);
640 goto err_req_fw;
641 }
642
643 dev_info(&nvavp->nvhost_dev->dev,
644 "read firmware from '%s' (%d bytes)\n",
645 fw_os_file, nvavp_os_fw->size);
646
647 ptr = (void *)nvavp_os_fw->data;
648
649 if (strncmp((const char *)ptr, "NVAVP-OS", 8)) {
650 dev_info(&nvavp->nvhost_dev->dev,
651 "os hdr string mismatch\n");
652 ret = -EINVAL;
653 goto err_os_bin;
654 }
655
656 ptr += 8;
657 os_info->entry_offset = *((u32 *)ptr);
658 ptr += sizeof(u32);
659 os_info->control_offset = *((u32 *)ptr);
660 ptr += sizeof(u32);
661 os_info->debug_offset = *((u32 *)ptr);
662 ptr += sizeof(u32);
663
664 size = *((u32 *)ptr); ptr += sizeof(u32);
665
666 os_info->size = size;
667 os_info->os_bin = kzalloc(os_info->size,
668 GFP_KERNEL);
669 if (!os_info->os_bin) {
670 dev_err(&nvavp->nvhost_dev->dev,
671 "cannot allocate os bin\n");
672 ret = -ENOMEM;
673 goto err_os_bin;
674 }
675
676 memcpy(os_info->os_bin, ptr, os_info->size);
677 memset(os_info->data + os_info->size, 0, SZ_1M - os_info->size);
678
679 dev_info(&nvavp->nvhost_dev->dev,
680 "entry=%08x control=%08x debug=%08x size=%d\n",
681 os_info->entry_offset, os_info->control_offset,
682 os_info->debug_offset, os_info->size);
683 release_firmware(nvavp_os_fw);
684 }
685
686 memcpy(os_info->data, os_info->os_bin, os_info->size);
687 os_info->reset_addr = os_info->phys + os_info->entry_offset;
688
689 dev_info(&nvavp->nvhost_dev->dev,
690 "AVP os at vaddr=%p paddr=%lx reset_addr=%p\n",
691 os_info->data, (unsigned long)(os_info->phys),
692 (void *)os_info->reset_addr);
693 return 0;
694
695err_os_bin:
696 release_firmware(nvavp_os_fw);
697err_req_fw:
698 return ret;
699}
700
701static int nvavp_init(struct nvavp_info *nvavp)
702{
703 char fw_os_file[32];
704 int ret = 0;
705
706 if (nvavp->initialized)
707 return ret;
708
709#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
710 /* paddr is any address returned from nvmap_pin */
711 /* vaddr is AVP_KERNEL_VIRT_BASE */
712 dev_info(&nvavp->nvhost_dev->dev,
713 "using AVP MMU to relocate AVP os\n");
714 sprintf(fw_os_file, "nvavp_os.bin");
715 nvavp->os_info.reset_addr = AVP_KERNEL_VIRT_BASE;
716#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
717 /* paddr is any address behind SMMU */
718 /* vaddr is TEGRA_SMMU_BASE */
719 dev_info(&nvavp->nvhost_dev->dev,
720 "using SMMU at %lx to load AVP kernel\n",
721 (unsigned long)nvavp->os_info.phys);
722 BUG_ON(nvavp->os_info.phys != 0xeff00000
723 && nvavp->os_info.phys != 0x0ff00000);
724 sprintf(fw_os_file, "nvavp_os_%08lx.bin",
725 (unsigned long)nvavp->os_info.phys);
726 nvavp->os_info.reset_addr = nvavp->os_info.phys;
727#else /* nvmem= carveout */
728 /* paddr is found in nvmem= carveout */
729 /* vaddr is same as paddr */
730 /* Find nvmem carveout */
731 if (!pfn_valid(__phys_to_pfn(0x8e000000))) {
732 nvavp->os_info.phys = 0x8e000000;
733 } else if (!pfn_valid(__phys_to_pfn(0x9e000000))) {
734 nvavp->os_info.phys = 0x9e000000;
735 } else if (!pfn_valid(__phys_to_pfn(0xbe000000))) {
736 nvavp->os_info.phys = 0xbe000000;
737 } else {
738 dev_err(&nvavp->nvhost_dev->dev,
739 "cannot find nvmem= carveout to load AVP os\n");
740 dev_err(&nvavp->nvhost_dev->dev,
741 "check kernel command line "
742 "to see if nvmem= is defined\n");
743 BUG();
744 }
745 dev_info(&nvavp->nvhost_dev->dev,
746 "using nvmem= carveout at %lx to load AVP os\n",
747 nvavp->os_info.phys);
748 sprintf(fw_os_file, "nvavp_os_%08lx.bin", nvavp->os_info.phys);
749 nvavp->os_info.reset_addr = nvavp->os_info.phys;
750 nvavp->os_info.data = ioremap(nvavp->os_info.phys, SZ_1M);
751#endif
752
753 ret = nvavp_load_os(nvavp, fw_os_file);
754 if (ret) {
755 dev_err(&nvavp->nvhost_dev->dev,
756 "unable to load os firmware '%s'\n", fw_os_file);
757 goto err_exit;
758 }
759
760 ret = nvavp_pushbuffer_init(nvavp);
761 if (ret) {
762 dev_err(&nvavp->nvhost_dev->dev,
763 "unable to init pushbuffer\n");
764 goto err_exit;
765 }
766
767 ret = nvavp_load_ucode(nvavp);
768 if (ret) {
769 dev_err(&nvavp->nvhost_dev->dev,
770 "unable to load ucode\n");
771 goto err_exit;
772 }
773
774 tegra_init_legacy_irq_cop();
775
776 nvavp_reset_vde(nvavp);
777 nvavp_reset_avp(nvavp, nvavp->os_info.reset_addr);
778 enable_irq(nvavp->mbox_from_avp_pend_irq);
779
780 nvavp->initialized = 1;
781
782err_exit:
783 return ret;
784}
785
786static void nvavp_uninit(struct nvavp_info *nvavp)
787{
788 if (!nvavp->initialized)
789 return;
790
791 disable_irq(nvavp->mbox_from_avp_pend_irq);
792
793 cancel_work_sync(&nvavp->clock_disable_work);
794
795 nvavp_pushbuffer_deinit(nvavp);
796
797 nvavp_halt_vde(nvavp);
798 nvavp_halt_avp(nvavp);
799
800 clk_disable(nvavp->sclk);
801 clk_disable(nvavp->emc_clk);
802
803 nvavp->initialized = 0;
804}
805
806static int nvavp_set_clock_ioctl(struct file *filp, unsigned int cmd,
807 unsigned long arg)
808{
809 struct nvavp_clientctx *clientctx = filp->private_data;
810 struct nvavp_info *nvavp = clientctx->nvavp;
811 struct clk *c;
812 struct nvavp_clock_args config;
813
814 if (copy_from_user(&config, (void __user *)arg, sizeof(struct nvavp_clock_args)))
815 return -EFAULT;
816
817 dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id=%d, clk_rate=%u\n",
818 __func__, config.id, config.rate);
819
820 if (config.id == NVAVP_MODULE_ID_AVP)
821 nvavp->sclk_rate = config.rate;
822 else if (config.id == NVAVP_MODULE_ID_EMC)
823 nvavp->emc_clk_rate = config.rate;
824
825 c = nvavp_clk_get(nvavp, config.id);
826 if (IS_ERR_OR_NULL(c))
827 return -EINVAL;
828
829 clk_enable(c);
830 clk_set_rate(c, config.rate);
831
832 config.rate = clk_get_rate(c);
833 clk_disable(c);
834 if (copy_to_user((void __user *)arg, &config, sizeof(struct nvavp_clock_args)))
835 return -EFAULT;
836
837 return 0;
838}
839
840static int nvavp_get_clock_ioctl(struct file *filp, unsigned int cmd,
841 unsigned long arg)
842{
843 struct nvavp_clientctx *clientctx = filp->private_data;
844 struct nvavp_info *nvavp = clientctx->nvavp;
845 struct clk *c;
846 struct nvavp_clock_args config;
847
848 if (copy_from_user(&config, (void __user *)arg, sizeof(struct nvavp_clock_args)))
849 return -EFAULT;
850
851 c = nvavp_clk_get(nvavp, config.id);
852 if (IS_ERR_OR_NULL(c))
853 return -EINVAL;
854
855 clk_enable(c);
856 config.rate = clk_get_rate(c);
857 clk_disable(c);
858
859 if (copy_to_user((void __user *)arg, &config, sizeof(struct nvavp_clock_args)))
860 return -EFAULT;
861
862 return 0;
863}
864
865static int nvavp_get_syncpointid_ioctl(struct file *filp, unsigned int cmd,
866 unsigned long arg)
867{
868 struct nvavp_clientctx *clientctx = filp->private_data;
869 struct nvavp_info *nvavp = clientctx->nvavp;
870 u32 id = nvavp->syncpt_id;
871
872 if (_IOC_DIR(cmd) & _IOC_READ) {
873 if (copy_to_user((void __user *)arg, &id, sizeof(u32)))
874 return -EFAULT;
875 else
876 return 0;
877 }
878 return -EFAULT;
879}
880
881static int nvavp_set_nvmapfd_ioctl(struct file *filp, unsigned int cmd,
882 unsigned long arg)
883{
884 struct nvavp_clientctx *clientctx = filp->private_data;
885 struct nvavp_set_nvmap_fd_args buf;
886 struct nvmap_client *new_client;
887 int fd;
888
889 if (_IOC_DIR(cmd) & _IOC_WRITE) {
890 if (copy_from_user(&buf, (void __user *)arg, _IOC_SIZE(cmd)))
891 return -EFAULT;
892 }
893
894 fd = buf.fd;
895 new_client = nvmap_client_get_file(fd);
896 if (IS_ERR(new_client))
897 return PTR_ERR(new_client);
898
899 clientctx->nvmap = new_client;
900 return 0;
901}
902
903static int nvavp_pushbuffer_submit_ioctl(struct file *filp, unsigned int cmd,
904 unsigned long arg)
905{
906 struct nvavp_clientctx *clientctx = filp->private_data;
907 struct nvavp_info *nvavp = clientctx->nvavp;
908 struct nvavp_pushbuffer_submit_hdr hdr;
909 u32 *cmdbuf_data;
910 struct nvmap_handle *cmdbuf_handle = NULL;
911 struct nvmap_handle_ref *cmdbuf_dupe;
912 int ret = 0, i;
913 unsigned long phys_addr;
914 unsigned long virt_addr;
915 struct nvavp_pushbuffer_submit_hdr *user_hdr =
916 (struct nvavp_pushbuffer_submit_hdr *) arg;
917 struct nvavp_syncpt syncpt;
918
919 syncpt.id = NVSYNCPT_INVALID;
920 syncpt.value = 0;
921
922 if (_IOC_DIR(cmd) & _IOC_WRITE) {
923 if (copy_from_user(&hdr, (void __user *)arg,
924 sizeof(struct nvavp_pushbuffer_submit_hdr)))
925 return -EFAULT;
926 }
927
928 if (!hdr.cmdbuf.mem)
929 return 0;
930
931 if (copy_from_user(clientctx->relocs, (void __user *)hdr.relocs,
932 sizeof(struct nvavp_reloc) * hdr.num_relocs)) {
933 return -EFAULT;
934 }
935
936 cmdbuf_handle = nvmap_get_handle_id(clientctx->nvmap, hdr.cmdbuf.mem);
937 if (cmdbuf_handle == NULL) {
938 dev_err(&nvavp->nvhost_dev->dev,
939 "invalid cmd buffer handle %08x\n", hdr.cmdbuf.mem);
940 return -EPERM;
941 }
942
943 /* duplicate the new pushbuffer's handle into the nvavp driver's
944 * nvmap context, to ensure that the handle won't be freed as
945 * long as it is in use by the nvavp driver */
946 cmdbuf_dupe = nvmap_duplicate_handle_id(nvavp->nvmap, hdr.cmdbuf.mem);
947 nvmap_handle_put(cmdbuf_handle);
948
949 if (IS_ERR(cmdbuf_dupe)) {
950 dev_err(&nvavp->nvhost_dev->dev,
951 "could not duplicate handle\n");
952 return PTR_ERR(cmdbuf_dupe);
953 }
954
955 phys_addr = nvmap_pin(nvavp->nvmap, cmdbuf_dupe);
956 if (IS_ERR((void *)phys_addr)) {
957 dev_err(&nvavp->nvhost_dev->dev, "could not pin handle\n");
958 nvmap_free(nvavp->nvmap, cmdbuf_dupe);
959 return PTR_ERR((void *)phys_addr);
960 }
961
962 virt_addr = (unsigned long)nvmap_mmap(cmdbuf_dupe);
963 if (!virt_addr) {
964 dev_err(&nvavp->nvhost_dev->dev, "cannot map cmdbuf handle\n");
965 ret = -ENOMEM;
966 goto err_cmdbuf_mmap;
967 }
968
969 cmdbuf_data = (u32 *)(virt_addr + hdr.cmdbuf.offset);
970
971 for (i = 0; i < hdr.num_relocs; i++) {
972 u32 *reloc_addr, target_phys_addr;
973
974 if (clientctx->relocs[i].cmdbuf_mem != hdr.cmdbuf.mem) {
975 dev_err(&nvavp->nvhost_dev->dev,
976 "reloc info does not match target bufferID\n");
977 ret = -EPERM;
978 goto err_reloc_info;
979 }
980
981 reloc_addr = cmdbuf_data +
982 (clientctx->relocs[i].cmdbuf_offset >> 2);
983
984 target_phys_addr = nvmap_handle_address(clientctx->nvmap,
985 clientctx->relocs[i].target);
986 target_phys_addr += clientctx->relocs[i].target_offset;
987 writel(target_phys_addr, reloc_addr);
988 }
989
990 if (hdr.syncpt) {
991 ret = nvavp_pushbuffer_update(nvavp,
992 (phys_addr + hdr.cmdbuf.offset),
993 hdr.cmdbuf.words, &syncpt,
994 (hdr.flags & NVAVP_UCODE_EXT));
995
996 if (copy_to_user((void __user *)user_hdr->syncpt, &syncpt,
997 sizeof(struct nvavp_syncpt))) {
998 ret = -EFAULT;
999 goto err_reloc_info;
1000 }
1001 } else {
1002 ret = nvavp_pushbuffer_update(nvavp,
1003 (phys_addr + hdr.cmdbuf.offset),
1004 hdr.cmdbuf.words, NULL,
1005 (hdr.flags & NVAVP_UCODE_EXT));
1006 }
1007
1008err_reloc_info:
1009 nvmap_munmap(cmdbuf_dupe, (void *)virt_addr);
1010err_cmdbuf_mmap:
1011 nvmap_unpin(nvavp->nvmap, cmdbuf_dupe);
1012 nvmap_free(nvavp->nvmap, cmdbuf_dupe);
1013 return ret;
1014}
1015
1016static int tegra_nvavp_open(struct inode *inode, struct file *filp)
1017{
1018 struct miscdevice *miscdev = filp->private_data;
1019 struct nvavp_info *nvavp = dev_get_drvdata(miscdev->parent);
1020 int ret = 0;
1021 struct nvavp_clientctx *clientctx;
1022
1023 dev_dbg(&nvavp->nvhost_dev->dev, "%s: ++\n", __func__);
1024
1025 nonseekable_open(inode, filp);
1026
1027 clientctx = kzalloc(sizeof(*clientctx), GFP_KERNEL);
1028 if (!clientctx)
1029 return -ENOMEM;
1030
1031 mutex_lock(&nvavp->open_lock);
1032
1033 if (!nvavp->refcount)
1034 ret = nvavp_init(nvavp);
1035
1036 if (!ret)
1037 nvavp->refcount++;
1038
1039 clientctx->nvmap = nvavp->nvmap;
1040 clientctx->nvavp = nvavp;
1041
1042 filp->private_data = clientctx;
1043
1044 mutex_unlock(&nvavp->open_lock);
1045
1046 return ret;
1047}
1048
1049static int tegra_nvavp_release(struct inode *inode, struct file *filp)
1050{
1051 struct nvavp_clientctx *clientctx = filp->private_data;
1052 struct nvavp_info *nvavp = clientctx->nvavp;
1053 int ret = 0;
1054
1055 dev_dbg(&nvavp->nvhost_dev->dev, "%s: ++\n", __func__);
1056
1057 filp->private_data = NULL;
1058
1059 mutex_lock(&nvavp->open_lock);
1060
1061 if (!nvavp->refcount) {
1062 dev_err(&nvavp->nvhost_dev->dev,
1063 "releasing while in invalid state\n");
1064 ret = -EINVAL;
1065 goto out;
1066 }
1067
1068 if (nvavp->refcount > 0)
1069 nvavp->refcount--;
1070 if (!nvavp->refcount)
1071 nvavp_uninit(nvavp);
1072
1073out:
1074 nvmap_client_put(clientctx->nvmap);
1075 mutex_unlock(&nvavp->open_lock);
1076 kfree(clientctx);
1077 return ret;
1078}
1079
1080static long tegra_nvavp_ioctl(struct file *filp, unsigned int cmd,
1081 unsigned long arg)
1082{
1083 int ret = 0;
1084
1085 if (_IOC_TYPE(cmd) != NVAVP_IOCTL_MAGIC ||
1086 _IOC_NR(cmd) < NVAVP_IOCTL_MIN_NR ||
1087 _IOC_NR(cmd) > NVAVP_IOCTL_MAX_NR)
1088 return -EFAULT;
1089
1090 switch (cmd) {
1091 case NVAVP_IOCTL_SET_NVMAP_FD:
1092 ret = nvavp_set_nvmapfd_ioctl(filp, cmd, arg);
1093 break;
1094 case NVAVP_IOCTL_GET_SYNCPOINT_ID:
1095 ret = nvavp_get_syncpointid_ioctl(filp, cmd, arg);
1096 break;
1097 case NVAVP_IOCTL_PUSH_BUFFER_SUBMIT:
1098 ret = nvavp_pushbuffer_submit_ioctl(filp, cmd, arg);
1099 break;
1100 case NVAVP_IOCTL_SET_CLOCK:
1101 ret = nvavp_set_clock_ioctl(filp, cmd, arg);
1102 break;
1103 case NVAVP_IOCTL_GET_CLOCK:
1104 ret = nvavp_get_clock_ioctl(filp, cmd, arg);
1105 break;
1106 default:
1107 ret = -EINVAL;
1108 break;
1109 }
1110 return ret;
1111}
1112
1113static const struct file_operations tegra_nvavp_fops = {
1114 .owner = THIS_MODULE,
1115 .open = tegra_nvavp_open,
1116 .release = tegra_nvavp_release,
1117 .unlocked_ioctl = tegra_nvavp_ioctl,
1118};
1119
1120static int tegra_nvavp_probe(struct nvhost_device *ndev)
1121{
1122 struct nvavp_info *nvavp;
1123 int irq;
1124 unsigned int heap_mask;
1125 u32 iovmm_addr;
1126 int ret = 0;
1127
1128 irq = nvhost_get_irq_byname(ndev, "mbox_from_nvavp_pending");
1129 if (irq < 0) {
1130 dev_err(&ndev->dev, "invalid nvhost data\n");
1131 return -EINVAL;
1132 }
1133
1134
1135 nvavp = kzalloc(sizeof(struct nvavp_info), GFP_KERNEL);
1136 if (!nvavp) {
1137 dev_err(&ndev->dev, "cannot allocate avp_info\n");
1138 return -ENOMEM;
1139 }
1140
1141 memset(nvavp, 0, sizeof(*nvavp));
1142
1143 nvavp->nvhost_syncpt = &nvhost_get_host(ndev)->syncpt;
1144 if (!nvavp->nvhost_syncpt) {
1145 dev_err(&ndev->dev, "cannot get syncpt handle\n");
1146 ret = -ENOENT;
1147 goto err_get_syncpt;
1148 }
1149
1150 nvavp->nvmap = nvmap_create_client(nvmap_dev, "nvavp_drv");
1151 if (IS_ERR_OR_NULL(nvavp->nvmap)) {
1152 dev_err(&ndev->dev, "cannot create nvmap client\n");
1153 ret = PTR_ERR(nvavp->nvmap);
1154 goto err_nvmap_create_drv_client;
1155 }
1156
1157#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
1158 heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC;
1159#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
1160 heap_mask = NVMAP_HEAP_IOVMM;
1161#else /* nvmem= carveout */
1162 heap_mask = 0;
1163#endif
1164 switch (heap_mask) {
1165 case NVMAP_HEAP_IOVMM:
1166
1167#ifdef CONFIG_TEGRA_SMMU_BASE_AT_E0000000
1168 iovmm_addr = 0xeff00000;
1169#else
1170 iovmm_addr = 0x0ff00000;
1171#endif
1172
1173 /* Tegra3 A01 has different SMMU address */
1174 if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3
1175 && tegra_get_revision() == TEGRA_REVISION_A01) {
1176 iovmm_addr = 0xeff00000;
1177 }
1178
1179 nvavp->os_info.handle = nvmap_alloc_iovm(nvavp->nvmap, SZ_1M,
1180 L1_CACHE_BYTES,
1181 NVMAP_HANDLE_UNCACHEABLE,
1182 iovmm_addr);
1183 if (IS_ERR_OR_NULL(nvavp->os_info.handle)) {
1184 dev_err(&ndev->dev,
1185 "cannot create os handle\n");
1186 ret = PTR_ERR(nvavp->os_info.handle);
1187 goto err_nvmap_alloc;
1188 }
1189
1190 nvavp->os_info.data = nvmap_mmap(nvavp->os_info.handle);
1191 if (!nvavp->os_info.data) {
1192 dev_err(&ndev->dev,
1193 "cannot map os handle\n");
1194 ret = -ENOMEM;
1195 goto err_nvmap_mmap;
1196 }
1197
1198 nvavp->os_info.phys =
1199 nvmap_pin(nvavp->nvmap, nvavp->os_info.handle);
1200 if (IS_ERR_OR_NULL((void *)nvavp->os_info.phys)) {
1201 dev_err(&ndev->dev,
1202 "cannot pin os handle\n");
1203 ret = PTR_ERR((void *)nvavp->os_info.phys);
1204 goto err_nvmap_pin;
1205 }
1206
1207 dev_info(&ndev->dev,
1208 "allocated IOVM at %lx for AVP os\n",
1209 (unsigned long)nvavp->os_info.phys);
1210 break;
1211 case NVMAP_HEAP_CARVEOUT_GENERIC:
1212 nvavp->os_info.handle = nvmap_alloc(nvavp->nvmap, SZ_1M, SZ_1M,
1213 NVMAP_HANDLE_UNCACHEABLE, 0);
1214 if (IS_ERR_OR_NULL(nvavp->os_info.handle)) {
1215 dev_err(&ndev->dev, "cannot create AVP os handle\n");
1216 ret = PTR_ERR(nvavp->os_info.handle);
1217 goto err_nvmap_alloc;
1218 }
1219
1220 nvavp->os_info.data = nvmap_mmap(nvavp->os_info.handle);
1221 if (!nvavp->os_info.data) {
1222 dev_err(&ndev->dev, "cannot map AVP os handle\n");
1223 ret = -ENOMEM;
1224 goto err_nvmap_mmap;
1225 }
1226
1227 nvavp->os_info.phys = nvmap_pin(nvavp->nvmap,
1228 nvavp->os_info.handle);
1229 if (IS_ERR_OR_NULL((void *)nvavp->os_info.phys)) {
1230 dev_err(&ndev->dev, "cannot pin AVP os handle\n");
1231 ret = PTR_ERR((void *)nvavp->os_info.phys);
1232 goto err_nvmap_pin;
1233 }
1234
1235 dev_info(&ndev->dev,
1236 "allocated carveout memory at %lx for AVP os\n",
1237 (unsigned long)nvavp->os_info.phys);
1238 break;
1239 default:
1240 dev_err(&ndev->dev, "invalid/non-supported heap for AVP os\n");
1241 ret = -EINVAL;
1242 goto err_get_syncpt;
1243 }
1244
1245 nvavp->mbox_from_avp_pend_irq = irq;
1246 mutex_init(&nvavp->open_lock);
1247 mutex_init(&nvavp->pushbuffer_lock);
1248
1249 /* TODO DO NOT USE NVAVP DEVICE */
1250 nvavp->cop_clk = clk_get(&ndev->dev, "cop");
1251 if (IS_ERR(nvavp->cop_clk)) {
1252 dev_err(&ndev->dev, "cannot get cop clock\n");
1253 ret = -ENOENT;
1254 goto err_get_cop_clk;
1255 }
1256
1257 nvavp->vde_clk = clk_get(&ndev->dev, "vde");
1258 if (IS_ERR(nvavp->vde_clk)) {
1259 dev_err(&ndev->dev, "cannot get vde clock\n");
1260 ret = -ENOENT;
1261 goto err_get_vde_clk;
1262 }
1263
1264 nvavp->bsev_clk = clk_get(&ndev->dev, "bsev");
1265 if (IS_ERR(nvavp->bsev_clk)) {
1266 dev_err(&ndev->dev, "cannot get bsev clock\n");
1267 ret = -ENOENT;
1268 goto err_get_bsev_clk;
1269 }
1270
1271 nvavp->sclk = clk_get(&ndev->dev, "sclk");
1272 if (IS_ERR(nvavp->sclk)) {
1273 dev_err(&ndev->dev, "cannot get avp.sclk clock\n");
1274 ret = -ENOENT;
1275 goto err_get_sclk;
1276 }
1277
1278 nvavp->emc_clk = clk_get(&ndev->dev, "emc");
1279 if (IS_ERR(nvavp->emc_clk)) {
1280 dev_err(&ndev->dev, "cannot get emc clock\n");
1281 ret = -ENOENT;
1282 goto err_get_emc_clk;
1283 }
1284
1285 nvavp->clk_enabled = 0;
1286 nvavp_halt_avp(nvavp);
1287
1288 INIT_WORK(&nvavp->clock_disable_work, clock_disable_handler);
1289
1290 nvavp->misc_dev.minor = MISC_DYNAMIC_MINOR;
1291 nvavp->misc_dev.name = "tegra_avpchannel";
1292 nvavp->misc_dev.fops = &tegra_nvavp_fops;
1293 nvavp->misc_dev.mode = S_IRWXUGO;
1294 nvavp->misc_dev.parent = &ndev->dev;
1295
1296 ret = misc_register(&nvavp->misc_dev);
1297 if (ret) {
1298 dev_err(&ndev->dev, "unable to register misc device!\n");
1299 goto err_misc_reg;
1300 }
1301
1302 ret = request_irq(irq, nvavp_mbox_pending_isr, 0,
1303 TEGRA_NVAVP_NAME, nvavp);
1304 if (ret) {
1305 dev_err(&ndev->dev, "cannot register irq handler\n");
1306 goto err_req_irq_pend;
1307 }
1308 disable_irq(nvavp->mbox_from_avp_pend_irq);
1309
1310 nvhost_set_drvdata(ndev, nvavp);
1311 nvavp->nvhost_dev = ndev;
1312
1313 return 0;
1314
1315err_req_irq_pend:
1316 misc_deregister(&nvavp->misc_dev);
1317err_misc_reg:
1318 clk_put(nvavp->emc_clk);
1319err_get_emc_clk:
1320 clk_put(nvavp->sclk);
1321err_get_sclk:
1322 clk_put(nvavp->bsev_clk);
1323err_get_bsev_clk:
1324 clk_put(nvavp->vde_clk);
1325err_get_vde_clk:
1326 clk_put(nvavp->cop_clk);
1327err_get_cop_clk:
1328 nvmap_unpin(nvavp->nvmap, nvavp->os_info.handle);
1329err_nvmap_pin:
1330 nvmap_munmap(nvavp->os_info.handle, nvavp->os_info.data);
1331err_nvmap_mmap:
1332#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
1333 nvmap_free(nvavp->nvmap, nvavp->os_info.handle);
1334#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU)
1335 nvmap_free_iovm(nvavp->nvmap, nvavp->os_info.handle);
1336#endif
1337err_nvmap_alloc:
1338 nvmap_client_put(nvavp->nvmap);
1339err_nvmap_create_drv_client:
1340err_get_syncpt:
1341 kfree(nvavp);
1342 return ret;
1343}
1344
1345static int tegra_nvavp_remove(struct nvhost_device *ndev)
1346{
1347 struct nvavp_info *nvavp = nvhost_get_drvdata(ndev);
1348
1349 if (!nvavp)
1350 return 0;
1351
1352 mutex_lock(&nvavp->open_lock);
1353 if (nvavp->refcount) {
1354 mutex_unlock(&nvavp->open_lock);
1355 return -EBUSY;
1356 }
1357 mutex_unlock(&nvavp->open_lock);
1358
1359 nvavp_unload_ucode(nvavp);
1360 nvavp_unload_os(nvavp);
1361
1362 misc_deregister(&nvavp->misc_dev);
1363
1364 clk_put(nvavp->bsev_clk);
1365 clk_put(nvavp->vde_clk);
1366 clk_put(nvavp->cop_clk);
1367
1368 clk_put(nvavp->emc_clk);
1369 clk_put(nvavp->sclk);
1370
1371 nvmap_client_put(nvavp->nvmap);
1372
1373 kfree(nvavp);
1374 return 0;
1375}
1376
1377#ifdef CONFIG_PM
1378static int tegra_nvavp_suspend(struct nvhost_device *ndev, pm_message_t state)
1379{
1380 struct nvavp_info *nvavp = nvhost_get_drvdata(ndev);
1381 int ret = 0;
1382
1383 mutex_lock(&nvavp->open_lock);
1384
1385 if (nvavp->refcount) {
1386 if (nvavp_check_idle(nvavp))
1387 nvavp_uninit(nvavp);
1388 else
1389 ret = -EBUSY;
1390 }
1391
1392 mutex_unlock(&nvavp->open_lock);
1393
1394 return ret;
1395}
1396
1397static int tegra_nvavp_resume(struct nvhost_device *ndev)
1398{
1399 struct nvavp_info *nvavp = nvhost_get_drvdata(ndev);
1400
1401 mutex_lock(&nvavp->open_lock);
1402
1403 if (nvavp->refcount)
1404 nvavp_init(nvavp);
1405
1406 mutex_unlock(&nvavp->open_lock);
1407
1408 return 0;
1409}
1410#endif
1411
1412static struct nvhost_driver tegra_nvavp_driver = {
1413 .driver = {
1414 .name = TEGRA_NVAVP_NAME,
1415 .owner = THIS_MODULE,
1416 },
1417 .probe = tegra_nvavp_probe,
1418 .remove = tegra_nvavp_remove,
1419#ifdef CONFIG_PM
1420 .suspend = tegra_nvavp_suspend,
1421 .resume = tegra_nvavp_resume,
1422#endif
1423};
1424
1425static int __init tegra_nvavp_init(void)
1426{
1427 return nvhost_driver_register(&tegra_nvavp_driver);
1428}
1429
1430static void __exit tegra_nvavp_exit(void)
1431{
1432 nvhost_driver_unregister(&tegra_nvavp_driver);
1433}
1434
1435module_init(tegra_nvavp_init);
1436module_exit(tegra_nvavp_exit);
1437
1438MODULE_AUTHOR("NVIDIA");
1439MODULE_DESCRIPTION("Channel based AVP driver for Tegra");
1440MODULE_VERSION("1.0");
1441MODULE_LICENSE("Dual BSD/GPL");
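
For context, a minimal user-space sketch (not part of the commit) of how a client could exercise the /dev/tegra_avpchannel node and the NVAVP_IOCTL_GET_SYNCPOINT_ID and NVAVP_IOCTL_GET_CLOCK paths handled in tegra_nvavp_ioctl() above. The ioctl numbers and argument layouts are assumed to come from the UAPI header linux/tegra_nvavp.h, which is not included in this diff; the snippet is illustrative only.

/* Hypothetical user-space example; assumes the UAPI definitions in
 * <linux/tegra_nvavp.h> (not shown in this diff). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tegra_nvavp.h>

int main(void)
{
	struct nvavp_clock_args clk = { .id = NVAVP_MODULE_ID_EMC };
	uint32_t syncpt_id = 0;
	int fd = open("/dev/tegra_avpchannel", O_RDWR);

	if (fd < 0)
		return 1;

	/* Handled by nvavp_get_syncpointid_ioctl(): returns the AVP syncpoint. */
	if (ioctl(fd, NVAVP_IOCTL_GET_SYNCPOINT_ID, &syncpt_id) == 0)
		printf("AVP syncpoint id: %u\n", syncpt_id);

	/* Handled by nvavp_get_clock_ioctl(): reads back the current EMC rate. */
	if (ioctl(fd, NVAVP_IOCTL_GET_CLOCK, &clk) == 0)
		printf("EMC clock rate: %u Hz\n", clk.rate);

	close(fd);
	return 0;
}
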
diff --git a/drivers/media/video/tegra/nvavp/nvavp_os.h b/drivers/media/video/tegra/nvavp/nvavp_os.h
new file mode 100644
index 00000000000..4d7f6776f11
--- /dev/null
+++ b/drivers/media/video/tegra/nvavp/nvavp_os.h
@@ -0,0 +1,103 @@
1/*
2 * drivers/media/video/tegra/nvavp/nvavp_os.h
3 *
4 * Copyright (C) 2011 NVIDIA Corp.
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 */
10
11#ifndef __MEDIA_VIDEO_TEGRA_NVAVP_OS_H
12#define __MEDIA_VIDEO_TEGRA_NVAVP_OS_H
13
14#include <linux/types.h>
15
16#include "../../../../video/tegra/nvmap/nvmap.h"
17
18#define NVE2_AVP (0x0000E276)
19
20struct nv_e276_control {
21 u32 reserved00[5];
22 u32 dma_start;
23 u32 reserved01[2];
24 u32 dma_end;
25 u32 reserved02[7];
26 u32 put;
27 u32 reserved03[15];
28 u32 get;
29 u32 reserved04[10];
30 u32 watchdog_timeout;
31 u32 idle_notify_enable;
32 u32 idle_notify_delay;
33 u32 idle_clk_enable;
34 u32 iram_clk_gating;
35 u32 idle;
36 u32 outbox_data;
37 u32 app_intr_enable;
38 u32 app_start_time;
39 u32 app_in_iram;
40 u32 iram_ucode_addr;
41 u32 iram_ucode_size;
42 u32 dbg_state[57];
43 u32 os_method_data[16];
44 u32 app_method_data[128];
45};
46
47#define NVE26E_HOST1X_INCR_SYNCPT (0x00000000)
48#define NVE26E_HOST1X_INCR_SYNCPT_COND_OP_DONE (0x00000001)
49
50#define NVE26E_CH_OPCODE_INCR(Addr, Count) \
51 /* op, addr, count */ \
52 ((1UL << 28) | ((Addr) << 16) | (Count))
53
54#define NVE26E_CH_OPCODE_IMM(addr, value) \
55 /* op, addr, count */ \
56 ((4UL << 28) | ((addr) << 16) | (value))
57
58#define NVE26E_CH_OPCODE_GATHER(off, ins, type, cnt) \
59 /* op, offset, insert, type, count */ \
60 ((6UL << 28) | ((off) << 16) | ((ins) << 15) | ((type) << 14) | cnt)
61
62/* AVP OS methods */
63#define NVE276_NOP (0x00000080)
64#define NVE276_SET_APP_TIMEOUT (0x00000084)
65#define NVE276_SET_MICROCODE_A (0x00000085)
66#define NVE276_SET_MICROCODE_B (0x00000086)
67#define NVE276_SET_MICROCODE_C (0x00000087)
68
69/* Interrupt codes through inbox/outbox data codes (cpu->avp or avp->cpu) */
70#define NVE276_OS_INTERRUPT_NOP (0x00000000) /* wake up avp */
71#define NVE276_OS_INTERRUPT_TIMEOUT (0x00000001)
72#define NVE276_OS_INTERRUPT_SEMAPHORE_AWAKEN (0x00000002)
73#define NVE276_OS_INTERRUPT_EXECUTE_AWAKEN (0x00000004)
74#define NVE276_OS_INTERRUPT_DEBUG_STRING (0x00000008)
75#define NVE276_OS_INTERRUPT_DH_KEYEXCHANGE (0x00000010)
76#define NVE276_OS_INTERRUPT_APP_NOTIFY (0x00000020)
77#define NVE276_OS_INTERRUPT_VIDEO_IDLE (0x00000040)
78#define NVE276_OS_INTERRUPT_AUDIO_IDLE (0x00000080)
79#define NVE276_OS_INTERRUPT_AVP_BREAKPOINT (0x00800000)
80#define NVE276_OS_INTERRUPT_AVP_FATAL_ERROR (0x01000000)
81
82struct nvavp_os_info {
83 u32 entry_offset;
84 u32 control_offset;
85 u32 debug_offset;
86
87 struct nvmap_handle_ref *handle;
88 void *data;
89 u32 size;
90 phys_addr_t phys;
91 void *os_bin;
92 phys_addr_t reset_addr;
93};
94
95struct nvavp_ucode_info {
96 struct nvmap_handle_ref *handle;
97 void *data;
98 u32 size;
99 phys_addr_t phys;
100 void *ucode_bin;
101};
102
103#endif /* __MEDIA_VIDEO_TEGRA_NVAVP_OS_H */
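
For reference, a small stand-alone sketch (not part of the commit) of how the NVE26E channel opcodes defined above encode the three opcode words that nvavp_pushbuffer_update() emits: the SET_MICROCODE_A setup, the GATHER of the client command buffer, and the host1x syncpoint increment. The constants are copied from this header for a host-side check; the syncpoint id is an arbitrary example value, not the real NVSYNCPT_AVP_0.

/* Stand-alone illustration of the NVE26E channel opcode encodings defined
 * in nvavp_os.h; constants are duplicated here so the sketch builds on a
 * host machine. */
#include <stdint.h>
#include <stdio.h>

#define CH_OPCODE_INCR(addr, count)	((1UL << 28) | ((addr) << 16) | (count))
#define CH_OPCODE_IMM(addr, value)	((4UL << 28) | ((addr) << 16) | (value))
#define CH_OPCODE_GATHER(off, ins, type, cnt) \
	((6UL << 28) | ((off) << 16) | ((ins) << 15) | ((type) << 14) | (cnt))

#define SET_MICROCODE_A		0x85	/* NVE276_SET_MICROCODE_A */
#define HOST1X_INCR_SYNCPT	0x00	/* NVE26E_HOST1X_INCR_SYNCPT */
#define COND_OP_DONE		0x01	/* NVE26E_HOST1X_INCR_SYNCPT_COND_OP_DONE */

int main(void)
{
	uint32_t syncpt_id = 10;	/* example value only */

	/* Same three commands nvavp_pushbuffer_update() writes, in order:
	 * ucode setup (followed by 3 data words), gather of the command
	 * buffer (followed by its address), and syncpoint increment. */
	printf("set ucode: 0x%08lx\n", CH_OPCODE_INCR(SET_MICROCODE_A, 3));
	printf("gather:    0x%08lx\n", CH_OPCODE_GATHER(0, 0, 0, 16));
	printf("syncpt:    0x%08lx\n",
	       CH_OPCODE_IMM(HOST1X_INCR_SYNCPT,
			     (COND_OP_DONE << 8) | (syncpt_id & 0xFF)));
	return 0;
}
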