aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/media/video/tegra/avp
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
committerJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
commitfcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
treea57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/media/video/tegra/avp
parent8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files.HEADmaster
Diffstat (limited to 'drivers/media/video/tegra/avp')
-rw-r--r--drivers/media/video/tegra/avp/Kconfig25
-rw-r--r--drivers/media/video/tegra/avp/Makefile7
-rw-r--r--drivers/media/video/tegra/avp/avp.c1949
-rw-r--r--drivers/media/video/tegra/avp/avp.h32
-rw-r--r--drivers/media/video/tegra/avp/avp_msg.h358
-rw-r--r--drivers/media/video/tegra/avp/avp_svc.c890
-rw-r--r--drivers/media/video/tegra/avp/headavp.S68
-rw-r--r--drivers/media/video/tegra/avp/headavp.h41
-rw-r--r--drivers/media/video/tegra/avp/nvavp.h53
-rw-r--r--drivers/media/video/tegra/avp/tegra_rpc.c796
-rw-r--r--drivers/media/video/tegra/avp/trpc.h80
-rw-r--r--drivers/media/video/tegra/avp/trpc_local.c419
-rw-r--r--drivers/media/video/tegra/avp/trpc_sema.c244
-rw-r--r--drivers/media/video/tegra/avp/trpc_sema.h30
14 files changed, 4992 insertions, 0 deletions
diff --git a/drivers/media/video/tegra/avp/Kconfig b/drivers/media/video/tegra/avp/Kconfig
new file mode 100644
index 00000000000..fdd208510fc
--- /dev/null
+++ b/drivers/media/video/tegra/avp/Kconfig
@@ -0,0 +1,25 @@
1config TEGRA_RPC
2 bool "Enable support for Tegra RPC"
3 depends on ARCH_TEGRA
4 default y
5 help
6 Enables support for the RPC mechanism necessary for the Tegra
7 multimedia framework. It is both used to communicate locally on the
8 CPU between multiple multimedia components as well as to communicate
9 with the AVP for offloading media decode.
10
11 Exports the local tegra RPC interface on device node
12 /dev/tegra_rpc. Also provides tegra fd based semaphores needed by
13 the tegra multimedia framework.
14
15 If unsure, say Y
16
17config TEGRA_AVP
18 bool "Enable support for the AVP multimedia offload engine"
19 depends on ARCH_TEGRA && TEGRA_RPC
20 default y
21 help
22 Enables support for the multimedia offload engine used by Tegra
23 multimedia framework.
24
25 If unsure, say Y
diff --git a/drivers/media/video/tegra/avp/Makefile b/drivers/media/video/tegra/avp/Makefile
new file mode 100644
index 00000000000..148265648a4
--- /dev/null
+++ b/drivers/media/video/tegra/avp/Makefile
@@ -0,0 +1,7 @@
1GCOV_PROFILE := y
2obj-$(CONFIG_TEGRA_RPC) += tegra_rpc.o
3obj-$(CONFIG_TEGRA_RPC) += trpc_local.o
4obj-$(CONFIG_TEGRA_RPC) += trpc_sema.o
5obj-$(CONFIG_TEGRA_AVP) += avp.o
6obj-$(CONFIG_TEGRA_AVP) += avp_svc.o
7obj-$(CONFIG_TEGRA_AVP) += headavp.o
diff --git a/drivers/media/video/tegra/avp/avp.c b/drivers/media/video/tegra/avp/avp.c
new file mode 100644
index 00000000000..074a42f125b
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp.c
@@ -0,0 +1,1949 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 * Author: Dima Zavin <dima@android.com>
4 *
5 * Copyright (C) 2010-2012 NVIDIA Corporation
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/clk.h>
19#include <linux/completion.h>
20#include <linux/delay.h>
21#include <linux/dma-mapping.h>
22#include <linux/err.h>
23#include <linux/firmware.h>
24#include <linux/fs.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/ioctl.h>
28#include <linux/irq.h>
29#include <linux/kref.h>
30#include <linux/list.h>
31#include <linux/miscdevice.h>
32#include <linux/mutex.h>
33#include <linux/platform_device.h>
34#include <linux/rbtree.h>
35#include <linux/seq_file.h>
36#include <linux/slab.h>
37#include <linux/tegra_rpc.h>
38#include <linux/types.h>
39#include <linux/uaccess.h>
40#include <linux/workqueue.h>
41
42#include <mach/clk.h>
43#include <mach/io.h>
44#include <mach/iomap.h>
45#include <mach/nvmap.h>
46#include <mach/legacy_irq.h>
47#include <mach/hardware.h>
48
49#include "../../../../video/tegra/nvmap/nvmap.h"
50
51#include "headavp.h"
52#include "avp_msg.h"
53#include "trpc.h"
54#include "avp.h"
55#include "nvavp.h"
56
/* Bit flags for avp_debug_mask: each bit independently enables one class
 * of DBG() trace output (see the DBG macro below). */
enum {
	AVP_DBG_TRACE_XPC	= 1U << 0,
	AVP_DBG_TRACE_XPC_IRQ	= 1U << 1,
	AVP_DBG_TRACE_XPC_MSG	= 1U << 2,
	AVP_DBG_TRACE_XPC_CONN	= 1U << 3,
	AVP_DBG_TRACE_TRPC_MSG	= 1U << 4,
	AVP_DBG_TRACE_TRPC_CONN = 1U << 5,
	AVP_DBG_TRACE_LIB	= 1U << 6,
};
66
/* Default-enabled trace categories; writable at runtime through the
 * "debug_mask" module parameter (root-writable, world-readable). */
static u32 avp_debug_mask =
	AVP_DBG_TRACE_XPC |
	/* AVP_DBG_TRACE_XPC_IRQ | */
	/* AVP_DBG_TRACE_XPC_MSG | */
	/* AVP_DBG_TRACE_TRPC_MSG | */
	AVP_DBG_TRACE_XPC_CONN |
	AVP_DBG_TRACE_TRPC_CONN |
	AVP_DBG_TRACE_LIB;

module_param_named(debug_mask, avp_debug_mask, uint, S_IWUSR | S_IRUGO);
77
/* Conditional trace helper: emits pr_info() only when the given trace bit
 * is set in avp_debug_mask. */
#define DBG(flag, args...) \
	do { if (unlikely(avp_debug_mask & (flag))) pr_info(args); } while (0)

#define TEGRA_AVP_NAME "tegra-avp"

/* COP/AVP reset vector lives in the exception-vectors aperture. */
#define TEGRA_AVP_RESET_VECTOR_ADDR \
	(IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x200)

#define TEGRA_AVP_RESUME_ADDR IO_ADDRESS(TEGRA_IRAM_BASE + \
					TEGRA_RESET_HANDLER_SIZE)

/* Flow-controller register used to halt/release the COP (AVP) core. */
#define FLOW_CTRL_HALT_COP_EVENTS IO_ADDRESS(TEGRA_FLOW_CTRL_BASE + 0x4)
#define FLOW_MODE_STOP (0x2 << 29)
#define FLOW_MODE_NONE 0x0

/* Hardware mailbox registers used for CPU<->AVP doorbells. */
#define MBOX_FROM_AVP IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x10)
#define MBOX_TO_AVP IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x20)

/* Layout of the mailbox registers:
 * bit 31 - pending message interrupt enable (mailbox full, i.e. valid=1)
 * bit 30 - message cleared interrupt enable (mailbox empty, i.e. valid=0)
 * bit 29 - message valid. peer clears this bit after reading msg
 * bits 27:0 - message data
 */
#define MBOX_MSG_PENDING_INT_EN (1 << 31)
#define MBOX_MSG_READ_INT_EN (1 << 30)
#define MBOX_MSG_VALID (1 << 29)

/* Shared message area: fixed command header followed by RPC payload. */
#define AVP_MSG_MAX_CMD_LEN 16
#define AVP_MSG_AREA_SIZE (AVP_MSG_MAX_CMD_LEN + TEGRA_RPC_MAX_MSG_LEN)
108
/* Driver-wide state for the AVP offload engine (one instance: tegra_avp). */
struct tegra_avp_info {
	struct clk *cop_clk;		/* COP (AVP) peripheral clock, used for reset */

	int mbox_from_avp_pend_irq;	/* IRQ raised when AVP posts a message */

	/* shared CPU<->AVP message area; msg is the mailbox token for it */
	dma_addr_t msg_area_addr;
	u32 msg;
	void *msg_to_avp;		/* outbound half of the shared area */
	void *msg_from_avp;		/* inbound half of the shared area */
	struct mutex to_avp_lock;	/* serializes writes to msg_to_avp */
	struct mutex from_avp_lock;	/* serializes processing of inbound msgs */

	/* deferred (non-IRQ) processing of inbound messages */
	struct work_struct recv_work;
	struct workqueue_struct *recv_wq;

	struct trpc_node *rpc_node;	/* our node in the tegra RPC fabric */
	struct miscdevice misc_dev;	/* char device for userspace access */
	int refcount;			/* open count, guarded by open_lock */
	struct mutex open_lock;

	spinlock_t state_lock;		/* guards the flags below and endpoints */
	bool initialized;
	bool shutdown;
	bool suspending;
	bool defer_remote;		/* force ISR to defer msgs to recv_work */

	struct mutex libs_lock;		/* guards libs list */
	struct list_head libs;		/* loaded AVP libraries (struct lib_item) */
	struct nvmap_client *nvmap_libs;

	/* client for driver allocations, persistent */
	struct nvmap_client *nvmap_drv;
	struct nvmap_handle_ref *kernel_handle;
	void *kernel_data;		/* CPU mapping of the AVP kernel image */
	phys_addr_t kernel_phys;	/* physical/bus address of that image */

	struct nvmap_handle_ref *iram_backup_handle;
	void *iram_backup_data;
	phys_addr_t iram_backup_phys;
	unsigned long resume_addr;
	unsigned long reset_addr;	/* address the AVP jumps to out of reset */

	struct trpc_endpoint *avp_ep;	/* control connection to the AVP */
	struct rb_root endpoints;	/* remote_info tree keyed by loc_id */

	struct avp_svc_info *avp_svc;
};
156
/* Per-connection bookkeeping for a port that peers with the AVP.
 * Refcounted: the endpoints rb-tree and the trpc endpoint each hold a
 * reference (see remote_close()). */
struct remote_info {
	u32 loc_id;			/* our id for this port (sent to the AVP) */
	u32 rem_id;			/* AVP's id for its end of the port */
	struct kref ref;

	struct trpc_endpoint *trpc_ep;
	struct rb_node rb_node;		/* linkage in avp->endpoints, key loc_id */
};
165
/* One library loaded onto the AVP; linked on avp->libs (libs_lock held). */
struct lib_item {
	struct list_head list;
	u32 handle;			/* AVP-side handle used to unload it */
	char name[TEGRA_AVP_LIB_MAX_NAME];
};
171
/* Singleton driver state; set up at probe time. */
static struct tegra_avp_info *tegra_avp;

static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len);
static void avp_trpc_close(struct trpc_endpoint *ep);
static void avp_trpc_show(struct seq_file *s, struct trpc_endpoint *ep);
static void libs_cleanup(struct tegra_avp_info *avp);

/* trpc callbacks installed on every endpoint that peers with the AVP. */
static struct trpc_ep_ops remote_ep_ops = {
	.send	= avp_trpc_send,
	.close	= avp_trpc_close,
	.show	= avp_trpc_show,
};
184
185static struct remote_info *rinfo_alloc(struct tegra_avp_info *avp)
186{
187 struct remote_info *rinfo;
188
189 rinfo = kzalloc(sizeof(struct remote_info), GFP_KERNEL);
190 if (!rinfo)
191 return NULL;
192 kref_init(&rinfo->ref);
193 return rinfo;
194}
195
196static void _rinfo_release(struct kref *ref)
197{
198 struct remote_info *rinfo = container_of(ref, struct remote_info, ref);
199 kfree(rinfo);
200}
201
/* Take an additional reference on @rinfo. */
static inline void rinfo_get(struct remote_info *rinfo)
{
	kref_get(&rinfo->ref);
}
206
/* Drop a reference on @rinfo; frees it when the count reaches zero. */
static inline void rinfo_put(struct remote_info *rinfo)
{
	kref_put(&rinfo->ref, _rinfo_release);
}
211
/* Insert @rinfo into avp->endpoints (rb-tree keyed by loc_id) and take a
 * reference on it for the tree.  Returns -EEXIST if an endpoint with the
 * same local id is already present.  Callers hold avp->state_lock. */
static int remote_insert(struct tegra_avp_info *avp, struct remote_info *rinfo)
{
	struct rb_node **p;
	struct rb_node *parent;
	struct remote_info *tmp;

	p = &avp->endpoints.rb_node;
	parent = NULL;
	/* standard rbtree descent to find the insertion point */
	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct remote_info, rb_node);

		if (rinfo->loc_id < tmp->loc_id)
			p = &(*p)->rb_left;
		else if (rinfo->loc_id > tmp->loc_id)
			p = &(*p)->rb_right;
		else {
			pr_info("%s: avp endpoint id=%x (%s) already exists\n",
				__func__, rinfo->loc_id,
				trpc_name(rinfo->trpc_ep));
			return -EEXIST;
		}
	}
	rb_link_node(&rinfo->rb_node, parent, p);
	rb_insert_color(&rinfo->rb_node, &avp->endpoints);
	/* the tree now holds its own reference */
	rinfo_get(rinfo);
	return 0;
}
240
241static struct remote_info *remote_find(struct tegra_avp_info *avp, u32 local_id)
242{
243 struct rb_node *n = avp->endpoints.rb_node;
244 struct remote_info *rinfo;
245
246 while (n) {
247 rinfo = rb_entry(n, struct remote_info, rb_node);
248
249 if (local_id < rinfo->loc_id)
250 n = n->rb_left;
251 else if (local_id > rinfo->loc_id)
252 n = n->rb_right;
253 else
254 return rinfo;
255 }
256 return NULL;
257}
258
/* Unlink @rinfo from avp->endpoints and drop the tree's reference.
 * Callers hold avp->state_lock. */
static void remote_remove(struct tegra_avp_info *avp, struct remote_info *rinfo)
{
	rb_erase(&rinfo->rb_node, &avp->endpoints);
	rinfo_put(rinfo);
}
264
265/* test whether or not the trpc endpoint provided is a valid AVP node
266 * endpoint */
267static struct remote_info *validate_trpc_ep(struct tegra_avp_info *avp,
268 struct trpc_endpoint *ep)
269{
270 struct remote_info *tmp = trpc_priv(ep);
271 struct remote_info *rinfo;
272
273 if (!tmp)
274 return NULL;
275 rinfo = remote_find(avp, tmp->loc_id);
276 if (rinfo && rinfo == tmp && rinfo->trpc_ep == ep)
277 return rinfo;
278 return NULL;
279}
280
/* trpc show hook: print the local/remote port ids of @ep into @s, or
 * "<unknown>" if the endpoint is no longer registered with this node. */
static void avp_trpc_show(struct seq_file *s, struct trpc_endpoint *ep)
{
	struct tegra_avp_info *avp = tegra_avp;
	struct remote_info *rinfo;
	unsigned long flags;

	spin_lock_irqsave(&avp->state_lock, flags);
	rinfo = validate_trpc_ep(avp, ep);
	if (!rinfo) {
		seq_printf(s, "    <unknown>\n");
		goto out;
	}
	seq_printf(s, "    loc_id:0x%x\n    rem_id:0x%x\n",
		   rinfo->loc_id, rinfo->rem_id);
out:
	spin_unlock_irqrestore(&avp->state_lock, flags);
}
298
/* Thin wrapper around writel() for the mailbox doorbell registers. */
static inline void mbox_writel(u32 val, void __iomem *mbox)
{
	writel(val, mbox);
}
303
/* Thin wrapper around readl() for the mailbox doorbell registers. */
static inline u32 mbox_readl(void __iomem *mbox)
{
	return readl(mbox);
}
308
/* Post an ack (cmd + arg) into the shared "from AVP" message area so the
 * AVP sees its last inbound message was consumed.  The arg word must be
 * globally visible before the cmd word, hence the barrier between them. */
static inline void msg_ack_remote(struct tegra_avp_info *avp, u32 cmd, u32 arg)
{
	struct msg_ack *ack = avp->msg_from_avp;

	/* must make sure the arg is there first */
	ack->arg = arg;
	wmb();
	ack->cmd = cmd;
	wmb();
}
319
/* Read the command word of the message currently in the inbound shared
 * area.  The read barrier orders it against any later payload reads. */
static inline u32 msg_recv_get_cmd(struct tegra_avp_info *avp)
{
	volatile u32 *cmd = avp->msg_from_avp;
	rmb();
	return *cmd;
}
326
/* Copy @hdr (and an optional payload) into the outbound shared area and
 * ring the "to AVP" mailbox with avp->msg (the token for that area).
 * Callers must have verified the previous message was already acked
 * (see msg_write()).  Callers hold avp->to_avp_lock. */
static inline int __msg_write(struct tegra_avp_info *avp, void *hdr,
			      size_t hdr_len, void *buf, size_t len)
{
	memcpy(avp->msg_to_avp, hdr, hdr_len);
	if (buf && len)
		memcpy(avp->msg_to_avp + hdr_len, buf, len);
	mbox_writel(avp->msg, MBOX_TO_AVP);
	return 0;
}
336
/* Send a message to the AVP, first waiting up to one second for the AVP
 * to ack (clear) the previously sent message.  Returns -ETIMEDOUT if the
 * previous message is never acked, 0 otherwise.  Callers hold
 * avp->to_avp_lock. */
static inline int msg_write(struct tegra_avp_info *avp, void *hdr,
			    size_t hdr_len, void *buf, size_t len)
{
	/* rem_ack is a pointer into shared memory that the AVP modifies */
	volatile u32 *rem_ack = avp->msg_to_avp;
	unsigned long endtime = jiffies + HZ;

	/* the other side ack's the message by clearing the first word,
	 * wait for it to do so */
	rmb();
	while (*rem_ack != 0 && time_before(jiffies, endtime)) {
		usleep_range(100, 2000);
		rmb();
	}
	if (*rem_ack != 0)
		return -ETIMEDOUT;
	__msg_write(avp, hdr, hdr_len, buf, len);
	return 0;
}
356
/* Non-blocking check whether the AVP has written an ack for @cmd into the
 * outbound shared area.  On success, optionally returns the ack argument
 * through @arg; returns -ENOENT if the ack is not (yet) there.  The ack
 * struct is copied out whole to avoid a torn read of cmd vs arg. */
static inline int msg_check_ack(struct tegra_avp_info *avp, u32 cmd, u32 *arg)
{
	struct msg_ack ack;

	rmb();
	memcpy(&ack, avp->msg_to_avp, sizeof(ack));
	if (ack.cmd != cmd)
		return -ENOENT;
	if (arg)
		*arg = ack.arg;
	return 0;
}
369
370/* XXX: add timeout */
371static int msg_wait_ack_locked(struct tegra_avp_info *avp, u32 cmd, u32 *arg)
372{
373 /* rem_ack is a pointer into shared memory that the AVP modifies */
374 volatile u32 *rem_ack = avp->msg_to_avp;
375 unsigned long endtime = jiffies + msecs_to_jiffies(400);
376 int ret;
377
378 do {
379 ret = msg_check_ack(avp, cmd, arg);
380 usleep_range(1000, 5000);
381 } while (ret && time_before(jiffies, endtime));
382
383 /* if we timed out, try one more time */
384 if (ret)
385 ret = msg_check_ack(avp, cmd, arg);
386
387 /* clear out the ack */
388 *rem_ack = 0;
389 wmb();
390 return ret;
391}
392
393static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len)
394{
395 struct tegra_avp_info *avp = tegra_avp;
396 struct remote_info *rinfo;
397 struct msg_port_data msg;
398 int ret;
399 unsigned long flags;
400
401 DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: ep=%p priv=%p buf=%p len=%d\n",
402 __func__, ep, trpc_priv(ep), buf, len);
403
404 spin_lock_irqsave(&avp->state_lock, flags);
405 if (unlikely(avp->suspending && trpc_peer(ep) != avp->avp_ep)) {
406 ret = -EBUSY;
407 goto err_state_locked;
408 } else if (avp->shutdown) {
409 ret = -ENODEV;
410 goto err_state_locked;
411 }
412 rinfo = validate_trpc_ep(avp, ep);
413 if (!rinfo) {
414 ret = -ENOTTY;
415 goto err_state_locked;
416 }
417 rinfo_get(rinfo);
418 spin_unlock_irqrestore(&avp->state_lock, flags);
419
420 msg.cmd = CMD_MESSAGE;
421 msg.port_id = rinfo->rem_id;
422 msg.msg_len = len;
423
424 mutex_lock(&avp->to_avp_lock);
425 ret = msg_write(avp, &msg, sizeof(msg), buf, len);
426 mutex_unlock(&avp->to_avp_lock);
427
428 DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: msg sent for %s (%x->%x) (%d)\n",
429 __func__, trpc_name(ep), rinfo->loc_id, rinfo->rem_id, ret);
430 rinfo_put(rinfo);
431 return ret;
432
433err_state_locked:
434 spin_unlock_irqrestore(&avp->state_lock, flags);
435 return ret;
436}
437
/* Tell the AVP to tear down its end of the port identified by the AVP-side
 * @port_id, and wait for it to ack the disconnect.  Returns 0 on success
 * or a negative error if the mailbox write or the ack times out. */
static int _send_disconnect(struct tegra_avp_info *avp, u32 port_id)
{
	struct msg_disconnect msg;
	int ret;

	msg.cmd = CMD_DISCONNECT;
	msg.port_id = port_id;

	mutex_lock(&avp->to_avp_lock);
	ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
	if (ret) {
		pr_err("%s: remote has not acked last message (%x)\n", __func__,
		       port_id);
		goto err_msg_write;
	}

	ret = msg_wait_ack_locked(avp, CMD_ACK, NULL);
	if (ret) {
		pr_err("%s: remote end won't respond for %x\n", __func__,
		       port_id);
		goto err_wait_ack;
	}

	DBG(AVP_DBG_TRACE_XPC_CONN, "%s: sent disconnect msg for %x\n",
	    __func__, port_id);

err_wait_ack:
err_msg_write:
	mutex_unlock(&avp->to_avp_lock);
	return ret;
}
469
/* Note: Assumes that the rinfo was previously successfully added to the
 * endpoints rb_tree. The initial refcnt of 1 is inherited by the port when the
 * trpc endpoint is created with the trpc_xxx functions. Thus, on close,
 * we must drop that reference here.
 * The avp->endpoints rb_tree keeps its own reference on rinfo objects.
 *
 * The try_connect function does not use this on error because it needs to
 * split the close of trpc_ep port and the put.
 */
static inline void remote_close(struct remote_info *rinfo)
{
	trpc_close(rinfo->trpc_ep);
	rinfo_put(rinfo);
}
484
/* trpc close hook for AVP-backed endpoints: unlink the endpoint from
 * avp->endpoints, send a disconnect to the AVP, and drop both the tree's
 * reference (via remote_remove) and the port's initial reference (via
 * remote_close).  The temporary rinfo_get/rinfo_put pair keeps rinfo
 * alive across the sleeping disconnect exchange. */
static void avp_trpc_close(struct trpc_endpoint *ep)
{
	struct tegra_avp_info *avp = tegra_avp;
	struct remote_info *rinfo;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&avp->state_lock, flags);
	if (avp->shutdown) {
		/* after shutdown the AVP is gone; nothing to tell it */
		spin_unlock_irqrestore(&avp->state_lock, flags);
		return;
	}

	rinfo = validate_trpc_ep(avp, ep);
	if (!rinfo) {
		pr_err("%s: tried to close invalid port '%s' endpoint (%p)\n",
		       __func__, trpc_name(ep), ep);
		spin_unlock_irqrestore(&avp->state_lock, flags);
		return;
	}
	rinfo_get(rinfo);
	remote_remove(avp, rinfo);
	spin_unlock_irqrestore(&avp->state_lock, flags);

	DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: closing '%s' (%x)\n", __func__,
	    trpc_name(ep), rinfo->rem_id);

	ret = _send_disconnect(avp, rinfo->rem_id);
	if (ret)
		pr_err("%s: error while closing remote port '%s' (%x)\n",
		       __func__, trpc_name(ep), rinfo->rem_id);
	remote_close(rinfo);
	rinfo_put(rinfo);
}
519
/* takes and holds avp->from_avp_lock */
/* Additionally sets defer_remote so the mailbox ISR queues inbound
 * messages to the workqueue instead of processing them in IRQ context;
 * the work function then serializes on from_avp_lock. */
static void recv_msg_lock(struct tegra_avp_info *avp)
{
	unsigned long flags;

	mutex_lock(&avp->from_avp_lock);
	spin_lock_irqsave(&avp->state_lock, flags);
	avp->defer_remote = true;
	spin_unlock_irqrestore(&avp->state_lock, flags);
}
530
/* MUST be called with avp->from_avp_lock held */
/* Re-enables in-IRQ processing of inbound messages and releases the
 * lock taken by recv_msg_lock(). */
static void recv_msg_unlock(struct tegra_avp_info *avp)
{
	unsigned long flags;

	spin_lock_irqsave(&avp->state_lock, flags);
	avp->defer_remote = false;
	spin_unlock_irqrestore(&avp->state_lock, flags);
	mutex_unlock(&avp->from_avp_lock);
}
541
/* trpc try_connect hook for the AVP node: ask the AVP to open a port named
 * after @from, retrying the CONNECT command up to six times, then create
 * the local peer endpoint and register it in avp->endpoints.
 *
 * Returns 0 on success; -ENODEV if this isn't our node or the AVP isn't
 * initialized, -EINVAL for a bad port name or peer-creation failure,
 * -EBUSY while suspending, -ECONNREFUSED if the AVP nacks, -ENOMEM on
 * allocation failure, or a mailbox error. */
static int avp_node_try_connect(struct trpc_node *node,
				struct trpc_node *src_node,
				struct trpc_endpoint *from)
{
	struct tegra_avp_info *avp = tegra_avp;
	const char *port_name = trpc_name(from);
	struct remote_info *rinfo;
	struct msg_connect msg;
	int ret;
	unsigned long flags;
	int len;
	const int max_retry_cnt = 6;
	int cnt = 0;

	DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: trying connect from %s\n", __func__,
	    port_name);

	if (node != avp->rpc_node || node->priv != avp)
		return -ENODEV;

	len = strlen(port_name);
	if (len > XPC_PORT_NAME_LEN) {
		pr_err("%s: port name (%s) too long\n", __func__, port_name);
		return -EINVAL;
	}

	ret = 0;
	spin_lock_irqsave(&avp->state_lock, flags);
	if (avp->suspending) {
		ret = -EBUSY;
	} else if (likely(src_node != avp->rpc_node)) {
		/* only check for initialized when the source is not ourselves
		 * since we'll end up calling into here during initialization */
		if (!avp->initialized)
			ret = -ENODEV;
	} else if (strncmp(port_name, "RPC_AVP_PORT", XPC_PORT_NAME_LEN)) {
		/* we only allow connections to ourselves for the cpu-to-avp
		   port */
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&avp->state_lock, flags);
	if (ret)
		return ret;

	rinfo = rinfo_alloc(avp);
	if (!rinfo) {
		pr_err("%s: cannot alloc mem for rinfo\n", __func__);
		ret = -ENOMEM;
		goto err_alloc_rinfo;
	}
	/* the local port id is just the rinfo's address (32-bit platform) */
	rinfo->loc_id = (u32)rinfo;

	msg.cmd = CMD_CONNECT;
	msg.port_id = rinfo->loc_id;
	memcpy(msg.name, port_name, len);
	memset(msg.name + len, 0, XPC_PORT_NAME_LEN - len);

	/* when trying to connect to remote, we need to block remote
	 * messages until we get our ack and can insert it into our lists.
	 * Otherwise, we can get a message from the other side for a port
	 * that we haven't finished setting up.
	 *
	 * 'defer_remote' will force the irq handler to not process messages
	 * at irq context but to schedule work to do so. The work function will
	 * take the from_avp_lock and everything should stay consistent.
	 */
	recv_msg_lock(avp);
	for (cnt = 0; cnt < max_retry_cnt; cnt++) {
		/* Retry to connect to AVP at this function maximum 6 times.
		 * Because this section is protected by mutex and
		 * needed to re-send the CMD_CONNECT command by CPU
		 * if AVP didn't receive the command.
		 */
		mutex_lock(&avp->to_avp_lock);
		ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
		if (ret) {
			pr_err("%s: remote has not acked last message (%s)\n",
			       __func__, port_name);
			mutex_unlock(&avp->to_avp_lock);
			goto err_msg_write;
		}
		ret = msg_wait_ack_locked(avp, CMD_RESPONSE, &rinfo->rem_id);
		mutex_unlock(&avp->to_avp_lock);
		if (!ret && rinfo->rem_id)
			break;

		/* Skip the sleep function at last retry count */
		if ((cnt + 1) < max_retry_cnt)
			usleep_range(100, 2000);
	}

	if (ret) {
		pr_err("%s: remote end won't respond for '%s'\n", __func__,
		       port_name);
		goto err_wait_ack;
	}
	if (!rinfo->rem_id) {
		/* rem_id == 0 is the AVP's way of refusing the connection */
		pr_err("%s: can't connect to '%s'\n", __func__, port_name);
		ret = -ECONNREFUSED;
		goto err_nack;
	}

	DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: got conn ack '%s' (%x <-> %x)\n",
	    __func__, port_name, rinfo->loc_id, rinfo->rem_id);

	rinfo->trpc_ep = trpc_create_peer(node, from, &remote_ep_ops,
					  rinfo);
	if (!rinfo->trpc_ep) {
		pr_err("%s: cannot create peer for %s\n", __func__, port_name);
		ret = -EINVAL;
		goto err_create_peer;
	}

	spin_lock_irqsave(&avp->state_lock, flags);
	ret = remote_insert(avp, rinfo);
	spin_unlock_irqrestore(&avp->state_lock, flags);
	if (ret)
		goto err_ep_insert;

	recv_msg_unlock(avp);
	return 0;

err_ep_insert:
	trpc_close(rinfo->trpc_ep);
err_create_peer:
	_send_disconnect(avp, rinfo->rem_id);
err_nack:
err_wait_ack:
err_msg_write:
	recv_msg_unlock(avp);
	rinfo_put(rinfo);
err_alloc_rinfo:
	return ret;
}
676
/* Handle a CMD_DISCONNECT received from the AVP: find and remove the named
 * local port, close it, and ack the AVP.  Callers hold avp->from_avp_lock
 * (or run from the ISR with defer_remote clear). */
static void process_disconnect_locked(struct tegra_avp_info *avp,
				      struct msg_data *raw_msg)
{
	struct msg_disconnect *disconn_msg = (struct msg_disconnect *)raw_msg;
	unsigned long flags;
	struct remote_info *rinfo;

	DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got disconnect (%x)\n", __func__,
	    disconn_msg->port_id);

	if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, disconn_msg,
				     sizeof(struct msg_disconnect));

	spin_lock_irqsave(&avp->state_lock, flags);
	rinfo = remote_find(avp, disconn_msg->port_id);
	if (!rinfo) {
		spin_unlock_irqrestore(&avp->state_lock, flags);
		pr_warning("%s: got disconnect for unknown port %x\n",
			   __func__, disconn_msg->port_id);
		goto ack;
	}
	/* hold a temporary ref across remote_close */
	rinfo_get(rinfo);
	remote_remove(avp, rinfo);
	spin_unlock_irqrestore(&avp->state_lock, flags);

	remote_close(rinfo);
	rinfo_put(rinfo);
ack:
	/* always ack, even for unknown ports, so the AVP can make progress */
	msg_ack_remote(avp, CMD_ACK, 0);
}
708
/* Handle a CMD_CONNECT received from the AVP: create a local endpoint for
 * the requested port name, register it, and respond with our local port id
 * (or 0 to refuse).  Callers hold avp->from_avp_lock (or run from the ISR
 * with defer_remote clear). */
static void process_connect_locked(struct tegra_avp_info *avp,
				   struct msg_data *raw_msg)
{
	struct msg_connect *conn_msg = (struct msg_connect *)raw_msg;
	struct trpc_endpoint *trpc_ep;
	struct remote_info *rinfo;
	char name[XPC_PORT_NAME_LEN + 1];
	int ret;
	u32 local_port_id = 0;
	unsigned long flags;

	DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got connect (%x)\n", __func__,
	    conn_msg->port_id);
	if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
				     conn_msg, sizeof(struct msg_connect));

	rinfo = rinfo_alloc(avp);
	if (!rinfo) {
		pr_err("%s: cannot alloc mem for rinfo\n", __func__);
		ret = -ENOMEM;
		goto ack;
	}
	/* the local port id is just the rinfo's address (32-bit platform) */
	rinfo->loc_id = (u32)rinfo;
	rinfo->rem_id = conn_msg->port_id;

	/* the wire name may not be NUL-terminated; make a terminated copy */
	memcpy(name, conn_msg->name, XPC_PORT_NAME_LEN);
	name[XPC_PORT_NAME_LEN] = '\0';
	trpc_ep = trpc_create_connect(avp->rpc_node, name, &remote_ep_ops,
				      rinfo, 0);
	if (IS_ERR(trpc_ep)) {
		pr_err("%s: remote requested unknown port '%s' (%d)\n",
		       __func__, name, (int)PTR_ERR(trpc_ep));
		goto nack;
	}
	rinfo->trpc_ep = trpc_ep;

	spin_lock_irqsave(&avp->state_lock, flags);
	ret = remote_insert(avp, rinfo);
	spin_unlock_irqrestore(&avp->state_lock, flags);
	if (ret)
		goto err_ep_insert;

	local_port_id = rinfo->loc_id;
	goto ack;

err_ep_insert:
	trpc_close(trpc_ep);
nack:
	rinfo_put(rinfo);
	/* responding with port id 0 tells the AVP the connect was refused */
	local_port_id = 0;
ack:
	msg_ack_remote(avp, CMD_RESPONSE, local_port_id);
}
763
/* Handle a CMD_MESSAGE received from the AVP: look up the target port and
 * queue the payload on its trpc endpoint.  Returns -ENOMEM *without*
 * acking (so the ISR can defer and retry from the workqueue with
 * GFP_KERNEL), otherwise acks the AVP and returns the queue result. */
static int process_message(struct tegra_avp_info *avp, struct msg_data *raw_msg,
			   gfp_t gfp_flags)
{
	struct msg_port_data *port_msg = (struct msg_port_data *)raw_msg;
	struct remote_info *rinfo;
	unsigned long flags;
	int len;
	int ret;

	/* clamp to the maximum RPC payload; anything longer is truncated */
	len = min(port_msg->msg_len, (u32)TEGRA_RPC_MAX_MSG_LEN);

	if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG) {
		pr_info("%s: got message cmd=%x port=%x len=%d\n", __func__,
			port_msg->cmd, port_msg->port_id, port_msg->msg_len);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, port_msg,
				     sizeof(struct msg_port_data) + len);
	}

	if (len != port_msg->msg_len)
		pr_err("%s: message sent is too long (%d bytes)\n", __func__,
		       port_msg->msg_len);

	spin_lock_irqsave(&avp->state_lock, flags);
	rinfo = remote_find(avp, port_msg->port_id);
	if (rinfo) {
		/* pin both the rinfo and the endpoint across the queue call */
		rinfo_get(rinfo);
		trpc_get(rinfo->trpc_ep);
	} else {
		pr_err("%s: port %x not found\n", __func__, port_msg->port_id);
		spin_unlock_irqrestore(&avp->state_lock, flags);
		ret = -ENOENT;
		goto ack;
	}
	spin_unlock_irqrestore(&avp->state_lock, flags);

	ret = trpc_send_msg(avp->rpc_node, rinfo->trpc_ep, port_msg->data,
			    len, gfp_flags);
	if (ret == -ENOMEM) {
		/* don't ack: the caller will retry this message later */
		trpc_put(rinfo->trpc_ep);
		rinfo_put(rinfo);
		goto no_ack;
	} else if (ret) {
		pr_err("%s: cannot queue message for port %s/%x (%d)\n",
		       __func__, trpc_name(rinfo->trpc_ep), rinfo->loc_id,
		       ret);
	} else {
		DBG(AVP_DBG_TRACE_XPC_MSG, "%s: msg queued\n", __func__);
	}

	trpc_put(rinfo->trpc_ep);
	rinfo_put(rinfo);
ack:
	msg_ack_remote(avp, CMD_ACK, 0);
no_ack:
	return ret;
}
820
/* Workqueue handler for messages the ISR deferred: dispatch the inbound
 * shared-area message by command, serialized on from_avp_lock.  Runs in
 * process context so GFP_KERNEL allocations are allowed. */
static void process_avp_message(struct work_struct *work)
{
	struct tegra_avp_info *avp = container_of(work, struct tegra_avp_info,
						  recv_work);
	struct msg_data *msg = avp->msg_from_avp;

	mutex_lock(&avp->from_avp_lock);
	rmb();
	switch (msg->cmd) {
	case CMD_CONNECT:
		process_connect_locked(avp, msg);
		break;
	case CMD_DISCONNECT:
		process_disconnect_locked(avp, msg);
		break;
	case CMD_MESSAGE:
		process_message(avp, msg, GFP_KERNEL);
		break;
	default:
		pr_err("%s: unknown cmd (%x) received\n", __func__, msg->cmd);
		break;
	}
	mutex_unlock(&avp->from_avp_lock);
}
845
/* Interrupt handler for the "from AVP" mailbox.  Fast-paths CMD_MESSAGE in
 * IRQ context with GFP_ATOMIC; everything else (connects, disconnects,
 * deferred or allocation-starved messages) is pushed to recv_work. */
static irqreturn_t avp_mbox_pending_isr(int irq, void *data)
{
	struct tegra_avp_info *avp = data;
	struct msg_data *msg = avp->msg_from_avp;
	u32 mbox_msg;
	unsigned long flags;
	int ret;

	mbox_msg = mbox_readl(MBOX_FROM_AVP);
	/* clear the doorbell so the AVP can post again */
	mbox_writel(0, MBOX_FROM_AVP);

	DBG(AVP_DBG_TRACE_XPC_IRQ, "%s: got msg %x\n", __func__, mbox_msg);

	/* XXX: re-use previous message? */
	if (!(mbox_msg & MBOX_MSG_VALID)) {
		WARN_ON(1);
		goto done;
	}

	/* NOTE(review): the payload appears to be shifted right by 4 on the
	 * wire; <<= 4 restores it before the magic compare — confirm against
	 * the AVP firmware side. */
	mbox_msg <<= 4;
	if (mbox_msg == 0x2f00bad0UL) {
		pr_info("%s: petting watchdog\n", __func__);
		goto done;
	}

	spin_lock_irqsave(&avp->state_lock, flags);
	if (avp->shutdown) {
		spin_unlock_irqrestore(&avp->state_lock, flags);
		goto done;
	} else if (avp->defer_remote) {
		/* a connect is in flight (recv_msg_lock); don't touch the
		 * message here, let the workqueue serialize it */
		spin_unlock_irqrestore(&avp->state_lock, flags);
		goto defer;
	}
	spin_unlock_irqrestore(&avp->state_lock, flags);

	rmb();
	if (msg->cmd == CMD_MESSAGE) {
		ret = process_message(avp, msg, GFP_ATOMIC);
		if (ret != -ENOMEM)
			goto done;
		/* atomic alloc failed; retry from process context */
		pr_info("%s: deferring message (%d)\n", __func__, ret);
	}
defer:
	queue_work(avp->recv_wq, &avp->recv_work);
done:
	return IRQ_HANDLED;
}
893
/* Boot the AVP: point its reset vector at the boot stub, pulse the COP
 * reset, then poll for up to 2s until the AVP kernel rewrites the reset
 * vector (proof it is alive).  Returns -EINVAL on timeout. */
static int avp_reset(struct tegra_avp_info *avp, unsigned long reset_addr)
{
	unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
	dma_addr_t stub_data_phys;
	unsigned long timeout;
	int ret = 0;

	/* hold the COP halted while we rewrite its boot state */
	writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);

	_tegra_avp_boot_stub_data.map_phys_addr = avp->kernel_phys;
	_tegra_avp_boot_stub_data.jump_addr = reset_addr;
	wmb();
	/* flush the stub data to memory so the AVP sees it */
	stub_data_phys = dma_map_single(NULL, &_tegra_avp_boot_stub_data,
					sizeof(_tegra_avp_boot_stub_data),
					DMA_TO_DEVICE);

	writel(stub_code_phys, TEGRA_AVP_RESET_VECTOR_ADDR);

	pr_debug("%s: TEGRA_AVP_RESET_VECTOR=%x\n", __func__, readl(TEGRA_AVP_RESET_VECTOR_ADDR));
	pr_info("%s: Resetting AVP: reset_addr=%lx\n", __func__, reset_addr);

	tegra_periph_reset_assert(avp->cop_clk);
	udelay(10);
	tegra_periph_reset_deassert(avp->cop_clk);

	/* release the COP and let it run the boot stub */
	writel(FLOW_MODE_NONE, FLOW_CTRL_HALT_COP_EVENTS);

	/* the AVP firmware will reprogram its reset vector as the kernel
	 * starts, so a dead kernel can be detected by polling this value */
	timeout = jiffies + msecs_to_jiffies(2000);
	while (time_before(jiffies, timeout)) {
		pr_debug("%s: TEGRA_AVP_RESET_VECTOR=%x\n", __func__, readl(TEGRA_AVP_RESET_VECTOR_ADDR));
		if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) != stub_code_phys)
			break;
		cpu_relax();
	}
	if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) == stub_code_phys) {
		pr_err("%s: Timed out waiting for AVP kernel to start\n", __func__);
		ret = -EINVAL;
	}
	pr_debug("%s: TEGRA_AVP_RESET_VECTOR=%x\n", __func__, readl(TEGRA_AVP_RESET_VECTOR_ADDR));
	WARN_ON(ret);
	dma_unmap_single(NULL, stub_data_phys,
			 sizeof(_tegra_avp_boot_stub_data),
			 DMA_TO_DEVICE);
	return ret;
}
941
/* Stop the AVP: halt the COP via the flow controller, hold it in reset,
 * and clear out the shared message words and both mailboxes.
 * NOTE(review): 0xfeedf00d looks like a sentinel marking the outbound
 * channel unusable until re-init — confirm against the AVP firmware. */
static void avp_halt(struct tegra_avp_info *avp)
{
	/* ensure the AVP is halted */
	writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
	tegra_periph_reset_assert(avp->cop_clk);

	/* set up the initial memory areas and mailbox contents */
	*((u32 *)avp->msg_from_avp) = 0;
	*((u32 *)avp->msg_to_avp) = 0xfeedf00d;
	mbox_writel(0, MBOX_FROM_AVP);
	mbox_writel(0, MBOX_TO_AVP);
}
954
955/* Note: CPU_PORT server and AVP_PORT client are registered with the avp
956 * node, but are actually meant to be processed on our side (either
957 * by the svc thread for processing remote calls or by the client
958 * of the char dev for receiving replies for managing remote
959 * libraries/modules. */
960
961static int avp_init(struct tegra_avp_info *avp)
962{
963 const struct firmware *avp_fw;
964 int ret;
965 struct trpc_endpoint *ep;
966 char fw_file[30];
967
968 avp->nvmap_libs = nvmap_create_client(nvmap_dev, "avp_libs");
969 if (IS_ERR_OR_NULL(avp->nvmap_libs)) {
970 pr_err("%s: cannot create libs nvmap client\n", __func__);
971 ret = PTR_ERR(avp->nvmap_libs);
972 goto err_nvmap_create_libs_client;
973 }
974
975 /* put the address of the shared mem area into the mailbox for AVP
976 * to read out when its kernel boots. */
977 mbox_writel(avp->msg, MBOX_TO_AVP);
978
979#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
980 /* paddr is any address returned from nvmap_pin */
981 /* vaddr is AVP_KERNEL_VIRT_BASE */
982 pr_info("%s: Using AVP MMU to relocate AVP kernel\n", __func__);
983 sprintf(fw_file, "nvrm_avp.bin");
984 avp->reset_addr = AVP_KERNEL_VIRT_BASE;
985#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
986 /* paddr is any address behind SMMU */
987 /* vaddr is TEGRA_SMMU_BASE */
988 pr_info("%s: Using SMMU at %lx to load AVP kernel\n",
989 __func__, (unsigned long)avp->kernel_phys);
990 BUG_ON(avp->kernel_phys != 0xeff00000
991 && avp->kernel_phys != 0x0ff00000);
992 sprintf(fw_file, "nvrm_avp_%08lx.bin", (unsigned long)avp->kernel_phys);
993 avp->reset_addr = avp->kernel_phys;
994#else /* nvmem= carveout */
995 /* paddr is found in nvmem= carveout */
996 /* vaddr is same as paddr */
997 /* Find nvmem carveout */
998 if (!pfn_valid(__phys_to_pfn(0x8e000000))) {
999 avp->kernel_phys = 0x8e000000;
1000 }
1001 else if (!pfn_valid(__phys_to_pfn(0x9e000000))) {
1002 avp->kernel_phys = 0x9e000000;
1003 }
1004 else if (!pfn_valid(__phys_to_pfn(0xbe000000))) {
1005 avp->kernel_phys = 0xbe000000;
1006 }
1007 else {
1008 pr_err("Cannot find nvmem= carveout to load AVP kernel\n");
1009 pr_err("Check kernel command line "
1010 "to see if nvmem= is defined\n");
1011 BUG();
1012 }
1013 pr_info("%s: Using nvmem= carveout at %lx to load AVP kernel\n",
1014 __func__, (unsigned long)avp->kernel_phys);
1015 sprintf(fw_file, "nvrm_avp_%08lx.bin", (unsigned long)avp->kernel_phys);
1016 avp->reset_addr = avp->kernel_phys;
1017 avp->kernel_data = ioremap(avp->kernel_phys, SZ_1M);
1018#endif
1019
1020 ret = request_firmware(&avp_fw, fw_file, avp->misc_dev.this_device);
1021 if (ret) {
1022 pr_err("%s: Cannot read firmware '%s'\n", __func__, fw_file);
1023 goto err_req_fw;
1024 }
1025 pr_info("%s: Reading firmware from '%s' (%d bytes)\n", __func__,
1026 fw_file, avp_fw->size);
1027
1028 pr_info("%s: Loading AVP kernel at vaddr=%p paddr=%lx\n",
1029 __func__, avp->kernel_data, (unsigned long)avp->kernel_phys);
1030 memcpy(avp->kernel_data, avp_fw->data, avp_fw->size);
1031 memset(avp->kernel_data + avp_fw->size, 0, SZ_1M - avp_fw->size);
1032
1033 wmb();
1034 release_firmware(avp_fw);
1035
1036 tegra_init_legacy_irq_cop();
1037
1038 ret = avp_reset(avp, avp->reset_addr);
1039 if (ret) {
1040 pr_err("%s: cannot reset the AVP.. aborting..\n", __func__);
1041 goto err_reset;
1042 }
1043
1044 enable_irq(avp->mbox_from_avp_pend_irq);
1045 /* Initialize the avp_svc *first*. This creates RPC_CPU_PORT to be
1046 * ready for remote commands. Then, connect to the
1047 * remote RPC_AVP_PORT to be able to send library load/unload and
1048 * suspend commands to it */
1049 ret = avp_svc_start(avp->avp_svc);
1050 if (ret)
1051 goto err_avp_svc_start;
1052
1053 ep = trpc_create_connect(avp->rpc_node, "RPC_AVP_PORT", NULL,
1054 NULL, -1);
1055 if (IS_ERR(ep)) {
1056 pr_err("%s: can't connect to RPC_AVP_PORT server\n", __func__);
1057 ret = PTR_ERR(ep);
1058 goto err_rpc_avp_port;
1059 }
1060 avp->avp_ep = ep;
1061
1062 avp->initialized = true;
1063 smp_wmb();
1064 pr_info("%s: avp init done\n", __func__);
1065 return 0;
1066
1067err_rpc_avp_port:
1068 avp_svc_stop(avp->avp_svc);
1069err_avp_svc_start:
1070 disable_irq(avp->mbox_from_avp_pend_irq);
1071err_reset:
1072 avp_halt(avp);
1073err_req_fw:
1074 nvmap_client_put(avp->nvmap_libs);
1075err_nvmap_create_libs_client:
1076 avp->nvmap_libs = NULL;
1077 return ret;
1078}
1079
/* avp_uninit - tear down the state built by avp_init().
 *
 * Marks the AVP uninitialized/shutting-down, quiesces the mailbox IRQ
 * and deferred receive work, halts the AVP, drops every remote
 * endpoint, stops the CPU-side service and releases library state.
 */
static void avp_uninit(struct tegra_avp_info *avp)
{
	unsigned long flags;
	struct rb_node *n;
	struct remote_info *rinfo;

	/* flip both flags under the state lock so concurrent message
	 * handling observes a consistent shutdown state */
	spin_lock_irqsave(&avp->state_lock, flags);
	avp->initialized = false;
	avp->shutdown = true;
	spin_unlock_irqrestore(&avp->state_lock, flags);

	disable_irq(avp->mbox_from_avp_pend_irq);
	cancel_work_sync(&avp->recv_work);

	avp_halt(avp);

	/* Drain the endpoint tree. The lock is dropped around
	 * remote_close()/rinfo_put() (they appear to be allowed to
	 * sleep), so rb_first() is re-evaluated on every pass instead of
	 * iterating in place; rinfo_get() keeps the node alive across
	 * the unlocked window. */
	spin_lock_irqsave(&avp->state_lock, flags);
	while ((n = rb_first(&avp->endpoints)) != NULL) {
		rinfo = rb_entry(n, struct remote_info, rb_node);
		rinfo_get(rinfo);
		remote_remove(avp, rinfo);
		spin_unlock_irqrestore(&avp->state_lock, flags);

		remote_close(rinfo);
		rinfo_put(rinfo);

		spin_lock_irqsave(&avp->state_lock, flags);
	}
	spin_unlock_irqrestore(&avp->state_lock, flags);

	avp_svc_stop(avp->avp_svc);

	if (avp->avp_ep) {
		trpc_close(avp->avp_ep);
		avp->avp_ep = NULL;
	}

	libs_cleanup(avp);

	avp->shutdown = false;
	smp_wmb();
	pr_info("%s: avp teardown done\n", __func__);
}
1123
1124/* returns the remote lib handle in lib->handle */
1125static int _load_lib(struct tegra_avp_info *avp, struct tegra_avp_lib *lib,
1126 bool from_user)
1127{
1128 struct svc_lib_attach svc;
1129 struct svc_lib_attach_resp resp;
1130 const struct firmware *fw;
1131 void *args;
1132 struct nvmap_handle_ref *lib_handle;
1133 void *lib_data;
1134 phys_addr_t lib_phys;
1135 int ret;
1136
1137 DBG(AVP_DBG_TRACE_LIB, "avp_lib: loading library '%s'\n", lib->name);
1138
1139 args = kmalloc(lib->args_len, GFP_KERNEL);
1140 if (!args) {
1141 pr_err("avp_lib: can't alloc mem for args (%d)\n",
1142 lib->args_len);
1143 return -ENOMEM;
1144 }
1145
1146 if (!from_user)
1147 memcpy(args, lib->args, lib->args_len);
1148 else if (copy_from_user(args, lib->args, lib->args_len)) {
1149 pr_err("avp_lib: can't copy lib args\n");
1150 ret = -EFAULT;
1151 goto err_cp_args;
1152 }
1153
1154 ret = request_firmware(&fw, lib->name, avp->misc_dev.this_device);
1155 if (ret) {
1156 pr_err("avp_lib: Cannot read firmware '%s'\n", lib->name);
1157 goto err_req_fw;
1158 }
1159
1160 lib_handle = nvmap_alloc(avp->nvmap_libs, fw->size, L1_CACHE_BYTES,
1161 NVMAP_HANDLE_UNCACHEABLE, 0);
1162 if (IS_ERR_OR_NULL(lib_handle)) {
1163 pr_err("avp_lib: can't nvmap alloc for lib '%s'\n", lib->name);
1164 ret = PTR_ERR(lib_handle);
1165 goto err_nvmap_alloc;
1166 }
1167
1168 lib_data = nvmap_mmap(lib_handle);
1169 if (!lib_data) {
1170 pr_err("avp_lib: can't nvmap map for lib '%s'\n", lib->name);
1171 ret = -ENOMEM;
1172 goto err_nvmap_mmap;
1173 }
1174
1175 lib_phys = nvmap_pin(avp->nvmap_libs, lib_handle);
1176 if (IS_ERR_VALUE(lib_phys)) {
1177 pr_err("avp_lib: can't nvmap pin for lib '%s'\n", lib->name);
1178 ret = lib_phys;
1179 goto err_nvmap_pin;
1180 }
1181
1182 memcpy(lib_data, fw->data, fw->size);
1183
1184 svc.svc_id = SVC_LIBRARY_ATTACH;
1185 svc.address = lib_phys;
1186 svc.args_len = lib->args_len;
1187 svc.lib_size = fw->size;
1188 svc.reason = lib->greedy ? AVP_LIB_REASON_ATTACH_GREEDY :
1189 AVP_LIB_REASON_ATTACH;
1190 memcpy(svc.args, args, lib->args_len);
1191 wmb();
1192
1193 /* send message, wait for reply */
1194 ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
1195 GFP_KERNEL);
1196 if (ret)
1197 goto err_send_msg;
1198
1199 ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
1200 sizeof(resp), -1);
1201 if (ret != sizeof(resp)) {
1202 pr_err("avp_lib: Couldn't get lib load reply (%d)\n", ret);
1203 goto err_recv_msg;
1204 } else if (resp.err) {
1205 pr_err("avp_lib: got remote error (%d) while loading lib %s\n",
1206 resp.err, lib->name);
1207 ret = -EPROTO;
1208 goto err_recv_msg;
1209 }
1210 lib->handle = resp.lib_id;
1211 ret = 0;
1212 DBG(AVP_DBG_TRACE_LIB,
1213 "avp_lib: Successfully loaded library %s (lib_id=%x)\n",
1214 lib->name, resp.lib_id);
1215
1216 /* We free the memory here because by this point the AVP has already
1217 * requested memory for the library for all the sections since it does
1218 * it's own relocation and memory management. So, our allocations were
1219 * temporary to hand the library code over to the AVP.
1220 */
1221
1222err_recv_msg:
1223err_send_msg:
1224 nvmap_unpin(avp->nvmap_libs, lib_handle);
1225err_nvmap_pin:
1226 nvmap_munmap(lib_handle, lib_data);
1227err_nvmap_mmap:
1228 nvmap_free(avp->nvmap_libs, lib_handle);
1229err_nvmap_alloc:
1230 release_firmware(fw);
1231err_req_fw:
1232err_cp_args:
1233 kfree(args);
1234 return ret;
1235}
1236
1237static int send_unload_lib_msg(struct tegra_avp_info *avp, u32 handle,
1238 const char *name)
1239{
1240 struct svc_lib_detach svc;
1241 struct svc_lib_detach_resp resp;
1242 int ret;
1243
1244 svc.svc_id = SVC_LIBRARY_DETACH;
1245 svc.reason = AVP_LIB_REASON_DETACH;
1246 svc.lib_id = handle;
1247
1248 ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
1249 GFP_KERNEL);
1250 if (ret) {
1251 pr_err("avp_lib: can't send unload message to avp for '%s'\n",
1252 name);
1253 goto err;
1254 }
1255
1256 /* Give it a few extra moments to unload. */
1257 msleep(20);
1258
1259 ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
1260 sizeof(resp), -1);
1261 if (ret != sizeof(resp)) {
1262 pr_err("avp_lib: Couldn't get unload reply for '%s' (%d)\n",
1263 name, ret);
1264 } else if (resp.err) {
1265 pr_err("avp_lib: remote error (%d) while unloading lib %s\n",
1266 resp.err, name);
1267 ret = -EPROTO;
1268 } else {
1269 pr_info("avp_lib: Successfully unloaded '%s'\n",
1270 name);
1271 ret = 0;
1272 }
1273
1274err:
1275 return ret;
1276}
1277
1278static struct lib_item *_find_lib_locked(struct tegra_avp_info *avp, u32 handle)
1279{
1280 struct lib_item *item;
1281
1282 list_for_each_entry(item, &avp->libs, list) {
1283 if (item->handle == handle)
1284 return item;
1285 }
1286 return NULL;
1287}
1288
1289static int _insert_lib_locked(struct tegra_avp_info *avp, u32 handle,
1290 char *name)
1291{
1292 struct lib_item *item;
1293
1294 item = kzalloc(sizeof(struct lib_item), GFP_KERNEL);
1295 if (!item)
1296 return -ENOMEM;
1297 item->handle = handle;
1298 strlcpy(item->name, name, TEGRA_AVP_LIB_MAX_NAME);
1299 list_add_tail(&item->list, &avp->libs);
1300 return 0;
1301}
1302
1303static void _delete_lib_locked(struct tegra_avp_info *avp,
1304 struct lib_item *item)
1305{
1306 list_del(&item->list);
1307 kfree(item);
1308}
1309
/* TEGRA_AVP_IOCTL_LOAD_LIB handler: copy the request from userspace,
 * load the library onto the AVP, copy the resulting handle back to the
 * caller and record it on avp->libs for later unload.
 * Returns 0 on success or a negative errno.
 */
static int handle_load_lib_ioctl(struct tegra_avp_info *avp, unsigned long arg)
{
	struct tegra_avp_lib lib;
	int ret;

	pr_debug("%s: ioctl\n", __func__);
	if (copy_from_user(&lib, (void __user *)arg, sizeof(lib)))
		return -EFAULT;
	/* name comes from userspace: force NUL termination */
	lib.name[TEGRA_AVP_LIB_MAX_NAME - 1] = '\0';

	/* bound args_len before _load_lib() kmallocs/copies that many bytes */
	if (lib.args_len > TEGRA_AVP_LIB_MAX_ARGS) {
		pr_err("%s: library args too long (%d)\n", __func__,
			lib.args_len);
		return -E2BIG;
	}

	mutex_lock(&avp->libs_lock);
	ret = _load_lib(avp, &lib, true);
	if (ret)
		goto err_load_lib;

	/* hand the remote library handle back to the caller */
	if (copy_to_user((void __user *)arg, &lib, sizeof(lib))) {
		/* TODO: probably need to free the library from remote
		 * we just loaded */
		ret = -EFAULT;
		goto err_copy_to_user;
	}
	ret = _insert_lib_locked(avp, lib.handle, lib.name);
	if (ret) {
		pr_err("%s: can't insert lib (%d)\n", __func__, ret);
		goto err_insert_lib;
	}

	mutex_unlock(&avp->libs_lock);
	return 0;

err_insert_lib:
err_copy_to_user:
	/* best effort: ask the AVP to drop the library again */
	send_unload_lib_msg(avp, lib.handle, lib.name);
err_load_lib:
	mutex_unlock(&avp->libs_lock);
	return ret;
}
1353
1354static void libs_cleanup(struct tegra_avp_info *avp)
1355{
1356 struct lib_item *lib;
1357 struct lib_item *lib_tmp;
1358
1359 mutex_lock(&avp->libs_lock);
1360 list_for_each_entry_safe(lib, lib_tmp, &avp->libs, list) {
1361 _delete_lib_locked(avp, lib);
1362 }
1363
1364 nvmap_client_put(avp->nvmap_libs);
1365 avp->nvmap_libs = NULL;
1366 mutex_unlock(&avp->libs_lock);
1367}
1368
1369static long tegra_avp_ioctl(struct file *file, unsigned int cmd,
1370 unsigned long arg)
1371{
1372 struct tegra_avp_info *avp = tegra_avp;
1373 int ret;
1374
1375 if (_IOC_TYPE(cmd) != TEGRA_AVP_IOCTL_MAGIC ||
1376 _IOC_NR(cmd) < TEGRA_AVP_IOCTL_MIN_NR ||
1377 _IOC_NR(cmd) > TEGRA_AVP_IOCTL_MAX_NR)
1378 return -ENOTTY;
1379
1380 switch (cmd) {
1381 case TEGRA_AVP_IOCTL_LOAD_LIB:
1382 ret = handle_load_lib_ioctl(avp, arg);
1383 break;
1384 case TEGRA_AVP_IOCTL_UNLOAD_LIB:
1385 ret = tegra_avp_unload_lib(avp, arg);
1386 break;
1387 default:
1388 pr_err("avp_lib: Unknown tegra_avp ioctl 0x%x\n", _IOC_NR(cmd));
1389 ret = -ENOTTY;
1390 break;
1391 }
1392 return ret;
1393}
1394
1395int tegra_avp_open(struct tegra_avp_info **avp)
1396{
1397 struct tegra_avp_info *new_avp = tegra_avp;
1398 int ret = 0;
1399
1400 pr_debug("%s: open\n", __func__);
1401 mutex_lock(&new_avp->open_lock);
1402
1403 if (!new_avp->refcount)
1404 ret = avp_init(new_avp);
1405
1406 if (ret < 0) {
1407 mutex_unlock(&new_avp->open_lock);
1408 new_avp = 0;
1409 goto out;
1410 }
1411
1412 new_avp->refcount++;
1413
1414 mutex_unlock(&new_avp->open_lock);
1415out:
1416 *avp = new_avp;
1417 return ret;
1418}
1419
/* file_operations .open hook: mark the fd non-seekable and take a
 * reference on the global AVP (the returned pointer is not stored;
 * other fops re-read the tegra_avp global). */
static int tegra_avp_open_fops(struct inode *inode, struct file *file)
{
	struct tegra_avp_info *unused;

	nonseekable_open(inode, file);
	return tegra_avp_open(&unused);
}
1427
1428int tegra_avp_release(struct tegra_avp_info *avp)
1429{
1430 int ret = 0;
1431
1432 pr_debug("%s: close\n", __func__);
1433 mutex_lock(&avp->open_lock);
1434 if (!avp->refcount) {
1435 pr_err("%s: releasing while in invalid state\n", __func__);
1436 ret = -EINVAL;
1437 goto out;
1438 }
1439 if (avp->refcount > 0)
1440 avp->refcount--;
1441 if (!avp->refcount)
1442 avp_uninit(avp);
1443
1444out:
1445 mutex_unlock(&avp->open_lock);
1446 return ret;
1447}
1448
1449static int tegra_avp_release_fops(struct inode *inode, struct file *file)
1450{
1451 struct tegra_avp_info *avp = tegra_avp;
1452 return tegra_avp_release(avp);
1453}
1454
/* avp_enter_lp0 - ask the AVP to enter LP0 (deep sleep).
 *
 * Sends SVC_ENTER_LP0 telling the AVP to copy its IRAM contents (minus
 * the reset handler) into the pre-allocated backup buffer, then
 * busy-polls the completion word the AVP writes when done, for up to
 * one second.
 *
 * Returns 0 on success, -ETIMEDOUT if the AVP never signals, or the
 * transport's negative errno if the message couldn't be sent.
 */
static int avp_enter_lp0(struct tegra_avp_info *avp)
{
	/* completion flag appears to sit just past the backed-up data
	 * (offset == buf_size below); the backup buffer is allocated
	 * TEGRA_IRAM_SIZE + 4 bytes in probe. Assumes byte-granular
	 * pointer arithmetic on iram_backup_data — TODO confirm. */
	volatile u32 *avp_suspend_done = avp->iram_backup_data
		+ TEGRA_IRAM_SIZE - TEGRA_RESET_HANDLER_SIZE;
	struct svc_enter_lp0 svc;
	unsigned long endtime;
	int ret;

	svc.svc_id = SVC_ENTER_LP0;
	svc.src_addr = (u32)TEGRA_IRAM_BASE + TEGRA_RESET_HANDLER_SIZE;
	svc.buf_addr = (u32)avp->iram_backup_phys;
	svc.buf_size = TEGRA_IRAM_SIZE - TEGRA_RESET_HANDLER_SIZE;

	/* clear the flag before the AVP can possibly set it */
	*avp_suspend_done = 0;
	wmb();

	ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
			    GFP_KERNEL);
	if (ret) {
		pr_err("%s: cannot send AVP suspend message\n", __func__);
		return ret;
	}

	/* poll (10us steps) for up to 1s for the AVP's completion write */
	endtime = jiffies + msecs_to_jiffies(1000);
	rmb();
	while ((*avp_suspend_done == 0) && time_before(jiffies, endtime)) {
		udelay(10);
		rmb();
	}

	rmb();
	if (*avp_suspend_done == 0) {
		pr_err("%s: AVP failed to suspend\n", __func__);
		ret = -ETIMEDOUT;
		goto err;
	}

	return 0;

err:
	return ret;
}
1497
/* Platform suspend hook: if the AVP is running, ask it to enter LP0
 * and capture the resume address it publishes so tegra_avp_resume()
 * can restart it there.
 */
static int tegra_avp_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct tegra_avp_info *avp = tegra_avp;
	unsigned long flags;
	int ret;

	pr_info("%s()+\n", __func__);
	spin_lock_irqsave(&avp->state_lock, flags);
	if (!avp->initialized) {
		/* AVP never booted (or already torn down): nothing to do */
		spin_unlock_irqrestore(&avp->state_lock, flags);
		return 0;
	}
	avp->suspending = true;
	spin_unlock_irqrestore(&avp->state_lock, flags);

	ret = avp_enter_lp0(avp);
	if (ret)
		goto err;

	/* the AVP publishes where it wants to resume from */
	avp->resume_addr = readl(TEGRA_AVP_RESUME_ADDR);
	if (!avp->resume_addr) {
		pr_err("%s: AVP failed to set it's resume address\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	disable_irq(avp->mbox_from_avp_pend_irq);

	pr_info("avp_suspend: resume_addr=%lx\n", avp->resume_addr);
	/* clear bit 0 of the address (presumably a mode/flag bit set by
	 * the AVP — TODO confirm its meaning) */
	avp->resume_addr &= 0xfffffffeUL;
	pr_info("%s()-\n", __func__);

	return 0;

err:
	/* TODO: we need to kill the AVP so that when we come back
	 * it could be reinitialized.. We'd probably need to kill
	 * the users of it so they don't have the wrong state.
	 */
	return ret;
}
1539
/* Platform resume hook: restart the AVP at the resume address captured
 * by tegra_avp_suspend() and re-enable its mailbox interrupt.
 */
static int tegra_avp_resume(struct platform_device *pdev)
{
	struct tegra_avp_info *avp = tegra_avp;
	int ret = 0;

	pr_info("%s()+\n", __func__);
	/* pairs with the smp_wmb() after initialized/suspend-state updates */
	smp_rmb();
	if (!avp->initialized)
		goto out;

	/* suspend must have stored a non-zero resume address */
	BUG_ON(!avp->resume_addr);

	avp_reset(avp, avp->resume_addr);
	avp->resume_addr = 0;
	avp->suspending = false;
	smp_wmb();
	enable_irq(avp->mbox_from_avp_pend_irq);

	pr_info("%s()-\n", __func__);

out:
	return ret;
}
1563
/* Character-device operations for /dev/tegra_avp. */
static const struct file_operations tegra_avp_fops = {
	.owner = THIS_MODULE,
	.open = tegra_avp_open_fops,
	.release = tegra_avp_release_fops,
	.unlocked_ioctl = tegra_avp_ioctl,
};
1570
/* RPC node representing the remote (AVP-side) endpoint namespace;
 * .priv is set to the tegra_avp_info instance in probe. */
static struct trpc_node avp_trpc_node = {
	.name = "avp-remote",
	.type = TRPC_NODE_REMOTE,
	.try_connect = avp_node_try_connect,
};
1576
/* Probe: allocate and wire up all CPU-side resources for the AVP —
 * nvmap client, the buffer that will hold the AVP kernel image (config
 * dependent), the IRAM suspend-backup buffer, the mailbox message
 * area, RPC node, service state, misc device and the mailbox IRQ.
 * The AVP itself is not booted here; that happens in avp_init() on the
 * first open of /dev/tegra_avp.
 */
static int tegra_avp_probe(struct platform_device *pdev)
{
	void *msg_area;
	struct tegra_avp_info *avp;
	int ret = 0;
	int irq;
	unsigned int heap_mask;

	irq = platform_get_irq_byname(pdev, "mbox_from_avp_pending");
	if (irq < 0) {
		pr_err("%s: invalid platform data\n", __func__);
		return -EINVAL;
	}

	avp = kzalloc(sizeof(struct tegra_avp_info), GFP_KERNEL);
	if (!avp) {
		pr_err("%s: cannot allocate tegra_avp_info\n", __func__);
		return -ENOMEM;
	}

	avp->nvmap_drv = nvmap_create_client(nvmap_dev, "avp_core");
	if (IS_ERR_OR_NULL(avp->nvmap_drv)) {
		pr_err("%s: cannot create drv nvmap client\n", __func__);
		/* NOTE(review): if nvmap_create_client() can return NULL,
		 * PTR_ERR(NULL) is 0 and probe would report success while
		 * leaking avp — confirm and fix */
		ret = PTR_ERR(avp->nvmap_drv);
		goto err_nvmap_create_drv_client;
	}

	/* where the AVP kernel image will live depends on the chip config */
#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
	heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC;
#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
	heap_mask = NVMAP_HEAP_IOVMM;
#else /* nvmem= carveout */
	/* heap_mask == 0: nothing allocated here; avp_init() locates the
	 * nvmem= carveout and ioremaps it, and kernel_handle stays NULL */
	heap_mask = 0;
#endif

	if (heap_mask == NVMAP_HEAP_IOVMM) {
		int i;
		/* Tegra3 A01 has different SMMU address in 0xe00000000- */
		u32 iovmm_addr[] = {0x0ff00000, 0xeff00000};

		/* try each candidate SMMU base until one allocates */
		for (i = 0; i < ARRAY_SIZE(iovmm_addr); i++) {
			avp->kernel_handle = nvmap_alloc_iovm(avp->nvmap_drv,
						SZ_1M, L1_CACHE_BYTES,
						NVMAP_HANDLE_WRITE_COMBINE,
						iovmm_addr[i]);
			if (!IS_ERR_OR_NULL(avp->kernel_handle))
				break;
		}
		if (IS_ERR_OR_NULL(avp->kernel_handle)) {
			pr_err("%s: cannot create handle\n", __func__);
			ret = PTR_ERR(avp->kernel_handle);
			goto err_nvmap_alloc;
		}

		avp->kernel_data = nvmap_mmap(avp->kernel_handle);
		if (!avp->kernel_data) {
			pr_err("%s: cannot map kernel handle\n", __func__);
			ret = -ENOMEM;
			goto err_nvmap_mmap;
		}

		avp->kernel_phys =
			nvmap_pin(avp->nvmap_drv, avp->kernel_handle);
		if (IS_ERR_VALUE(avp->kernel_phys)) {
			pr_err("%s: cannot pin kernel handle\n", __func__);
			ret = avp->kernel_phys;
			goto err_nvmap_pin;
		}

		pr_info("%s: allocated IOVM at %lx for AVP kernel\n",
			__func__, (unsigned long)avp->kernel_phys);
	}

	if (heap_mask == NVMAP_HEAP_CARVEOUT_GENERIC) {
		avp->kernel_handle = nvmap_alloc(avp->nvmap_drv, SZ_1M, SZ_1M,
						NVMAP_HANDLE_UNCACHEABLE, 0);
		if (IS_ERR_OR_NULL(avp->kernel_handle)) {
			pr_err("%s: cannot create handle\n", __func__);
			ret = PTR_ERR(avp->kernel_handle);
			goto err_nvmap_alloc;
		}

		avp->kernel_data = nvmap_mmap(avp->kernel_handle);
		if (!avp->kernel_data) {
			pr_err("%s: cannot map kernel handle\n", __func__);
			ret = -ENOMEM;
			goto err_nvmap_mmap;
		}

		avp->kernel_phys = nvmap_pin(avp->nvmap_drv,
					avp->kernel_handle);
		if (IS_ERR_VALUE(avp->kernel_phys)) {
			pr_err("%s: cannot pin kernel handle\n", __func__);
			ret = avp->kernel_phys;
			goto err_nvmap_pin;
		}

		pr_info("%s: allocated carveout memory at %lx for AVP kernel\n",
			__func__, (unsigned long)avp->kernel_phys);
	}

	/* allocate an extra 4 bytes at the end which AVP uses to signal to
	 * us that it is done suspending.
	 */
	avp->iram_backup_handle =
		nvmap_alloc(avp->nvmap_drv, TEGRA_IRAM_SIZE + 4,
			    L1_CACHE_BYTES, NVMAP_HANDLE_UNCACHEABLE, 0);
	if (IS_ERR_OR_NULL(avp->iram_backup_handle)) {
		pr_err("%s: cannot create handle for iram backup\n", __func__);
		ret = PTR_ERR(avp->iram_backup_handle);
		goto err_iram_nvmap_alloc;
	}
	avp->iram_backup_data = nvmap_mmap(avp->iram_backup_handle);
	if (!avp->iram_backup_data) {
		pr_err("%s: cannot map iram backup handle\n", __func__);
		ret = -ENOMEM;
		goto err_iram_nvmap_mmap;
	}
	avp->iram_backup_phys = nvmap_pin(avp->nvmap_drv,
					  avp->iram_backup_handle);
	if (IS_ERR_VALUE(avp->iram_backup_phys)) {
		pr_err("%s: cannot pin iram backup handle\n", __func__);
		ret = avp->iram_backup_phys;
		goto err_iram_nvmap_pin;
	}

	/* initialize locks, the endpoint tree and library bookkeeping */
	avp->mbox_from_avp_pend_irq = irq;
	avp->endpoints = RB_ROOT;
	spin_lock_init(&avp->state_lock);
	mutex_init(&avp->open_lock);
	mutex_init(&avp->to_avp_lock);
	mutex_init(&avp->from_avp_lock);
	INIT_WORK(&avp->recv_work, process_avp_message);

	mutex_init(&avp->libs_lock);
	INIT_LIST_HEAD(&avp->libs);

	/* single-threaded high-priority queue for incoming mailbox work */
	avp->recv_wq = alloc_workqueue("avp-msg-recv",
				       WQ_NON_REENTRANT | WQ_HIGHPRI, 1);
	if (!avp->recv_wq) {
		pr_err("%s: can't create recve workqueue\n", __func__);
		ret = -ENOMEM;
		goto err_create_wq;
	}

	avp->cop_clk = clk_get(&pdev->dev, "cop");
	if (IS_ERR_OR_NULL(avp->cop_clk)) {
		pr_err("%s: Couldn't get cop clock\n", TEGRA_AVP_NAME);
		ret = -ENOENT;
		goto err_get_cop_clk;
	}

	/* two adjacent message areas: to-AVP first, from-AVP second */
	msg_area = dma_alloc_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2,
				      &avp->msg_area_addr, GFP_KERNEL);
	if (!msg_area) {
		pr_err("%s: cannot allocate msg_area\n", __func__);
		ret = -ENOMEM;
		goto err_alloc_msg_area;
	}
	memset(msg_area, 0, AVP_MSG_AREA_SIZE * 2);
	/* mailbox word handed to the AVP: area address (>>4, so it must be
	 * 16-byte aligned) plus valid/interrupt-enable status bits */
	avp->msg = ((avp->msg_area_addr >> 4) |
		    MBOX_MSG_VALID | MBOX_MSG_PENDING_INT_EN);
	avp->msg_to_avp = msg_area;
	avp->msg_from_avp = msg_area + AVP_MSG_AREA_SIZE;

	avp_halt(avp);

	avp_trpc_node.priv = avp;
	ret = trpc_node_register(&avp_trpc_node);
	if (ret) {
		pr_err("%s: Can't register avp rpc node\n", __func__);
		goto err_node_reg;
	}
	avp->rpc_node = &avp_trpc_node;

	avp->avp_svc = avp_svc_init(pdev, avp->rpc_node);
	if (IS_ERR_OR_NULL(avp->avp_svc)) {
		pr_err("%s: Cannot initialize avp_svc\n", __func__);
		ret = PTR_ERR(avp->avp_svc);
		goto err_avp_svc_init;
	}

	avp->misc_dev.minor = MISC_DYNAMIC_MINOR;
	avp->misc_dev.name = "tegra_avp";
	avp->misc_dev.fops = &tegra_avp_fops;

	ret = misc_register(&avp->misc_dev);
	if (ret) {
		pr_err("%s: Unable to register misc device!\n", TEGRA_AVP_NAME);
		goto err_misc_reg;
	}

	/* IRQ stays disabled until avp_init() actually boots the AVP */
	ret = request_irq(irq, avp_mbox_pending_isr, 0, TEGRA_AVP_NAME, avp);
	if (ret) {
		pr_err("%s: cannot register irq handler\n", __func__);
		goto err_req_irq_pend;
	}
	disable_irq(avp->mbox_from_avp_pend_irq);

	tegra_avp = avp;

	pr_info("%s: message area %lx/%lx\n", __func__,
		(unsigned long)avp->msg_area_addr,
		(unsigned long)avp->msg_area_addr + AVP_MSG_AREA_SIZE);

	return 0;

	/* unwind in strict reverse order of acquisition.
	 * NOTE(review): in the nvmem= carveout config kernel_handle is
	 * NULL here — verify the nvmap_* calls below tolerate that */
err_req_irq_pend:
	misc_deregister(&avp->misc_dev);
err_misc_reg:
	avp_svc_destroy(avp->avp_svc);
err_avp_svc_init:
	trpc_node_unregister(avp->rpc_node);
err_node_reg:
	dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, msg_area,
			  avp->msg_area_addr);
err_alloc_msg_area:
	clk_put(avp->cop_clk);
err_get_cop_clk:
	destroy_workqueue(avp->recv_wq);
err_create_wq:
	nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
err_iram_nvmap_pin:
	nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
err_iram_nvmap_mmap:
	nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
err_iram_nvmap_alloc:
	nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
err_nvmap_pin:
	nvmap_munmap(avp->kernel_handle, avp->kernel_data);
err_nvmap_mmap:
	nvmap_free(avp->nvmap_drv, avp->kernel_handle);
err_nvmap_alloc:
	nvmap_client_put(avp->nvmap_drv);
err_nvmap_create_drv_client:
	kfree(avp);
	tegra_avp = NULL;
	return ret;
}
1816
/* Platform remove hook: refuse removal while the device is open,
 * otherwise release everything acquired in tegra_avp_probe() in
 * reverse order.
 */
static int tegra_avp_remove(struct platform_device *pdev)
{
	struct tegra_avp_info *avp = tegra_avp;

	if (!avp)
		return 0;

	mutex_lock(&avp->open_lock);
	/* ensure that noone can open while we tear down */
	if (avp->refcount) {
		mutex_unlock(&avp->open_lock);
		return -EBUSY;
	}
	mutex_unlock(&avp->open_lock);

	misc_deregister(&avp->misc_dev);

	avp_halt(avp);

	avp_svc_destroy(avp->avp_svc);
	trpc_node_unregister(avp->rpc_node);
	dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, avp->msg_to_avp,
			  avp->msg_area_addr);
	clk_put(avp->cop_clk);
	destroy_workqueue(avp->recv_wq);
	nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
	nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
	nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
	/* NOTE(review): kernel_handle is NULL in the nvmem= carveout
	 * config (probe never allocates it) — verify the nvmap_* calls
	 * below tolerate a NULL handle */
	nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
	nvmap_munmap(avp->kernel_handle, avp->kernel_data);
	nvmap_free(avp->nvmap_drv, avp->kernel_handle);
	nvmap_client_put(avp->nvmap_drv);
	kfree(avp);
	tegra_avp = NULL;
	return 0;
}
1853
1854int tegra_avp_load_lib(struct tegra_avp_info *avp, struct tegra_avp_lib *lib)
1855{
1856 int ret;
1857
1858 if (!avp)
1859 return -ENODEV;
1860
1861 if (!lib)
1862 return -EFAULT;
1863
1864 lib->name[TEGRA_AVP_LIB_MAX_NAME - 1] = '\0';
1865
1866 if (lib->args_len > TEGRA_AVP_LIB_MAX_ARGS) {
1867 pr_err("%s: library args too long (%d)\n", __func__,
1868 lib->args_len);
1869 return -E2BIG;
1870 }
1871
1872 mutex_lock(&avp->libs_lock);
1873 ret = _load_lib(avp, lib, false);
1874 if (ret)
1875 goto err_load_lib;
1876
1877 ret = _insert_lib_locked(avp, lib->handle, lib->name);
1878 if (ret) {
1879 pr_err("%s: can't insert lib (%d)\n", __func__, ret);
1880 goto err_insert_lib;
1881 }
1882
1883 mutex_unlock(&avp->libs_lock);
1884 return 0;
1885
1886err_insert_lib:
1887 ret = send_unload_lib_msg(avp, lib->handle, lib->name);
1888 if (!ret)
1889 DBG(AVP_DBG_TRACE_LIB, "avp_lib: unloaded '%s'\n", lib->name);
1890 else
1891 pr_err("avp_lib: can't unload lib '%s' (%d)\n", lib->name, ret);
1892 lib->handle = 0;
1893err_load_lib:
1894 mutex_unlock(&avp->libs_lock);
1895 return ret;
1896}
1897
1898int tegra_avp_unload_lib(struct tegra_avp_info *avp, unsigned long handle)
1899{
1900 struct lib_item *item;
1901 int ret;
1902
1903 if (!avp)
1904 return -ENODEV;
1905
1906 mutex_lock(&avp->libs_lock);
1907 item = _find_lib_locked(avp, handle);
1908 if (!item) {
1909 pr_err("avp_lib: avp lib with handle 0x%x not found\n",
1910 (u32)handle);
1911 ret = -ENOENT;
1912 goto err_find;
1913 }
1914 ret = send_unload_lib_msg(avp, item->handle, item->name);
1915 if (!ret)
1916 DBG(AVP_DBG_TRACE_LIB, "avp_lib: unloaded '%s'\n", item->name);
1917 else
1918 pr_err("avp_lib: can't unload lib '%s'/0x%x (%d)\n", item->name,
1919 item->handle, ret);
1920 _delete_lib_locked(avp, item);
1921
1922err_find:
1923 mutex_unlock(&avp->libs_lock);
1924 return ret;
1925}
1926
/* Platform driver glue; suspend/resume take the AVP through LP0. */
static struct platform_driver tegra_avp_driver = {
	.probe = tegra_avp_probe,
	.remove = tegra_avp_remove,
	.suspend = tegra_avp_suspend,
	.resume = tegra_avp_resume,
	.driver = {
		.name = TEGRA_AVP_NAME,
		.owner = THIS_MODULE,
	},
};
1937
/* Module entry point: register the platform driver. */
static int __init tegra_avp_init(void)
{
	return platform_driver_register(&tegra_avp_driver);
}
1942
/* Module exit: unregister the platform driver. */
static void __exit tegra_avp_exit(void)
{
	platform_driver_unregister(&tegra_avp_driver);
}
1947
1948module_init(tegra_avp_init);
1949module_exit(tegra_avp_exit);
diff --git a/drivers/media/video/tegra/avp/avp.h b/drivers/media/video/tegra/avp/avp.h
new file mode 100644
index 00000000000..4f2287743a0
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 * Author: Dima Zavin <dima@android.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
#ifndef __MEDIA_VIDEO_TEGRA_AVP_H
#define __MEDIA_VIDEO_TEGRA_AVP_H

#include <linux/platform_device.h>
#include <linux/types.h>

#include "trpc.h"

/* Opaque CPU-side AVP service state (definition lives in avp_svc.c). */
struct avp_svc_info;

/* Create the CPU-side service state for the given platform device and
 * RPC node; callers check the result with IS_ERR_OR_NULL(). */
struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
				  struct trpc_node *rpc_node);
/* Free everything created by avp_svc_init(). */
void avp_svc_destroy(struct avp_svc_info *avp_svc);
/* Start/stop handling of remote service calls on RPC_CPU_PORT. */
int avp_svc_start(struct avp_svc_info *svc);
void avp_svc_stop(struct avp_svc_info *svc);

#endif
diff --git a/drivers/media/video/tegra/avp/avp_msg.h b/drivers/media/video/tegra/avp/avp_msg.h
new file mode 100644
index 00000000000..615d890d544
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp_msg.h
@@ -0,0 +1,358 @@
1/* drivers/media/video/tegra/avp/avp_msg.h
2 *
3 * Copyright (C) 2010 Google, Inc.
4 * Author: Dima Zavin <dima@android.com>
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#ifndef __MEDIA_VIDEO_TEGRA_AVP_MSG_H
18#define __MEDIA_VIDEO_TEGRA_AVP_MSG_H
19
20#include <linux/tegra_avp.h>
21#include <linux/types.h>
22
23/* Note: the port name string is not NUL terminated, so make sure to
24 * allocate appropriate space locally when operating on the string */
25#define XPC_PORT_NAME_LEN 16
26
27#define SVC_ARGS_MAX_LEN 220
28#define SVC_MAX_STRING_LEN 200
29
30#define AVP_ERR_ENOTSUP 0x2
31#define AVP_ERR_EINVAL 0x4
32#define AVP_ERR_ENOMEM 0x6
33#define AVP_ERR_EACCES 0x00030010
34
/* RPC service-call ids exchanged with the AVP firmware.  These values are
 * part of the wire protocol shared with the remote side and must not be
 * renumbered.  Note that id 2 is unused (gap in the numbering). */
35enum {
36	SVC_NVMAP_CREATE		= 0,
37	SVC_NVMAP_CREATE_RESPONSE	= 1,
38	SVC_NVMAP_FREE			= 3,
39	SVC_NVMAP_ALLOC			= 4,
40	SVC_NVMAP_ALLOC_RESPONSE	= 5,
41	SVC_NVMAP_PIN			= 6,
42	SVC_NVMAP_PIN_RESPONSE		= 7,
43	SVC_NVMAP_UNPIN			= 8,
44	SVC_NVMAP_UNPIN_RESPONSE	= 9,
45	SVC_NVMAP_GET_ADDRESS		= 10,
46	SVC_NVMAP_GET_ADDRESS_RESPONSE	= 11,
47	SVC_NVMAP_FROM_ID		= 12,
48	SVC_NVMAP_FROM_ID_RESPONSE	= 13,
49	SVC_MODULE_CLOCK		= 14,
50	SVC_MODULE_CLOCK_RESPONSE	= 15,
51	SVC_MODULE_RESET		= 16,
52	SVC_MODULE_RESET_RESPONSE	= 17,
53	SVC_POWER_REGISTER		= 18,
54	SVC_POWER_UNREGISTER		= 19,
55	SVC_POWER_STARVATION		= 20,
56	SVC_POWER_BUSY_HINT		= 21,
57	SVC_POWER_BUSY_HINT_MULTI	= 22,
58	SVC_DFS_GETSTATE		= 23,
59	SVC_DFS_GETSTATE_RESPONSE	= 24,
60	SVC_POWER_RESPONSE		= 25,
61	SVC_POWER_MAXFREQ		= 26,
62	SVC_ENTER_LP0			= 27,
63	SVC_ENTER_LP0_RESPONSE		= 28,
64	SVC_PRINTF			= 29,
65	SVC_LIBRARY_ATTACH		= 30,
66	SVC_LIBRARY_ATTACH_RESPONSE	= 31,
67	SVC_LIBRARY_DETACH		= 32,
68	SVC_LIBRARY_DETACH_RESPONSE	= 33,
69	SVC_AVP_WDT_RESET		= 34,
70	SVC_DFS_GET_CLK_UTIL		= 35,
71	SVC_DFS_GET_CLK_UTIL_RESPONSE	= 36,
72	SVC_MODULE_CLOCK_SET		= 37,
73	SVC_MODULE_CLOCK_SET_RESPONSE	= 38,
74	SVC_MODULE_CLOCK_GET		= 39,
75	SVC_MODULE_CLOCK_GET_RESPONSE	= 40,
76};
77
78struct svc_msg {
79 u32 svc_id;
80 u8 data[0];
81};
82
83struct svc_common_resp {
84 u32 svc_id;
85 u32 err;
86};
87
88struct svc_printf {
89 u32 svc_id;
90 const char str[SVC_MAX_STRING_LEN];
91};
92
93struct svc_enter_lp0 {
94 u32 svc_id;
95 u32 src_addr;
96 u32 buf_addr;
97 u32 buf_size;
98};
99
100/* nvmap messages */
101struct svc_nvmap_create {
102 u32 svc_id;
103 u32 size;
104};
105
106struct svc_nvmap_create_resp {
107 u32 svc_id;
108 u32 handle_id;
109 u32 err;
110};
111
112enum {
113 AVP_NVMAP_HEAP_EXTERNAL = 1,
114 AVP_NVMAP_HEAP_GART = 2,
115 AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT = 3,
116 AVP_NVMAP_HEAP_IRAM = 4,
117};
118
119struct svc_nvmap_alloc {
120 u32 svc_id;
121 u32 handle_id;
122 u32 heaps[4];
123 u32 num_heaps;
124 u32 align;
125 u32 mapping_type;
126};
127
128struct svc_nvmap_free {
129 u32 svc_id;
130 u32 handle_id;
131};
132
133struct svc_nvmap_pin {
134 u32 svc_id;
135 u32 handle_id;
136};
137
138struct svc_nvmap_pin_resp {
139 u32 svc_id;
140 u32 addr;
141};
142
143struct svc_nvmap_unpin {
144 u32 svc_id;
145 u32 handle_id;
146};
147
148struct svc_nvmap_from_id {
149 u32 svc_id;
150 u32 handle_id;
151};
152
153struct svc_nvmap_get_addr {
154 u32 svc_id;
155 u32 handle_id;
156 u32 offs;
157};
158
159struct svc_nvmap_get_addr_resp {
160 u32 svc_id;
161 u32 addr;
162};
163
164/* library management messages */
165enum {
166 AVP_LIB_REASON_ATTACH = 0,
167 AVP_LIB_REASON_DETACH = 1,
168 AVP_LIB_REASON_ATTACH_GREEDY = 2,
169};
170
171struct svc_lib_attach {
172 u32 svc_id;
173 u32 address;
174 u32 args_len;
175 u32 lib_size;
176 u8 args[SVC_ARGS_MAX_LEN];
177 u32 reason;
178};
179
180struct svc_lib_attach_resp {
181 u32 svc_id;
182 u32 err;
183 u32 lib_id;
184};
185
186struct svc_lib_detach {
187 u32 svc_id;
188 u32 reason;
189 u32 lib_id;
190};
191
192struct svc_lib_detach_resp {
193 u32 svc_id;
194 u32 err;
195};
196
197/* hw module management from the AVP side */
198enum {
199 AVP_MODULE_ID_AVP = 2,
200 AVP_MODULE_ID_VCP = 3,
201 AVP_MODULE_ID_BSEA = 27,
202 AVP_MODULE_ID_VDE = 28,
203 AVP_MODULE_ID_MPE = 29,
204};
205
206struct svc_module_ctrl {
207 u32 svc_id;
208 u32 module_id;
209 u32 client_id;
210 u8 enable;
211};
212
213struct svc_clock_ctrl {
214 u32 svc_id;
215 u32 module_id;
216 u32 clk_freq;
217};
218
219struct svc_clock_ctrl_response {
220 u32 svc_id;
221 u32 err;
222 u32 act_freq;
223};
224
225/* power messages */
226struct svc_pwr_register {
227 u32 svc_id;
228 u32 client_id;
229 u32 unused;
230};
231
232struct svc_pwr_register_resp {
233 u32 svc_id;
234 u32 err;
235 u32 client_id;
236};
237
238struct svc_pwr_starve_hint {
239 u32 svc_id;
240 u32 dfs_clk_id;
241 u32 client_id;
242 u8 starving;
243};
244
245struct svc_pwr_busy_hint {
246 u32 svc_id;
247 u32 dfs_clk_id;
248 u32 client_id;
249 u32 boost_ms; /* duration */
250 u32 boost_freq; /* in khz */
251};
252
253struct svc_pwr_max_freq {
254 u32 svc_id;
255 u32 module_id;
256};
257
258struct svc_pwr_max_freq_resp {
259 u32 svc_id;
260 u32 freq;
261};
262
263/* dfs related messages */
264enum {
265 AVP_DFS_STATE_INVALID = 0,
266 AVP_DFS_STATE_DISABLED = 1,
267 AVP_DFS_STATE_STOPPED = 2,
268 AVP_DFS_STATE_CLOSED_LOOP = 3,
269 AVP_DFS_STATE_PROFILED_LOOP = 4,
270};
271
272struct svc_dfs_get_state_resp {
273 u32 svc_id;
274 u32 state;
275};
276
277enum {
278 AVP_DFS_CLK_CPU = 1,
279 AVP_DFS_CLK_AVP = 2,
280 AVP_DFS_CLK_SYSTEM = 3,
281 AVP_DFS_CLK_AHB = 4,
282 AVP_DFS_CLK_APB = 5,
283 AVP_DFS_CLK_VDE = 6,
284 /* external memory controller */
285 AVP_DFS_CLK_EMC = 7,
286};
287
288struct avp_clk_usage {
289 u32 min;
290 u32 max;
291 u32 curr_min;
292 u32 curr_max;
293 u32 curr;
294 u32 avg; /* average activity.. whatever that means */
295};
296
297struct svc_dfs_get_clk_util {
298 u32 svc_id;
299 u32 dfs_clk_id;
300};
301
302/* all units are in kHz */
303struct svc_dfs_get_clk_util_resp {
304 u32 svc_id;
305 u32 err;
306 struct avp_clk_usage usage;
307};
308
309/************************/
310
311enum {
312 CMD_ACK = 0,
313 CMD_CONNECT = 2,
314 CMD_DISCONNECT = 3,
315 CMD_MESSAGE = 4,
316 CMD_RESPONSE = 5,
317};
318
319struct msg_data {
320 u32 cmd;
321 u8 data[0];
322};
323
324struct msg_ack {
325 u32 cmd;
326 u32 arg;
327};
328
329struct msg_connect {
330 u32 cmd;
331 u32 port_id;
332 /* not NUL terminated, just 0 padded */
333 char name[XPC_PORT_NAME_LEN];
334};
335
336struct msg_connect_reply {
337 u32 cmd;
338 u32 port_id;
339};
340
341struct msg_disconnect {
342 u32 cmd;
343 u32 port_id;
344};
345
346struct msg_disconnect_reply {
347 u32 cmd;
348 u32 ack;
349};
350
351struct msg_port_data {
352 u32 cmd;
353 u32 port_id;
354 u32 msg_len;
355 u8 data[0];
356};
357
358#endif
diff --git a/drivers/media/video/tegra/avp/avp_svc.c b/drivers/media/video/tegra/avp/avp_svc.c
new file mode 100644
index 00000000000..17c8b8535a6
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp_svc.c
@@ -0,0 +1,890 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 * Author: Dima Zavin <dima@android.com>
4 *
5 * Copyright (C) 2010-2011 NVIDIA Corporation
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/clk.h>
19#include <linux/delay.h>
20#include <linux/dma-mapping.h>
21#include <linux/err.h>
22#include <linux/io.h>
23#include <linux/kthread.h>
24#include <linux/list.h>
25#include <linux/mutex.h>
26#include <linux/slab.h>
27#include <linux/tegra_rpc.h>
28#include <linux/tegra_avp.h>
29#include <linux/types.h>
30
31#include <mach/clk.h>
32#include <mach/nvmap.h>
33
34#include "../../../../video/tegra/nvmap/nvmap.h"
35
36#include "avp_msg.h"
37#include "trpc.h"
38#include "avp.h"
39
40enum {
41 AVP_DBG_TRACE_SVC = 1U << 0,
42};
43
44static u32 debug_mask;
45module_param_named(debug_mask, debug_mask, uint, S_IWUSR | S_IRUGO);
46
47#define DBG(flag, args...) \
48 do { if (unlikely(debug_mask & (flag))) pr_info(args); } while (0)
49
50enum {
51 CLK_REQUEST_VCP = 0,
52 CLK_REQUEST_BSEA = 1,
53 CLK_REQUEST_VDE = 2,
54 CLK_REQUEST_AVP = 3,
55 NUM_CLK_REQUESTS,
56};
57
58struct avp_module {
59 const char *name;
60 u32 clk_req;
61};
62
/* Table of AVP-controllable hw modules, sparsely indexed by
 * AVP_MODULE_ID_*.  Entries with a NULL .name are invalid ids
 * (checked by find_avp_module()).  .clk_req maps the module to a
 * slot in avp_svc->clks[]. */
63static struct avp_module avp_modules[] = {
64	[AVP_MODULE_ID_AVP] = {
65		.name		= "cop",
66		.clk_req	= CLK_REQUEST_AVP,
67	},
68	[AVP_MODULE_ID_VCP] = {
69		.name		= "vcp",
70		.clk_req	= CLK_REQUEST_VCP,
71	},
72	[AVP_MODULE_ID_BSEA] = {
73		.name		= "bsea",
74		.clk_req	= CLK_REQUEST_BSEA,
75	},
76	[AVP_MODULE_ID_VDE] = {
77		.name		= "vde",
78		.clk_req	= CLK_REQUEST_VDE,
79	},
80};
81#define NUM_AVP_MODULES	ARRAY_SIZE(avp_modules)
82
83struct avp_clk {
84 struct clk *clk;
85 int refcnt;
86 struct avp_module *mod;
87};
88
89struct avp_svc_info {
90 struct avp_clk clks[NUM_CLK_REQUESTS];
91 /* used for dvfs */
92 struct clk *sclk;
93 struct clk *emcclk;
94
95 struct mutex clk_lock;
96
97 struct trpc_endpoint *cpu_ep;
98 struct task_struct *svc_thread;
99
100 /* client for remote allocations, for easy tear down */
101 struct nvmap_client *nvmap_remote;
102 struct trpc_node *rpc_node;
103 unsigned long max_avp_rate;
104 unsigned long emc_rate;
105
106 /* variable to check if video is present */
107 bool is_vde_on;
108};
109
/* SVC_NVMAP_CREATE: create an nvmap handle of msg->size bytes on behalf of
 * the remote AVP and reply with its id (handle_id 0 + AVP_ERR_ENOMEM on
 * failure). */
110static void do_svc_nvmap_create(struct avp_svc_info *avp_svc,
111				struct svc_msg *_msg,
112				size_t len)
113{
114	struct svc_nvmap_create *msg = (struct svc_nvmap_create *)_msg;
115	struct svc_nvmap_create_resp resp;
116	struct nvmap_handle_ref *handle;
117	u32 handle_id = 0;
118	u32 err = 0;
119
120	handle = nvmap_create_handle(avp_svc->nvmap_remote, msg->size);
121	if (unlikely(IS_ERR(handle))) {
122		pr_err("avp_svc: error creating handle (%d bytes) for remote\n",
123		       msg->size);
124		err = AVP_ERR_ENOMEM;
125	} else
126		handle_id = (u32)nvmap_ref_to_id(handle);
127
128	resp.svc_id = SVC_NVMAP_CREATE_RESPONSE;
129	resp.err = err;
130	resp.handle_id = handle_id;
131	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
132		      sizeof(resp), GFP_KERNEL);
133	/* TODO: do we need to put the handle if send_msg failed? */
134}
135
/* SVC_NVMAP_ALLOC: back an existing remote handle with memory.  Translates
 * the AVP heap ids in the request into an nvmap heap mask; an empty (or
 * over-long, clamped-to-zero) heap list falls back to
 * CARVEOUT_GENERIC|SYSMEM.  Replies with a common err-only response. */
136static void do_svc_nvmap_alloc(struct avp_svc_info *avp_svc,
137			       struct svc_msg *_msg,
138			       size_t len)
139{
140	struct svc_nvmap_alloc *msg = (struct svc_nvmap_alloc *)_msg;
141	struct svc_common_resp resp;
142	struct nvmap_handle *handle;
143	u32 err = 0;
144	u32 heap_mask = 0;
145	int i;
146	size_t align;
147
148	handle = nvmap_get_handle_id(avp_svc->nvmap_remote, msg->handle_id);
149	if (IS_ERR(handle)) {
150		pr_err("avp_svc: unknown remote handle 0x%x\n", msg->handle_id);
151		err = AVP_ERR_EACCES;
152		goto out;
153	}
154
155	if (msg->num_heaps > 4) {
156		pr_err("avp_svc: invalid remote alloc request (%d heaps?!)\n",
157		       msg->num_heaps);
158		/* TODO: should we error out instead ? */
159		msg->num_heaps = 0;
160	}
161	if (msg->num_heaps == 0)
162		heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC | NVMAP_HEAP_SYSMEM;
163
164	for (i = 0; i < msg->num_heaps; i++) {
165		switch (msg->heaps[i]) {
166		case AVP_NVMAP_HEAP_EXTERNAL:
167			heap_mask |= NVMAP_HEAP_SYSMEM;
168			break;
169		case AVP_NVMAP_HEAP_GART:
170			heap_mask |= NVMAP_HEAP_IOVMM;
171			break;
172		case AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT:
173			heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
174			break;
175		case AVP_NVMAP_HEAP_IRAM:
176			heap_mask |= NVMAP_HEAP_CARVEOUT_IRAM;
177			break;
178		default:
179			break;
180		}
181	}
182
	/* enforce a minimum cache-line alignment on the allocation */
183	align = max_t(size_t, L1_CACHE_BYTES, msg->align);
184	err = nvmap_alloc_handle_id(avp_svc->nvmap_remote, msg->handle_id,
185				    heap_mask, align, 0);
186	nvmap_handle_put(handle);
187	if (err) {
188		pr_err("avp_svc: can't allocate for handle 0x%x (%d)\n",
189		       msg->handle_id, err);
190		err = AVP_ERR_ENOMEM;
191	}
192
193out:
194	resp.svc_id = SVC_NVMAP_ALLOC_RESPONSE;
195	resp.err = err;
196	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
197		      sizeof(resp), GFP_KERNEL);
198}
199
/* SVC_NVMAP_FREE: drop the remote client's reference to a handle.
 * Fire-and-forget: the protocol defines no response for this message. */
200static void do_svc_nvmap_free(struct avp_svc_info *avp_svc,
201			      struct svc_msg *_msg,
202			      size_t len)
203{
204	struct svc_nvmap_free *msg = (struct svc_nvmap_free *)_msg;
205
206	nvmap_free_handle_id(avp_svc->nvmap_remote, msg->handle_id);
207}
208
/* SVC_NVMAP_PIN: take an extra reference on the handle, pin it, and reply
 * with its physical address.  ~0UL in resp.addr signals failure to the
 * remote side.
 * NOTE(review): if nvmap_pin_ids() fails, the reference taken by
 * nvmap_duplicate_handle_id() does not appear to be released here --
 * confirm whether that leaks a ref. */
209static void do_svc_nvmap_pin(struct avp_svc_info *avp_svc,
210			     struct svc_msg *_msg,
211			     size_t len)
212{
213	struct svc_nvmap_pin *msg = (struct svc_nvmap_pin *)_msg;
214	struct svc_nvmap_pin_resp resp;
215	struct nvmap_handle_ref *handle;
216	phys_addr_t addr = ~0UL;
217	unsigned long id = msg->handle_id;
218	int err;
219
220	handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote, id);
221	if (IS_ERR(handle)) {
222		pr_err("avp_svc: can't dup handle %lx\n", id);
223		goto out;
224	}
225	err = nvmap_pin_ids(avp_svc->nvmap_remote, 1, &id);
226	if (err) {
227		pr_err("avp_svc: can't pin for handle %lx (%d)\n", id, err);
228		goto out;
229	}
230	addr = nvmap_handle_address(avp_svc->nvmap_remote, id);
231
232out:
233	resp.svc_id = SVC_NVMAP_PIN_RESPONSE;
234	resp.addr = addr;
235	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
236		      sizeof(resp), GFP_KERNEL);
237}
238
/* SVC_NVMAP_UNPIN: unpin the handle and drop the extra reference that was
 * taken at pin time; always replies success. */
239static void do_svc_nvmap_unpin(struct avp_svc_info *avp_svc,
240			       struct svc_msg *_msg,
241			       size_t len)
242{
243	struct svc_nvmap_unpin *msg = (struct svc_nvmap_unpin *)_msg;
244	struct svc_common_resp resp;
245	unsigned long id = msg->handle_id;
246
247	nvmap_unpin_ids(avp_svc->nvmap_remote, 1, &id);
248	nvmap_free_handle_id(avp_svc->nvmap_remote, id);
249
250	resp.svc_id = SVC_NVMAP_UNPIN_RESPONSE;
251	resp.err = 0;
252	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
253		      sizeof(resp), GFP_KERNEL);
254}
255
/* SVC_NVMAP_FROM_ID: give the remote client its own reference to an
 * existing global handle id; replies AVP_ERR_ENOMEM on failure. */
256static void do_svc_nvmap_from_id(struct avp_svc_info *avp_svc,
257				 struct svc_msg *_msg,
258				 size_t len)
259{
260	struct svc_nvmap_from_id *msg = (struct svc_nvmap_from_id *)_msg;
261	struct svc_common_resp resp;
262	struct nvmap_handle_ref *handle;
263	int err = 0;
264
265	handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote,
266					   msg->handle_id);
267	if (IS_ERR(handle)) {
268		pr_err("avp_svc: can't duplicate handle for id 0x%x (%d)\n",
269		       msg->handle_id, (int)PTR_ERR(handle));
270		err = AVP_ERR_ENOMEM;
271	}
272
273	resp.svc_id = SVC_NVMAP_FROM_ID_RESPONSE;
274	resp.err = err;
275	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
276		      sizeof(resp), GFP_KERNEL);
277}
278
/* SVC_NVMAP_GET_ADDRESS: reply with the handle's address plus the
 * requested offset.  No error field in this response. */
279static void do_svc_nvmap_get_addr(struct avp_svc_info *avp_svc,
280				  struct svc_msg *_msg,
281				  size_t len)
282{
283	struct svc_nvmap_get_addr *msg = (struct svc_nvmap_get_addr *)_msg;
284	struct svc_nvmap_get_addr_resp resp;
285
286	resp.svc_id = SVC_NVMAP_GET_ADDRESS_RESPONSE;
287	resp.addr = nvmap_handle_address(avp_svc->nvmap_remote, msg->handle_id);
288	resp.addr += msg->offs;
289	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
290		      sizeof(resp), GFP_KERNEL);
291}
292
/* SVC_POWER_REGISTER: power-client registration is a no-op on this side;
 * always succeed, echoing back the client id the remote supplied. */
293static void do_svc_pwr_register(struct avp_svc_info *avp_svc,
294				struct svc_msg *_msg,
295				size_t len)
296{
297	struct svc_pwr_register *msg = (struct svc_pwr_register *)_msg;
298	struct svc_pwr_register_resp resp;
299
300	resp.svc_id = SVC_POWER_RESPONSE;
301	resp.err = 0;
302	resp.client_id = msg->client_id;
303
304	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
305		      sizeof(resp), GFP_KERNEL);
306}
307
/* Look up a module by AVP module id in the sparse avp_modules[] table.
 * A NULL .name marks an unpopulated slot.  Returns NULL for unknown ids.
 * (avp_svc is currently unused, kept for signature symmetry.) */
308static struct avp_module *find_avp_module(struct avp_svc_info *avp_svc, u32 id)
309{
310	if (id < NUM_AVP_MODULES && avp_modules[id].name)
311		return &avp_modules[id];
312	return NULL;
313}
314
/* SVC_MODULE_RESET: pulse the module's peripheral reset (assert, 10us,
 * deassert).  Errors are not reported -- the remote side does not handle
 * failures for reset, so resp.err is always 0. */
315static void do_svc_module_reset(struct avp_svc_info *avp_svc,
316				struct svc_msg *_msg,
317				size_t len)
318{
319	struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
320	struct svc_common_resp resp;
321	struct avp_module *mod;
322	struct avp_clk *aclk;
323
324	mod = find_avp_module(avp_svc, msg->module_id);
325	if (!mod) {
326		if (msg->module_id == AVP_MODULE_ID_AVP)
327			pr_err("avp_svc: AVP suicidal?!?!\n");
328		else
329			pr_err("avp_svc: Unknown module reset requested: %d\n",
330			       msg->module_id);
331		/* other side doesn't handle errors for reset */
332		resp.err = 0;
333		goto send_response;
334	}
335
336	aclk = &avp_svc->clks[mod->clk_req];
337	tegra_periph_reset_assert(aclk->clk);
338	udelay(10);
339	tegra_periph_reset_deassert(aclk->clk);
340	resp.err = 0;
341
342send_response:
343	resp.svc_id = SVC_MODULE_RESET_RESPONSE;
344	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
345		      sizeof(resp), GFP_KERNEL);
346}
347
/* SVC_MODULE_CLOCK: refcounted enable/disable of a module clock together
 * with the shared sclk/emc clocks.  While VDE is marked on, emc is
 * requested at max rate (ULONG_MAX).
 * NOTE(review): emc_rate is only applied on the 0->1 enable transition of
 * this module's refcount; toggling VDE while its clock is already enabled
 * won't re-apply the rate -- confirm this is intended. */
348static void do_svc_module_clock(struct avp_svc_info *avp_svc,
349				struct svc_msg *_msg,
350				size_t len)
351{
352	struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
353	struct svc_common_resp resp;
354	struct avp_module *mod;
355	struct avp_clk *aclk;
356	unsigned long emc_rate = 0;
357
358	mod = find_avp_module(avp_svc, msg->module_id);
359	if (!mod) {
360		pr_err("avp_svc: unknown module clock requested: %d\n",
361		       msg->module_id);
362		resp.err = AVP_ERR_EINVAL;
363		goto send_response;
364	}
365
366	if (msg->module_id == AVP_MODULE_ID_VDE)
367		avp_svc->is_vde_on = msg->enable;
368
369	if (avp_svc->is_vde_on == true)
370		emc_rate = ULONG_MAX;
371
372	mutex_lock(&avp_svc->clk_lock);
373	aclk = &avp_svc->clks[mod->clk_req];
374	if (msg->enable) {
375		if (aclk->refcnt++ == 0) {
376			clk_set_rate(avp_svc->emcclk, emc_rate);
377			clk_enable(avp_svc->emcclk);
378			clk_enable(avp_svc->sclk);
379			clk_enable(aclk->clk);
380		}
381	} else {
382		if (unlikely(aclk->refcnt == 0)) {
383			pr_err("avp_svc: unbalanced clock disable for '%s'\n",
384			       aclk->mod->name);
385		} else if (--aclk->refcnt == 0) {
386			clk_disable(aclk->clk);
			/* rate 0 releases this user's constraint on the shared clocks */
387			clk_set_rate(avp_svc->sclk, 0);
388			clk_disable(avp_svc->sclk);
389			clk_set_rate(avp_svc->emcclk, 0);
390			clk_disable(avp_svc->emcclk);
391		}
392	}
393	mutex_unlock(&avp_svc->clk_lock);
394	resp.err = 0;
395
396send_response:
397	resp.svc_id = SVC_MODULE_CLOCK_RESPONSE;
398	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
399		      sizeof(resp), GFP_KERNEL);
400}
401
/* Generic "acknowledge with success" reply used for requests we accept
 * but don't act on (_msg and len are intentionally unused). */
402static void do_svc_null_response(struct avp_svc_info *avp_svc,
403				 struct svc_msg *_msg,
404				 size_t len, u32 resp_svc_id)
405{
406	struct svc_common_resp resp;
407	resp.svc_id = resp_svc_id;
408	resp.err = 0;
409	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
410		      sizeof(resp), GFP_KERNEL);
411}
412
/* SVC_DFS_GETSTATE: always report DFS as stopped -- no DFS support is
 * implemented on this (CPU) side. */
413static void do_svc_dfs_get_state(struct avp_svc_info *avp_svc,
414				 struct svc_msg *_msg,
415				 size_t len)
416{
417	struct svc_dfs_get_state_resp resp;
418	resp.svc_id = SVC_DFS_GETSTATE_RESPONSE;
419	resp.state = AVP_DFS_STATE_STOPPED;
420	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
421		      sizeof(resp), GFP_KERNEL);
422}
423
/* SVC_DFS_GET_CLK_UTIL: reply with all-zero utilization statistics
 * (no DFS bookkeeping is done on this side). */
424static void do_svc_dfs_get_clk_util(struct avp_svc_info *avp_svc,
425				    struct svc_msg *_msg,
426				    size_t len)
427{
428	struct svc_dfs_get_clk_util_resp resp;
429
430	resp.svc_id = SVC_DFS_GET_CLK_UTIL_RESPONSE;
431	resp.err = 0;
432	memset(&resp.usage, 0, sizeof(struct avp_clk_usage));
433	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
434		      sizeof(resp), GFP_KERNEL);
435}
436
/* SVC_POWER_MAXFREQ: max-frequency query is unimplemented; reply 0. */
437static void do_svc_pwr_max_freq(struct avp_svc_info *avp_svc,
438				struct svc_msg *_msg,
439				size_t len)
440{
441	struct svc_pwr_max_freq_resp resp;
442
443	resp.svc_id = SVC_POWER_MAXFREQ;
444	resp.freq = 0;
445	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
446		      sizeof(resp), GFP_KERNEL);
447}
448
/* SVC_PRINTF: relay a log string from the AVP to the kernel log.  The
 * wire string is not guaranteed NUL-terminated, hence the bounded copy. */
449static void do_svc_printf(struct avp_svc_info *avp_svc, struct svc_msg *_msg,
450			  size_t len)
451{
452	struct svc_printf *msg = (struct svc_printf *)_msg;
453	char tmp_str[SVC_MAX_STRING_LEN];
454
455	/* ensure we null terminate the source */
456	strlcpy(tmp_str, msg->str, SVC_MAX_STRING_LEN);
457	pr_info("[AVP]: %s", tmp_str);
458}
459
/* SVC_MODULE_CLOCK_SET: set a module clock's rate and reply with the
 * rate actually achieved.  For the AVP module itself the rate applies to
 * the shared sclk, and emc is bumped to max when the request is at/above
 * the max AVP rate (otherwise it follows the platform-data emc rate, or
 * max when no platform data was provided). */
460static void do_svc_module_clock_set(struct avp_svc_info *avp_svc,
461				    struct svc_msg *_msg,
462				    size_t len)
463{
464	struct svc_clock_ctrl *msg = (struct svc_clock_ctrl *)_msg;
465	struct svc_clock_ctrl_response resp;
466	struct avp_module *mod;
467	struct avp_clk *aclk;
468	int ret = 0;
469
470	mod = find_avp_module(avp_svc, msg->module_id);
471	if (!mod) {
472		pr_err("avp_svc: unknown module clock requested: %d\n",
473		       msg->module_id);
474		resp.err = AVP_ERR_EINVAL;
475		goto send_response;
476	}
477
478	mutex_lock(&avp_svc->clk_lock);
479	if (msg->module_id == AVP_MODULE_ID_AVP) {
480		/* check if max avp clock is asked and set max emc frequency */
481		if (msg->clk_freq >= avp_svc->max_avp_rate) {
482			clk_set_rate(avp_svc->emcclk, ULONG_MAX);
483		}
484		else {
485			/* if no, set emc frequency as per platform data.
486			 * if no platform data is send, set it to maximum */
487			if (avp_svc->emc_rate)
488				clk_set_rate(avp_svc->emcclk, avp_svc->emc_rate);
489			else
490				clk_set_rate(avp_svc->emcclk, ULONG_MAX);
491		}
492		ret = clk_set_rate(avp_svc->sclk, msg->clk_freq);
493	} else {
494		aclk = &avp_svc->clks[mod->clk_req];
495		ret = clk_set_rate(aclk->clk, msg->clk_freq);
496	}
497	if (ret) {
498		pr_err("avp_svc: Failed to set module (id = %d) frequency to %d Hz\n",
499		       msg->module_id, msg->clk_freq);
500		resp.err = AVP_ERR_EINVAL;
501		resp.act_freq = 0;
502		mutex_unlock(&avp_svc->clk_lock);
503		goto send_response;
504	}
505
	/* report back the rate the clock framework actually settled on */
506	if (msg->module_id == AVP_MODULE_ID_AVP)
507		resp.act_freq = clk_get_rate(avp_svc->sclk);
508	else
509		resp.act_freq = clk_get_rate(aclk->clk);
510
511	mutex_unlock(&avp_svc->clk_lock);
512	resp.err = 0;
513
514send_response:
515	resp.svc_id = SVC_MODULE_CLOCK_SET_RESPONSE;
516	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
517		      sizeof(resp), GFP_KERNEL);
518}
519
/* Reply AVP_ERR_ENOTSUP for a service call we don't implement, echoing
 * the request's svc id so the remote can match the response. */
520static void do_svc_unsupported_msg(struct avp_svc_info *avp_svc,
521				   u32 resp_svc_id)
522{
523	struct svc_common_resp resp;
524
525	resp.err = AVP_ERR_ENOTSUP;
526	resp.svc_id = resp_svc_id;
527	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
528		      sizeof(resp), GFP_KERNEL);
529}
530
/* SVC_MODULE_CLOCK_GET: reply with the current rate of the module's
 * clock, or AVP_ERR_EINVAL for an unknown module id. */
531static void do_svc_module_clock_get(struct avp_svc_info *avp_svc,
532				    struct svc_msg *_msg,
533				    size_t len)
534{
535	struct svc_clock_ctrl *msg = (struct svc_clock_ctrl *)_msg;
536	struct svc_clock_ctrl_response resp;
537	struct avp_module *mod;
538	struct avp_clk *aclk;
539
540	mod = find_avp_module(avp_svc, msg->module_id);
541	if (!mod) {
542		pr_err("avp_svc: unknown module get clock requested: %d\n",
543		       msg->module_id);
544		resp.err = AVP_ERR_EINVAL;
545		goto send_response;
546	}
547
548	mutex_lock(&avp_svc->clk_lock);
549	aclk = &avp_svc->clks[mod->clk_req];
550	resp.act_freq = clk_get_rate(aclk->clk);
551	mutex_unlock(&avp_svc->clk_lock);
552	resp.err = 0;
553
554send_response:
555	resp.svc_id = SVC_MODULE_CLOCK_GET_RESPONSE;
556	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
557		      sizeof(resp), GFP_KERNEL);
558}
559
/* Route one inbound service message to its handler based on msg->svc_id.
 * Returns 0 on a recognized message, -ENOMSG (after sending an ENOTSUP
 * response) for an unknown svc id.  Called only from avp_svc_thread. */
560static int dispatch_svc_message(struct avp_svc_info *avp_svc,
561				struct svc_msg *msg,
562				size_t len)
563{
564	int ret = 0;
565
566	switch (msg->svc_id) {
567	case SVC_NVMAP_CREATE:
568		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_create\n", __func__);
569		do_svc_nvmap_create(avp_svc, msg, len);
570		break;
571	case SVC_NVMAP_ALLOC:
572		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_alloc\n", __func__);
573		do_svc_nvmap_alloc(avp_svc, msg, len);
574		break;
575	case SVC_NVMAP_FREE:
576		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_free\n", __func__);
577		do_svc_nvmap_free(avp_svc, msg, len);
578		break;
579	case SVC_NVMAP_PIN:
580		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_pin\n", __func__);
581		do_svc_nvmap_pin(avp_svc, msg, len);
582		break;
583	case SVC_NVMAP_UNPIN:
584		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_unpin\n", __func__);
585		do_svc_nvmap_unpin(avp_svc, msg, len);
586		break;
587	case SVC_NVMAP_FROM_ID:
588		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_from_id\n", __func__);
589		do_svc_nvmap_from_id(avp_svc, msg, len);
590		break;
591	case SVC_NVMAP_GET_ADDRESS:
592		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_get_addr\n", __func__);
593		do_svc_nvmap_get_addr(avp_svc, msg, len);
594		break;
595	case SVC_POWER_REGISTER:
596		DBG(AVP_DBG_TRACE_SVC, "%s: got power_register\n", __func__);
597		do_svc_pwr_register(avp_svc, msg, len);
598		break;
599	case SVC_POWER_UNREGISTER:
600		DBG(AVP_DBG_TRACE_SVC, "%s: got power_unregister\n", __func__);
601		/* nothing to do */
602		break;
603	case SVC_POWER_BUSY_HINT_MULTI:
604		DBG(AVP_DBG_TRACE_SVC, "%s: got power_busy_hint_multi\n",
605		    __func__);
606		/* nothing to do */
607		break;
608	case SVC_POWER_BUSY_HINT:
609	case SVC_POWER_STARVATION:
610		DBG(AVP_DBG_TRACE_SVC, "%s: got power busy/starve hint\n",
611		    __func__);
612		do_svc_null_response(avp_svc, msg, len, SVC_POWER_RESPONSE);
613		break;
614	case SVC_POWER_MAXFREQ:
615		DBG(AVP_DBG_TRACE_SVC, "%s: got power get_max_freq\n",
616		    __func__);
617		do_svc_pwr_max_freq(avp_svc, msg, len);
618		break;
619	case SVC_DFS_GETSTATE:
620		DBG(AVP_DBG_TRACE_SVC, "%s: got dfs_get_state\n", __func__);
621		do_svc_dfs_get_state(avp_svc, msg, len);
622		break;
623	case SVC_MODULE_RESET:
624		DBG(AVP_DBG_TRACE_SVC, "%s: got module_reset\n", __func__);
625		do_svc_module_reset(avp_svc, msg, len);
626		break;
627	case SVC_MODULE_CLOCK:
628		DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock\n", __func__);
629		do_svc_module_clock(avp_svc, msg, len);
630		break;
631	case SVC_DFS_GET_CLK_UTIL:
632		DBG(AVP_DBG_TRACE_SVC, "%s: got get_clk_util\n", __func__);
633		do_svc_dfs_get_clk_util(avp_svc, msg, len);
634		break;
635	case SVC_PRINTF:
636		DBG(AVP_DBG_TRACE_SVC, "%s: got remote printf\n", __func__);
637		do_svc_printf(avp_svc, msg, len);
638		break;
639	case SVC_AVP_WDT_RESET:
640		pr_err("avp_svc: AVP has been reset by watchdog\n");
641		break;
642	case SVC_MODULE_CLOCK_SET:
643		DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock_set\n", __func__);
644		do_svc_module_clock_set(avp_svc, msg, len);
645		break;
646	case SVC_MODULE_CLOCK_GET:
647		DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock_get\n", __func__);
648		do_svc_module_clock_get(avp_svc, msg, len);
649		break;
650	default:
651		pr_warning("avp_svc: Unsupported SVC call 0x%x\n", msg->svc_id);
652		do_svc_unsupported_msg(avp_svc, msg->svc_id);
653		ret = -ENOMSG;
654		break;
655	}
656
657	return ret;
658}
659
/* Service kthread: wait for the AVP peer to connect, then loop receiving
 * and dispatching messages until kthread_stop().  A connection-reset
 * error means the AVP went down; in that case poll for stop in 100ms
 * intervals rather than spinning on the dead endpoint. */
660static int avp_svc_thread(void *data)
661{
662	struct avp_svc_info *avp_svc = data;
663	u8 buf[TEGRA_RPC_MAX_MSG_LEN];
664	struct svc_msg *msg = (struct svc_msg *)buf;
665	int ret;
666	long timeout;
667
668	BUG_ON(!avp_svc->cpu_ep);
669
	/* block indefinitely (-1) until the remote side connects */
670	ret = trpc_wait_peer(avp_svc->cpu_ep, -1);
671	if (ret) {
672		pr_err("%s: no connection from AVP (%d)\n", __func__, ret);
673		goto err;
674	}
675
676	pr_info("%s: got remote peer\n", __func__);
677
678	while (!kthread_should_stop()) {
679		DBG(AVP_DBG_TRACE_SVC, "%s: waiting for message\n", __func__);
680		ret = trpc_recv_msg(avp_svc->rpc_node, avp_svc->cpu_ep, buf,
681				    TEGRA_RPC_MAX_MSG_LEN, -1);
682		DBG(AVP_DBG_TRACE_SVC, "%s: got message\n", __func__);
683
684		if (ret == -ECONNRESET || ret == -ENOTCONN) {
685			wait_queue_head_t wq;
686			init_waitqueue_head(&wq);
687
688			pr_info("%s: AVP seems to be down; "
689				"wait for kthread_stop\n", __func__);
690			timeout = msecs_to_jiffies(100);
691			timeout = wait_event_interruptible_timeout(wq,
692				kthread_should_stop(), timeout);
693			if (timeout == 0)
694				pr_err("%s: timed out while waiting for "
695				       "kthread_stop\n", __func__);
696			continue;
697		} else if (ret <= 0) {
698			pr_err("%s: couldn't receive msg (ret=%d)\n",
699			       __func__, ret);
700			continue;
701		}
		/* positive ret is the received length */
702		dispatch_svc_message(avp_svc, msg, ret);
703	}
704
705err:
	/* drop the endpoint reference taken for us in avp_svc_start() */
706	trpc_put(avp_svc->cpu_ep);
707	pr_info("%s: exiting\n", __func__);
708	return ret;
709}
710
/* Start the service: create the remote-allocation nvmap client, create
 * the RPC_CPU_PORT endpoint, and launch the service kthread (which owns
 * an extra endpoint reference).  Returns 0 or a negative errno, with full
 * unwind of whatever was set up before the failure. */
711int avp_svc_start(struct avp_svc_info *avp_svc)
712{
713	struct trpc_endpoint *ep;
714	int ret;
715
716	avp_svc->nvmap_remote = nvmap_create_client(nvmap_dev, "avp_remote");
717	if (IS_ERR(avp_svc->nvmap_remote)) {
718		pr_err("%s: cannot create remote nvmap client\n", __func__);
719		ret = PTR_ERR(avp_svc->nvmap_remote);
720		goto err_nvmap_create_remote_client;
721	}
722
723	ep = trpc_create(avp_svc->rpc_node, "RPC_CPU_PORT", NULL, NULL);
724	if (IS_ERR(ep)) {
725		pr_err("%s: can't create RPC_CPU_PORT\n", __func__);
726		ret = PTR_ERR(ep);
727		goto err_cpu_port_create;
728	}
729
730	/* TODO: protect this */
731	avp_svc->cpu_ep = ep;
732
733	/* the service thread should get an extra reference for the port */
734	trpc_get(avp_svc->cpu_ep);
735	avp_svc->svc_thread = kthread_run(avp_svc_thread, avp_svc,
736					  "avp_svc_thread");
737	if (IS_ERR_OR_NULL(avp_svc->svc_thread)) {
738		avp_svc->svc_thread = NULL;
739		pr_err("%s: can't create svc thread\n", __func__);
740		ret = -ENOMEM;
741		goto err_kthread;
742	}
743	return 0;
744
745err_kthread:
	/* drop both our reference and the one taken for the thread */
746	trpc_close(avp_svc->cpu_ep);
747	trpc_put(avp_svc->cpu_ep);
748	avp_svc->cpu_ep = NULL;
749err_cpu_port_create:
750	nvmap_client_put(avp_svc->nvmap_remote);
751err_nvmap_create_remote_client:
752	avp_svc->nvmap_remote = NULL;
753	return ret;
754}
755
/* Stop the service: close the endpoint (unblocking the thread's recv),
 * stop the kthread, then force-release any clock references the remote
 * side left enabled so the shared sclk/emc constraints are dropped. */
756void avp_svc_stop(struct avp_svc_info *avp_svc)
757{
758	int ret;
759	int i;
760
761	trpc_close(avp_svc->cpu_ep);
762	ret = kthread_stop(avp_svc->svc_thread);
763	if (ret == -EINTR) {
764		/* the thread never started, drop it's extra reference */
765		trpc_put(avp_svc->cpu_ep);
766	}
767	avp_svc->cpu_ep = NULL;
768
769	nvmap_client_put(avp_svc->nvmap_remote);
770	avp_svc->nvmap_remote = NULL;
771
772	mutex_lock(&avp_svc->clk_lock);
773	for (i = 0; i < NUM_CLK_REQUESTS; i++) {
774		struct avp_clk *aclk = &avp_svc->clks[i];
775		BUG_ON(aclk->refcnt < 0);
776		if (aclk->refcnt > 0) {
777			pr_info("%s: remote left clock '%s' on\n", __func__,
778				aclk->mod->name);
779			clk_disable(aclk->clk);
780			/* sclk/emcclk was enabled once for every clock */
781			clk_set_rate(avp_svc->sclk, 0);
782			clk_disable(avp_svc->sclk);
783			clk_set_rate(avp_svc->emcclk, 0);
784			clk_disable(avp_svc->emcclk);
785		}
786		aclk->refcnt = 0;
787	}
788	mutex_unlock(&avp_svc->clk_lock);
789}
790
/* Allocate and initialize the service context: acquire one clock per
 * AVP-visible module plus the shared sclk and emc clocks, and set the
 * initial emc rate from platform data (or max when none is provided).
 * Returns the context or an ERR_PTR; on failure all acquired clocks are
 * released and the context freed. */
791struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
792				  struct trpc_node *rpc_node)
793{
794	struct tegra_avp_platform_data *pdata;
795	struct avp_svc_info *avp_svc;
796	int ret;
797	int i;
798	int cnt = 0;
799
800	BUG_ON(!rpc_node);
801
802	avp_svc = kzalloc(sizeof(struct avp_svc_info), GFP_KERNEL);
803	if (!avp_svc) {
804		ret = -ENOMEM;
805		goto err_alloc;
806	}
807
808	BUILD_BUG_ON(NUM_CLK_REQUESTS > BITS_PER_LONG);
809
810	pdata = pdev->dev.platform_data;
811
812	for (i = 0; i < NUM_AVP_MODULES; i++) {
813		struct avp_module *mod = &avp_modules[i];
814		struct clk *clk;
815		if (!mod->name)
816			continue;
		/* guard against bad clk_req values / duplicate slots in the table */
817		BUG_ON(mod->clk_req >= NUM_CLK_REQUESTS ||
818		       cnt++ >= NUM_CLK_REQUESTS);
819
820		clk = clk_get(&pdev->dev, mod->name);
821		if (IS_ERR(clk)) {
822			ret = PTR_ERR(clk);
823			pr_err("avp_svc: Couldn't get required clocks\n");
824			goto err_get_clks;
825		}
826		avp_svc->clks[mod->clk_req].clk = clk;
827		avp_svc->clks[mod->clk_req].mod = mod;
828		avp_svc->clks[mod->clk_req].refcnt = 0;
829	}
830
831	avp_svc->sclk = clk_get(&pdev->dev, "sclk");
832	if (IS_ERR(avp_svc->sclk)) {
833		pr_err("avp_svc: Couldn't get sclk for dvfs\n");
834		ret = -ENOENT;
835		goto err_get_clks;
836	}
	/* remember the highest rate sclk supports; used by clock_set policy */
837	avp_svc->max_avp_rate = clk_round_rate(avp_svc->sclk, ULONG_MAX);
838	clk_set_rate(avp_svc->sclk, 0);
839
840	avp_svc->emcclk = clk_get(&pdev->dev, "emc");
841	if (IS_ERR(avp_svc->emcclk)) {
842		pr_err("avp_svc: Couldn't get emcclk for dvfs\n");
843		ret = -ENOENT;
844		goto err_get_clks;
845	}
846
847	/*
848	 * The emc is a shared clock, it will be set to the rate
849	 * requested in platform data. Set the rate to ULONG_MAX
850	 * if platform data is NULL.
851	 */
852	avp_svc->emc_rate = 0;
853	if (pdata) {
854		clk_set_rate(avp_svc->emcclk, pdata->emc_clk_rate);
855		avp_svc->emc_rate = pdata->emc_clk_rate;
856	}
857	else {
858		clk_set_rate(avp_svc->emcclk, ULONG_MAX);
859	}
860
861	avp_svc->rpc_node = rpc_node;
862
863	mutex_init(&avp_svc->clk_lock);
864
865	return avp_svc;
866
867err_get_clks:
868	for (i = 0; i < NUM_CLK_REQUESTS; i++)
869		if (avp_svc->clks[i].clk)
870			clk_put(avp_svc->clks[i].clk);
871	if (!IS_ERR_OR_NULL(avp_svc->sclk))
872		clk_put(avp_svc->sclk);
873	if (!IS_ERR_OR_NULL(avp_svc->emcclk))
874		clk_put(avp_svc->emcclk);
875	kfree(avp_svc);
876err_alloc:
877	return ERR_PTR(ret);
878}
879
/* Release all clocks acquired by avp_svc_init() and free the context.
 * Caller must have stopped the service first (see avp_svc_stop()). */
880void avp_svc_destroy(struct avp_svc_info *avp_svc)
881{
882	int i;
883
884	for (i = 0; i < NUM_CLK_REQUESTS; i++)
885		clk_put(avp_svc->clks[i].clk);
886	clk_put(avp_svc->sclk);
887	clk_put(avp_svc->emcclk);
888
889	kfree(avp_svc);
890}
diff --git a/drivers/media/video/tegra/avp/headavp.S b/drivers/media/video/tegra/avp/headavp.S
new file mode 100644
index 00000000000..c1f8e9fea1c
--- /dev/null
+++ b/drivers/media/video/tegra/avp/headavp.S
@@ -0,0 +1,68 @@
1/*
2 * arch/arm/mach-tegra/headavp.S
3 *
4 * AVP kernel launcher stub; programs the AVP MMU and jumps to the
5 * kernel code. Must use ONLY ARMv4 instructions, and must be compiled
6 * in ARM mode.
7 *
8 * Copyright (c) 2010, NVIDIA Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
23 */
24
25#include <linux/linkage.h>
26#include <asm/assembler.h>
27#include "headavp.h"
28
/* register offsets within the AVP MMU TLB block (base in r0 below) */
#define PTE0_COMPARE 0
/* the default translation will translate any VA within
 * 0x0010:0000..0x001f:ffff to the (megabyte-aligned) value written to
 * _tegra_avp_boot_stub_data.map_phys_addr
 */
#define PTE0_DEFAULT (AVP_KERNEL_VIRT_BASE | 0x3ff0)

#define PTE0_TRANSLATE 4

/* bits OR'd into the translate register; names suggest access
 * attributes (data/code/write/read) -- exact HW semantics not
 * documented here, TODO confirm against TRM */
#define TRANSLATE_DATA	(1 << 11)
#define TRANSLATE_CODE	(1 << 10)
#define TRANSLATE_WR	(1 << 9)
#define TRANSLATE_RD	(1 << 8)
#define TRANSLATE_HIT	(1 << 7)
#define TRANSLATE_EN	(1 << 2)

#define TRANSLATE_OPT	(TRANSLATE_DATA | TRANSLATE_CODE | TRANSLATE_WR | \
			 TRANSLATE_RD | TRANSLATE_HIT)
47
ENTRY(_tegra_avp_boot_stub)
	@ load the four-word parameter block (see struct
	@ tegra_avp_boot_stub_data): r0 = mmu_tlb_base, r1 = jump_addr,
	@ r2 = map_virt_addr, r3 = map_phys_addr
	adr r4, _tegra_avp_boot_stub_data
	ldmia r4, {r0-r3}
#ifdef CONFIG_TEGRA_AVP_KERNEL_ON_MMU
	@ program MMU entry 0: match map_virt_addr ...
	str r2, [r0, #PTE0_COMPARE]
	@ ... and translate it to map_phys_addr with the low attribute
	@ bits cleared, then the translate attributes + enable OR'd in
	bic r3, r3, #0xff0
	bic r3, r3, #0x00f
	orr r3, r3, #TRANSLATE_OPT
	orr r3, r3, #TRANSLATE_EN
	str r3, [r0, #PTE0_TRANSLATE]
#endif
	@ jump to the AVP kernel entry point; never returns
	bx r1
	b .
ENDPROC(_tegra_avp_boot_stub)
	.type _tegra_avp_boot_stub_data, %object
ENTRY(_tegra_avp_boot_stub_data)
	.long AVP_MMU_TLB_BASE	@ mmu_tlb_base
	.long 0xdeadbeef	@ jump_addr: placeholder, presumably patched by the CPU-side loader before AVP boot -- TODO confirm
	.long PTE0_DEFAULT	@ map_virt_addr
	.long 0xdeadd00d	@ map_phys_addr: placeholder, presumably patched likewise -- TODO confirm
	.size _tegra_avp_boot_stub_data, . - _tegra_avp_boot_stub_data
diff --git a/drivers/media/video/tegra/avp/headavp.h b/drivers/media/video/tegra/avp/headavp.h
new file mode 100644
index 00000000000..2bcc3297bfa
--- /dev/null
+++ b/drivers/media/video/tegra/avp/headavp.h
@@ -0,0 +1,41 @@
1/*
2 * arch/arm/mach-tegra/headavp.h
3 *
4 * Copyright (c) 2010, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#ifndef _MACH_TEGRA_HEADAVP_H
22#define _MACH_TEGRA_HEADAVP_H
23
24#define AVP_MMU_TLB_BASE 0xF000F000
25#define AVP_KERNEL_VIRT_BASE 0x00100000
26
27#ifndef __ASSEMBLY__
28
/* Parameter block consumed by _tegra_avp_boot_stub (headavp.S loads
 * these four words into r0-r3 with a single ldmia, in this order). */
struct tegra_avp_boot_stub_data {
	unsigned long mmu_tlb_base;	/* AVP MMU TLB register base (AVP_MMU_TLB_BASE) */
	unsigned long jump_addr;	/* address the stub finally branches to */
	unsigned long map_virt_addr;	/* VA written to the PTE0 compare register */
	unsigned long map_phys_addr;	/* megabyte-aligned PA for the PTE0 translation */
};
35
36extern void _tegra_avp_boot_stub(void);
37extern struct tegra_avp_boot_stub_data _tegra_avp_boot_stub_data;
38
39#endif
40
41#endif
diff --git a/drivers/media/video/tegra/avp/nvavp.h b/drivers/media/video/tegra/avp/nvavp.h
new file mode 100644
index 00000000000..dbc62b48588
--- /dev/null
+++ b/drivers/media/video/tegra/avp/nvavp.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright (C) 2011 Nvidia Corp
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#ifndef __MEDIA_VIDEO_TEGRA_NVAVP_H
16#define __MEDIA_VIDEO_TEGRA_NVAVP_H
17
18#include <linux/tegra_avp.h>
19
20struct tegra_avp_info;
21
22int tegra_avp_open(struct tegra_avp_info **avp);
23int tegra_avp_release(struct tegra_avp_info *avp);
24int tegra_avp_load_lib(struct tegra_avp_info *avp, struct tegra_avp_lib *lib);
25int tegra_avp_unload_lib(struct tegra_avp_info *avp, unsigned long handle);
26
27
28#include <linux/tegra_sema.h>
29
30struct tegra_sema_info;
31
32int tegra_sema_open(struct tegra_sema_info **sema);
33int tegra_sema_release(struct tegra_sema_info *sema);
34int tegra_sema_wait(struct tegra_sema_info *sema, long* timeout);
35int tegra_sema_signal(struct tegra_sema_info *sema);
36
37
38#include <linux/tegra_rpc.h>
39
40struct tegra_rpc_info;
41
42int tegra_rpc_open(struct tegra_rpc_info **rpc);
43int tegra_rpc_release(struct tegra_rpc_info *rpc);
44int tegra_rpc_port_create(struct tegra_rpc_info *rpc, char *name,
45 struct tegra_sema_info *sema);
46int tegra_rpc_get_name(struct tegra_rpc_info *rpc, char* name);
47int tegra_rpc_port_connect(struct tegra_rpc_info *rpc, long timeout);
48int tegra_rpc_port_listen(struct tegra_rpc_info *rpc, long timeout);
49int tegra_rpc_write(struct tegra_rpc_info *rpc, u8* buf, size_t size);
50int tegra_rpc_read(struct tegra_rpc_info *rpc, u8 *buf, size_t max);
51
52
53#endif
diff --git a/drivers/media/video/tegra/avp/tegra_rpc.c b/drivers/media/video/tegra/avp/tegra_rpc.c
new file mode 100644
index 00000000000..a0fd1dc999f
--- /dev/null
+++ b/drivers/media/video/tegra/avp/tegra_rpc.c
@@ -0,0 +1,796 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 *
4 * Author:
5 * Dima Zavin <dima@android.com>
6 *
7 * Based on original NVRM code from NVIDIA, and a partial rewrite by:
8 * Gary King <gking@nvidia.com>
9 *
10 * This software is licensed under the terms of the GNU General Public
11 * License version 2, as published by the Free Software Foundation, and
12 * may be copied, distributed, and modified under those terms.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 */
20
21#include <linux/debugfs.h>
22#include <linux/delay.h>
23#include <linux/err.h>
24#include <linux/kref.h>
25#include <linux/list.h>
26#include <linux/module.h>
27#include <linux/mutex.h>
28#include <linux/rbtree.h>
29#include <linux/sched.h>
30#include <linux/seq_file.h>
31#include <linux/slab.h>
32#include <linux/tegra_rpc.h>
33#include <linux/types.h>
34#include <linux/wait.h>
35
36#include "trpc.h"
37
struct trpc_port;

/* One end of a port: messages queued for this endpoint sit on
 * msg_list; receivers sleep on msg_waitq. */
struct trpc_endpoint {
	struct list_head msg_list;	/* queued trpc_msg's (protected by port->lock) */
	wait_queue_head_t msg_waitq;	/* woken on new message or port close */

	struct trpc_endpoint *out;	/* the opposite endpoint of the same port */
	struct trpc_port *port;		/* owning port */

	struct trpc_node *owner;	/* node that created this endpoint */

	struct completion *connect_done; /* set while a trpc_wait_peer() is pending */
	bool ready;			/* endpoint set up via _ready_ep() */
	struct trpc_ep_ops *ops;	/* optional callbacks (may be NULL) */
	void *priv;			/* owner-private data, see trpc_priv() */
};

/* A named, two-endpoint communication channel. */
struct trpc_port {
	char name[TEGRA_RPC_MAX_NAME_LEN];

	/* protects peer and closed state */
	spinlock_t lock;
	struct trpc_endpoint peers[2];
	bool closed;

	/* private */
	struct kref ref;	/* released via _rpc_port_release() */
	struct rb_node rb_node;	/* linkage in tegra_rpc_info.ports, keyed by name */
};

/* trace bits for the "debug_mask" module parameter */
enum {
	TRPC_TRACE_MSG = 1U << 0,
	TRPC_TRACE_CONN = 1U << 1,
	TRPC_TRACE_PORT = 1U << 2,
};

static u32 trpc_debug_mask;
module_param_named(debug_mask, trpc_debug_mask, uint, S_IWUSR | S_IRUGO);

#define DBG(flag, args...) \
	do { if (trpc_debug_mask & (flag)) pr_info(args); } while (0)

/* Global state for the RPC core (singleton, created in tegra_rpc_init()). */
struct tegra_rpc_info {
	struct kmem_cache *msg_cache;	/* allocator for struct trpc_msg */

	spinlock_t ports_lock;		/* protects the ports rb-tree */
	struct rb_root ports;		/* all ports, ordered by name */

	struct list_head node_list;	/* registered trpc_nodes */
	struct mutex node_lock;		/* protects node_list */
};

/* A single queued message (fixed-size payload buffer). */
struct trpc_msg {
	struct list_head list;

	size_t len;			/* valid bytes in payload */
	u8 payload[TEGRA_RPC_MAX_MSG_LEN];
};

static struct tegra_rpc_info *tegra_rpc;	/* the singleton instance */
static struct dentry *trpc_debug_root;		/* debugfs dir "tegra_rpc" */

static struct trpc_msg *dequeue_msg_locked(struct trpc_endpoint *ep);
100
/* a few accessors for the outside world to keep the trpc_endpoint struct
 * definition private to this module */

/* Return the owner-private pointer attached when @ep was created. */
void *trpc_priv(struct trpc_endpoint *ep)
{
	return ep->priv;
}
107
/* Return the opposite endpoint of @ep's port. */
struct trpc_endpoint *trpc_peer(struct trpc_endpoint *ep)
{
	return ep->out;
}
112
/* Return the name of the port @ep belongs to. */
const char *trpc_name(struct trpc_endpoint *ep)
{
	return ep->port->name;
}
117
/* True once both endpoints of @port are ready; callers hold
 * port->lock (or ports_lock during creation) for a stable answer. */
static inline bool is_connected(struct trpc_port *port)
{
	return port->peers[0].ready && port->peers[1].ready;
}
122
/* True once either side has closed the port (set in trpc_close()). */
static inline bool is_closed(struct trpc_port *port)
{
	return port->closed;
}
127
128static void rpc_port_free(struct tegra_rpc_info *info, struct trpc_port *port)
129{
130 struct trpc_msg *msg;
131 int i;
132
133 for (i = 0; i < 2; ++i) {
134 struct list_head *list = &port->peers[i].msg_list;
135 while (!list_empty(list)) {
136 msg = list_first_entry(list, struct trpc_msg, list);
137 list_del(&msg->list);
138 kmem_cache_free(info->msg_cache, msg);
139 }
140 }
141 kfree(port);
142}
143
/* kref release callback: unlink @port from the global rb-tree and
 * free it. Runs when the last trpc_put() drops the refcount to zero. */
static void _rpc_port_release(struct kref *kref)
{
	struct tegra_rpc_info *info = tegra_rpc;
	struct trpc_port *port = container_of(kref, struct trpc_port, ref);
	unsigned long flags;

	DBG(TRPC_TRACE_PORT, "%s: releasing port '%s' (%p)\n", __func__,
	    port->name, port);
	spin_lock_irqsave(&info->ports_lock, flags);
	rb_erase(&port->rb_node, &info->ports);
	spin_unlock_irqrestore(&info->ports_lock, flags);
	rpc_port_free(info, port);
}
157
/* note that the refcount is actually on the port and not on the endpoint */

/* Drop one reference on @ep's port; frees it (via _rpc_port_release())
 * when the count reaches zero. */
void trpc_put(struct trpc_endpoint *ep)
{
	kref_put(&ep->port->ref, _rpc_port_release);
}
163
/* Take an extra reference on @ep's port. */
void trpc_get(struct trpc_endpoint *ep)
{
	kref_get(&ep->port->ref);
}
168
/* Searches the rb_tree for a port with the provided name. If one is not
 * found, the new port is inserted. Otherwise, the existing port is
 * returned. Must be called with the ports_lock held. */
static struct trpc_port *rpc_port_find_insert(struct tegra_rpc_info *info,
					      struct trpc_port *port)
{
	struct rb_node **p;
	struct rb_node *parent;
	struct trpc_port *tmp;
	int ret = 0;

	p = &info->ports.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct trpc_port, rb_node);

		/* tree is ordered by port name */
		ret = strncmp(port->name, tmp->name, TEGRA_RPC_MAX_NAME_LEN);
		if (ret < 0)
			p = &(*p)->rb_left;
		else if (ret > 0)
			p = &(*p)->rb_right;
		else
			return tmp;	/* name already present */
	}
	rb_link_node(&port->rb_node, parent, p);
	rb_insert_color(&port->rb_node, &info->ports);
	DBG(TRPC_TRACE_PORT, "%s: inserted port '%s' (%p)\n", __func__,
	    port->name, port);
	return port;
}
200
/* Ask each registered node (via its try_connect hook) to connect
 * @from. Returns 0 on the first success, -ECONNREFUSED if every node
 * declines or has no hook. */
static int nodes_try_connect(struct tegra_rpc_info *info,
			     struct trpc_node *src,
			     struct trpc_endpoint *from)
{
	struct trpc_node *node;
	int err = -ECONNREFUSED;

	mutex_lock(&info->node_lock);
	list_for_each_entry(node, &info->node_list, list) {
		if (!node->try_connect)
			continue;
		if (!node->try_connect(node, src, from)) {
			err = 0;
			break;
		}
	}
	mutex_unlock(&info->node_lock);
	return err;
}
221
222static struct trpc_port *rpc_port_alloc(const char *name)
223{
224 struct trpc_port *port;
225 int i;
226
227 port = kzalloc(sizeof(struct trpc_port), GFP_KERNEL);
228 if (!port) {
229 pr_err("%s: can't alloc rpc_port\n", __func__);
230 return NULL;
231 }
232 BUILD_BUG_ON(2 != ARRAY_SIZE(port->peers));
233
234 spin_lock_init(&port->lock);
235 kref_init(&port->ref);
236 strlcpy(port->name, name, TEGRA_RPC_MAX_NAME_LEN);
237 for (i = 0; i < 2; i++) {
238 struct trpc_endpoint *ep = port->peers + i;
239 INIT_LIST_HEAD(&ep->msg_list);
240 init_waitqueue_head(&ep->msg_waitq);
241 ep->port = port;
242 }
243 port->peers[0].out = &port->peers[1];
244 port->peers[1].out = &port->peers[0];
245
246 return port;
247}
248
249/* must be holding the ports lock */
250static inline void handle_port_connected(struct trpc_port *port)
251{
252 int i;
253
254 DBG(TRPC_TRACE_CONN, "tegra_rpc: port '%s' connected\n", port->name);
255
256 for (i = 0; i < 2; i++)
257 if (port->peers[i].connect_done)
258 complete(port->peers[i].connect_done);
259}
260
/* Mark @ep as set up: record its owner, callbacks and private data.
 * Callers hold the relevant lock (port->lock or ports_lock). */
static inline void _ready_ep(struct trpc_endpoint *ep,
			     struct trpc_node *owner,
			     struct trpc_ep_ops *ops,
			     void *priv)
{
	ep->ready = true;
	ep->owner = owner;
	ep->ops = ops;
	ep->priv = priv;
}
271
/* this keeps a reference on the port */
/* Ready the peer endpoint of @ep (the second half of the connection).
 * Returns the peer (with an extra port reference taken on its behalf),
 * or NULL if the peer is already ready or @ep itself is not. */
static struct trpc_endpoint *_create_peer(struct tegra_rpc_info *info,
					  struct trpc_node *owner,
					  struct trpc_endpoint *ep,
					  struct trpc_ep_ops *ops,
					  void *priv)
{
	struct trpc_port *port = ep->port;
	struct trpc_endpoint *peer = ep->out;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	BUG_ON(port->closed);
	/* a peer can only be created once, and only for a ready endpoint */
	if (peer->ready || !ep->ready) {
		peer = NULL;
		goto out;
	}
	_ready_ep(peer, owner, ops, priv);
	if (WARN_ON(!is_connected(port)))
		pr_warning("%s: created peer but no connection established?!\n",
			   __func__);
	else
		handle_port_connected(port);
	trpc_get(peer);
out:
	spin_unlock_irqrestore(&port->lock, flags);
	return peer;
}
300
/* Exported code. This is our interface to the outside world */

/* Create an endpoint on the port named @name for @owner. If no such
 * port exists yet, a fresh one is inserted and its peers[0] returned;
 * otherwise the existing port's peer endpoint is readied via
 * _create_peer(). Returns ERR_PTR(-ENOMEM) on allocation failure or
 * ERR_PTR(-EINVAL) if the existing port cannot accept another end. */
struct trpc_endpoint *trpc_create(struct trpc_node *owner, const char *name,
				  struct trpc_ep_ops *ops, void *priv)
{
	struct tegra_rpc_info *info = tegra_rpc;
	struct trpc_endpoint *ep;
	struct trpc_port *new_port;
	struct trpc_port *port;
	unsigned long flags;

	BUG_ON(!owner);

	/* we always allocate a new port even if one already might exist. This
	 * is slightly inefficient, but it allows us to do the allocation
	 * without holding our ports_lock spinlock. */
	new_port = rpc_port_alloc(name);
	if (!new_port) {
		pr_err("%s: can't allocate memory for '%s'\n", __func__, name);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_irqsave(&info->ports_lock, flags);
	port = rpc_port_find_insert(info, new_port);
	if (port != new_port) {
		rpc_port_free(info, new_port);
		/* There was already a port by that name in the rb_tree,
		 * so just try to create its peer[1], i.e. peer for peer[0]
		 */
		ep = _create_peer(info, owner, &port->peers[0], ops, priv);
		if (!ep) {
			pr_err("%s: port '%s' is not in a connectable state\n",
			       __func__, port->name);
			ep = ERR_PTR(-EINVAL);
		}
		goto out;
	}
	/* don't need to grab the individual port lock here since we must be
	 * holding the ports_lock to add the new element, and never dropped
	 * it, and thus no one could have gotten a reference to this port
	 * and thus the state couldn't have been touched */
	ep = &port->peers[0];
	_ready_ep(ep, owner, ops, priv);
out:
	spin_unlock_irqrestore(&info->ports_lock, flags);
	return ep;
}
347
/* Public wrapper around _create_peer(): ready the peer endpoint of
 * existing endpoint @ep on behalf of @owner. Returns NULL if the port
 * is not in a connectable state. */
struct trpc_endpoint *trpc_create_peer(struct trpc_node *owner,
				       struct trpc_endpoint *ep,
				       struct trpc_ep_ops *ops,
				       void *priv)
{
	struct tegra_rpc_info *info = tegra_rpc;
	struct trpc_endpoint *peer;
	unsigned long flags;

	BUG_ON(!owner);

	spin_lock_irqsave(&info->ports_lock, flags);
	peer = _create_peer(info, owner, ep, ops, priv);
	spin_unlock_irqrestore(&info->ports_lock, flags);
	return peer;
}
364
/* timeout == -1, waits forever
 * timeout == 0, return immediately
 */
/* Establish @from's connection by repeatedly asking the registered
 * nodes (nodes_try_connect()) until both endpoints are ready, a signal
 * arrives, or @timeout milliseconds elapse. Returns 0 on success,
 * -ECONNRESET if the port is closed, -ECONNREFUSED for a single failed
 * attempt with timeout == 0, -EINTR on signal, or -ETIMEDOUT. */
int trpc_connect(struct trpc_endpoint *from, long timeout)
{
	struct tegra_rpc_info *info = tegra_rpc;
	struct trpc_port *port = from->port;
	struct trpc_node *src = from->owner;
	int ret;
	bool no_retry = !timeout;
	unsigned long endtime = jiffies + msecs_to_jiffies(timeout);
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	/* XXX: add state for connections and ports to prevent invalid
	 * states like multiple connections, etc. ? */
	if (unlikely(is_closed(port))) {
		ret = -ECONNRESET;
		pr_err("%s: can't connect to %s, closed\n", __func__,
		       port->name);
		goto out;
	} else if (is_connected(port)) {
		ret = 0;
		goto out;
	}
	spin_unlock_irqrestore(&port->lock, flags);

	do {
		ret = nodes_try_connect(info, src, from);

		spin_lock_irqsave(&port->lock, flags);
		if (is_connected(port)) {
			ret = 0;
			goto out;
		} else if (no_retry) {
			goto out;
		} else if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		spin_unlock_irqrestore(&port->lock, flags);
		/* back off before polling the nodes again */
		usleep_range(5000, 20000);
	} while (timeout < 0 || time_before(jiffies, endtime));

	return -ETIMEDOUT;

out:
	spin_unlock_irqrestore(&port->lock, flags);
	return ret;
}
415
/* convenience function for doing this common pattern in a single call */
/* Create an endpoint on port @name and connect it, closing the
 * endpoint again if the connect fails. Returns the connected endpoint
 * or an ERR_PTR. */
struct trpc_endpoint *trpc_create_connect(struct trpc_node *src,
					  char *name,
					  struct trpc_ep_ops *ops,
					  void *priv,
					  long timeout)
{
	struct trpc_endpoint *ep = trpc_create(src, name, ops, priv);
	int err;

	if (IS_ERR(ep))
		return ep;

	err = trpc_connect(ep, timeout);
	if (!err)
		return ep;

	trpc_close(ep);
	return ERR_PTR(err);
}
438
/* Close @ep's side of the connection: mark the whole port closed, wake
 * any readers/waiters on the peer, invoke the peer's close callback
 * (outside the spinlock), and drop this endpoint's port reference. */
void trpc_close(struct trpc_endpoint *ep)
{
	struct trpc_port *port = ep->port;
	struct trpc_endpoint *peer = ep->out;
	bool need_close_op = false;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	BUG_ON(!ep->ready);
	ep->ready = false;
	port->closed = true;
	if (peer->ready) {
		need_close_op = true;
		/* the peer may be waiting for a message */
		wake_up_all(&peer->msg_waitq);
		if (peer->connect_done)
			complete(peer->connect_done);
	}
	spin_unlock_irqrestore(&port->lock, flags);
	/* close op may sleep (per trpc_ep_ops), so call it unlocked */
	if (need_close_op && peer->ops && peer->ops->close)
		peer->ops->close(peer);
	trpc_put(ep);
}
462
/* Wait up to @timeout ms (-1 = forever, 0 = poll) for @ep's port to
 * become fully connected. Returns 0 once connected, -EBUSY if another
 * waiter is already pending on this endpoint, -ECONNRESET if the port
 * closed, -EAGAIN for a poll that found nothing, -EINTR on signal, or
 * -ETIMEDOUT. */
int trpc_wait_peer(struct trpc_endpoint *ep, long timeout)
{
	struct trpc_port *port = ep->port;
	DECLARE_COMPLETION_ONSTACK(event);
	int ret;
	unsigned long flags;

	if (timeout < 0)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else if (timeout > 0)
		timeout = msecs_to_jiffies(timeout);

	spin_lock_irqsave(&port->lock, flags);
	if (ep->connect_done) {
		/* only one waiter per endpoint at a time */
		ret = -EBUSY;
		goto done;
	} else if (is_connected(port)) {
		ret = 0;
		goto done;
	} else if (is_closed(port)) {
		ret = -ECONNRESET;
		goto done;
	} else if (!timeout) {
		ret = -EAGAIN;
		goto done;
	}
	ep->connect_done = &event;
	spin_unlock_irqrestore(&port->lock, flags);

	/* completed by handle_port_connected() or trpc_close() */
	ret = wait_for_completion_interruptible_timeout(&event, timeout);

	spin_lock_irqsave(&port->lock, flags);
	ep->connect_done = NULL;

	/* re-check the state under the lock to decide the final result */
	if (is_connected(port)) {
		ret = 0;
	} else {
		if (is_closed(port))
			ret = -ECONNRESET;
		else if (ret == -ERESTARTSYS)
			ret = -EINTR;
		else if (!ret)
			ret = -ETIMEDOUT;
	}

done:
	spin_unlock_irqrestore(&port->lock, flags);
	return ret;
}
512
/* Index (0 or 1) of @ep within its port's peers[] array; used for
 * trace/error messages. */
static inline int _ep_id(struct trpc_endpoint *ep)
{
	return ep - ep->port->peers;
}
517
/* Copy @buf into a message from the global cache and append it to the
 * peer's receive queue, then fire the peer's notify_recv callback and
 * wake any sleeping readers. Returns 0, -ENOMEM on allocation failure,
 * -ECONNRESET if the port closed, or -ENOTCONN if never connected. */
static int queue_msg(struct trpc_node *src, struct trpc_endpoint *from,
		     void *buf, size_t len, gfp_t gfp_flags)
{
	struct tegra_rpc_info *info = tegra_rpc;
	struct trpc_endpoint *peer = from->out;
	struct trpc_port *port = from->port;
	struct trpc_msg *msg;
	unsigned long flags;
	int ret;

	BUG_ON(len > TEGRA_RPC_MAX_MSG_LEN);
	/* shouldn't be enqueueing to the endpoint */
	BUG_ON(peer->ops && peer->ops->send);

	DBG(TRPC_TRACE_MSG, "%s: queueing message for %s.%d\n", __func__,
	    port->name, _ep_id(peer));

	msg = kmem_cache_alloc(info->msg_cache, gfp_flags);
	if (!msg) {
		pr_err("%s: can't alloc memory for msg\n", __func__);
		return -ENOMEM;
	}

	/* fill in the message before taking the port lock */
	memcpy(msg->payload, buf, len);
	msg->len = len;

	spin_lock_irqsave(&port->lock, flags);
	if (is_closed(port)) {
		pr_err("%s: cannot send message for closed port %s.%d\n",
		       __func__, port->name, _ep_id(peer));
		ret = -ECONNRESET;
		goto err;
	} else if (!is_connected(port)) {
		pr_err("%s: cannot send message for unconnected port %s.%d\n",
		       __func__, port->name, _ep_id(peer));
		ret = -ENOTCONN;
		goto err;
	}

	list_add_tail(&msg->list, &peer->msg_list);
	/* notify_recv must not sleep (per trpc_ep_ops) -- called locked */
	if (peer->ops && peer->ops->notify_recv)
		peer->ops->notify_recv(peer);
	wake_up_all(&peer->msg_waitq);
	spin_unlock_irqrestore(&port->lock, flags);
	return 0;

err:
	spin_unlock_irqrestore(&port->lock, flags);
	kmem_cache_free(info->msg_cache, msg);
	return ret;
}
569
/* Returns -ENOMEM if failed to allocate memory for the message. */
/* Deliver @buf/@len from @from to its peer: synchronously via the
 * peer's send op (which may sleep) when one is set, otherwise by
 * queueing a copy with queue_msg(). */
int trpc_send_msg(struct trpc_node *src, struct trpc_endpoint *from,
		  void *buf, size_t len, gfp_t gfp_flags)
{
	struct trpc_endpoint *peer = from->out;
	struct trpc_port *port = from->port;

	BUG_ON(len > TEGRA_RPC_MAX_MSG_LEN);

	DBG(TRPC_TRACE_MSG, "%s: sending message from %s.%d to %s.%d\n",
	    __func__, port->name, _ep_id(from), port->name, _ep_id(peer));

	if (peer->ops && peer->ops->send) {
		might_sleep();
		return peer->ops->send(peer, buf, len);
	} else {
		might_sleep_if(gfp_flags & __GFP_WAIT);
		return queue_msg(src, from, buf, len, gfp_flags);
	}
}
590
591static inline struct trpc_msg *dequeue_msg_locked(struct trpc_endpoint *ep)
592{
593 struct trpc_msg *msg = NULL;
594
595 if (!list_empty(&ep->msg_list)) {
596 msg = list_first_entry(&ep->msg_list, struct trpc_msg, list);
597 list_del_init(&msg->list);
598 }
599
600 return msg;
601}
602
603static bool __should_wake(struct trpc_endpoint *ep)
604{
605 struct trpc_port *port = ep->port;
606 unsigned long flags;
607 bool ret;
608
609 spin_lock_irqsave(&port->lock, flags);
610 ret = !list_empty(&ep->msg_list) || is_closed(port);
611 spin_unlock_irqrestore(&port->lock, flags);
612 return ret;
613}
614
/* Receive one message on @ep into @buf (at most @buf_len bytes; longer
 * messages are truncated). @timeout is in ms: 0 polls, < 0 waits
 * forever. Returns the number of bytes copied, 0 for an empty poll, or
 * a negative error (-ECONNRESET, -ENOTCONN, -ETIMEDOUT, -EINTR). */
int trpc_recv_msg(struct trpc_node *src, struct trpc_endpoint *ep,
		  void *buf, size_t buf_len, long timeout)
{
	struct tegra_rpc_info *info = tegra_rpc;
	struct trpc_port *port = ep->port;
	struct trpc_msg *msg;
	size_t len;
	long ret;
	unsigned long flags;

	BUG_ON(buf_len > TEGRA_RPC_MAX_MSG_LEN);

	spin_lock_irqsave(&port->lock, flags);
	/* we allow closed ports to finish receiving already-queued messages */
	msg = dequeue_msg_locked(ep);
	if (msg) {
		goto got_msg;
	} else if (is_closed(port)) {
		ret = -ECONNRESET;
		goto out;
	} else if (!is_connected(port)) {
		ret = -ENOTCONN;
		goto out;
	}

	if (timeout == 0) {
		ret = 0;
		goto out;
	} else if (timeout < 0) {
		timeout = MAX_SCHEDULE_TIMEOUT;
	} else {
		timeout = msecs_to_jiffies(timeout);
	}
	spin_unlock_irqrestore(&port->lock, flags);
	DBG(TRPC_TRACE_MSG, "%s: waiting for message for %s.%d\n", __func__,
	    port->name, _ep_id(ep));

	/* woken by queue_msg() or trpc_close() */
	ret = wait_event_interruptible_timeout(ep->msg_waitq, __should_wake(ep),
					       timeout);

	DBG(TRPC_TRACE_MSG, "%s: woke up for %s\n", __func__, port->name);
	spin_lock_irqsave(&port->lock, flags);
	msg = dequeue_msg_locked(ep);
	if (!msg) {
		/* no message: map the wait result to an error code */
		if (is_closed(port))
			ret = -ECONNRESET;
		else if (!ret)
			ret = -ETIMEDOUT;
		else if (ret == -ERESTARTSYS)
			ret = -EINTR;
		else
			pr_err("%s: error (%d) while receiving msg for '%s'\n",
			       __func__, (int)ret, port->name);
		goto out;
	}

got_msg:
	spin_unlock_irqrestore(&port->lock, flags);
	len = min(buf_len, msg->len);
	memcpy(buf, msg->payload, len);
	kmem_cache_free(info->msg_cache, msg);
	return len;

out:
	spin_unlock_irqrestore(&port->lock, flags);
	return ret;
}
682
/* Add @node to the global node list so its try_connect hook is
 * consulted by trpc_connect(). Local nodes are placed at the head so
 * they are tried before remote ones. Returns -ENOMEM if the RPC core
 * has not been initialized yet. */
int trpc_node_register(struct trpc_node *node)
{
	struct tegra_rpc_info *info = tegra_rpc;

	if (!info)
		return -ENOMEM;

	pr_info("%s: Adding '%s' to node list\n", __func__, node->name);

	mutex_lock(&info->node_lock);
	if (node->type == TRPC_NODE_LOCAL)
		list_add(&node->list, &info->node_list);
	else
		list_add_tail(&node->list, &info->node_list);
	mutex_unlock(&info->node_lock);
	return 0;
}
700
/* Remove @node from the RPC core's node list. */
void trpc_node_unregister(struct trpc_node *node)
{
	struct tegra_rpc_info *info = tegra_rpc;

	mutex_lock(&info->node_lock);
	list_del(&node->list);
	mutex_unlock(&info->node_lock);
}
709
/* debugfs "ports" seq_file: dump every port, its closed state, and
 * both endpoints, delegating extra detail to each endpoint's show op. */
static int trpc_debug_ports_show(struct seq_file *s, void *data)
{
	struct tegra_rpc_info *info = s->private;
	struct rb_node *n;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&info->ports_lock, flags);
	for (n = rb_first(&info->ports); n; n = rb_next(n)) {
		struct trpc_port *port = rb_entry(n, struct trpc_port, rb_node);
		seq_printf(s, "port: %s\n closed:%s\n", port->name,
			   port->closed ? "yes" : "no");

		spin_lock(&port->lock);
		for (i = 0; i < ARRAY_SIZE(port->peers); i++) {
			struct trpc_endpoint *ep = &port->peers[i];
			seq_printf(s, " peer%d: %s\n ready:%s\n", i,
				   ep->owner ? ep->owner->name : "<none>",
				   ep->ready ? "yes" : "no");
			/* show op must not sleep or call back into trpc */
			if (ep->ops && ep->ops->show)
				ep->ops->show(s, ep);
		}
		spin_unlock(&port->lock);
	}
	spin_unlock_irqrestore(&info->ports_lock, flags);

	return 0;
}
738
/* debugfs open: bind the seq_file to the tegra_rpc_info instance. */
static int trpc_debug_ports_open(struct inode *inode, struct file *file)
{
	return single_open(file, trpc_debug_ports_show, inode->i_private);
}
743
/* File operations for the debugfs "ports" file (read-only seq_file). */
static const struct file_operations trpc_debug_ports_fops = {
	.open = trpc_debug_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
750
/* Create the debugfs hierarchy tegra_rpc/ports. Failure is non-fatal;
 * the RPC core works without debugfs. */
static void trpc_debug_init(struct tegra_rpc_info *info)
{
	trpc_debug_root = debugfs_create_dir("tegra_rpc", NULL);
	if (IS_ERR_OR_NULL(trpc_debug_root)) {
		pr_err("%s: couldn't create debug files\n", __func__);
		return;
	}

	debugfs_create_file("ports", 0664, trpc_debug_root, info,
			    &trpc_debug_ports_fops);
}
762
/* Module init (registered as a subsys_initcall below): allocate the
 * singleton tegra_rpc_info, its message cache and debugfs entries. */
static int __init tegra_rpc_init(void)
{
	struct tegra_rpc_info *rpc_info;
	int ret;

	rpc_info = kzalloc(sizeof(struct tegra_rpc_info), GFP_KERNEL);
	if (!rpc_info) {
		pr_err("%s: error allocating rpc_info\n", __func__);
		return -ENOMEM;
	}

	rpc_info->ports = RB_ROOT;
	spin_lock_init(&rpc_info->ports_lock);
	INIT_LIST_HEAD(&rpc_info->node_list);
	mutex_init(&rpc_info->node_lock);

	rpc_info->msg_cache = KMEM_CACHE(trpc_msg, 0);
	if (!rpc_info->msg_cache) {
		pr_err("%s: unable to create message cache\n", __func__);
		ret = -ENOMEM;
		goto err_kmem_cache;
	}

	trpc_debug_init(rpc_info);
	/* publish the singleton only once fully initialized */
	tegra_rpc = rpc_info;

	return 0;

err_kmem_cache:
	kfree(rpc_info);
	return ret;
}

subsys_initcall(tegra_rpc_init);
diff --git a/drivers/media/video/tegra/avp/trpc.h b/drivers/media/video/tegra/avp/trpc.h
new file mode 100644
index 00000000000..e7b0d2d5578
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc.h
@@ -0,0 +1,80 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 *
4 * Author:
5 * Dima Zavin <dima@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#ifndef __ARM_MACH_TEGRA_RPC_H
19#define __ARM_MACH_TEGRA_RPC_H
20
21#include <linux/list.h>
22#include <linux/seq_file.h>
23#include <linux/tegra_rpc.h>
24
struct trpc_endpoint;

/* Optional per-endpoint callbacks (any pointer may be NULL). */
struct trpc_ep_ops {
	/* send is allowed to sleep */
	int (*send)(struct trpc_endpoint *ep, void *buf, size_t len);
	/* notify_recv is NOT allowed to sleep */
	void (*notify_recv)(struct trpc_endpoint *ep);
	/* close is allowed to sleep */
	void (*close)(struct trpc_endpoint *ep);
	/* not allowed to sleep, not allowed to call back into trpc */
	void (*show)(struct seq_file *s, struct trpc_endpoint *ep);
};

/* node types: local nodes are tried first by the core's connect path */
enum {
	TRPC_NODE_LOCAL,
	TRPC_NODE_REMOTE,
};

/* A connection provider registered with the RPC core. */
struct trpc_node {
	struct list_head list;	/* linkage in the core's node list */
	const char *name;
	int type;		/* TRPC_NODE_LOCAL or TRPC_NODE_REMOTE */
	void *priv;

	/* asked to connect @from; return 0 on success */
	int (*try_connect)(struct trpc_node *node,
			   struct trpc_node *src,
			   struct trpc_endpoint *from);
};
52
53struct trpc_endpoint *trpc_peer(struct trpc_endpoint *ep);
54void *trpc_priv(struct trpc_endpoint *ep);
55const char *trpc_name(struct trpc_endpoint *ep);
56
57void trpc_put(struct trpc_endpoint *ep);
58void trpc_get(struct trpc_endpoint *ep);
59
60int trpc_send_msg(struct trpc_node *src, struct trpc_endpoint *ep, void *buf,
61 size_t len, gfp_t gfp_flags);
62int trpc_recv_msg(struct trpc_node *src, struct trpc_endpoint *ep,
63 void *buf, size_t len, long timeout);
64struct trpc_endpoint *trpc_create(struct trpc_node *owner, const char *name,
65 struct trpc_ep_ops *ops, void *priv);
66struct trpc_endpoint *trpc_create_connect(struct trpc_node *src, char *name,
67 struct trpc_ep_ops *ops, void *priv,
68 long timeout);
69int trpc_connect(struct trpc_endpoint *from, long timeout);
70struct trpc_endpoint *trpc_create_peer(struct trpc_node *owner,
71 struct trpc_endpoint *ep,
72 struct trpc_ep_ops *ops,
73 void *priv);
74void trpc_close(struct trpc_endpoint *ep);
75int trpc_wait_peer(struct trpc_endpoint *ep, long timeout);
76
77int trpc_node_register(struct trpc_node *node);
78void trpc_node_unregister(struct trpc_node *node);
79
80#endif
diff --git a/drivers/media/video/tegra/avp/trpc_local.c b/drivers/media/video/tegra/avp/trpc_local.c
new file mode 100644
index 00000000000..77692e09438
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc_local.c
@@ -0,0 +1,419 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 *
4 * Author:
5 * Dima Zavin <dima@android.com>
6 *
7 * Based on original NVRM code from NVIDIA, and a partial rewrite by
8 * Gary King <gking@nvidia.com>
9 *
10 * This software is licensed under the terms of the GNU General Public
11 * License version 2, as published by the Free Software Foundation, and
12 * may be copied, distributed, and modified under those terms.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 */
20
21#include <linux/err.h>
22#include <linux/file.h>
23#include <linux/fs.h>
24#include <linux/list.h>
25#include <linux/miscdevice.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <linux/spinlock.h>
29#include <linux/tegra_rpc.h>
30#include <linux/types.h>
31#include <linux/uaccess.h>
32#include <linux/wait.h>
33
34#include "trpc.h"
35#include "trpc_sema.h"
36#include "nvavp.h"
37
/* Per-open state for /dev/tegra_rpc: at most one endpoint (port), plus
 * an optional semaphore signalled when a message arrives (see
 * rpc_notify_recv). */
struct tegra_rpc_info {
	struct trpc_endpoint *rpc_ep;
	struct tegra_sema_info *sema;
};

/* ports names reserved for system functions, i.e. communicating with the
 * AVP */
static const char reserved_ports[][TEGRA_RPC_MAX_NAME_LEN] = {
	"RPC_AVP_PORT",
	"RPC_CPU_PORT",
};
static int num_reserved_ports = ARRAY_SIZE(reserved_ports);

static void rpc_notify_recv(struct trpc_endpoint *ep);

/* TODO: do we need to do anything when port is closed from the other side? */
static struct trpc_ep_ops ep_ops = {
	.notify_recv = rpc_notify_recv,
};

/* this file implements the "local" (CPU-side) node */
static struct trpc_node rpc_node = {
	.name = "local",
	.type = TRPC_NODE_LOCAL,
};
62
63static void rpc_notify_recv(struct trpc_endpoint *ep)
64{
65 struct tegra_rpc_info *info = trpc_priv(ep);
66
67 if (WARN_ON(!info))
68 return;
69 if (info->sema)
70 tegra_sema_signal(info->sema);
71}
72
73int tegra_rpc_open(struct tegra_rpc_info **info)
74{
75 struct tegra_rpc_info *new_info;
76
77 new_info = kzalloc(sizeof(struct tegra_rpc_info), GFP_KERNEL);
78 if (!new_info)
79 return -ENOMEM;
80
81 *info = new_info;
82 return 0;
83}
84
85static int local_rpc_open(struct inode *inode, struct file *file)
86{
87 struct tegra_rpc_info *info;
88 int ret = 0;
89
90 ret = tegra_rpc_open(&info);
91 if (ret < 0)
92 return -ENOMEM;
93
94 nonseekable_open(inode, file);
95 file->private_data = info;
96 return 0;
97}
98
99int tegra_rpc_release(struct tegra_rpc_info *info)
100{
101 if (info->rpc_ep)
102 trpc_close(info->rpc_ep);
103 if (info->sema)
104 trpc_sema_put(info->sema);
105 kfree(info);
106 return 0;
107}
108EXPORT_SYMBOL(tegra_rpc_release);
109
110static int local_rpc_release(struct inode *inode, struct file *file)
111{
112 struct tegra_rpc_info *info = file->private_data;
113
114 tegra_rpc_release(info);
115 file->private_data = NULL;
116 return 0;
117}
118
/* State for generating unique port names when userspace passes an empty
 * name.  The counter string is incremented like a little-endian base-25
 * number over 'a'..'y' ('z' triggers the carry and is never emitted);
 * the trailing '+' stays fixed.  NOTE(review): presumably '+' marks the
 * name as auto-generated so it cannot collide with user-chosen names —
 * confirm against the port-name rules. */
static char uniq_name[] = "aaaaaaaa+";
static const int uniq_len = sizeof(uniq_name) - 1;
static DEFINE_MUTEX(uniq_lock);

/* Produce the next unique name into new_name (TEGRA_RPC_MAX_NAME_LEN
 * bytes).  Serialized by uniq_lock since the counter is shared. */
static void _gen_port_name(char *new_name)
{
	int i;

	mutex_lock(&uniq_lock);
	for (i = 0; i < uniq_len - 1; i++) {
		++uniq_name[i];
		if (uniq_name[i] != 'z')
			break;		/* no carry needed */
		uniq_name[i] = 'a';	/* wrap this digit, carry into next */
	}
	strlcpy(new_name, uniq_name, TEGRA_RPC_MAX_NAME_LEN);
	mutex_unlock(&uniq_lock);
}
137
138static int _validate_port_name(const char *name)
139{
140 int i;
141
142 for (i = 0; i < num_reserved_ports; i++)
143 if (!strncmp(name, reserved_ports[i], TEGRA_RPC_MAX_NAME_LEN))
144 return -EINVAL;
145 return 0;
146}
147
148int tegra_rpc_port_create(struct tegra_rpc_info *info, char *name,
149 struct tegra_sema_info *sema)
150{
151 struct trpc_endpoint *ep;
152 int ret = 0;
153
154 if (info->rpc_ep) {
155 ret = -EINVAL;
156 goto err;
157 }
158
159 name[TEGRA_RPC_MAX_NAME_LEN - 1] = '\0';
160 if (name[0]) {
161 ret = _validate_port_name(name);
162 if (ret)
163 goto err;
164 } else {
165 _gen_port_name(name);
166 }
167 ep = trpc_create(&rpc_node, name, &ep_ops, info);
168 if (IS_ERR(ep)) {
169 ret = PTR_ERR(ep);
170 goto err;
171 }
172 info->rpc_ep = ep;
173 info->sema = sema;
174 return 0;
175
176err:
177 return ret;
178}
179
/* Copy this handle's port name into 'name'.
 * NOTE(review): plain strcpy — assumes the caller's buffer holds at
 * least TEGRA_RPC_MAX_NAME_LEN bytes (trpc names are created bounded by
 * that length in tegra_rpc_port_create); confirm at all call sites. */
int tegra_rpc_get_name(struct tegra_rpc_info *info, char* name)
{
	if (!info->rpc_ep)
		return -EINVAL;

	strcpy(name, trpc_name(info->rpc_ep));
	return 0;
}
188
189int tegra_rpc_port_connect(struct tegra_rpc_info *info, long timeout)
190{
191 if (!info->rpc_ep)
192 return -EINVAL;
193
194 return trpc_connect(info->rpc_ep, timeout);
195
196}
197
198int tegra_rpc_port_listen(struct tegra_rpc_info *info, long timeout)
199{
200 if (!info->rpc_ep)
201 return -EINVAL;
202
203 return trpc_wait_peer(info->rpc_ep, timeout);
204}
205
206static long local_rpc_ioctl(struct file *file, unsigned int cmd,
207 unsigned long arg)
208{
209 struct tegra_rpc_info *info = file->private_data;
210 struct tegra_rpc_port_desc desc;
211 struct tegra_sema_info *sema = NULL;
212 int ret = 0;
213
214 if (_IOC_TYPE(cmd) != TEGRA_RPC_IOCTL_MAGIC ||
215 _IOC_NR(cmd) < TEGRA_RPC_IOCTL_MIN_NR ||
216 _IOC_NR(cmd) > TEGRA_RPC_IOCTL_MAX_NR) {
217 ret = -ENOTTY;
218 goto err;
219 }
220
221 switch (cmd) {
222 case TEGRA_RPC_IOCTL_PORT_CREATE:
223
224 if (_IOC_SIZE(cmd) != sizeof(struct tegra_rpc_port_desc))
225 return -EINVAL;
226 if (copy_from_user(&desc, (void __user *)arg, sizeof(desc)))
227 return -EFAULT;
228 if (desc.notify_fd != -1) {
229 sema = trpc_sema_get_from_fd(desc.notify_fd);
230 if (IS_ERR(sema)) {
231 ret = PTR_ERR(sema);
232 goto err;
233 }
234 }
235
236 ret = tegra_rpc_port_create(info, desc.name, sema);
237 if (ret < 0)
238 goto err;
239
240 break;
241 case TEGRA_RPC_IOCTL_PORT_GET_NAME:
242 if (!info->rpc_ep) {
243 ret = -EINVAL;
244 goto err;
245 }
246 if (copy_to_user((void __user *)arg,
247 trpc_name(info->rpc_ep),
248 TEGRA_RPC_MAX_NAME_LEN)) {
249 ret = -EFAULT;
250 goto err;
251 }
252 break;
253 case TEGRA_RPC_IOCTL_PORT_CONNECT:
254 if (!info->rpc_ep) {
255 ret = -EINVAL;
256 goto err;
257 }
258 ret = trpc_connect(info->rpc_ep, (long)arg);
259 if (ret) {
260 pr_err("%s: can't connect to '%s' (%d)\n", __func__,
261 trpc_name(info->rpc_ep), ret);
262 goto err;
263 }
264 break;
265 case TEGRA_RPC_IOCTL_PORT_LISTEN:
266 if (!info->rpc_ep) {
267 ret = -EINVAL;
268 goto err;
269 }
270 ret = trpc_wait_peer(info->rpc_ep, (long)arg);
271 if (ret) {
272 pr_err("%s: error waiting for peer for '%s' (%d)\n",
273 __func__, trpc_name(info->rpc_ep), ret);
274 goto err;
275 }
276 break;
277 default:
278 pr_err("%s: unknown cmd %d\n", __func__, _IOC_NR(cmd));
279 ret = -EINVAL;
280 goto err;
281 }
282
283 return 0;
284
285err:
286 if (ret && ret != -ERESTARTSYS)
287 pr_err("tegra_rpc: pid=%d ioctl=%x/%lx (%x) ret=%d\n",
288 current->pid, cmd, arg, _IOC_NR(cmd), ret);
289 return (long)ret;
290}
291
/* Send 'size' bytes from 'buf' over this handle's endpoint.
 * Returns the number of bytes sent (== size) on success or a negative
 * error.  Uses GFP_KERNEL, so callers must be in process context. */
int tegra_rpc_write(struct tegra_rpc_info *info, u8* buf, size_t size)
{
	int ret;

	if (!info->rpc_ep)
		return -EINVAL;

	/* messages are bounded; larger payloads must be split by the caller */
	if (TEGRA_RPC_MAX_MSG_LEN < size)
		return -EINVAL;

	ret = trpc_send_msg(&rpc_node, info->rpc_ep, buf, size,
			    GFP_KERNEL);
	if (ret)
		return ret;
	return size;
}
308
309static ssize_t local_rpc_write(struct file *file, const char __user *buf,
310 size_t count, loff_t *ppos)
311{
312 struct tegra_rpc_info *info = file->private_data;
313 u8 data[TEGRA_RPC_MAX_MSG_LEN];
314 int ret;
315
316 if (!info)
317 return -EINVAL;
318 else if (count > TEGRA_RPC_MAX_MSG_LEN)
319 return -EINVAL;
320
321 if (copy_from_user(data, buf, count))
322 return -EFAULT;
323
324 ret = trpc_send_msg(&rpc_node, info->rpc_ep, data, count,
325 GFP_KERNEL);
326 if (ret)
327 return ret;
328 return count;
329}
330
331int tegra_rpc_read(struct tegra_rpc_info *info, u8 *buf, size_t max)
332{
333 int ret;
334
335 if (max > TEGRA_RPC_MAX_MSG_LEN)
336 return -EINVAL;
337
338 ret = trpc_recv_msg(&rpc_node, info->rpc_ep, buf,
339 TEGRA_RPC_MAX_MSG_LEN, 0);
340 if (ret == 0)
341 return 0;
342 else if (ret < 0)
343 return ret;
344 else if (ret > max)
345 return -ENOSPC;
346
347 return ret;
348}
349
350static ssize_t local_rpc_read(struct file *file, char __user *buf, size_t max,
351 loff_t *ppos)
352{
353 struct tegra_rpc_info *info = file->private_data;
354 int ret;
355 u8 data[TEGRA_RPC_MAX_MSG_LEN];
356
357 if (max > TEGRA_RPC_MAX_MSG_LEN)
358 return -EINVAL;
359
360 ret = trpc_recv_msg(&rpc_node, info->rpc_ep, data,
361 TEGRA_RPC_MAX_MSG_LEN, 0);
362 if (ret == 0)
363 return 0;
364 else if (ret < 0)
365 return ret;
366 else if (ret > max)
367 return -ENOSPC;
368 else if (copy_to_user(buf, data, ret))
369 return -EFAULT;
370
371 return ret;
372}
373
/* Character-device interface: /dev/tegra_rpc (dynamic misc minor). */
static const struct file_operations local_rpc_misc_fops = {
	.owner = THIS_MODULE,
	.open = local_rpc_open,
	.release = local_rpc_release,
	.unlocked_ioctl = local_rpc_ioctl,
	.write = local_rpc_write,
	.read = local_rpc_read,
};

static struct miscdevice local_rpc_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "tegra_rpc",
	.fops = &local_rpc_misc_fops,
};
388
389int __init rpc_local_init(void)
390{
391 int ret;
392
393 ret = trpc_sema_init();
394 if (ret) {
395 pr_err("%s: error in trpc_sema_init\n", __func__);
396 goto err_sema_init;
397 }
398
399 ret = misc_register(&local_rpc_misc_device);
400 if (ret) {
401 pr_err("%s: can't register misc device\n", __func__);
402 goto err_misc;
403 }
404
405 ret = trpc_node_register(&rpc_node);
406 if (ret) {
407 pr_err("%s: can't register rpc node\n", __func__);
408 goto err_node_reg;
409 }
410 return 0;
411
412err_node_reg:
413 misc_deregister(&local_rpc_misc_device);
414err_misc:
415err_sema_init:
416 return ret;
417}
418
419module_init(rpc_local_init);
diff --git a/drivers/media/video/tegra/avp/trpc_sema.c b/drivers/media/video/tegra/avp/trpc_sema.c
new file mode 100644
index 00000000000..cd717a1a0ca
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc_sema.c
@@ -0,0 +1,244 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 *
4 * Author:
5 * Dima Zavin <dima@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/err.h>
19#include <linux/file.h>
20#include <linux/fs.h>
21#include <linux/miscdevice.h>
22#include <linux/sched.h>
23#include <linux/slab.h>
24#include <linux/spinlock.h>
25#include <linux/tegra_sema.h>
26#include <linux/types.h>
27#include <linux/uaccess.h>
28#include <linux/wait.h>
29
30#include "trpc_sema.h"
31
/* A counting semaphore exposed to userspace as /dev/tegra_sema.
 * 'count' is protected by 'lock'; waiters sleep on 'wq'. */
struct tegra_sema_info {
	struct file *file;	/* backing file of this open instance */
	wait_queue_head_t wq;
	spinlock_t lock;
	int count;
};

/* dynamic misc minor of /dev/tegra_sema; -1 until trpc_sema_init runs */
static int rpc_sema_minor = -1;
40
/* True iff 'file' is an open instance of our misc device, identified by
 * the MISC major plus the dynamic minor recorded at init. */
static inline bool is_trpc_sema_file(struct file *file)
{
	dev_t rdev = file->f_dentry->d_inode->i_rdev;

	if (MAJOR(rdev) == MISC_MAJOR && MINOR(rdev) == rpc_sema_minor)
		return true;
	return false;
}
49
/* Resolve a userspace fd to its tegra_sema instance.
 * On success the file reference taken by fget() is deliberately kept:
 * it pins the tegra_sema_info alive and is dropped later via
 * trpc_sema_put().  Returns ERR_PTR(-EINVAL) for bad or foreign fds. */
struct tegra_sema_info *trpc_sema_get_from_fd(int fd)
{
	struct file *file;

	file = fget(fd);
	if (unlikely(file == NULL)) {
		pr_err("%s: fd %d is invalid\n", __func__, fd);
		return ERR_PTR(-EINVAL);
	}

	if (!is_trpc_sema_file(file)) {
		pr_err("%s: fd (%d) is not a trpc_sema file\n", __func__, fd);
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
68
69void trpc_sema_put(struct tegra_sema_info *info)
70{
71 if (info->file)
72 fput(info->file);
73}
74
75int tegra_sema_signal(struct tegra_sema_info *info)
76{
77 unsigned long flags;
78
79 if (!info)
80 return -EINVAL;
81
82 spin_lock_irqsave(&info->lock, flags);
83 info->count++;
84 wake_up_interruptible_all(&info->wq);
85 spin_unlock_irqrestore(&info->lock, flags);
86 return 0;
87}
88
89int tegra_sema_wait(struct tegra_sema_info *info, long *timeout)
90{
91 unsigned long flags;
92 int ret = 0;
93 unsigned long endtime;
94 long timeleft = *timeout;
95
96 *timeout = 0;
97 if (timeleft < 0)
98 timeleft = MAX_SCHEDULE_TIMEOUT;
99
100 timeleft = msecs_to_jiffies(timeleft);
101 endtime = jiffies + timeleft;
102
103again:
104 if (timeleft)
105 ret = wait_event_interruptible_timeout(info->wq,
106 info->count > 0,
107 timeleft);
108 spin_lock_irqsave(&info->lock, flags);
109 if (info->count > 0) {
110 info->count--;
111 ret = 0;
112 } else if (ret == 0 || timeout == 0) {
113 ret = -ETIMEDOUT;
114 } else if (ret < 0) {
115 ret = -EINTR;
116 if (timeleft != MAX_SCHEDULE_TIMEOUT &&
117 time_before(jiffies, endtime))
118 *timeout = jiffies_to_msecs(endtime - jiffies);
119 else
120 *timeout = 0;
121 } else {
122 /* we woke up but someone else got the semaphore and we have
123 * time left, try again */
124 timeleft = ret;
125 spin_unlock_irqrestore(&info->lock, flags);
126 goto again;
127 }
128 spin_unlock_irqrestore(&info->lock, flags);
129 return ret;
130}
131
132int tegra_sema_open(struct tegra_sema_info **sema)
133{
134 struct tegra_sema_info *info;
135 info = kzalloc(sizeof(struct tegra_sema_info), GFP_KERNEL);
136 if (!info)
137 return -ENOMEM;
138
139 init_waitqueue_head(&info->wq);
140 spin_lock_init(&info->lock);
141 *sema = info;
142 return 0;
143}
144
145static int trpc_sema_open(struct inode *inode, struct file *file)
146{
147 struct tegra_sema_info *info;
148 int ret;
149
150 ret = tegra_sema_open(&info);
151 if (ret < 0)
152 return ret;
153
154 info->file = file;
155 nonseekable_open(inode, file);
156 file->private_data = info;
157 return 0;
158}
159
/* Free a semaphore instance; no other teardown is needed. */
int tegra_sema_release(struct tegra_sema_info *sema)
{
	kfree(sema);
	return 0;
}
165
166static int trpc_sema_release(struct inode *inode, struct file *file)
167{
168 struct tegra_sema_info *info = file->private_data;
169
170 file->private_data = NULL;
171 tegra_sema_release(info);
172 return 0;
173}
174
/* ioctl handler for /dev/tegra_sema.
 * WAIT:   copies a timeout (ms; negative = infinite) from userspace,
 *         downs the semaphore, and on -EINTR writes the remaining time
 *         back so userspace can restart the call.
 * SIGNAL: ups the semaphore. */
static long trpc_sema_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct tegra_sema_info *info = file->private_data;
	int ret;
	long timeout;

	if (_IOC_TYPE(cmd) != TEGRA_SEMA_IOCTL_MAGIC ||
	    _IOC_NR(cmd) < TEGRA_SEMA_IOCTL_MIN_NR ||
	    _IOC_NR(cmd) > TEGRA_SEMA_IOCTL_MAX_NR)
		return -ENOTTY;
	else if (!info)
		return -EINVAL;

	switch (cmd) {
	case TEGRA_SEMA_IOCTL_WAIT:
		if (copy_from_user(&timeout, (void __user *)arg, sizeof(long)))
			return -EFAULT;
		ret = tegra_sema_wait(info, &timeout);
		if (ret != -EINTR)
			break;
		/* interrupted: hand the remaining timeout back to userspace */
		if (copy_to_user((void __user *)arg, &timeout, sizeof(long)))
			ret = -EFAULT;
		break;
	case TEGRA_SEMA_IOCTL_SIGNAL:
		ret = tegra_sema_signal(info);
		break;
	default:
		pr_err("%s: Unknown tegra_sema ioctl 0x%x\n", __func__,
		       _IOC_NR(cmd));
		ret = -ENOTTY;
		break;
	}
	return ret;
}
210
/* Character-device interface: /dev/tegra_sema (dynamic misc minor). */
static const struct file_operations trpc_sema_misc_fops = {
	.owner = THIS_MODULE,
	.open = trpc_sema_open,
	.release = trpc_sema_release,
	.unlocked_ioctl = trpc_sema_ioctl,
};

static struct miscdevice trpc_sema_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "tegra_sema",
	.fops = &trpc_sema_misc_fops,
};
223
224int __init trpc_sema_init(void)
225{
226 int ret;
227
228 if (rpc_sema_minor >= 0) {
229 pr_err("%s: trpc_sema already registered\n", __func__);
230 return -EBUSY;
231 }
232
233 ret = misc_register(&trpc_sema_misc_device);
234 if (ret) {
235 pr_err("%s: can't register misc device\n", __func__);
236 return ret;
237 }
238
239 rpc_sema_minor = trpc_sema_misc_device.minor;
240 pr_info("%s: registered misc dev %d:%d\n", __func__, MISC_MAJOR,
241 rpc_sema_minor);
242
243 return 0;
244}
diff --git a/drivers/media/video/tegra/avp/trpc_sema.h b/drivers/media/video/tegra/avp/trpc_sema.h
new file mode 100644
index 00000000000..2a7c42245b7
--- /dev/null
+++ b/drivers/media/video/tegra/avp/trpc_sema.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 *
4 * Author:
5 * Dima Zavin <dima@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
#ifndef __ARM_MACH_TEGRA_RPC_SEMA_H
#define __ARM_MACH_TEGRA_RPC_SEMA_H

#include <linux/types.h>
#include <linux/fs.h>

/* opaque; defined in trpc_sema.c */
struct tegra_sema_info;

/* resolve a /dev/tegra_sema fd; on success holds a file reference that
 * must be dropped with trpc_sema_put() */
struct tegra_sema_info *trpc_sema_get_from_fd(int fd);
void trpc_sema_put(struct tegra_sema_info *sema);
int __init trpc_sema_init(void);

#endif