Diffstat (limited to 'drivers/media/video/tegra/avp/avp.c')
 drivers/media/video/tegra/avp/avp.c | 1949 ++++++++++++++++++++++++++++++++++
 1 file changed, 1949 insertions(+), 0 deletions(-)
diff --git a/drivers/media/video/tegra/avp/avp.c b/drivers/media/video/tegra/avp/avp.c
new file mode 100644
index 00000000000..074a42f125b
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp.c
@@ -0,0 +1,1949 @@
/*
 * Copyright (C) 2010 Google, Inc.
 * Author: Dima Zavin <dima@android.com>
 *
 * Copyright (C) 2010-2012 NVIDIA Corporation
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioctl.h>
#include <linux/irq.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/tegra_rpc.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

#include <mach/clk.h>
#include <mach/io.h>
#include <mach/iomap.h>
#include <mach/nvmap.h>
#include <mach/legacy_irq.h>
#include <mach/hardware.h>

#include "../../../../video/tegra/nvmap/nvmap.h"

#include "headavp.h"
#include "avp_msg.h"
#include "trpc.h"
#include "avp.h"
#include "nvavp.h"

enum {
        AVP_DBG_TRACE_XPC       = 1U << 0,
        AVP_DBG_TRACE_XPC_IRQ   = 1U << 1,
        AVP_DBG_TRACE_XPC_MSG   = 1U << 2,
        AVP_DBG_TRACE_XPC_CONN  = 1U << 3,
        AVP_DBG_TRACE_TRPC_MSG  = 1U << 4,
        AVP_DBG_TRACE_TRPC_CONN = 1U << 5,
        AVP_DBG_TRACE_LIB       = 1U << 6,
};

static u32 avp_debug_mask =
        AVP_DBG_TRACE_XPC |
        /* AVP_DBG_TRACE_XPC_IRQ | */
        /* AVP_DBG_TRACE_XPC_MSG | */
        /* AVP_DBG_TRACE_TRPC_MSG | */
        AVP_DBG_TRACE_XPC_CONN |
        AVP_DBG_TRACE_TRPC_CONN |
        AVP_DBG_TRACE_LIB;

module_param_named(debug_mask, avp_debug_mask, uint, S_IWUSR | S_IRUGO);

#define DBG(flag, args...) \
        do { if (unlikely(avp_debug_mask & (flag))) pr_info(args); } while (0)

#define TEGRA_AVP_NAME                  "tegra-avp"

#define TEGRA_AVP_RESET_VECTOR_ADDR \
        (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x200)

#define TEGRA_AVP_RESUME_ADDR           IO_ADDRESS(TEGRA_IRAM_BASE + \
                                                TEGRA_RESET_HANDLER_SIZE)

#define FLOW_CTRL_HALT_COP_EVENTS       IO_ADDRESS(TEGRA_FLOW_CTRL_BASE + 0x4)
#define FLOW_MODE_STOP                  (0x2 << 29)
#define FLOW_MODE_NONE                  0x0

#define MBOX_FROM_AVP                   IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x10)
#define MBOX_TO_AVP                     IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x20)

/* Layout of the mailbox registers:
 * bit 31    - pending message interrupt enable (mailbox full, i.e. valid=1)
 * bit 30    - message cleared interrupt enable (mailbox empty, i.e. valid=0)
 * bit 29    - message valid. peer clears this bit after reading msg
 * bits 27:0 - message data
 */
#define MBOX_MSG_PENDING_INT_EN         (1 << 31)
#define MBOX_MSG_READ_INT_EN            (1 << 30)
#define MBOX_MSG_VALID                  (1 << 29)
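
/* The message word written to a mailbox carries the 16-byte-aligned physical
 * address of the shared message area in its low bits: the sender stores
 * (addr >> 4) together with the flag bits above, and the receiver recovers
 * the address with (msg << 4) once the flags have been checked (see
 * avp_mbox_pending_isr() and tegra_avp_probe()).
 */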

#define AVP_MSG_MAX_CMD_LEN     16
#define AVP_MSG_AREA_SIZE       (AVP_MSG_MAX_CMD_LEN + TEGRA_RPC_MAX_MSG_LEN)

struct tegra_avp_info {
        struct clk              *cop_clk;

        int                     mbox_from_avp_pend_irq;

        dma_addr_t              msg_area_addr;
        u32                     msg;
        void                    *msg_to_avp;
        void                    *msg_from_avp;
        struct mutex            to_avp_lock;
        struct mutex            from_avp_lock;

        struct work_struct      recv_work;
        struct workqueue_struct *recv_wq;

        struct trpc_node        *rpc_node;
        struct miscdevice       misc_dev;
        int                     refcount;
        struct mutex            open_lock;

        spinlock_t              state_lock;
        bool                    initialized;
        bool                    shutdown;
        bool                    suspending;
        bool                    defer_remote;

        struct mutex            libs_lock;
        struct list_head        libs;
        struct nvmap_client     *nvmap_libs;

        /* client for driver allocations, persistent */
        struct nvmap_client     *nvmap_drv;
        struct nvmap_handle_ref *kernel_handle;
        void                    *kernel_data;
        phys_addr_t             kernel_phys;

        struct nvmap_handle_ref *iram_backup_handle;
        void                    *iram_backup_data;
        phys_addr_t             iram_backup_phys;
        unsigned long           resume_addr;
        unsigned long           reset_addr;

        struct trpc_endpoint    *avp_ep;
        struct rb_root          endpoints;

        struct avp_svc_info     *avp_svc;
};

struct remote_info {
        u32                     loc_id;
        u32                     rem_id;
        struct kref             ref;

        struct trpc_endpoint    *trpc_ep;
        struct rb_node          rb_node;
};

struct lib_item {
        struct list_head        list;
        u32                     handle;
        char                    name[TEGRA_AVP_LIB_MAX_NAME];
};

static struct tegra_avp_info *tegra_avp;

static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len);
static void avp_trpc_close(struct trpc_endpoint *ep);
static void avp_trpc_show(struct seq_file *s, struct trpc_endpoint *ep);
static void libs_cleanup(struct tegra_avp_info *avp);

static struct trpc_ep_ops remote_ep_ops = {
        .send   = avp_trpc_send,
        .close  = avp_trpc_close,
        .show   = avp_trpc_show,
};

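/* A remote_info describes one CPU<->AVP port pairing and is refcounted:
 * rinfo_alloc() returns it with a single reference that is ultimately
 * inherited by the trpc endpoint, and remote_insert() takes an additional
 * reference on behalf of the avp->endpoints rb_tree.
 */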
static struct remote_info *rinfo_alloc(struct tegra_avp_info *avp)
{
        struct remote_info *rinfo;

        rinfo = kzalloc(sizeof(struct remote_info), GFP_KERNEL);
        if (!rinfo)
                return NULL;
        kref_init(&rinfo->ref);
        return rinfo;
}

static void _rinfo_release(struct kref *ref)
{
        struct remote_info *rinfo = container_of(ref, struct remote_info, ref);
        kfree(rinfo);
}

static inline void rinfo_get(struct remote_info *rinfo)
{
        kref_get(&rinfo->ref);
}

static inline void rinfo_put(struct remote_info *rinfo)
{
        kref_put(&rinfo->ref, _rinfo_release);
}

static int remote_insert(struct tegra_avp_info *avp, struct remote_info *rinfo)
{
        struct rb_node **p;
        struct rb_node *parent;
        struct remote_info *tmp;

        p = &avp->endpoints.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                tmp = rb_entry(parent, struct remote_info, rb_node);

                if (rinfo->loc_id < tmp->loc_id)
                        p = &(*p)->rb_left;
                else if (rinfo->loc_id > tmp->loc_id)
                        p = &(*p)->rb_right;
                else {
                        pr_info("%s: avp endpoint id=%x (%s) already exists\n",
                                __func__, rinfo->loc_id,
                                trpc_name(rinfo->trpc_ep));
                        return -EEXIST;
                }
        }
        rb_link_node(&rinfo->rb_node, parent, p);
        rb_insert_color(&rinfo->rb_node, &avp->endpoints);
        rinfo_get(rinfo);
        return 0;
}

static struct remote_info *remote_find(struct tegra_avp_info *avp, u32 local_id)
{
        struct rb_node *n = avp->endpoints.rb_node;
        struct remote_info *rinfo;

        while (n) {
                rinfo = rb_entry(n, struct remote_info, rb_node);

                if (local_id < rinfo->loc_id)
                        n = n->rb_left;
                else if (local_id > rinfo->loc_id)
                        n = n->rb_right;
                else
                        return rinfo;
        }
        return NULL;
}

static void remote_remove(struct tegra_avp_info *avp, struct remote_info *rinfo)
{
        rb_erase(&rinfo->rb_node, &avp->endpoints);
        rinfo_put(rinfo);
}

/* test whether or not the trpc endpoint provided is a valid AVP node
 * endpoint */
static struct remote_info *validate_trpc_ep(struct tegra_avp_info *avp,
                                            struct trpc_endpoint *ep)
{
        struct remote_info *tmp = trpc_priv(ep);
        struct remote_info *rinfo;

        if (!tmp)
                return NULL;
        rinfo = remote_find(avp, tmp->loc_id);
        if (rinfo && rinfo == tmp && rinfo->trpc_ep == ep)
                return rinfo;
        return NULL;
}

static void avp_trpc_show(struct seq_file *s, struct trpc_endpoint *ep)
{
        struct tegra_avp_info *avp = tegra_avp;
        struct remote_info *rinfo;
        unsigned long flags;

        spin_lock_irqsave(&avp->state_lock, flags);
        rinfo = validate_trpc_ep(avp, ep);
        if (!rinfo) {
                seq_printf(s, "    <unknown>\n");
                goto out;
        }
        seq_printf(s, "    loc_id:0x%x\n    rem_id:0x%x\n",
                   rinfo->loc_id, rinfo->rem_id);
out:
        spin_unlock_irqrestore(&avp->state_lock, flags);
}

static inline void mbox_writel(u32 val, void __iomem *mbox)
{
        writel(val, mbox);
}

static inline u32 mbox_readl(void __iomem *mbox)
{
        return readl(mbox);
}

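/* Message/ack protocol over the shared area: the CPU acknowledges an AVP
 * command by writing an ack into the from-AVP half (msg_ack_remote()), and
 * the AVP acknowledges CPU commands by rewriting the first word of the
 * to-AVP half, which msg_write()/msg_wait_ack_locked() poll.  Write barriers
 * order the ack argument before the command word so the peer never observes
 * a command without its argument.
 */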
static inline void msg_ack_remote(struct tegra_avp_info *avp, u32 cmd, u32 arg)
{
        struct msg_ack *ack = avp->msg_from_avp;

        /* must make sure the arg is there first */
        ack->arg = arg;
        wmb();
        ack->cmd = cmd;
        wmb();
}

static inline u32 msg_recv_get_cmd(struct tegra_avp_info *avp)
{
        volatile u32 *cmd = avp->msg_from_avp;
        rmb();
        return *cmd;
}

static inline int __msg_write(struct tegra_avp_info *avp, void *hdr,
                              size_t hdr_len, void *buf, size_t len)
{
        memcpy(avp->msg_to_avp, hdr, hdr_len);
        if (buf && len)
                memcpy(avp->msg_to_avp + hdr_len, buf, len);
        mbox_writel(avp->msg, MBOX_TO_AVP);
        return 0;
}

static inline int msg_write(struct tegra_avp_info *avp, void *hdr,
                            size_t hdr_len, void *buf, size_t len)
{
        /* rem_ack is a pointer into shared memory that the AVP modifies */
        volatile u32 *rem_ack = avp->msg_to_avp;
        unsigned long endtime = jiffies + HZ;

        /* the other side acks the message by clearing the first word;
         * wait for it to do so */
        rmb();
        while (*rem_ack != 0 && time_before(jiffies, endtime)) {
                usleep_range(100, 2000);
                rmb();
        }
        if (*rem_ack != 0)
                return -ETIMEDOUT;
        __msg_write(avp, hdr, hdr_len, buf, len);
        return 0;
}

static inline int msg_check_ack(struct tegra_avp_info *avp, u32 cmd, u32 *arg)
{
        struct msg_ack ack;

        rmb();
        memcpy(&ack, avp->msg_to_avp, sizeof(ack));
        if (ack.cmd != cmd)
                return -ENOENT;
        if (arg)
                *arg = ack.arg;
        return 0;
}

/* XXX: add timeout */
static int msg_wait_ack_locked(struct tegra_avp_info *avp, u32 cmd, u32 *arg)
{
        /* rem_ack is a pointer into shared memory that the AVP modifies */
        volatile u32 *rem_ack = avp->msg_to_avp;
        unsigned long endtime = jiffies + msecs_to_jiffies(400);
        int ret;

        do {
                ret = msg_check_ack(avp, cmd, arg);
                usleep_range(1000, 5000);
        } while (ret && time_before(jiffies, endtime));

        /* if we timed out, try one more time */
        if (ret)
                ret = msg_check_ack(avp, cmd, arg);

        /* clear out the ack */
        *rem_ack = 0;
        wmb();
        return ret;
}

static int avp_trpc_send(struct trpc_endpoint *ep, void *buf, size_t len)
{
        struct tegra_avp_info *avp = tegra_avp;
        struct remote_info *rinfo;
        struct msg_port_data msg;
        int ret;
        unsigned long flags;

        DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: ep=%p priv=%p buf=%p len=%zu\n",
            __func__, ep, trpc_priv(ep), buf, len);

        spin_lock_irqsave(&avp->state_lock, flags);
        if (unlikely(avp->suspending && trpc_peer(ep) != avp->avp_ep)) {
                ret = -EBUSY;
                goto err_state_locked;
        } else if (avp->shutdown) {
                ret = -ENODEV;
                goto err_state_locked;
        }
        rinfo = validate_trpc_ep(avp, ep);
        if (!rinfo) {
                ret = -ENOTTY;
                goto err_state_locked;
        }
        rinfo_get(rinfo);
        spin_unlock_irqrestore(&avp->state_lock, flags);

        msg.cmd = CMD_MESSAGE;
        msg.port_id = rinfo->rem_id;
        msg.msg_len = len;

        mutex_lock(&avp->to_avp_lock);
        ret = msg_write(avp, &msg, sizeof(msg), buf, len);
        mutex_unlock(&avp->to_avp_lock);

        DBG(AVP_DBG_TRACE_TRPC_MSG, "%s: msg sent for %s (%x->%x) (%d)\n",
            __func__, trpc_name(ep), rinfo->loc_id, rinfo->rem_id, ret);
        rinfo_put(rinfo);
        return ret;

err_state_locked:
        spin_unlock_irqrestore(&avp->state_lock, flags);
        return ret;
}

static int _send_disconnect(struct tegra_avp_info *avp, u32 port_id)
{
        struct msg_disconnect msg;
        int ret;

        msg.cmd = CMD_DISCONNECT;
        msg.port_id = port_id;

        mutex_lock(&avp->to_avp_lock);
        ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
        if (ret) {
                pr_err("%s: remote has not acked last message (%x)\n", __func__,
                       port_id);
                goto err_msg_write;
        }

        ret = msg_wait_ack_locked(avp, CMD_ACK, NULL);
        if (ret) {
                pr_err("%s: remote end won't respond for %x\n", __func__,
                       port_id);
                goto err_wait_ack;
        }

        DBG(AVP_DBG_TRACE_XPC_CONN, "%s: sent disconnect msg for %x\n",
            __func__, port_id);

err_wait_ack:
err_msg_write:
        mutex_unlock(&avp->to_avp_lock);
        return ret;
}

/* Note: Assumes that the rinfo was previously successfully added to the
 * endpoints rb_tree.  The initial refcnt of 1 is inherited by the port when
 * the trpc endpoint is created with the trpc_* functions.  Thus, on close,
 * we must drop that reference here.
 * The avp->endpoints rb_tree keeps its own reference on rinfo objects.
 *
 * The try_connect function does not use this on error because it needs to
 * split the close of the trpc_ep port and the put.
 */
static inline void remote_close(struct remote_info *rinfo)
{
        trpc_close(rinfo->trpc_ep);
        rinfo_put(rinfo);
}

static void avp_trpc_close(struct trpc_endpoint *ep)
{
        struct tegra_avp_info *avp = tegra_avp;
        struct remote_info *rinfo;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&avp->state_lock, flags);
        if (avp->shutdown) {
                spin_unlock_irqrestore(&avp->state_lock, flags);
                return;
        }

        rinfo = validate_trpc_ep(avp, ep);
        if (!rinfo) {
                pr_err("%s: tried to close invalid port '%s' endpoint (%p)\n",
                       __func__, trpc_name(ep), ep);
                spin_unlock_irqrestore(&avp->state_lock, flags);
                return;
        }
        rinfo_get(rinfo);
        remote_remove(avp, rinfo);
        spin_unlock_irqrestore(&avp->state_lock, flags);

        DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: closing '%s' (%x)\n", __func__,
            trpc_name(ep), rinfo->rem_id);

        ret = _send_disconnect(avp, rinfo->rem_id);
        if (ret)
                pr_err("%s: error while closing remote port '%s' (%x)\n",
                       __func__, trpc_name(ep), rinfo->rem_id);
        remote_close(rinfo);
        rinfo_put(rinfo);
}

/* takes and holds avp->from_avp_lock */
static void recv_msg_lock(struct tegra_avp_info *avp)
{
        unsigned long flags;

        mutex_lock(&avp->from_avp_lock);
        spin_lock_irqsave(&avp->state_lock, flags);
        avp->defer_remote = true;
        spin_unlock_irqrestore(&avp->state_lock, flags);
}

/* MUST be called with avp->from_avp_lock held */
static void recv_msg_unlock(struct tegra_avp_info *avp)
{
        unsigned long flags;

        spin_lock_irqsave(&avp->state_lock, flags);
        avp->defer_remote = false;
        spin_unlock_irqrestore(&avp->state_lock, flags);
        mutex_unlock(&avp->from_avp_lock);
}

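/* Connect handshake, CPU side: send CMD_CONNECT carrying the port name and
 * our local port id, then wait for the AVP's CMD_RESPONSE ack carrying the
 * remote port id.  A response of 0 means the AVP refused the connection.
 */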
static int avp_node_try_connect(struct trpc_node *node,
                                struct trpc_node *src_node,
                                struct trpc_endpoint *from)
{
        struct tegra_avp_info *avp = tegra_avp;
        const char *port_name = trpc_name(from);
        struct remote_info *rinfo;
        struct msg_connect msg;
        int ret;
        unsigned long flags;
        int len;
        const int max_retry_cnt = 6;
        int cnt = 0;

        DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: trying connect from %s\n", __func__,
            port_name);

        if (node != avp->rpc_node || node->priv != avp)
                return -ENODEV;

        len = strlen(port_name);
        if (len > XPC_PORT_NAME_LEN) {
                pr_err("%s: port name (%s) too long\n", __func__, port_name);
                return -EINVAL;
        }

        ret = 0;
        spin_lock_irqsave(&avp->state_lock, flags);
        if (avp->suspending) {
                ret = -EBUSY;
        } else if (likely(src_node != avp->rpc_node)) {
                /* only check for initialized when the source is not ourselves
                 * since we'll end up calling into here during initialization */
                if (!avp->initialized)
                        ret = -ENODEV;
        } else if (strncmp(port_name, "RPC_AVP_PORT", XPC_PORT_NAME_LEN)) {
                /* we only allow connections to ourselves for the cpu-to-avp
                 * port */
                ret = -EINVAL;
        }
        spin_unlock_irqrestore(&avp->state_lock, flags);
        if (ret)
                return ret;

        rinfo = rinfo_alloc(avp);
        if (!rinfo) {
                pr_err("%s: cannot alloc mem for rinfo\n", __func__);
                ret = -ENOMEM;
                goto err_alloc_rinfo;
        }
        rinfo->loc_id = (u32)rinfo;

        msg.cmd = CMD_CONNECT;
        msg.port_id = rinfo->loc_id;
        memcpy(msg.name, port_name, len);
        memset(msg.name + len, 0, XPC_PORT_NAME_LEN - len);

        /* when trying to connect to remote, we need to block remote
         * messages until we get our ack and can insert it into our lists.
         * Otherwise, we can get a message from the other side for a port
         * that we haven't finished setting up.
         *
         * 'defer_remote' will force the irq handler to not process messages
         * at irq context but to schedule work to do so. The work function will
         * take the from_avp_lock and everything should stay consistent.
         */
        recv_msg_lock(avp);
        for (cnt = 0; cnt < max_retry_cnt; cnt++) {
                /* Retry the connect up to max_retry_cnt times.  This section
                 * is protected by the mutex, and the CPU needs to re-send
                 * CMD_CONNECT if the AVP did not receive it.
                 */
                mutex_lock(&avp->to_avp_lock);
                ret = msg_write(avp, &msg, sizeof(msg), NULL, 0);
                if (ret) {
                        pr_err("%s: remote has not acked last message (%s)\n",
                               __func__, port_name);
                        mutex_unlock(&avp->to_avp_lock);
                        goto err_msg_write;
                }
                ret = msg_wait_ack_locked(avp, CMD_RESPONSE, &rinfo->rem_id);
                mutex_unlock(&avp->to_avp_lock);
                if (!ret && rinfo->rem_id)
                        break;

                /* don't sleep on the last retry */
                if ((cnt + 1) < max_retry_cnt)
                        usleep_range(100, 2000);
        }

        if (ret) {
                pr_err("%s: remote end won't respond for '%s'\n", __func__,
                       port_name);
                goto err_wait_ack;
        }
        if (!rinfo->rem_id) {
                pr_err("%s: can't connect to '%s'\n", __func__, port_name);
                ret = -ECONNREFUSED;
                goto err_nack;
        }

        DBG(AVP_DBG_TRACE_TRPC_CONN, "%s: got conn ack '%s' (%x <-> %x)\n",
            __func__, port_name, rinfo->loc_id, rinfo->rem_id);

        rinfo->trpc_ep = trpc_create_peer(node, from, &remote_ep_ops,
                                          rinfo);
        if (!rinfo->trpc_ep) {
                pr_err("%s: cannot create peer for %s\n", __func__, port_name);
                ret = -EINVAL;
                goto err_create_peer;
        }

        spin_lock_irqsave(&avp->state_lock, flags);
        ret = remote_insert(avp, rinfo);
        spin_unlock_irqrestore(&avp->state_lock, flags);
        if (ret)
                goto err_ep_insert;

        recv_msg_unlock(avp);
        return 0;

err_ep_insert:
        trpc_close(rinfo->trpc_ep);
err_create_peer:
        _send_disconnect(avp, rinfo->rem_id);
err_nack:
err_wait_ack:
err_msg_write:
        recv_msg_unlock(avp);
        rinfo_put(rinfo);
err_alloc_rinfo:
        return ret;
}

static void process_disconnect_locked(struct tegra_avp_info *avp,
                                      struct msg_data *raw_msg)
{
        struct msg_disconnect *disconn_msg = (struct msg_disconnect *)raw_msg;
        unsigned long flags;
        struct remote_info *rinfo;

        DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got disconnect (%x)\n", __func__,
            disconn_msg->port_id);

        if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, disconn_msg,
                                     sizeof(struct msg_disconnect));

        spin_lock_irqsave(&avp->state_lock, flags);
        rinfo = remote_find(avp, disconn_msg->port_id);
        if (!rinfo) {
                spin_unlock_irqrestore(&avp->state_lock, flags);
                pr_warning("%s: got disconnect for unknown port %x\n",
                           __func__, disconn_msg->port_id);
                goto ack;
        }
        rinfo_get(rinfo);
        remote_remove(avp, rinfo);
        spin_unlock_irqrestore(&avp->state_lock, flags);

        remote_close(rinfo);
        rinfo_put(rinfo);
ack:
        msg_ack_remote(avp, CMD_ACK, 0);
}

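/* Inbound connect from the AVP: allocate a remote_info, create a local
 * endpoint for the named port, and ack with our local port id (or 0 if the
 * connection could not be set up).
 */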
static void process_connect_locked(struct tegra_avp_info *avp,
                                   struct msg_data *raw_msg)
{
        struct msg_connect *conn_msg = (struct msg_connect *)raw_msg;
        struct trpc_endpoint *trpc_ep;
        struct remote_info *rinfo;
        char name[XPC_PORT_NAME_LEN + 1];
        int ret;
        u32 local_port_id = 0;
        unsigned long flags;

        DBG(AVP_DBG_TRACE_XPC_CONN, "%s: got connect (%x)\n", __func__,
            conn_msg->port_id);
        if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG)
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
                                     conn_msg, sizeof(struct msg_connect));

        rinfo = rinfo_alloc(avp);
        if (!rinfo) {
                pr_err("%s: cannot alloc mem for rinfo\n", __func__);
                ret = -ENOMEM;
                goto ack;
        }
        rinfo->loc_id = (u32)rinfo;
        rinfo->rem_id = conn_msg->port_id;

        memcpy(name, conn_msg->name, XPC_PORT_NAME_LEN);
        name[XPC_PORT_NAME_LEN] = '\0';
        trpc_ep = trpc_create_connect(avp->rpc_node, name, &remote_ep_ops,
                                      rinfo, 0);
        if (IS_ERR(trpc_ep)) {
                pr_err("%s: remote requested unknown port '%s' (%d)\n",
                       __func__, name, (int)PTR_ERR(trpc_ep));
                goto nack;
        }
        rinfo->trpc_ep = trpc_ep;

        spin_lock_irqsave(&avp->state_lock, flags);
        ret = remote_insert(avp, rinfo);
        spin_unlock_irqrestore(&avp->state_lock, flags);
        if (ret)
                goto err_ep_insert;

        local_port_id = rinfo->loc_id;
        goto ack;

err_ep_insert:
        trpc_close(trpc_ep);
nack:
        rinfo_put(rinfo);
        local_port_id = 0;
ack:
        msg_ack_remote(avp, CMD_RESPONSE, local_port_id);
}

static int process_message(struct tegra_avp_info *avp, struct msg_data *raw_msg,
                           gfp_t gfp_flags)
{
        struct msg_port_data *port_msg = (struct msg_port_data *)raw_msg;
        struct remote_info *rinfo;
        unsigned long flags;
        int len;
        int ret;

        len = min(port_msg->msg_len, (u32)TEGRA_RPC_MAX_MSG_LEN);

        if (avp_debug_mask & AVP_DBG_TRACE_XPC_MSG) {
                pr_info("%s: got message cmd=%x port=%x len=%d\n", __func__,
                        port_msg->cmd, port_msg->port_id, port_msg->msg_len);
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, port_msg,
                                     sizeof(struct msg_port_data) + len);
        }

        if (len != port_msg->msg_len)
                pr_err("%s: message sent is too long (%d bytes)\n", __func__,
                       port_msg->msg_len);

        spin_lock_irqsave(&avp->state_lock, flags);
        rinfo = remote_find(avp, port_msg->port_id);
        if (rinfo) {
                rinfo_get(rinfo);
                trpc_get(rinfo->trpc_ep);
        } else {
                pr_err("%s: port %x not found\n", __func__, port_msg->port_id);
                spin_unlock_irqrestore(&avp->state_lock, flags);
                ret = -ENOENT;
                goto ack;
        }
        spin_unlock_irqrestore(&avp->state_lock, flags);

        ret = trpc_send_msg(avp->rpc_node, rinfo->trpc_ep, port_msg->data,
                            len, gfp_flags);
        if (ret == -ENOMEM) {
                trpc_put(rinfo->trpc_ep);
                rinfo_put(rinfo);
                goto no_ack;
        } else if (ret) {
                pr_err("%s: cannot queue message for port %s/%x (%d)\n",
                       __func__, trpc_name(rinfo->trpc_ep), rinfo->loc_id,
                       ret);
        } else {
                DBG(AVP_DBG_TRACE_XPC_MSG, "%s: msg queued\n", __func__);
        }

        trpc_put(rinfo->trpc_ep);
        rinfo_put(rinfo);
ack:
        msg_ack_remote(avp, CMD_ACK, 0);
no_ack:
        return ret;
}

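/* Deferred mailbox processing: runs with from_avp_lock held, so inbound
 * commands cannot race with a connect handshake in avp_node_try_connect().
 */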
static void process_avp_message(struct work_struct *work)
{
        struct tegra_avp_info *avp = container_of(work, struct tegra_avp_info,
                                                  recv_work);
        struct msg_data *msg = avp->msg_from_avp;

        mutex_lock(&avp->from_avp_lock);
        rmb();
        switch (msg->cmd) {
        case CMD_CONNECT:
                process_connect_locked(avp, msg);
                break;
        case CMD_DISCONNECT:
                process_disconnect_locked(avp, msg);
                break;
        case CMD_MESSAGE:
                process_message(avp, msg, GFP_KERNEL);
                break;
        default:
                pr_err("%s: unknown cmd (%x) received\n", __func__, msg->cmd);
                break;
        }
        mutex_unlock(&avp->from_avp_lock);
}

static irqreturn_t avp_mbox_pending_isr(int irq, void *data)
{
        struct tegra_avp_info *avp = data;
        struct msg_data *msg = avp->msg_from_avp;
        u32 mbox_msg;
        unsigned long flags;
        int ret;

        mbox_msg = mbox_readl(MBOX_FROM_AVP);
        mbox_writel(0, MBOX_FROM_AVP);

        DBG(AVP_DBG_TRACE_XPC_IRQ, "%s: got msg %x\n", __func__, mbox_msg);

        /* XXX: re-use previous message? */
        if (!(mbox_msg & MBOX_MSG_VALID)) {
                WARN_ON(1);
                goto done;
        }

        mbox_msg <<= 4;
        if (mbox_msg == 0x2f00bad0UL) {
                pr_info("%s: petting watchdog\n", __func__);
                goto done;
        }

        spin_lock_irqsave(&avp->state_lock, flags);
        if (avp->shutdown) {
                spin_unlock_irqrestore(&avp->state_lock, flags);
                goto done;
        } else if (avp->defer_remote) {
                spin_unlock_irqrestore(&avp->state_lock, flags);
                goto defer;
        }
        spin_unlock_irqrestore(&avp->state_lock, flags);

        rmb();
        if (msg->cmd == CMD_MESSAGE) {
                ret = process_message(avp, msg, GFP_ATOMIC);
                if (ret != -ENOMEM)
                        goto done;
                pr_info("%s: deferring message (%d)\n", __func__, ret);
        }
defer:
        queue_work(avp->recv_wq, &avp->recv_work);
done:
        return IRQ_HANDLED;
}

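/* Boot/reset handshake: the boot stub's physical address is written into the
 * COP reset vector, the stub maps the AVP kernel and jumps to reset_addr,
 * and the AVP kernel reprograms the reset vector as it starts.  We poll the
 * vector to detect whether the AVP actually booted.
 */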
static int avp_reset(struct tegra_avp_info *avp, unsigned long reset_addr)
{
        unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
        dma_addr_t stub_data_phys;
        unsigned long timeout;
        int ret = 0;

        writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);

        _tegra_avp_boot_stub_data.map_phys_addr = avp->kernel_phys;
        _tegra_avp_boot_stub_data.jump_addr = reset_addr;
        wmb();
        stub_data_phys = dma_map_single(NULL, &_tegra_avp_boot_stub_data,
                                        sizeof(_tegra_avp_boot_stub_data),
                                        DMA_TO_DEVICE);

        writel(stub_code_phys, TEGRA_AVP_RESET_VECTOR_ADDR);

        pr_debug("%s: TEGRA_AVP_RESET_VECTOR=%x\n", __func__,
                 readl(TEGRA_AVP_RESET_VECTOR_ADDR));
        pr_info("%s: Resetting AVP: reset_addr=%lx\n", __func__, reset_addr);

        tegra_periph_reset_assert(avp->cop_clk);
        udelay(10);
        tegra_periph_reset_deassert(avp->cop_clk);

        writel(FLOW_MODE_NONE, FLOW_CTRL_HALT_COP_EVENTS);

        /* the AVP firmware will reprogram its reset vector as the kernel
         * starts, so a dead kernel can be detected by polling this value */
        timeout = jiffies + msecs_to_jiffies(2000);
        while (time_before(jiffies, timeout)) {
                pr_debug("%s: TEGRA_AVP_RESET_VECTOR=%x\n", __func__,
                         readl(TEGRA_AVP_RESET_VECTOR_ADDR));
                if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) != stub_code_phys)
                        break;
                cpu_relax();
        }
        if (readl(TEGRA_AVP_RESET_VECTOR_ADDR) == stub_code_phys) {
                pr_err("%s: Timed out waiting for AVP kernel to start\n",
                       __func__);
                ret = -EINVAL;
        }
        pr_debug("%s: TEGRA_AVP_RESET_VECTOR=%x\n", __func__,
                 readl(TEGRA_AVP_RESET_VECTOR_ADDR));
        WARN_ON(ret);
        dma_unmap_single(NULL, stub_data_phys,
                         sizeof(_tegra_avp_boot_stub_data),
                         DMA_TO_DEVICE);
        return ret;
}

static void avp_halt(struct tegra_avp_info *avp)
{
        /* ensure the AVP is halted */
        writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
        tegra_periph_reset_assert(avp->cop_clk);

        /* set up the initial memory areas and mailbox contents */
        *((u32 *)avp->msg_from_avp) = 0;
        *((u32 *)avp->msg_to_avp) = 0xfeedf00d;
        mbox_writel(0, MBOX_FROM_AVP);
        mbox_writel(0, MBOX_TO_AVP);
}

/* Note: the CPU_PORT server and AVP_PORT client are registered with the avp
 * node, but are actually meant to be processed on our side (either
 * by the svc thread for processing remote calls, or by the client
 * of the char dev for receiving the replies used to manage remote
 * libraries/modules). */

static int avp_init(struct tegra_avp_info *avp)
{
        const struct firmware *avp_fw;
        int ret;
        struct trpc_endpoint *ep;
        char fw_file[30];

        avp->nvmap_libs = nvmap_create_client(nvmap_dev, "avp_libs");
        if (IS_ERR_OR_NULL(avp->nvmap_libs)) {
                pr_err("%s: cannot create libs nvmap client\n", __func__);
                ret = PTR_ERR(avp->nvmap_libs);
                goto err_nvmap_create_libs_client;
        }

        /* put the address of the shared mem area into the mailbox for AVP
         * to read out when its kernel boots. */
        mbox_writel(avp->msg, MBOX_TO_AVP);

#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
        /* paddr is any address returned from nvmap_pin */
        /* vaddr is AVP_KERNEL_VIRT_BASE */
        pr_info("%s: Using AVP MMU to relocate AVP kernel\n", __func__);
        sprintf(fw_file, "nvrm_avp.bin");
        avp->reset_addr = AVP_KERNEL_VIRT_BASE;
#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
        /* paddr is any address behind SMMU */
        /* vaddr is TEGRA_SMMU_BASE */
        pr_info("%s: Using SMMU at %lx to load AVP kernel\n",
                __func__, (unsigned long)avp->kernel_phys);
        BUG_ON(avp->kernel_phys != 0xeff00000
               && avp->kernel_phys != 0x0ff00000);
        sprintf(fw_file, "nvrm_avp_%08lx.bin", (unsigned long)avp->kernel_phys);
        avp->reset_addr = avp->kernel_phys;
#else /* nvmem= carveout */
        /* paddr is found in the nvmem= carveout */
        /* vaddr is the same as paddr */
        /* find the nvmem carveout */
        if (!pfn_valid(__phys_to_pfn(0x8e000000)))
                avp->kernel_phys = 0x8e000000;
        else if (!pfn_valid(__phys_to_pfn(0x9e000000)))
                avp->kernel_phys = 0x9e000000;
        else if (!pfn_valid(__phys_to_pfn(0xbe000000)))
                avp->kernel_phys = 0xbe000000;
        else {
                pr_err("Cannot find nvmem= carveout to load AVP kernel\n");
                pr_err("Check the kernel command line "
                       "to see if nvmem= is defined\n");
                BUG();
        }
        pr_info("%s: Using nvmem= carveout at %lx to load AVP kernel\n",
                __func__, (unsigned long)avp->kernel_phys);
        sprintf(fw_file, "nvrm_avp_%08lx.bin", (unsigned long)avp->kernel_phys);
        avp->reset_addr = avp->kernel_phys;
        avp->kernel_data = ioremap(avp->kernel_phys, SZ_1M);
#endif

        ret = request_firmware(&avp_fw, fw_file, avp->misc_dev.this_device);
        if (ret) {
                pr_err("%s: Cannot read firmware '%s'\n", __func__, fw_file);
                goto err_req_fw;
        }
        pr_info("%s: Reading firmware from '%s' (%zu bytes)\n", __func__,
                fw_file, avp_fw->size);

        pr_info("%s: Loading AVP kernel at vaddr=%p paddr=%lx\n",
                __func__, avp->kernel_data, (unsigned long)avp->kernel_phys);
        memcpy(avp->kernel_data, avp_fw->data, avp_fw->size);
        memset(avp->kernel_data + avp_fw->size, 0, SZ_1M - avp_fw->size);

        wmb();
        release_firmware(avp_fw);

        tegra_init_legacy_irq_cop();

        ret = avp_reset(avp, avp->reset_addr);
        if (ret) {
                pr_err("%s: cannot reset the AVP, aborting\n", __func__);
                goto err_reset;
        }

        enable_irq(avp->mbox_from_avp_pend_irq);
        /* Initialize the avp_svc *first*. This creates RPC_CPU_PORT to be
         * ready for remote commands. Then, connect to the
         * remote RPC_AVP_PORT to be able to send library load/unload and
         * suspend commands to it */
        ret = avp_svc_start(avp->avp_svc);
        if (ret)
                goto err_avp_svc_start;

        ep = trpc_create_connect(avp->rpc_node, "RPC_AVP_PORT", NULL,
                                 NULL, -1);
        if (IS_ERR(ep)) {
                pr_err("%s: can't connect to RPC_AVP_PORT server\n", __func__);
                ret = PTR_ERR(ep);
                goto err_rpc_avp_port;
        }
        avp->avp_ep = ep;

        avp->initialized = true;
        smp_wmb();
        pr_info("%s: avp init done\n", __func__);
        return 0;

err_rpc_avp_port:
        avp_svc_stop(avp->avp_svc);
err_avp_svc_start:
        disable_irq(avp->mbox_from_avp_pend_irq);
err_reset:
        avp_halt(avp);
err_req_fw:
        nvmap_client_put(avp->nvmap_libs);
err_nvmap_create_libs_client:
        avp->nvmap_libs = NULL;
        return ret;
}

static void avp_uninit(struct tegra_avp_info *avp)
{
        unsigned long flags;
        struct rb_node *n;
        struct remote_info *rinfo;

        spin_lock_irqsave(&avp->state_lock, flags);
        avp->initialized = false;
        avp->shutdown = true;
        spin_unlock_irqrestore(&avp->state_lock, flags);

        disable_irq(avp->mbox_from_avp_pend_irq);
        cancel_work_sync(&avp->recv_work);

        avp_halt(avp);

        spin_lock_irqsave(&avp->state_lock, flags);
        while ((n = rb_first(&avp->endpoints)) != NULL) {
                rinfo = rb_entry(n, struct remote_info, rb_node);
                rinfo_get(rinfo);
                remote_remove(avp, rinfo);
                spin_unlock_irqrestore(&avp->state_lock, flags);

                remote_close(rinfo);
                rinfo_put(rinfo);

                spin_lock_irqsave(&avp->state_lock, flags);
        }
        spin_unlock_irqrestore(&avp->state_lock, flags);

        avp_svc_stop(avp->avp_svc);

        if (avp->avp_ep) {
                trpc_close(avp->avp_ep);
                avp->avp_ep = NULL;
        }

        libs_cleanup(avp);

        avp->shutdown = false;
        smp_wmb();
        pr_info("%s: avp teardown done\n", __func__);
}

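/* Library load sequence: copy the firmware image into a pinned nvmap buffer,
 * send SVC_LIBRARY_ATTACH with its physical address over RPC_AVP_PORT, and
 * wait for the attach response.  The buffer is only a staging area (the AVP
 * does its own relocation), so it is freed again before returning.
 */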
/* returns the remote lib handle in lib->handle */
static int _load_lib(struct tegra_avp_info *avp, struct tegra_avp_lib *lib,
                     bool from_user)
{
        struct svc_lib_attach svc;
        struct svc_lib_attach_resp resp;
        const struct firmware *fw;
        void *args;
        struct nvmap_handle_ref *lib_handle;
        void *lib_data;
        phys_addr_t lib_phys;
        int ret;

        DBG(AVP_DBG_TRACE_LIB, "avp_lib: loading library '%s'\n", lib->name);

        args = kmalloc(lib->args_len, GFP_KERNEL);
        if (!args) {
                pr_err("avp_lib: can't alloc mem for args (%d)\n",
                       lib->args_len);
                return -ENOMEM;
        }

        if (!from_user)
                memcpy(args, lib->args, lib->args_len);
        else if (copy_from_user(args, lib->args, lib->args_len)) {
                pr_err("avp_lib: can't copy lib args\n");
                ret = -EFAULT;
                goto err_cp_args;
        }

        ret = request_firmware(&fw, lib->name, avp->misc_dev.this_device);
        if (ret) {
                pr_err("avp_lib: Cannot read firmware '%s'\n", lib->name);
                goto err_req_fw;
        }

        lib_handle = nvmap_alloc(avp->nvmap_libs, fw->size, L1_CACHE_BYTES,
                                 NVMAP_HANDLE_UNCACHEABLE, 0);
        if (IS_ERR_OR_NULL(lib_handle)) {
                pr_err("avp_lib: can't nvmap alloc for lib '%s'\n", lib->name);
                ret = PTR_ERR(lib_handle);
                goto err_nvmap_alloc;
        }

        lib_data = nvmap_mmap(lib_handle);
        if (!lib_data) {
                pr_err("avp_lib: can't nvmap map for lib '%s'\n", lib->name);
                ret = -ENOMEM;
                goto err_nvmap_mmap;
        }

        lib_phys = nvmap_pin(avp->nvmap_libs, lib_handle);
        if (IS_ERR_VALUE(lib_phys)) {
                pr_err("avp_lib: can't nvmap pin for lib '%s'\n", lib->name);
                ret = lib_phys;
                goto err_nvmap_pin;
        }

        memcpy(lib_data, fw->data, fw->size);

        svc.svc_id = SVC_LIBRARY_ATTACH;
        svc.address = lib_phys;
        svc.args_len = lib->args_len;
        svc.lib_size = fw->size;
        svc.reason = lib->greedy ? AVP_LIB_REASON_ATTACH_GREEDY :
                                   AVP_LIB_REASON_ATTACH;
        memcpy(svc.args, args, lib->args_len);
        wmb();

        /* send message, wait for reply */
        ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
                            GFP_KERNEL);
        if (ret)
                goto err_send_msg;

        ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
                            sizeof(resp), -1);
        if (ret != sizeof(resp)) {
                pr_err("avp_lib: Couldn't get lib load reply (%d)\n", ret);
                goto err_recv_msg;
        } else if (resp.err) {
                pr_err("avp_lib: got remote error (%d) while loading lib %s\n",
                       resp.err, lib->name);
                ret = -EPROTO;
                goto err_recv_msg;
        }
        lib->handle = resp.lib_id;
        ret = 0;
        DBG(AVP_DBG_TRACE_LIB,
            "avp_lib: Successfully loaded library %s (lib_id=%x)\n",
            lib->name, resp.lib_id);

        /* We free the memory here because by this point the AVP has already
         * requested memory for all of the library's sections, since it does
         * its own relocation and memory management. So, our allocations were
         * temporary, just to hand the library code over to the AVP.
         */

err_recv_msg:
err_send_msg:
        nvmap_unpin(avp->nvmap_libs, lib_handle);
err_nvmap_pin:
        nvmap_munmap(lib_handle, lib_data);
err_nvmap_mmap:
        nvmap_free(avp->nvmap_libs, lib_handle);
err_nvmap_alloc:
        release_firmware(fw);
err_req_fw:
err_cp_args:
        kfree(args);
        return ret;
}

static int send_unload_lib_msg(struct tegra_avp_info *avp, u32 handle,
                               const char *name)
{
        struct svc_lib_detach svc;
        struct svc_lib_detach_resp resp;
        int ret;

        svc.svc_id = SVC_LIBRARY_DETACH;
        svc.reason = AVP_LIB_REASON_DETACH;
        svc.lib_id = handle;

        ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
                            GFP_KERNEL);
        if (ret) {
                pr_err("avp_lib: can't send unload message to avp for '%s'\n",
                       name);
                goto err;
        }

        /* Give it a few extra moments to unload. */
        msleep(20);

        ret = trpc_recv_msg(avp->rpc_node, avp->avp_ep, &resp,
                            sizeof(resp), -1);
        if (ret != sizeof(resp)) {
                pr_err("avp_lib: Couldn't get unload reply for '%s' (%d)\n",
                       name, ret);
        } else if (resp.err) {
                pr_err("avp_lib: remote error (%d) while unloading lib %s\n",
                       resp.err, name);
                ret = -EPROTO;
        } else {
                pr_info("avp_lib: Successfully unloaded '%s'\n", name);
                ret = 0;
        }

err:
        return ret;
}

static struct lib_item *_find_lib_locked(struct tegra_avp_info *avp, u32 handle)
{
        struct lib_item *item;

        list_for_each_entry(item, &avp->libs, list) {
                if (item->handle == handle)
                        return item;
        }
        return NULL;
}

static int _insert_lib_locked(struct tegra_avp_info *avp, u32 handle,
                              char *name)
{
        struct lib_item *item;

        item = kzalloc(sizeof(struct lib_item), GFP_KERNEL);
        if (!item)
                return -ENOMEM;
        item->handle = handle;
        strlcpy(item->name, name, TEGRA_AVP_LIB_MAX_NAME);
        list_add_tail(&item->list, &avp->libs);
        return 0;
}

static void _delete_lib_locked(struct tegra_avp_info *avp,
                               struct lib_item *item)
{
        list_del(&item->list);
        kfree(item);
}

static int handle_load_lib_ioctl(struct tegra_avp_info *avp, unsigned long arg)
{
        struct tegra_avp_lib lib;
        int ret;

        pr_debug("%s: ioctl\n", __func__);
        if (copy_from_user(&lib, (void __user *)arg, sizeof(lib)))
                return -EFAULT;
        lib.name[TEGRA_AVP_LIB_MAX_NAME - 1] = '\0';

        if (lib.args_len > TEGRA_AVP_LIB_MAX_ARGS) {
                pr_err("%s: library args too long (%d)\n", __func__,
                       lib.args_len);
                return -E2BIG;
        }

        mutex_lock(&avp->libs_lock);
        ret = _load_lib(avp, &lib, true);
        if (ret)
                goto err_load_lib;

        if (copy_to_user((void __user *)arg, &lib, sizeof(lib))) {
                /* TODO: probably need to free the library from remote
                 * we just loaded */
                ret = -EFAULT;
                goto err_copy_to_user;
        }
        ret = _insert_lib_locked(avp, lib.handle, lib.name);
        if (ret) {
                pr_err("%s: can't insert lib (%d)\n", __func__, ret);
                goto err_insert_lib;
        }

        mutex_unlock(&avp->libs_lock);
        return 0;

err_insert_lib:
err_copy_to_user:
        send_unload_lib_msg(avp, lib.handle, lib.name);
err_load_lib:
        mutex_unlock(&avp->libs_lock);
        return ret;
}

static void libs_cleanup(struct tegra_avp_info *avp)
{
        struct lib_item *lib;
        struct lib_item *lib_tmp;

        mutex_lock(&avp->libs_lock);
        list_for_each_entry_safe(lib, lib_tmp, &avp->libs, list) {
                _delete_lib_locked(avp, lib);
        }

        nvmap_client_put(avp->nvmap_libs);
        avp->nvmap_libs = NULL;
        mutex_unlock(&avp->libs_lock);
}

static long tegra_avp_ioctl(struct file *file, unsigned int cmd,
                            unsigned long arg)
{
        struct tegra_avp_info *avp = tegra_avp;
        int ret;

        if (_IOC_TYPE(cmd) != TEGRA_AVP_IOCTL_MAGIC ||
            _IOC_NR(cmd) < TEGRA_AVP_IOCTL_MIN_NR ||
            _IOC_NR(cmd) > TEGRA_AVP_IOCTL_MAX_NR)
                return -ENOTTY;

        switch (cmd) {
        case TEGRA_AVP_IOCTL_LOAD_LIB:
                ret = handle_load_lib_ioctl(avp, arg);
                break;
        case TEGRA_AVP_IOCTL_UNLOAD_LIB:
                ret = tegra_avp_unload_lib(avp, arg);
                break;
        default:
                pr_err("avp_lib: Unknown tegra_avp ioctl 0x%x\n", _IOC_NR(cmd));
                ret = -ENOTTY;
                break;
        }
        return ret;
}

int tegra_avp_open(struct tegra_avp_info **avp)
{
        struct tegra_avp_info *new_avp = tegra_avp;
        int ret = 0;

        pr_debug("%s: open\n", __func__);
        mutex_lock(&new_avp->open_lock);

        if (!new_avp->refcount)
                ret = avp_init(new_avp);

        if (ret < 0) {
                mutex_unlock(&new_avp->open_lock);
                new_avp = NULL;
                goto out;
        }

        new_avp->refcount++;

        mutex_unlock(&new_avp->open_lock);
out:
        *avp = new_avp;
        return ret;
}

static int tegra_avp_open_fops(struct inode *inode, struct file *file)
{
        struct tegra_avp_info *avp;

        nonseekable_open(inode, file);
        return tegra_avp_open(&avp);
}

int tegra_avp_release(struct tegra_avp_info *avp)
{
        int ret = 0;

        pr_debug("%s: close\n", __func__);
        mutex_lock(&avp->open_lock);
        if (!avp->refcount) {
                pr_err("%s: releasing while in invalid state\n", __func__);
                ret = -EINVAL;
                goto out;
        }
        if (avp->refcount > 0)
                avp->refcount--;
        if (!avp->refcount)
                avp_uninit(avp);

out:
        mutex_unlock(&avp->open_lock);
        return ret;
}

static int tegra_avp_release_fops(struct inode *inode, struct file *file)
{
        struct tegra_avp_info *avp = tegra_avp;
        return tegra_avp_release(avp);
}

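/* LP0 (deep sleep) entry: ask the AVP to copy IRAM (minus the reset handler)
 * into the DRAM backup buffer.  The AVP signals completion by writing a
 * nonzero word just past the backed-up data, in the extra space allocated
 * for iram_backup_handle in tegra_avp_probe().
 */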
static int avp_enter_lp0(struct tegra_avp_info *avp)
{
        volatile u32 *avp_suspend_done = avp->iram_backup_data
                + TEGRA_IRAM_SIZE - TEGRA_RESET_HANDLER_SIZE;
        struct svc_enter_lp0 svc;
        unsigned long endtime;
        int ret;

        svc.svc_id = SVC_ENTER_LP0;
        svc.src_addr = (u32)TEGRA_IRAM_BASE + TEGRA_RESET_HANDLER_SIZE;
        svc.buf_addr = (u32)avp->iram_backup_phys;
        svc.buf_size = TEGRA_IRAM_SIZE - TEGRA_RESET_HANDLER_SIZE;

        *avp_suspend_done = 0;
        wmb();

        ret = trpc_send_msg(avp->rpc_node, avp->avp_ep, &svc, sizeof(svc),
                            GFP_KERNEL);
        if (ret) {
                pr_err("%s: cannot send AVP suspend message\n", __func__);
                return ret;
        }

        endtime = jiffies + msecs_to_jiffies(1000);
        rmb();
        while ((*avp_suspend_done == 0) && time_before(jiffies, endtime)) {
                udelay(10);
                rmb();
        }

        rmb();
        if (*avp_suspend_done == 0) {
                pr_err("%s: AVP failed to suspend\n", __func__);
                ret = -ETIMEDOUT;
                goto err;
        }

        return 0;

err:
        return ret;
}

static int tegra_avp_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct tegra_avp_info *avp = tegra_avp;
        unsigned long flags;
        int ret;

        pr_info("%s()+\n", __func__);
        spin_lock_irqsave(&avp->state_lock, flags);
        if (!avp->initialized) {
                spin_unlock_irqrestore(&avp->state_lock, flags);
                return 0;
        }
        avp->suspending = true;
        spin_unlock_irqrestore(&avp->state_lock, flags);

        ret = avp_enter_lp0(avp);
        if (ret)
                goto err;

        avp->resume_addr = readl(TEGRA_AVP_RESUME_ADDR);
        if (!avp->resume_addr) {
                pr_err("%s: AVP failed to set its resume address\n", __func__);
                ret = -EINVAL;
                goto err;
        }

        disable_irq(avp->mbox_from_avp_pend_irq);

        pr_info("avp_suspend: resume_addr=%lx\n", avp->resume_addr);
        avp->resume_addr &= 0xfffffffeUL;
        pr_info("%s()-\n", __func__);

        return 0;

err:
        /* TODO: we need to kill the AVP so that when we come back
         * it could be reinitialized.. We'd probably need to kill
         * the users of it so they don't have the wrong state.
         */
        return ret;
}

static int tegra_avp_resume(struct platform_device *pdev)
{
        struct tegra_avp_info *avp = tegra_avp;
        int ret = 0;

        pr_info("%s()+\n", __func__);
        smp_rmb();
        if (!avp->initialized)
                goto out;

        BUG_ON(!avp->resume_addr);

        avp_reset(avp, avp->resume_addr);
        avp->resume_addr = 0;
        avp->suspending = false;
        smp_wmb();
        enable_irq(avp->mbox_from_avp_pend_irq);

        pr_info("%s()-\n", __func__);

out:
        return ret;
}

static const struct file_operations tegra_avp_fops = {
        .owner          = THIS_MODULE,
        .open           = tegra_avp_open_fops,
        .release        = tegra_avp_release_fops,
        .unlocked_ioctl = tegra_avp_ioctl,
};

static struct trpc_node avp_trpc_node = {
        .name           = "avp-remote",
        .type           = TRPC_NODE_REMOTE,
        .try_connect    = avp_node_try_connect,
};

static int tegra_avp_probe(struct platform_device *pdev)
{
        void *msg_area;
        struct tegra_avp_info *avp;
        int ret = 0;
        int irq;
        unsigned int heap_mask;

        irq = platform_get_irq_byname(pdev, "mbox_from_avp_pending");
        if (irq < 0) {
                pr_err("%s: invalid platform data\n", __func__);
                return -EINVAL;
        }

        avp = kzalloc(sizeof(struct tegra_avp_info), GFP_KERNEL);
        if (!avp) {
                pr_err("%s: cannot allocate tegra_avp_info\n", __func__);
                return -ENOMEM;
        }

        avp->nvmap_drv = nvmap_create_client(nvmap_dev, "avp_core");
        if (IS_ERR_OR_NULL(avp->nvmap_drv)) {
                pr_err("%s: cannot create drv nvmap client\n", __func__);
                ret = PTR_ERR(avp->nvmap_drv);
                goto err_nvmap_create_drv_client;
        }

#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
        heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC;
#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
        heap_mask = NVMAP_HEAP_IOVMM;
#else /* nvmem= carveout */
        heap_mask = 0;
#endif

        if (heap_mask == NVMAP_HEAP_IOVMM) {
                int i;
                /* Tegra3 A01 has the SMMU at a different address (0xe0000000-) */
                u32 iovmm_addr[] = {0x0ff00000, 0xeff00000};

                for (i = 0; i < ARRAY_SIZE(iovmm_addr); i++) {
                        avp->kernel_handle = nvmap_alloc_iovm(avp->nvmap_drv,
                                                SZ_1M, L1_CACHE_BYTES,
                                                NVMAP_HANDLE_WRITE_COMBINE,
                                                iovmm_addr[i]);
                        if (!IS_ERR_OR_NULL(avp->kernel_handle))
                                break;
                }
                if (IS_ERR_OR_NULL(avp->kernel_handle)) {
                        pr_err("%s: cannot create handle\n", __func__);
                        ret = PTR_ERR(avp->kernel_handle);
                        goto err_nvmap_alloc;
                }

                avp->kernel_data = nvmap_mmap(avp->kernel_handle);
                if (!avp->kernel_data) {
                        pr_err("%s: cannot map kernel handle\n", __func__);
                        ret = -ENOMEM;
                        goto err_nvmap_mmap;
                }

                avp->kernel_phys =
                        nvmap_pin(avp->nvmap_drv, avp->kernel_handle);
                if (IS_ERR_VALUE(avp->kernel_phys)) {
                        pr_err("%s: cannot pin kernel handle\n", __func__);
                        ret = avp->kernel_phys;
                        goto err_nvmap_pin;
                }

                pr_info("%s: allocated IOVM at %lx for AVP kernel\n",
                        __func__, (unsigned long)avp->kernel_phys);
        }

        if (heap_mask == NVMAP_HEAP_CARVEOUT_GENERIC) {
                avp->kernel_handle = nvmap_alloc(avp->nvmap_drv, SZ_1M, SZ_1M,
                                                 NVMAP_HANDLE_UNCACHEABLE, 0);
                if (IS_ERR_OR_NULL(avp->kernel_handle)) {
                        pr_err("%s: cannot create handle\n", __func__);
                        ret = PTR_ERR(avp->kernel_handle);
                        goto err_nvmap_alloc;
                }

                avp->kernel_data = nvmap_mmap(avp->kernel_handle);
                if (!avp->kernel_data) {
                        pr_err("%s: cannot map kernel handle\n", __func__);
                        ret = -ENOMEM;
                        goto err_nvmap_mmap;
                }

                avp->kernel_phys = nvmap_pin(avp->nvmap_drv,
                                             avp->kernel_handle);
                if (IS_ERR_VALUE(avp->kernel_phys)) {
                        pr_err("%s: cannot pin kernel handle\n", __func__);
                        ret = avp->kernel_phys;
                        goto err_nvmap_pin;
                }

                pr_info("%s: allocated carveout memory at %lx for AVP kernel\n",
                        __func__, (unsigned long)avp->kernel_phys);
        }

        /* allocate an extra 4 bytes at the end which the AVP uses to signal
         * to us that it is done suspending.
         */
        avp->iram_backup_handle =
                nvmap_alloc(avp->nvmap_drv, TEGRA_IRAM_SIZE + 4,
                            L1_CACHE_BYTES, NVMAP_HANDLE_UNCACHEABLE, 0);
        if (IS_ERR_OR_NULL(avp->iram_backup_handle)) {
                pr_err("%s: cannot create handle for iram backup\n", __func__);
                ret = PTR_ERR(avp->iram_backup_handle);
                goto err_iram_nvmap_alloc;
        }
        avp->iram_backup_data = nvmap_mmap(avp->iram_backup_handle);
        if (!avp->iram_backup_data) {
                pr_err("%s: cannot map iram backup handle\n", __func__);
                ret = -ENOMEM;
                goto err_iram_nvmap_mmap;
        }
        avp->iram_backup_phys = nvmap_pin(avp->nvmap_drv,
                                          avp->iram_backup_handle);
        if (IS_ERR_VALUE(avp->iram_backup_phys)) {
                pr_err("%s: cannot pin iram backup handle\n", __func__);
                ret = avp->iram_backup_phys;
                goto err_iram_nvmap_pin;
        }

        avp->mbox_from_avp_pend_irq = irq;
        avp->endpoints = RB_ROOT;
        spin_lock_init(&avp->state_lock);
        mutex_init(&avp->open_lock);
        mutex_init(&avp->to_avp_lock);
        mutex_init(&avp->from_avp_lock);
        INIT_WORK(&avp->recv_work, process_avp_message);

        mutex_init(&avp->libs_lock);
        INIT_LIST_HEAD(&avp->libs);

        avp->recv_wq = alloc_workqueue("avp-msg-recv",
                                       WQ_NON_REENTRANT | WQ_HIGHPRI, 1);
        if (!avp->recv_wq) {
                pr_err("%s: can't create recv workqueue\n", __func__);
                ret = -ENOMEM;
                goto err_create_wq;
        }

        avp->cop_clk = clk_get(&pdev->dev, "cop");
        if (IS_ERR_OR_NULL(avp->cop_clk)) {
                pr_err("%s: Couldn't get cop clock\n", TEGRA_AVP_NAME);
                ret = -ENOENT;
                goto err_get_cop_clk;
        }

        msg_area = dma_alloc_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2,
                                      &avp->msg_area_addr, GFP_KERNEL);
        if (!msg_area) {
                pr_err("%s: cannot allocate msg_area\n", __func__);
                ret = -ENOMEM;
                goto err_alloc_msg_area;
        }
        memset(msg_area, 0, AVP_MSG_AREA_SIZE * 2);
        avp->msg = ((avp->msg_area_addr >> 4) |
                    MBOX_MSG_VALID | MBOX_MSG_PENDING_INT_EN);
        avp->msg_to_avp = msg_area;
        avp->msg_from_avp = msg_area + AVP_MSG_AREA_SIZE;

        avp_halt(avp);

        avp_trpc_node.priv = avp;
        ret = trpc_node_register(&avp_trpc_node);
        if (ret) {
                pr_err("%s: Can't register avp rpc node\n", __func__);
                goto err_node_reg;
        }
        avp->rpc_node = &avp_trpc_node;

        avp->avp_svc = avp_svc_init(pdev, avp->rpc_node);
        if (IS_ERR_OR_NULL(avp->avp_svc)) {
                pr_err("%s: Cannot initialize avp_svc\n", __func__);
                ret = PTR_ERR(avp->avp_svc);
                goto err_avp_svc_init;
        }

        avp->misc_dev.minor = MISC_DYNAMIC_MINOR;
        avp->misc_dev.name = "tegra_avp";
        avp->misc_dev.fops = &tegra_avp_fops;

        ret = misc_register(&avp->misc_dev);
        if (ret) {
                pr_err("%s: Unable to register misc device!\n", TEGRA_AVP_NAME);
                goto err_misc_reg;
        }

        ret = request_irq(irq, avp_mbox_pending_isr, 0, TEGRA_AVP_NAME, avp);
        if (ret) {
                pr_err("%s: cannot register irq handler\n", __func__);
                goto err_req_irq_pend;
        }
        disable_irq(avp->mbox_from_avp_pend_irq);

        tegra_avp = avp;

        pr_info("%s: message area %lx/%lx\n", __func__,
                (unsigned long)avp->msg_area_addr,
                (unsigned long)avp->msg_area_addr + AVP_MSG_AREA_SIZE);

        return 0;

err_req_irq_pend:
        misc_deregister(&avp->misc_dev);
err_misc_reg:
        avp_svc_destroy(avp->avp_svc);
err_avp_svc_init:
        trpc_node_unregister(avp->rpc_node);
err_node_reg:
        dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, msg_area,
                          avp->msg_area_addr);
err_alloc_msg_area:
        clk_put(avp->cop_clk);
err_get_cop_clk:
        destroy_workqueue(avp->recv_wq);
err_create_wq:
        nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
err_iram_nvmap_pin:
        nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
err_iram_nvmap_mmap:
        nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
err_iram_nvmap_alloc:
        nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
err_nvmap_pin:
        nvmap_munmap(avp->kernel_handle, avp->kernel_data);
err_nvmap_mmap:
        nvmap_free(avp->nvmap_drv, avp->kernel_handle);
err_nvmap_alloc:
        nvmap_client_put(avp->nvmap_drv);
err_nvmap_create_drv_client:
        kfree(avp);
        tegra_avp = NULL;
        return ret;
}

static int tegra_avp_remove(struct platform_device *pdev)
{
        struct tegra_avp_info *avp = tegra_avp;

        if (!avp)
                return 0;

        mutex_lock(&avp->open_lock);
        /* ensure that no one can open while we tear down */
        if (avp->refcount) {
                mutex_unlock(&avp->open_lock);
                return -EBUSY;
        }
        mutex_unlock(&avp->open_lock);

        misc_deregister(&avp->misc_dev);

        avp_halt(avp);

        avp_svc_destroy(avp->avp_svc);
        trpc_node_unregister(avp->rpc_node);
        dma_free_coherent(&pdev->dev, AVP_MSG_AREA_SIZE * 2, avp->msg_to_avp,
                          avp->msg_area_addr);
        clk_put(avp->cop_clk);
        destroy_workqueue(avp->recv_wq);
        nvmap_unpin(avp->nvmap_drv, avp->iram_backup_handle);
        nvmap_munmap(avp->iram_backup_handle, avp->iram_backup_data);
        nvmap_free(avp->nvmap_drv, avp->iram_backup_handle);
        nvmap_unpin(avp->nvmap_drv, avp->kernel_handle);
        nvmap_munmap(avp->kernel_handle, avp->kernel_data);
        nvmap_free(avp->nvmap_drv, avp->kernel_handle);
        nvmap_client_put(avp->nvmap_drv);
        kfree(avp);
        tegra_avp = NULL;
        return 0;
}

int tegra_avp_load_lib(struct tegra_avp_info *avp, struct tegra_avp_lib *lib)
{
        int ret;

        if (!avp)
                return -ENODEV;

        if (!lib)
                return -EFAULT;

        lib->name[TEGRA_AVP_LIB_MAX_NAME - 1] = '\0';

        if (lib->args_len > TEGRA_AVP_LIB_MAX_ARGS) {
                pr_err("%s: library args too long (%d)\n", __func__,
                       lib->args_len);
                return -E2BIG;
        }

        mutex_lock(&avp->libs_lock);
        ret = _load_lib(avp, lib, false);
        if (ret)
                goto err_load_lib;

        ret = _insert_lib_locked(avp, lib->handle, lib->name);
        if (ret) {
                pr_err("%s: can't insert lib (%d)\n", __func__, ret);
                goto err_insert_lib;
        }

        mutex_unlock(&avp->libs_lock);
        return 0;

err_insert_lib:
        ret = send_unload_lib_msg(avp, lib->handle, lib->name);
        if (!ret)
                DBG(AVP_DBG_TRACE_LIB, "avp_lib: unloaded '%s'\n", lib->name);
        else
                pr_err("avp_lib: can't unload lib '%s' (%d)\n", lib->name, ret);
        lib->handle = 0;
err_load_lib:
        mutex_unlock(&avp->libs_lock);
        return ret;
}

int tegra_avp_unload_lib(struct tegra_avp_info *avp, unsigned long handle)
{
        struct lib_item *item;
        int ret;

        if (!avp)
                return -ENODEV;

        mutex_lock(&avp->libs_lock);
        item = _find_lib_locked(avp, handle);
        if (!item) {
                pr_err("avp_lib: avp lib with handle 0x%x not found\n",
                       (u32)handle);
                ret = -ENOENT;
                goto err_find;
        }
        ret = send_unload_lib_msg(avp, item->handle, item->name);
        if (!ret)
                DBG(AVP_DBG_TRACE_LIB, "avp_lib: unloaded '%s'\n", item->name);
        else
                pr_err("avp_lib: can't unload lib '%s'/0x%x (%d)\n", item->name,
                       item->handle, ret);
        _delete_lib_locked(avp, item);

err_find:
        mutex_unlock(&avp->libs_lock);
        return ret;
}

static struct platform_driver tegra_avp_driver = {
        .probe          = tegra_avp_probe,
        .remove         = tegra_avp_remove,
        .suspend        = tegra_avp_suspend,
        .resume         = tegra_avp_resume,
        .driver = {
                .name   = TEGRA_AVP_NAME,
                .owner  = THIS_MODULE,
        },
};

static int __init tegra_avp_init(void)
{
        return platform_driver_register(&tegra_avp_driver);
}

static void __exit tegra_avp_exit(void)
{
        platform_driver_unregister(&tegra_avp_driver);
}

module_init(tegra_avp_init);
module_exit(tegra_avp_exit);