diff options
Diffstat (limited to 'drivers/media/video/tegra/avp/tegra_rpc.c')
-rw-r--r-- | drivers/media/video/tegra/avp/tegra_rpc.c | 796 |
1 files changed, 796 insertions, 0 deletions
diff --git a/drivers/media/video/tegra/avp/tegra_rpc.c b/drivers/media/video/tegra/avp/tegra_rpc.c new file mode 100644 index 00000000000..a0fd1dc999f --- /dev/null +++ b/drivers/media/video/tegra/avp/tegra_rpc.c | |||
@@ -0,0 +1,796 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Google, Inc. | ||
3 | * | ||
4 | * Author: | ||
5 | * Dima Zavin <dima@android.com> | ||
6 | * | ||
7 | * Based on original NVRM code from NVIDIA, and a partial rewrite by: | ||
8 | * Gary King <gking@nvidia.com> | ||
9 | * | ||
10 | * This software is licensed under the terms of the GNU General Public | ||
11 | * License version 2, as published by the Free Software Foundation, and | ||
12 | * may be copied, distributed, and modified under those terms. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/debugfs.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/err.h> | ||
24 | #include <linux/kref.h> | ||
25 | #include <linux/list.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/mutex.h> | ||
28 | #include <linux/rbtree.h> | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/seq_file.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/tegra_rpc.h> | ||
33 | #include <linux/types.h> | ||
34 | #include <linux/wait.h> | ||
35 | |||
36 | #include "trpc.h" | ||
37 | |||
struct trpc_port;

/* One half of an RPC port. A port always has exactly two endpoints
 * (see struct trpc_port below); messages sent on one endpoint are
 * delivered to its peer. */
struct trpc_endpoint {
	struct list_head msg_list;	/* queued, undelivered messages */
	wait_queue_head_t msg_waitq;	/* receivers sleep here for messages */

	struct trpc_endpoint *out;	/* peer endpoint on the same port */
	struct trpc_port *port;		/* back-pointer to owning port */

	struct trpc_node *owner;	/* node that created/readied this end */

	/* completion for trpc_wait_peer(); non-NULL only while a waiter
	 * is blocked waiting for the connection to be established */
	struct completion *connect_done;
	bool ready;			/* endpoint has been claimed/readied */
	struct trpc_ep_ops *ops;	/* optional callbacks (send/recv/close) */
	void *priv;			/* owner's private data, see trpc_priv() */
};
53 | |||
/* A named, refcounted rendezvous point consisting of two endpoints.
 * Ports live in a global rb-tree keyed by name (see tegra_rpc_info). */
struct trpc_port {
	char name[TEGRA_RPC_MAX_NAME_LEN];

	/* protects peer and closed state */
	spinlock_t lock;
	struct trpc_endpoint peers[2];
	bool closed;			/* once true, the port never reopens */

	/* private */
	struct kref ref;		/* released via _rpc_port_release() */
	struct rb_node rb_node;		/* linkage into tegra_rpc_info.ports */
};
66 | |||
/* Bits for the module's debug_mask parameter; each enables one trace
 * category printed through the DBG() macro below. */
enum {
	TRPC_TRACE_MSG	= 1U << 0,	/* message queue/dequeue tracing */
	TRPC_TRACE_CONN	= 1U << 1,	/* connection establishment tracing */
	TRPC_TRACE_PORT	= 1U << 2,	/* port create/insert/release tracing */
};

static u32 trpc_debug_mask;
module_param_named(debug_mask, trpc_debug_mask, uint, S_IWUSR | S_IRUGO);

/* pr_info() gated on the runtime-settable debug mask */
#define DBG(flag, args...) \
	do { if (trpc_debug_mask & (flag)) pr_info(args); } while (0)
78 | |||
/* Global driver state; a single instance is allocated at init time
 * and published through the file-scope 'tegra_rpc' pointer. */
struct tegra_rpc_info {
	struct kmem_cache *msg_cache;	/* slab cache for struct trpc_msg */

	spinlock_t ports_lock;		/* protects the ports rb-tree */
	struct rb_root ports;		/* all live ports, keyed by name */

	struct list_head node_list;	/* registered trpc_nodes */
	struct mutex node_lock;		/* protects node_list */
};
88 | |||
/* A single queued message; allocated from msg_cache and freed either on
 * delivery (trpc_recv_msg) or when the port is torn down (rpc_port_free). */
struct trpc_msg {
	struct list_head list;		/* linkage into an endpoint msg_list */

	size_t len;			/* valid bytes in payload */
	u8 payload[TEGRA_RPC_MAX_MSG_LEN];
};
95 | |||
/* Singleton driver state, set up once in tegra_rpc_init(). */
static struct tegra_rpc_info *tegra_rpc;
/* debugfs dir "tegra_rpc" containing the "ports" dump file */
static struct dentry *trpc_debug_root;

/* forward declaration; defined (static inline) further below */
static struct trpc_msg *dequeue_msg_locked(struct trpc_endpoint *ep);
100 | |||
/* a few accessors for the outside world to keep the trpc_endpoint struct
 * definition private to this module */

/* Return the owner-private pointer stashed in the endpoint by
 * trpc_create()/trpc_create_peer(). */
void *trpc_priv(struct trpc_endpoint *ep)
{
	return ep->priv;
}
107 | |||
/* Return the endpoint on the other side of ep's port. */
struct trpc_endpoint *trpc_peer(struct trpc_endpoint *ep)
{
	return ep->out;
}
112 | |||
/* Return the name of the port this endpoint belongs to. */
const char *trpc_name(struct trpc_endpoint *ep)
{
	return ep->port->name;
}
117 | |||
/* A port is connected once both of its endpoints have been readied.
 * Caller must hold port->lock for a stable answer. */
static inline bool is_connected(struct trpc_port *port)
{
	return port->peers[0].ready && port->peers[1].ready;
}
122 | |||
/* Whether trpc_close() has been called on either endpoint.
 * Caller must hold port->lock for a stable answer. */
static inline bool is_closed(struct trpc_port *port)
{
	return port->closed;
}
127 | |||
128 | static void rpc_port_free(struct tegra_rpc_info *info, struct trpc_port *port) | ||
129 | { | ||
130 | struct trpc_msg *msg; | ||
131 | int i; | ||
132 | |||
133 | for (i = 0; i < 2; ++i) { | ||
134 | struct list_head *list = &port->peers[i].msg_list; | ||
135 | while (!list_empty(list)) { | ||
136 | msg = list_first_entry(list, struct trpc_msg, list); | ||
137 | list_del(&msg->list); | ||
138 | kmem_cache_free(info->msg_cache, msg); | ||
139 | } | ||
140 | } | ||
141 | kfree(port); | ||
142 | } | ||
143 | |||
/* kref release callback: unlink the port from the global rb-tree and
 * free it. Runs with no port references left, so rpc_port_free() needs
 * no further locking; ports_lock protects only the tree erase. */
static void _rpc_port_release(struct kref *kref)
{
	struct tegra_rpc_info *info = tegra_rpc;
	struct trpc_port *port = container_of(kref, struct trpc_port, ref);
	unsigned long flags;

	DBG(TRPC_TRACE_PORT, "%s: releasing port '%s' (%p)\n", __func__,
	    port->name, port);
	spin_lock_irqsave(&info->ports_lock, flags);
	rb_erase(&port->rb_node, &info->ports);
	spin_unlock_irqrestore(&info->ports_lock, flags);
	rpc_port_free(info, port);
}
157 | |||
/* note that the refcount is actually on the port and not on the endpoint */

/* Drop one reference on ep's port; frees the port when it hits zero. */
void trpc_put(struct trpc_endpoint *ep)
{
	kref_put(&ep->port->ref, _rpc_port_release);
}
163 | |||
/* Take an additional reference on ep's port (refcount lives on the port,
 * shared by both endpoints). */
void trpc_get(struct trpc_endpoint *ep)
{
	kref_get(&ep->port->ref);
}
168 | |||
/* Searches the rb_tree for a port with the provided name. If one is not found,
 * the new port is inserted. Otherwise, the existing port is returned.
 * Must be called with the ports_lock held */
static struct trpc_port *rpc_port_find_insert(struct tegra_rpc_info *info,
					      struct trpc_port *port)
{
	struct rb_node **p;
	struct rb_node *parent;
	struct trpc_port *tmp;
	int ret = 0;

	/* standard rb-tree descend-and-link walk, ordered by port name */
	p = &info->ports.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct trpc_port, rb_node);

		ret = strncmp(port->name, tmp->name, TEGRA_RPC_MAX_NAME_LEN);
		if (ret < 0)
			p = &(*p)->rb_left;
		else if (ret > 0)
			p = &(*p)->rb_right;
		else
			/* name already present: hand back the existing port;
			 * caller is responsible for freeing its new one */
			return tmp;
	}
	rb_link_node(&port->rb_node, parent, p);
	rb_insert_color(&port->rb_node, &info->ports);
	DBG(TRPC_TRACE_PORT, "%s: inserted port '%s' (%p)\n", __func__,
	    port->name, port);
	return port;
}
200 | |||
/* Offer the pending connection from 'from' to every registered node that
 * implements try_connect, stopping at the first one that accepts (returns
 * 0). Returns 0 on acceptance, -ECONNREFUSED if all nodes declined. */
static int nodes_try_connect(struct tegra_rpc_info *info,
			     struct trpc_node *src,
			     struct trpc_endpoint *from)
{
	struct trpc_node *node;
	int ret = -ECONNREFUSED;

	mutex_lock(&info->node_lock);
	list_for_each_entry(node, &info->node_list, list) {
		if (node->try_connect && !node->try_connect(node, src, from)) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&info->node_lock);
	return ret;
}
221 | |||
222 | static struct trpc_port *rpc_port_alloc(const char *name) | ||
223 | { | ||
224 | struct trpc_port *port; | ||
225 | int i; | ||
226 | |||
227 | port = kzalloc(sizeof(struct trpc_port), GFP_KERNEL); | ||
228 | if (!port) { | ||
229 | pr_err("%s: can't alloc rpc_port\n", __func__); | ||
230 | return NULL; | ||
231 | } | ||
232 | BUILD_BUG_ON(2 != ARRAY_SIZE(port->peers)); | ||
233 | |||
234 | spin_lock_init(&port->lock); | ||
235 | kref_init(&port->ref); | ||
236 | strlcpy(port->name, name, TEGRA_RPC_MAX_NAME_LEN); | ||
237 | for (i = 0; i < 2; i++) { | ||
238 | struct trpc_endpoint *ep = port->peers + i; | ||
239 | INIT_LIST_HEAD(&ep->msg_list); | ||
240 | init_waitqueue_head(&ep->msg_waitq); | ||
241 | ep->port = port; | ||
242 | } | ||
243 | port->peers[0].out = &port->peers[1]; | ||
244 | port->peers[1].out = &port->peers[0]; | ||
245 | |||
246 | return port; | ||
247 | } | ||
248 | |||
/* must be holding the ports lock */

/* Both endpoints are now ready: wake any trpc_wait_peer() callers that
 * are blocked on either endpoint's completion. */
static inline void handle_port_connected(struct trpc_port *port)
{
	int i;

	DBG(TRPC_TRACE_CONN, "tegra_rpc: port '%s' connected\n", port->name);

	for (i = 0; i < 2; i++)
		if (port->peers[i].connect_done)
			complete(port->peers[i].connect_done);
}
260 | |||
/* Claim an endpoint for 'owner': mark it ready and attach its callback
 * ops and private data. Caller must hold the appropriate lock for the
 * port's connection state. */
static inline void _ready_ep(struct trpc_endpoint *ep,
			     struct trpc_node *owner,
			     struct trpc_ep_ops *ops,
			     void *priv)
{
	ep->ready = true;
	ep->owner = owner;
	ep->ops = ops;
	ep->priv = priv;
}
271 | |||
272 | /* this keeps a reference on the port */ | ||
273 | static struct trpc_endpoint *_create_peer(struct tegra_rpc_info *info, | ||
274 | struct trpc_node *owner, | ||
275 | struct trpc_endpoint *ep, | ||
276 | struct trpc_ep_ops *ops, | ||
277 | void *priv) | ||
278 | { | ||
279 | struct trpc_port *port = ep->port; | ||
280 | struct trpc_endpoint *peer = ep->out; | ||
281 | unsigned long flags; | ||
282 | |||
283 | spin_lock_irqsave(&port->lock, flags); | ||
284 | BUG_ON(port->closed); | ||
285 | if (peer->ready || !ep->ready) { | ||
286 | peer = NULL; | ||
287 | goto out; | ||
288 | } | ||
289 | _ready_ep(peer, owner, ops, priv); | ||
290 | if (WARN_ON(!is_connected(port))) | ||
291 | pr_warning("%s: created peer but no connection established?!\n", | ||
292 | __func__); | ||
293 | else | ||
294 | handle_port_connected(port); | ||
295 | trpc_get(peer); | ||
296 | out: | ||
297 | spin_unlock_irqrestore(&port->lock, flags); | ||
298 | return peer; | ||
299 | } | ||
300 | |||
/* Exported code. This is our interface to the outside world */

/* Create (or join) the named port and return a readied endpoint on it
 * owned by 'owner'. If the port does not exist yet, peers[0] of a new
 * port is returned; if it already exists, this becomes the second
 * endpoint (peers[1] side) and the connection is completed.
 * Returns ERR_PTR(-ENOMEM) on allocation failure or ERR_PTR(-EINVAL)
 * if the existing port cannot accept another endpoint. */
struct trpc_endpoint *trpc_create(struct trpc_node *owner, const char *name,
				  struct trpc_ep_ops *ops, void *priv)
{
	struct tegra_rpc_info *info = tegra_rpc;
	struct trpc_endpoint *ep;
	struct trpc_port *new_port;
	struct trpc_port *port;
	unsigned long flags;

	BUG_ON(!owner);

	/* we always allocate a new port even if one already might exist. This
	 * is slightly inefficient, but it allows us to do the allocation
	 * without holding our ports_lock spinlock. */
	new_port = rpc_port_alloc(name);
	if (!new_port) {
		pr_err("%s: can't allocate memory for '%s'\n", __func__, name);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_irqsave(&info->ports_lock, flags);
	port = rpc_port_find_insert(info, new_port);
	if (port != new_port) {
		/* lost the race / name already taken: discard our copy */
		rpc_port_free(info, new_port);
		/* There was already a port by that name in the rb_tree,
		 * so just try to create its peer[1], i.e. peer for peer[0]
		 */
		ep = _create_peer(info, owner, &port->peers[0], ops, priv);
		if (!ep) {
			pr_err("%s: port '%s' is not in a connectable state\n",
			       __func__, port->name);
			ep = ERR_PTR(-EINVAL);
		}
		goto out;
	}
	/* don't need to grab the individual port lock here since we must be
	 * holding the ports_lock to add the new element, and never dropped
	 * it, and thus no one could have gotten a reference to this port
	 * and thus the state couldn't have been touched */
	ep = &port->peers[0];
	_ready_ep(ep, owner, ops, priv);
out:
	spin_unlock_irqrestore(&info->ports_lock, flags);
	return ep;
}
347 | |||
/* Locked wrapper around _create_peer(): claim the peer of an existing
 * endpoint for 'owner'. Returns the peer endpoint (with a port
 * reference held) or NULL if the peer is not available. */
struct trpc_endpoint *trpc_create_peer(struct trpc_node *owner,
				       struct trpc_endpoint *ep,
				       struct trpc_ep_ops *ops,
				       void *priv)
{
	struct tegra_rpc_info *info = tegra_rpc;
	struct trpc_endpoint *peer;
	unsigned long flags;

	BUG_ON(!owner);

	spin_lock_irqsave(&info->ports_lock, flags);
	peer = _create_peer(info, owner, ep, ops, priv);
	spin_unlock_irqrestore(&info->ports_lock, flags);
	return peer;
}
364 | |||
/* timeout == -1, waits forever
 * timeout == 0, return immediately
 */

/* Try to establish the connection for 'from' by polling the registered
 * nodes until the port reports connected, the timeout (in ms) expires,
 * or a signal arrives. Returns 0 on success, -ECONNRESET if the port is
 * closed, -EINTR on signal, -ETIMEDOUT on timeout, or the last
 * nodes_try_connect() error when timeout == 0. */
int trpc_connect(struct trpc_endpoint *from, long timeout)
{
	struct tegra_rpc_info *info = tegra_rpc;
	struct trpc_port *port = from->port;
	struct trpc_node *src = from->owner;
	int ret;
	bool no_retry = !timeout;
	/* endtime is meaningless for timeout < 0, but then the loop
	 * condition below never consults it */
	unsigned long endtime = jiffies + msecs_to_jiffies(timeout);
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	/* XXX: add state for connections and ports to prevent invalid
	 * states like multiple connections, etc. ? */
	if (unlikely(is_closed(port))) {
		ret = -ECONNRESET;
		pr_err("%s: can't connect to %s, closed\n", __func__,
		       port->name);
		goto out;
	} else if (is_connected(port)) {
		ret = 0;
		goto out;
	}
	spin_unlock_irqrestore(&port->lock, flags);

	do {
		ret = nodes_try_connect(info, src, from);

		/* re-check connection state under the lock; a node's
		 * try_connect may have completed the handshake */
		spin_lock_irqsave(&port->lock, flags);
		if (is_connected(port)) {
			ret = 0;
			goto out;
		} else if (no_retry) {
			goto out;
		} else if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		spin_unlock_irqrestore(&port->lock, flags);
		/* back off briefly before retrying */
		usleep_range(5000, 20000);
	} while (timeout < 0 || time_before(jiffies, endtime));

	return -ETIMEDOUT;

out:
	spin_unlock_irqrestore(&port->lock, flags);
	return ret;
}
415 | |||
/* convenience function for doing this common pattern in a single call */

/* Create an endpoint on 'name' and immediately attempt to connect it,
 * closing the endpoint again if the connect fails. Returns the endpoint
 * or an ERR_PTR from either step. */
struct trpc_endpoint *trpc_create_connect(struct trpc_node *src,
					  char *name,
					  struct trpc_ep_ops *ops,
					  void *priv,
					  long timeout)
{
	struct trpc_endpoint *ep;
	int err;

	ep = trpc_create(src, name, ops, priv);
	if (IS_ERR(ep))
		return ep;

	err = trpc_connect(ep, timeout);
	if (!err)
		return ep;

	/* connect failed: tear the endpoint back down */
	trpc_close(ep);
	return ERR_PTR(err);
}
438 | |||
/* Close an endpoint: mark the whole port closed, wake a peer that may be
 * blocked in trpc_recv_msg()/trpc_wait_peer(), invoke the peer's close
 * callback (outside the lock), and drop this endpoint's port reference. */
void trpc_close(struct trpc_endpoint *ep)
{
	struct trpc_port *port = ep->port;
	struct trpc_endpoint *peer = ep->out;
	bool need_close_op = false;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	BUG_ON(!ep->ready);
	ep->ready = false;
	port->closed = true;
	if (peer->ready) {
		need_close_op = true;
		/* the peer may be waiting for a message */
		wake_up_all(&peer->msg_waitq);
		if (peer->connect_done)
			complete(peer->connect_done);
	}
	/* drop the lock before calling out: ops->close may sleep */
	spin_unlock_irqrestore(&port->lock, flags);
	if (need_close_op && peer->ops && peer->ops->close)
		peer->ops->close(peer);
	trpc_put(ep);
}
462 | |||
/* Block until ep's port becomes connected, with 'timeout' in ms
 * (<0 = wait forever, 0 = poll). Only one waiter per endpoint is
 * allowed at a time (-EBUSY otherwise). Returns 0 when connected,
 * -ECONNRESET if closed, -EAGAIN for a failed poll, -EINTR on signal,
 * -ETIMEDOUT on timeout. */
int trpc_wait_peer(struct trpc_endpoint *ep, long timeout)
{
	struct trpc_port *port = ep->port;
	DECLARE_COMPLETION_ONSTACK(event);
	int ret;
	unsigned long flags;

	if (timeout < 0)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else if (timeout > 0)
		timeout = msecs_to_jiffies(timeout);

	spin_lock_irqsave(&port->lock, flags);
	if (ep->connect_done) {
		/* somebody is already waiting on this endpoint */
		ret = -EBUSY;
		goto done;
	} else if (is_connected(port)) {
		ret = 0;
		goto done;
	} else if (is_closed(port)) {
		ret = -ECONNRESET;
		goto done;
	} else if (!timeout) {
		ret = -EAGAIN;
		goto done;
	}
	/* publish the on-stack completion so connect/close can wake us */
	ep->connect_done = &event;
	spin_unlock_irqrestore(&port->lock, flags);

	ret = wait_for_completion_interruptible_timeout(&event, timeout);

	/* must clear connect_done under the lock before 'event' goes out
	 * of scope */
	spin_lock_irqsave(&port->lock, flags);
	ep->connect_done = NULL;

	if (is_connected(port)) {
		ret = 0;
	} else {
		/* map the wait_for_completion result to our error codes */
		if (is_closed(port))
			ret = -ECONNRESET;
		else if (ret == -ERESTARTSYS)
			ret = -EINTR;
		else if (!ret)
			ret = -ETIMEDOUT;
	}

done:
	spin_unlock_irqrestore(&port->lock, flags);
	return ret;
}
512 | |||
/* Index (0 or 1) of this endpoint within its port's peers[] array,
 * computed by pointer arithmetic; used only for trace messages. */
static inline int _ep_id(struct trpc_endpoint *ep)
{
	return ep - ep->port->peers;
}
517 | |||
/* Copy 'buf' into a freshly allocated trpc_msg and append it to the
 * peer's message list, waking any receiver. The allocation happens
 * before taking the port lock so gfp_flags may allow sleeping.
 * Returns 0, -ENOMEM on allocation failure, -ECONNRESET if the port is
 * closed, or -ENOTCONN if it was never connected. */
static int queue_msg(struct trpc_node *src, struct trpc_endpoint *from,
		     void *buf, size_t len, gfp_t gfp_flags)
{
	struct tegra_rpc_info *info = tegra_rpc;
	struct trpc_endpoint *peer = from->out;
	struct trpc_port *port = from->port;
	struct trpc_msg *msg;
	unsigned long flags;
	int ret;

	BUG_ON(len > TEGRA_RPC_MAX_MSG_LEN);
	/* shouldn't be enqueueing to the endpoint */
	BUG_ON(peer->ops && peer->ops->send);

	DBG(TRPC_TRACE_MSG, "%s: queueing message for %s.%d\n", __func__,
	    port->name, _ep_id(peer));

	msg = kmem_cache_alloc(info->msg_cache, gfp_flags);
	if (!msg) {
		pr_err("%s: can't alloc memory for msg\n", __func__);
		return -ENOMEM;
	}

	/* fill the message before taking the lock */
	memcpy(msg->payload, buf, len);
	msg->len = len;

	spin_lock_irqsave(&port->lock, flags);
	if (is_closed(port)) {
		pr_err("%s: cannot send message for closed port %s.%d\n",
		       __func__, port->name, _ep_id(peer));
		ret = -ECONNRESET;
		goto err;
	} else if (!is_connected(port)) {
		pr_err("%s: cannot send message for unconnected port %s.%d\n",
		       __func__, port->name, _ep_id(peer));
		ret = -ENOTCONN;
		goto err;
	}

	list_add_tail(&msg->list, &peer->msg_list);
	/* notify_recv is called under the port lock — assumed non-sleeping */
	if (peer->ops && peer->ops->notify_recv)
		peer->ops->notify_recv(peer);
	wake_up_all(&peer->msg_waitq);
	spin_unlock_irqrestore(&port->lock, flags);
	return 0;

err:
	spin_unlock_irqrestore(&port->lock, flags);
	kmem_cache_free(info->msg_cache, msg);
	return ret;
}
569 | |||
/* Returns -ENOMEM if failed to allocate memory for the message. */

/* Send 'len' bytes from 'buf' to the peer of 'from': either hand the
 * buffer directly to the peer's send op (which may sleep), or copy it
 * onto the peer's message queue via queue_msg(). */
int trpc_send_msg(struct trpc_node *src, struct trpc_endpoint *from,
		  void *buf, size_t len, gfp_t gfp_flags)
{
	struct trpc_endpoint *peer = from->out;
	struct trpc_port *port = from->port;

	BUG_ON(len > TEGRA_RPC_MAX_MSG_LEN);

	DBG(TRPC_TRACE_MSG, "%s: sending message from %s.%d to %s.%d\n",
	    __func__, port->name, _ep_id(from), port->name, _ep_id(peer));

	if (peer->ops && peer->ops->send) {
		might_sleep();
		return peer->ops->send(peer, buf, len);
	} else {
		/* __GFP_WAIT is the legacy (pre-4.4) name of __GFP_RECLAIM */
		might_sleep_if(gfp_flags & __GFP_WAIT);
		return queue_msg(src, from, buf, len, gfp_flags);
	}
}
590 | |||
591 | static inline struct trpc_msg *dequeue_msg_locked(struct trpc_endpoint *ep) | ||
592 | { | ||
593 | struct trpc_msg *msg = NULL; | ||
594 | |||
595 | if (!list_empty(&ep->msg_list)) { | ||
596 | msg = list_first_entry(&ep->msg_list, struct trpc_msg, list); | ||
597 | list_del_init(&msg->list); | ||
598 | } | ||
599 | |||
600 | return msg; | ||
601 | } | ||
602 | |||
603 | static bool __should_wake(struct trpc_endpoint *ep) | ||
604 | { | ||
605 | struct trpc_port *port = ep->port; | ||
606 | unsigned long flags; | ||
607 | bool ret; | ||
608 | |||
609 | spin_lock_irqsave(&port->lock, flags); | ||
610 | ret = !list_empty(&ep->msg_list) || is_closed(port); | ||
611 | spin_unlock_irqrestore(&port->lock, flags); | ||
612 | return ret; | ||
613 | } | ||
614 | |||
/* Receive one message on 'ep', copying at most buf_len bytes into 'buf'.
 * 'timeout' is in ms (<0 = wait forever, 0 = poll). Returns the number
 * of bytes copied (a message longer than buf_len is silently
 * truncated), 0 on an empty poll, or a negative error: -ECONNRESET if
 * closed with nothing queued, -ENOTCONN, -EINTR, -ETIMEDOUT. */
int trpc_recv_msg(struct trpc_node *src, struct trpc_endpoint *ep,
		  void *buf, size_t buf_len, long timeout)
{
	struct tegra_rpc_info *info = tegra_rpc;
	struct trpc_port *port = ep->port;
	struct trpc_msg *msg;
	size_t len;
	long ret;
	unsigned long flags;

	BUG_ON(buf_len > TEGRA_RPC_MAX_MSG_LEN);

	spin_lock_irqsave(&port->lock, flags);
	/* we allow closed ports to finish receiving already-queued messages */
	msg = dequeue_msg_locked(ep);
	if (msg) {
		goto got_msg;
	} else if (is_closed(port)) {
		ret = -ECONNRESET;
		goto out;
	} else if (!is_connected(port)) {
		ret = -ENOTCONN;
		goto out;
	}

	if (timeout == 0) {
		/* poll: nothing queued, report 0 bytes */
		ret = 0;
		goto out;
	} else if (timeout < 0) {
		timeout = MAX_SCHEDULE_TIMEOUT;
	} else {
		timeout = msecs_to_jiffies(timeout);
	}
	spin_unlock_irqrestore(&port->lock, flags);
	DBG(TRPC_TRACE_MSG, "%s: waiting for message for %s.%d\n", __func__,
	    port->name, _ep_id(ep));

	ret = wait_event_interruptible_timeout(ep->msg_waitq, __should_wake(ep),
					       timeout);

	DBG(TRPC_TRACE_MSG, "%s: woke up for %s\n", __func__, port->name);
	spin_lock_irqsave(&port->lock, flags);
	msg = dequeue_msg_locked(ep);
	if (!msg) {
		/* no message: figure out why we woke up */
		if (is_closed(port))
			ret = -ECONNRESET;
		else if (!ret)
			ret = -ETIMEDOUT;
		else if (ret == -ERESTARTSYS)
			ret = -EINTR;
		else
			pr_err("%s: error (%d) while receiving msg for '%s'\n",
			       __func__, (int)ret, port->name);
		goto out;
	}

got_msg:
	/* copy and free outside the lock; msg is now exclusively ours */
	spin_unlock_irqrestore(&port->lock, flags);
	len = min(buf_len, msg->len);
	memcpy(buf, msg->payload, len);
	kmem_cache_free(info->msg_cache, msg);
	return len;

out:
	spin_unlock_irqrestore(&port->lock, flags);
	return ret;
}
682 | |||
/* Register a connection-broker node. Local nodes go to the front of the
 * list so they are offered connections first; others are appended.
 * NOTE(review): returns -ENOMEM when the core is not initialized —
 * -ENODEV might express that better, but callers may rely on it. */
int trpc_node_register(struct trpc_node *node)
{
	struct tegra_rpc_info *info = tegra_rpc;

	if (!info)
		return -ENOMEM;

	pr_info("%s: Adding '%s' to node list\n", __func__, node->name);

	mutex_lock(&info->node_lock);
	if (node->type == TRPC_NODE_LOCAL)
		list_add(&node->list, &info->node_list);
	else
		list_add_tail(&node->list, &info->node_list);
	mutex_unlock(&info->node_lock);
	return 0;
}
700 | |||
/* Remove a previously registered node from the broker list. */
void trpc_node_unregister(struct trpc_node *node)
{
	struct tegra_rpc_info *info = tegra_rpc;

	mutex_lock(&info->node_lock);
	list_del(&node->list);
	mutex_unlock(&info->node_lock);
}
709 | |||
/* debugfs "ports" dump: walk the port rb-tree and print each port's
 * name, closed flag, and per-endpoint owner/ready state. Nests each
 * port->lock inside ports_lock (plain spin_lock — irqs are already off). */
static int trpc_debug_ports_show(struct seq_file *s, void *data)
{
	struct tegra_rpc_info *info = s->private;
	struct rb_node *n;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&info->ports_lock, flags);
	for (n = rb_first(&info->ports); n; n = rb_next(n)) {
		struct trpc_port *port = rb_entry(n, struct trpc_port, rb_node);
		seq_printf(s, "port: %s\n closed:%s\n", port->name,
			   port->closed ? "yes" : "no");

		spin_lock(&port->lock);
		for (i = 0; i < ARRAY_SIZE(port->peers); i++) {
			struct trpc_endpoint *ep = &port->peers[i];
			seq_printf(s, "  peer%d: %s\n    ready:%s\n", i,
				   ep->owner ? ep->owner->name : "<none>",
				   ep->ready ? "yes" : "no");
			/* let the endpoint's owner append its own state */
			if (ep->ops && ep->ops->show)
				ep->ops->show(s, ep);
		}
		spin_unlock(&port->lock);
	}
	spin_unlock_irqrestore(&info->ports_lock, flags);

	return 0;
}
738 | |||
/* debugfs open hook: bind the show routine to this seq_file. */
static int trpc_debug_ports_open(struct inode *inode, struct file *file)
{
	return single_open(file, trpc_debug_ports_show, inode->i_private);
}
743 | |||
/* file_operations for the debugfs "ports" file (read-only seq_file). */
static const struct file_operations trpc_debug_ports_fops = {
	.open = trpc_debug_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
750 | |||
/* Create the debugfs hierarchy (tegra_rpc/ports). Failure is non-fatal:
 * the driver works fine without its debug files. */
static void trpc_debug_init(struct tegra_rpc_info *info)
{
	trpc_debug_root = debugfs_create_dir("tegra_rpc", NULL);
	if (IS_ERR_OR_NULL(trpc_debug_root)) {
		pr_err("%s: couldn't create debug files\n", __func__);
		return;
	}

	debugfs_create_file("ports", 0664, trpc_debug_root, info,
			    &trpc_debug_ports_fops);
}
762 | |||
/* Module init: allocate the singleton tegra_rpc_info, its message slab
 * cache and debugfs entries, then publish it via the 'tegra_rpc'
 * global. Returns 0 or -ENOMEM (goto-based cleanup on failure). */
static int __init tegra_rpc_init(void)
{
	struct tegra_rpc_info *rpc_info;
	int ret;

	rpc_info = kzalloc(sizeof(struct tegra_rpc_info), GFP_KERNEL);
	if (!rpc_info) {
		pr_err("%s: error allocating rpc_info\n", __func__);
		return -ENOMEM;
	}

	rpc_info->ports = RB_ROOT;
	spin_lock_init(&rpc_info->ports_lock);
	INIT_LIST_HEAD(&rpc_info->node_list);
	mutex_init(&rpc_info->node_lock);

	rpc_info->msg_cache = KMEM_CACHE(trpc_msg, 0);
	if (!rpc_info->msg_cache) {
		pr_err("%s: unable to create message cache\n", __func__);
		ret = -ENOMEM;
		goto err_kmem_cache;
	}

	trpc_debug_init(rpc_info);
	/* publish only once fully initialized */
	tegra_rpc = rpc_info;

	return 0;

err_kmem_cache:
	kfree(rpc_info);
	return ret;
}

/* initialize early (subsys level) so dependent AVP drivers can register */
subsys_initcall(tegra_rpc_init);