aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/media/video/tegra/avp/avp_svc.c
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
committerJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
commitfcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
treea57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/media/video/tegra/avp/avp_svc.c
parent8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files.HEADmaster
Diffstat (limited to 'drivers/media/video/tegra/avp/avp_svc.c')
-rw-r--r--drivers/media/video/tegra/avp/avp_svc.c890
1 files changed, 890 insertions, 0 deletions
diff --git a/drivers/media/video/tegra/avp/avp_svc.c b/drivers/media/video/tegra/avp/avp_svc.c
new file mode 100644
index 00000000000..17c8b8535a6
--- /dev/null
+++ b/drivers/media/video/tegra/avp/avp_svc.c
@@ -0,0 +1,890 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 * Author: Dima Zavin <dima@android.com>
4 *
5 * Copyright (C) 2010-2011 NVIDIA Corporation
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/clk.h>
19#include <linux/delay.h>
20#include <linux/dma-mapping.h>
21#include <linux/err.h>
22#include <linux/io.h>
23#include <linux/kthread.h>
24#include <linux/list.h>
25#include <linux/mutex.h>
26#include <linux/slab.h>
27#include <linux/tegra_rpc.h>
28#include <linux/tegra_avp.h>
29#include <linux/types.h>
30
31#include <mach/clk.h>
32#include <mach/nvmap.h>
33
34#include "../../../../video/tegra/nvmap/nvmap.h"
35
36#include "avp_msg.h"
37#include "trpc.h"
38#include "avp.h"
39
/* Bits for the "debug_mask" module parameter below. */
enum {
	AVP_DBG_TRACE_SVC = 1U << 0,	/* trace every dispatched SVC message */
};

/* Runtime-tunable debug mask: writable by root, readable by everyone. */
static u32 debug_mask;
module_param_named(debug_mask, debug_mask, uint, S_IWUSR | S_IRUGO);

/* Print only when the corresponding debug_mask bit is set. */
#define DBG(flag, args...) \
	do { if (unlikely(debug_mask & (flag))) pr_info(args); } while (0)
49
/*
 * Indices into avp_svc_info.clks[] — one slot per peripheral clock the
 * remote AVP may ask us to control.
 */
enum {
	CLK_REQUEST_VCP = 0,
	CLK_REQUEST_BSEA = 1,
	CLK_REQUEST_VDE = 2,
	CLK_REQUEST_AVP = 3,
	NUM_CLK_REQUESTS,
};

/* Describes one module the AVP can reset/clock. */
struct avp_module {
	const char *name;	/* clk_get() connection name; NULL entry = unsupported id */
	u32 clk_req;		/* index into avp_svc_info.clks[] */
};

/* Module table, indexed directly by the AVP_MODULE_ID_* value sent by the AVP. */
static struct avp_module avp_modules[] = {
	[AVP_MODULE_ID_AVP] = {
		.name = "cop",
		.clk_req = CLK_REQUEST_AVP,
	},
	[AVP_MODULE_ID_VCP] = {
		.name = "vcp",
		.clk_req = CLK_REQUEST_VCP,
	},
	[AVP_MODULE_ID_BSEA] = {
		.name = "bsea",
		.clk_req = CLK_REQUEST_BSEA,
	},
	[AVP_MODULE_ID_VDE] = {
		.name = "vde",
		.clk_req = CLK_REQUEST_VDE,
	},
};
#define NUM_AVP_MODULES	ARRAY_SIZE(avp_modules)

/* Per-slot clock state. */
struct avp_clk {
	struct clk *clk;
	int refcnt;		/* balanced enable/disable requests from the AVP */
	struct avp_module *mod;	/* back-pointer to the owning table entry */
};
88
/*
 * Per-instance state for the service that answers AVP-originated RPC
 * requests (nvmap allocations, clocks, resets, power hints, printf).
 */
struct avp_svc_info {
	/* module clocks, indexed by CLK_REQUEST_* */
	struct avp_clk clks[NUM_CLK_REQUESTS];
	/* used for dvfs */
	struct clk *sclk;
	struct clk *emcclk;

	/* serializes all manipulation of clks[]/sclk/emcclk above */
	struct mutex clk_lock;

	struct trpc_endpoint *cpu_ep;	/* "RPC_CPU_PORT" endpoint the AVP connects to */
	struct task_struct *svc_thread;	/* avp_svc_thread() worker */

	/* client for remote allocations, for easy tear down */
	struct nvmap_client *nvmap_remote;
	struct trpc_node *rpc_node;
	unsigned long max_avp_rate;	/* highest rate sclk can round to */
	unsigned long emc_rate;		/* emc rate from platform data; 0 if none given */

	/* variable to check if video is present */
	bool is_vde_on;
};
109
110static void do_svc_nvmap_create(struct avp_svc_info *avp_svc,
111 struct svc_msg *_msg,
112 size_t len)
113{
114 struct svc_nvmap_create *msg = (struct svc_nvmap_create *)_msg;
115 struct svc_nvmap_create_resp resp;
116 struct nvmap_handle_ref *handle;
117 u32 handle_id = 0;
118 u32 err = 0;
119
120 handle = nvmap_create_handle(avp_svc->nvmap_remote, msg->size);
121 if (unlikely(IS_ERR(handle))) {
122 pr_err("avp_svc: error creating handle (%d bytes) for remote\n",
123 msg->size);
124 err = AVP_ERR_ENOMEM;
125 } else
126 handle_id = (u32)nvmap_ref_to_id(handle);
127
128 resp.svc_id = SVC_NVMAP_CREATE_RESPONSE;
129 resp.err = err;
130 resp.handle_id = handle_id;
131 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
132 sizeof(resp), GFP_KERNEL);
133 /* TODO: do we need to put the handle if send_msg failed? */
134}
135
136static void do_svc_nvmap_alloc(struct avp_svc_info *avp_svc,
137 struct svc_msg *_msg,
138 size_t len)
139{
140 struct svc_nvmap_alloc *msg = (struct svc_nvmap_alloc *)_msg;
141 struct svc_common_resp resp;
142 struct nvmap_handle *handle;
143 u32 err = 0;
144 u32 heap_mask = 0;
145 int i;
146 size_t align;
147
148 handle = nvmap_get_handle_id(avp_svc->nvmap_remote, msg->handle_id);
149 if (IS_ERR(handle)) {
150 pr_err("avp_svc: unknown remote handle 0x%x\n", msg->handle_id);
151 err = AVP_ERR_EACCES;
152 goto out;
153 }
154
155 if (msg->num_heaps > 4) {
156 pr_err("avp_svc: invalid remote alloc request (%d heaps?!)\n",
157 msg->num_heaps);
158 /* TODO: should we error out instead ? */
159 msg->num_heaps = 0;
160 }
161 if (msg->num_heaps == 0)
162 heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC | NVMAP_HEAP_SYSMEM;
163
164 for (i = 0; i < msg->num_heaps; i++) {
165 switch (msg->heaps[i]) {
166 case AVP_NVMAP_HEAP_EXTERNAL:
167 heap_mask |= NVMAP_HEAP_SYSMEM;
168 break;
169 case AVP_NVMAP_HEAP_GART:
170 heap_mask |= NVMAP_HEAP_IOVMM;
171 break;
172 case AVP_NVMAP_HEAP_EXTERNAL_CARVEOUT:
173 heap_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
174 break;
175 case AVP_NVMAP_HEAP_IRAM:
176 heap_mask |= NVMAP_HEAP_CARVEOUT_IRAM;
177 break;
178 default:
179 break;
180 }
181 }
182
183 align = max_t(size_t, L1_CACHE_BYTES, msg->align);
184 err = nvmap_alloc_handle_id(avp_svc->nvmap_remote, msg->handle_id,
185 heap_mask, align, 0);
186 nvmap_handle_put(handle);
187 if (err) {
188 pr_err("avp_svc: can't allocate for handle 0x%x (%d)\n",
189 msg->handle_id, err);
190 err = AVP_ERR_ENOMEM;
191 }
192
193out:
194 resp.svc_id = SVC_NVMAP_ALLOC_RESPONSE;
195 resp.err = err;
196 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
197 sizeof(resp), GFP_KERNEL);
198}
199
200static void do_svc_nvmap_free(struct avp_svc_info *avp_svc,
201 struct svc_msg *_msg,
202 size_t len)
203{
204 struct svc_nvmap_free *msg = (struct svc_nvmap_free *)_msg;
205
206 nvmap_free_handle_id(avp_svc->nvmap_remote, msg->handle_id);
207}
208
209static void do_svc_nvmap_pin(struct avp_svc_info *avp_svc,
210 struct svc_msg *_msg,
211 size_t len)
212{
213 struct svc_nvmap_pin *msg = (struct svc_nvmap_pin *)_msg;
214 struct svc_nvmap_pin_resp resp;
215 struct nvmap_handle_ref *handle;
216 phys_addr_t addr = ~0UL;
217 unsigned long id = msg->handle_id;
218 int err;
219
220 handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote, id);
221 if (IS_ERR(handle)) {
222 pr_err("avp_svc: can't dup handle %lx\n", id);
223 goto out;
224 }
225 err = nvmap_pin_ids(avp_svc->nvmap_remote, 1, &id);
226 if (err) {
227 pr_err("avp_svc: can't pin for handle %lx (%d)\n", id, err);
228 goto out;
229 }
230 addr = nvmap_handle_address(avp_svc->nvmap_remote, id);
231
232out:
233 resp.svc_id = SVC_NVMAP_PIN_RESPONSE;
234 resp.addr = addr;
235 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
236 sizeof(resp), GFP_KERNEL);
237}
238
239static void do_svc_nvmap_unpin(struct avp_svc_info *avp_svc,
240 struct svc_msg *_msg,
241 size_t len)
242{
243 struct svc_nvmap_unpin *msg = (struct svc_nvmap_unpin *)_msg;
244 struct svc_common_resp resp;
245 unsigned long id = msg->handle_id;
246
247 nvmap_unpin_ids(avp_svc->nvmap_remote, 1, &id);
248 nvmap_free_handle_id(avp_svc->nvmap_remote, id);
249
250 resp.svc_id = SVC_NVMAP_UNPIN_RESPONSE;
251 resp.err = 0;
252 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
253 sizeof(resp), GFP_KERNEL);
254}
255
256static void do_svc_nvmap_from_id(struct avp_svc_info *avp_svc,
257 struct svc_msg *_msg,
258 size_t len)
259{
260 struct svc_nvmap_from_id *msg = (struct svc_nvmap_from_id *)_msg;
261 struct svc_common_resp resp;
262 struct nvmap_handle_ref *handle;
263 int err = 0;
264
265 handle = nvmap_duplicate_handle_id(avp_svc->nvmap_remote,
266 msg->handle_id);
267 if (IS_ERR(handle)) {
268 pr_err("avp_svc: can't duplicate handle for id 0x%x (%d)\n",
269 msg->handle_id, (int)PTR_ERR(handle));
270 err = AVP_ERR_ENOMEM;
271 }
272
273 resp.svc_id = SVC_NVMAP_FROM_ID_RESPONSE;
274 resp.err = err;
275 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
276 sizeof(resp), GFP_KERNEL);
277}
278
279static void do_svc_nvmap_get_addr(struct avp_svc_info *avp_svc,
280 struct svc_msg *_msg,
281 size_t len)
282{
283 struct svc_nvmap_get_addr *msg = (struct svc_nvmap_get_addr *)_msg;
284 struct svc_nvmap_get_addr_resp resp;
285
286 resp.svc_id = SVC_NVMAP_GET_ADDRESS_RESPONSE;
287 resp.addr = nvmap_handle_address(avp_svc->nvmap_remote, msg->handle_id);
288 resp.addr += msg->offs;
289 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
290 sizeof(resp), GFP_KERNEL);
291}
292
293static void do_svc_pwr_register(struct avp_svc_info *avp_svc,
294 struct svc_msg *_msg,
295 size_t len)
296{
297 struct svc_pwr_register *msg = (struct svc_pwr_register *)_msg;
298 struct svc_pwr_register_resp resp;
299
300 resp.svc_id = SVC_POWER_RESPONSE;
301 resp.err = 0;
302 resp.client_id = msg->client_id;
303
304 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
305 sizeof(resp), GFP_KERNEL);
306}
307
308static struct avp_module *find_avp_module(struct avp_svc_info *avp_svc, u32 id)
309{
310 if (id < NUM_AVP_MODULES && avp_modules[id].name)
311 return &avp_modules[id];
312 return NULL;
313}
314
315static void do_svc_module_reset(struct avp_svc_info *avp_svc,
316 struct svc_msg *_msg,
317 size_t len)
318{
319 struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
320 struct svc_common_resp resp;
321 struct avp_module *mod;
322 struct avp_clk *aclk;
323
324 mod = find_avp_module(avp_svc, msg->module_id);
325 if (!mod) {
326 if (msg->module_id == AVP_MODULE_ID_AVP)
327 pr_err("avp_svc: AVP suicidal?!?!\n");
328 else
329 pr_err("avp_svc: Unknown module reset requested: %d\n",
330 msg->module_id);
331 /* other side doesn't handle errors for reset */
332 resp.err = 0;
333 goto send_response;
334 }
335
336 aclk = &avp_svc->clks[mod->clk_req];
337 tegra_periph_reset_assert(aclk->clk);
338 udelay(10);
339 tegra_periph_reset_deassert(aclk->clk);
340 resp.err = 0;
341
342send_response:
343 resp.svc_id = SVC_MODULE_RESET_RESPONSE;
344 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
345 sizeof(resp), GFP_KERNEL);
346}
347
/*
 * Handle SVC_MODULE_CLOCK: enable or disable a module clock on behalf of
 * the AVP.  Enables are refcounted per clock slot; the first enable also
 * raises the shared emc/sclk clocks, and the last disable drops them back.
 */
static void do_svc_module_clock(struct avp_svc_info *avp_svc,
				struct svc_msg *_msg,
				size_t len)
{
	struct svc_module_ctrl *msg = (struct svc_module_ctrl *)_msg;
	struct svc_common_resp resp;
	struct avp_module *mod;
	struct avp_clk *aclk;
	unsigned long emc_rate = 0;

	mod = find_avp_module(avp_svc, msg->module_id);
	if (!mod) {
		pr_err("avp_svc: unknown module clock requested: %d\n",
		       msg->module_id);
		resp.err = AVP_ERR_EINVAL;
		goto send_response;
	}

	/* track video decode activity: while VDE is on, emc is requested
	 * at its maximum rate (otherwise 0, i.e. no constraint from here) */
	if (msg->module_id == AVP_MODULE_ID_VDE)
		avp_svc->is_vde_on = msg->enable;

	if (avp_svc->is_vde_on == true)
		emc_rate = ULONG_MAX;

	mutex_lock(&avp_svc->clk_lock);
	aclk = &avp_svc->clks[mod->clk_req];
	if (msg->enable) {
		/* first enable for this slot brings up emc/sclk as well */
		if (aclk->refcnt++ == 0) {
			clk_set_rate(avp_svc->emcclk, emc_rate);
			clk_enable(avp_svc->emcclk);
			clk_enable(avp_svc->sclk);
			clk_enable(aclk->clk);
		}
	} else {
		if (unlikely(aclk->refcnt == 0)) {
			pr_err("avp_svc: unbalanced clock disable for '%s'\n",
			       aclk->mod->name);
		} else if (--aclk->refcnt == 0) {
			/* last user gone: drop module clock, then release
			 * the shared clocks (rate 0 = no constraint) */
			clk_disable(aclk->clk);
			clk_set_rate(avp_svc->sclk, 0);
			clk_disable(avp_svc->sclk);
			clk_set_rate(avp_svc->emcclk, 0);
			clk_disable(avp_svc->emcclk);
		}
	}
	mutex_unlock(&avp_svc->clk_lock);
	resp.err = 0;

send_response:
	resp.svc_id = SVC_MODULE_CLOCK_RESPONSE;
	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
		      sizeof(resp), GFP_KERNEL);
}
401
402static void do_svc_null_response(struct avp_svc_info *avp_svc,
403 struct svc_msg *_msg,
404 size_t len, u32 resp_svc_id)
405{
406 struct svc_common_resp resp;
407 resp.svc_id = resp_svc_id;
408 resp.err = 0;
409 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
410 sizeof(resp), GFP_KERNEL);
411}
412
413static void do_svc_dfs_get_state(struct avp_svc_info *avp_svc,
414 struct svc_msg *_msg,
415 size_t len)
416{
417 struct svc_dfs_get_state_resp resp;
418 resp.svc_id = SVC_DFS_GETSTATE_RESPONSE;
419 resp.state = AVP_DFS_STATE_STOPPED;
420 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
421 sizeof(resp), GFP_KERNEL);
422}
423
424static void do_svc_dfs_get_clk_util(struct avp_svc_info *avp_svc,
425 struct svc_msg *_msg,
426 size_t len)
427{
428 struct svc_dfs_get_clk_util_resp resp;
429
430 resp.svc_id = SVC_DFS_GET_CLK_UTIL_RESPONSE;
431 resp.err = 0;
432 memset(&resp.usage, 0, sizeof(struct avp_clk_usage));
433 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
434 sizeof(resp), GFP_KERNEL);
435}
436
437static void do_svc_pwr_max_freq(struct avp_svc_info *avp_svc,
438 struct svc_msg *_msg,
439 size_t len)
440{
441 struct svc_pwr_max_freq_resp resp;
442
443 resp.svc_id = SVC_POWER_MAXFREQ;
444 resp.freq = 0;
445 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
446 sizeof(resp), GFP_KERNEL);
447}
448
449static void do_svc_printf(struct avp_svc_info *avp_svc, struct svc_msg *_msg,
450 size_t len)
451{
452 struct svc_printf *msg = (struct svc_printf *)_msg;
453 char tmp_str[SVC_MAX_STRING_LEN];
454
455 /* ensure we null terminate the source */
456 strlcpy(tmp_str, msg->str, SVC_MAX_STRING_LEN);
457 pr_info("[AVP]: %s", tmp_str);
458}
459
/*
 * Handle SVC_MODULE_CLOCK_SET: change a module clock's rate and reply with
 * the rate actually achieved.  For the AVP module itself the rate is set on
 * the shared sclk, and emc is raised to max when the AVP asks for its top
 * rate (otherwise to the platform-data rate, falling back to max).
 */
static void do_svc_module_clock_set(struct avp_svc_info *avp_svc,
				    struct svc_msg *_msg,
				    size_t len)
{
	struct svc_clock_ctrl *msg = (struct svc_clock_ctrl *)_msg;
	struct svc_clock_ctrl_response resp;
	struct avp_module *mod;
	struct avp_clk *aclk;
	int ret = 0;

	mod = find_avp_module(avp_svc, msg->module_id);
	if (!mod) {
		pr_err("avp_svc: unknown module clock requested: %d\n",
		       msg->module_id);
		resp.err = AVP_ERR_EINVAL;
		goto send_response;
	}

	mutex_lock(&avp_svc->clk_lock);
	if (msg->module_id == AVP_MODULE_ID_AVP) {
		/* check if max avp clock is asked and set max emc frequency */
		if (msg->clk_freq >= avp_svc->max_avp_rate) {
			clk_set_rate(avp_svc->emcclk, ULONG_MAX);
		}
		else {
			/* if no, set emc frequency as per platform data.
			 * if no platform data is send, set it to maximum */
			if (avp_svc->emc_rate)
				clk_set_rate(avp_svc->emcclk, avp_svc->emc_rate);
			else
				clk_set_rate(avp_svc->emcclk, ULONG_MAX);
		}
		ret = clk_set_rate(avp_svc->sclk, msg->clk_freq);
	} else {
		aclk = &avp_svc->clks[mod->clk_req];
		ret = clk_set_rate(aclk->clk, msg->clk_freq);
	}
	if (ret) {
		pr_err("avp_svc: Failed to set module (id = %d) frequency to %d Hz\n",
		       msg->module_id, msg->clk_freq);
		resp.err = AVP_ERR_EINVAL;
		resp.act_freq = 0;
		/* must drop the lock before jumping past the unlock below */
		mutex_unlock(&avp_svc->clk_lock);
		goto send_response;
	}

	/* report the rate the clock framework actually settled on; aclk is
	 * only valid (and only read) on the non-AVP branch */
	if (msg->module_id == AVP_MODULE_ID_AVP)
		resp.act_freq = clk_get_rate(avp_svc->sclk);
	else
		resp.act_freq = clk_get_rate(aclk->clk);

	mutex_unlock(&avp_svc->clk_lock);
	resp.err = 0;

send_response:
	resp.svc_id = SVC_MODULE_CLOCK_SET_RESPONSE;
	trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
		      sizeof(resp), GFP_KERNEL);
}
519
520static void do_svc_unsupported_msg(struct avp_svc_info *avp_svc,
521 u32 resp_svc_id)
522{
523 struct svc_common_resp resp;
524
525 resp.err = AVP_ERR_ENOTSUP;
526 resp.svc_id = resp_svc_id;
527 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
528 sizeof(resp), GFP_KERNEL);
529}
530
531static void do_svc_module_clock_get(struct avp_svc_info *avp_svc,
532 struct svc_msg *_msg,
533 size_t len)
534{
535 struct svc_clock_ctrl *msg = (struct svc_clock_ctrl *)_msg;
536 struct svc_clock_ctrl_response resp;
537 struct avp_module *mod;
538 struct avp_clk *aclk;
539
540 mod = find_avp_module(avp_svc, msg->module_id);
541 if (!mod) {
542 pr_err("avp_svc: unknown module get clock requested: %d\n",
543 msg->module_id);
544 resp.err = AVP_ERR_EINVAL;
545 goto send_response;
546 }
547
548 mutex_lock(&avp_svc->clk_lock);
549 aclk = &avp_svc->clks[mod->clk_req];
550 resp.act_freq = clk_get_rate(aclk->clk);
551 mutex_unlock(&avp_svc->clk_lock);
552 resp.err = 0;
553
554send_response:
555 resp.svc_id = SVC_MODULE_CLOCK_GET_RESPONSE;
556 trpc_send_msg(avp_svc->rpc_node, avp_svc->cpu_ep, &resp,
557 sizeof(resp), GFP_KERNEL);
558}
559
/*
 * Decode one message received from the AVP and invoke the matching
 * do_svc_* handler.  Returns 0 on a recognized message, or -ENOMSG for an
 * unknown svc_id (a "not supported" response is still sent back).
 */
static int dispatch_svc_message(struct avp_svc_info *avp_svc,
				struct svc_msg *msg,
				size_t len)
{
	int ret = 0;

	switch (msg->svc_id) {
	case SVC_NVMAP_CREATE:
		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_create\n", __func__);
		do_svc_nvmap_create(avp_svc, msg, len);
		break;
	case SVC_NVMAP_ALLOC:
		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_alloc\n", __func__);
		do_svc_nvmap_alloc(avp_svc, msg, len);
		break;
	case SVC_NVMAP_FREE:
		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_free\n", __func__);
		do_svc_nvmap_free(avp_svc, msg, len);
		break;
	case SVC_NVMAP_PIN:
		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_pin\n", __func__);
		do_svc_nvmap_pin(avp_svc, msg, len);
		break;
	case SVC_NVMAP_UNPIN:
		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_unpin\n", __func__);
		do_svc_nvmap_unpin(avp_svc, msg, len);
		break;
	case SVC_NVMAP_FROM_ID:
		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_from_id\n", __func__);
		do_svc_nvmap_from_id(avp_svc, msg, len);
		break;
	case SVC_NVMAP_GET_ADDRESS:
		DBG(AVP_DBG_TRACE_SVC, "%s: got nvmap_get_addr\n", __func__);
		do_svc_nvmap_get_addr(avp_svc, msg, len);
		break;
	case SVC_POWER_REGISTER:
		DBG(AVP_DBG_TRACE_SVC, "%s: got power_register\n", __func__);
		do_svc_pwr_register(avp_svc, msg, len);
		break;
	case SVC_POWER_UNREGISTER:
		DBG(AVP_DBG_TRACE_SVC, "%s: got power_unregister\n", __func__);
		/* nothing to do */
		break;
	case SVC_POWER_BUSY_HINT_MULTI:
		DBG(AVP_DBG_TRACE_SVC, "%s: got power_busy_hint_multi\n",
		    __func__);
		/* nothing to do */
		break;
	case SVC_POWER_BUSY_HINT:
	case SVC_POWER_STARVATION:
		DBG(AVP_DBG_TRACE_SVC, "%s: got power busy/starve hint\n",
		    __func__);
		/* hints are ignored, but the AVP expects an ack */
		do_svc_null_response(avp_svc, msg, len, SVC_POWER_RESPONSE);
		break;
	case SVC_POWER_MAXFREQ:
		DBG(AVP_DBG_TRACE_SVC, "%s: got power get_max_freq\n",
		    __func__);
		do_svc_pwr_max_freq(avp_svc, msg, len);
		break;
	case SVC_DFS_GETSTATE:
		DBG(AVP_DBG_TRACE_SVC, "%s: got dfs_get_state\n", __func__);
		do_svc_dfs_get_state(avp_svc, msg, len);
		break;
	case SVC_MODULE_RESET:
		DBG(AVP_DBG_TRACE_SVC, "%s: got module_reset\n", __func__);
		do_svc_module_reset(avp_svc, msg, len);
		break;
	case SVC_MODULE_CLOCK:
		DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock\n", __func__);
		do_svc_module_clock(avp_svc, msg, len);
		break;
	case SVC_DFS_GET_CLK_UTIL:
		DBG(AVP_DBG_TRACE_SVC, "%s: got get_clk_util\n", __func__);
		do_svc_dfs_get_clk_util(avp_svc, msg, len);
		break;
	case SVC_PRINTF:
		DBG(AVP_DBG_TRACE_SVC, "%s: got remote printf\n", __func__);
		do_svc_printf(avp_svc, msg, len);
		break;
	case SVC_AVP_WDT_RESET:
		/* informational only; no response defined */
		pr_err("avp_svc: AVP has been reset by watchdog\n");
		break;
	case SVC_MODULE_CLOCK_SET:
		DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock_set\n", __func__);
		do_svc_module_clock_set(avp_svc, msg, len);
		break;
	case SVC_MODULE_CLOCK_GET:
		DBG(AVP_DBG_TRACE_SVC, "%s: got module_clock_get\n", __func__);
		do_svc_module_clock_get(avp_svc, msg, len);
		break;
	default:
		pr_warning("avp_svc: Unsupported SVC call 0x%x\n", msg->svc_id);
		do_svc_unsupported_msg(avp_svc, msg->svc_id);
		ret = -ENOMSG;
		break;
	}

	return ret;
}
659
/*
 * Service worker thread: waits for the AVP to connect to our endpoint,
 * then receives and dispatches messages until kthread_stop() is requested.
 * Drops the extra endpoint reference taken in avp_svc_start() on exit.
 */
static int avp_svc_thread(void *data)
{
	struct avp_svc_info *avp_svc = data;
	u8 buf[TEGRA_RPC_MAX_MSG_LEN];
	struct svc_msg *msg = (struct svc_msg *)buf;
	int ret;
	long timeout;

	BUG_ON(!avp_svc->cpu_ep);

	/* block (no timeout) until the remote side connects */
	ret = trpc_wait_peer(avp_svc->cpu_ep, -1);
	if (ret) {
		pr_err("%s: no connection from AVP (%d)\n", __func__, ret);
		goto err;
	}

	pr_info("%s: got remote peer\n", __func__);

	while (!kthread_should_stop()) {
		DBG(AVP_DBG_TRACE_SVC, "%s: waiting for message\n", __func__);
		ret = trpc_recv_msg(avp_svc->rpc_node, avp_svc->cpu_ep, buf,
				    TEGRA_RPC_MAX_MSG_LEN, -1);
		DBG(AVP_DBG_TRACE_SVC, "%s: got message\n", __func__);

		if (ret == -ECONNRESET || ret == -ENOTCONN) {
			/* connection is gone; instead of spinning on recv,
			 * sleep in 100ms slices until kthread_stop() */
			wait_queue_head_t wq;
			init_waitqueue_head(&wq);

			pr_info("%s: AVP seems to be down; "
				"wait for kthread_stop\n", __func__);
			timeout = msecs_to_jiffies(100);
			timeout = wait_event_interruptible_timeout(wq,
					kthread_should_stop(), timeout);
			if (timeout == 0)
				pr_err("%s: timed out while waiting for "
				       "kthread_stop\n", __func__);
			continue;
		} else if (ret <= 0) {
			/* transient receive failure; keep the loop alive */
			pr_err("%s: couldn't receive msg (ret=%d)\n",
			       __func__, ret);
			continue;
		}
		/* ret is the number of bytes received */
		dispatch_svc_message(avp_svc, msg, ret);
	}

err:
	/* balance the trpc_get() done by avp_svc_start() for this thread */
	trpc_put(avp_svc->cpu_ep);
	pr_info("%s: exiting\n", __func__);
	return ret;
}
710
/*
 * Bring the service up: create the remote nvmap client, the RPC_CPU_PORT
 * endpoint, and the worker thread.  Returns 0 or a negative errno; on
 * failure all partially acquired resources are released.
 */
int avp_svc_start(struct avp_svc_info *avp_svc)
{
	struct trpc_endpoint *ep;
	int ret;

	avp_svc->nvmap_remote = nvmap_create_client(nvmap_dev, "avp_remote");
	if (IS_ERR(avp_svc->nvmap_remote)) {
		pr_err("%s: cannot create remote nvmap client\n", __func__);
		ret = PTR_ERR(avp_svc->nvmap_remote);
		goto err_nvmap_create_remote_client;
	}

	ep = trpc_create(avp_svc->rpc_node, "RPC_CPU_PORT", NULL, NULL);
	if (IS_ERR(ep)) {
		pr_err("%s: can't create RPC_CPU_PORT\n", __func__);
		ret = PTR_ERR(ep);
		goto err_cpu_port_create;
	}

	/* TODO: protect this */
	avp_svc->cpu_ep = ep;

	/* the service thread should get an extra reference for the port;
	 * avp_svc_thread() puts it when it exits */
	trpc_get(avp_svc->cpu_ep);
	avp_svc->svc_thread = kthread_run(avp_svc_thread, avp_svc,
					  "avp_svc_thread");
	if (IS_ERR_OR_NULL(avp_svc->svc_thread)) {
		avp_svc->svc_thread = NULL;
		pr_err("%s: can't create svc thread\n", __func__);
		ret = -ENOMEM;
		goto err_kthread;
	}
	return 0;

err_kthread:
	/* drop both the create reference and the thread's extra one */
	trpc_close(avp_svc->cpu_ep);
	trpc_put(avp_svc->cpu_ep);
	avp_svc->cpu_ep = NULL;
err_cpu_port_create:
	nvmap_client_put(avp_svc->nvmap_remote);
err_nvmap_create_remote_client:
	avp_svc->nvmap_remote = NULL;
	return ret;
}
755
/*
 * Tear the service down: close the endpoint, stop the worker thread, drop
 * the remote nvmap client, and force-release any clocks the remote side
 * left enabled.
 */
void avp_svc_stop(struct avp_svc_info *avp_svc)
{
	int ret;
	int i;

	trpc_close(avp_svc->cpu_ep);
	ret = kthread_stop(avp_svc->svc_thread);
	if (ret == -EINTR) {
		/* the thread never started, drop its extra reference */
		trpc_put(avp_svc->cpu_ep);
	}
	avp_svc->cpu_ep = NULL;

	nvmap_client_put(avp_svc->nvmap_remote);
	avp_svc->nvmap_remote = NULL;

	mutex_lock(&avp_svc->clk_lock);
	for (i = 0; i < NUM_CLK_REQUESTS; i++) {
		struct avp_clk *aclk = &avp_svc->clks[i];
		BUG_ON(aclk->refcnt < 0);
		if (aclk->refcnt > 0) {
			pr_info("%s: remote left clock '%s' on\n", __func__,
				aclk->mod->name);
			clk_disable(aclk->clk);
			/* sclk/emcclk was enabled once for every clock */
			clk_set_rate(avp_svc->sclk, 0);
			clk_disable(avp_svc->sclk);
			clk_set_rate(avp_svc->emcclk, 0);
			clk_disable(avp_svc->emcclk);
		}
		aclk->refcnt = 0;
	}
	mutex_unlock(&avp_svc->clk_lock);
}
790
/*
 * Allocate and initialize the service state: look up every module clock
 * from the table plus the shared sclk/emcclk, and record platform-data
 * rates.  Returns the new state or an ERR_PTR; on failure all clocks
 * acquired so far are released.
 */
struct avp_svc_info *avp_svc_init(struct platform_device *pdev,
				  struct trpc_node *rpc_node)
{
	struct tegra_avp_platform_data *pdata;
	struct avp_svc_info *avp_svc;
	int ret;
	int i;
	int cnt = 0;

	BUG_ON(!rpc_node);

	avp_svc = kzalloc(sizeof(struct avp_svc_info), GFP_KERNEL);
	if (!avp_svc) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	BUILD_BUG_ON(NUM_CLK_REQUESTS > BITS_PER_LONG);

	pdata = pdev->dev.platform_data;

	/* acquire one clk per table entry; entries without a name are
	 * unsupported module ids and are skipped */
	for (i = 0; i < NUM_AVP_MODULES; i++) {
		struct avp_module *mod = &avp_modules[i];
		struct clk *clk;
		if (!mod->name)
			continue;
		BUG_ON(mod->clk_req >= NUM_CLK_REQUESTS ||
		       cnt++ >= NUM_CLK_REQUESTS);

		clk = clk_get(&pdev->dev, mod->name);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			pr_err("avp_svc: Couldn't get required clocks\n");
			goto err_get_clks;
		}
		avp_svc->clks[mod->clk_req].clk = clk;
		avp_svc->clks[mod->clk_req].mod = mod;
		avp_svc->clks[mod->clk_req].refcnt = 0;
	}

	avp_svc->sclk = clk_get(&pdev->dev, "sclk");
	if (IS_ERR(avp_svc->sclk)) {
		pr_err("avp_svc: Couldn't get sclk for dvfs\n");
		ret = -ENOENT;
		goto err_get_clks;
	}
	/* remember sclk's top rate, then start with no rate constraint */
	avp_svc->max_avp_rate = clk_round_rate(avp_svc->sclk, ULONG_MAX);
	clk_set_rate(avp_svc->sclk, 0);

	avp_svc->emcclk = clk_get(&pdev->dev, "emc");
	if (IS_ERR(avp_svc->emcclk)) {
		pr_err("avp_svc: Couldn't get emcclk for dvfs\n");
		ret = -ENOENT;
		goto err_get_clks;
	}

	/*
	 * The emc is a shared clock, it will be set to the rate
	 * requested in platform data. Set the rate to ULONG_MAX
	 * if platform data is NULL.
	 */
	avp_svc->emc_rate = 0;
	if (pdata) {
		clk_set_rate(avp_svc->emcclk, pdata->emc_clk_rate);
		avp_svc->emc_rate = pdata->emc_clk_rate;
	}
	else {
		clk_set_rate(avp_svc->emcclk, ULONG_MAX);
	}

	avp_svc->rpc_node = rpc_node;

	mutex_init(&avp_svc->clk_lock);

	return avp_svc;

err_get_clks:
	/* release whatever subset of clocks was acquired before the error */
	for (i = 0; i < NUM_CLK_REQUESTS; i++)
		if (avp_svc->clks[i].clk)
			clk_put(avp_svc->clks[i].clk);
	if (!IS_ERR_OR_NULL(avp_svc->sclk))
		clk_put(avp_svc->sclk);
	if (!IS_ERR_OR_NULL(avp_svc->emcclk))
		clk_put(avp_svc->emcclk);
	kfree(avp_svc);
err_alloc:
	return ERR_PTR(ret);
}
879
880void avp_svc_destroy(struct avp_svc_info *avp_svc)
881{
882 int i;
883
884 for (i = 0; i < NUM_CLK_REQUESTS; i++)
885 clk_put(avp_svc->clks[i].clk);
886 clk_put(avp_svc->sclk);
887 clk_put(avp_svc->emcclk);
888
889 kfree(avp_svc);
890}