aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/staging/tidspbridge/rmgr/node.c
diff options
context:
space:
mode:
authorOmar Ramirez Luna <omar.ramirez@ti.com>2010-06-23 09:01:58 -0400
committerGreg Kroah-Hartman <gregkh@suse.de>2010-06-23 18:39:07 -0400
commit7d55524d3039e3e70756ee0a45f2fe59b7ed3fd2 (patch)
treee104d6a35c6fbae2743749dff63de14b2dea8778 /drivers/staging/tidspbridge/rmgr/node.c
parentc4ca3d5a4b02b484fdb1bab59489699b94998fad (diff)
staging: ti dspbridge: add resource manager
Add TI's DSP Bridge resource manager driver sources Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com> Signed-off-by: Kanigeri, Hari <h-kanigeri2@ti.com> Signed-off-by: Ameya Palande <ameya.palande@nokia.com> Signed-off-by: Guzman Lugo, Fernando <fernando.lugo@ti.com> Signed-off-by: Hebbar, Shivananda <x0hebbar@ti.com> Signed-off-by: Ramos Falcon, Ernesto <ernesto@ti.com> Signed-off-by: Felipe Contreras <felipe.contreras@gmail.com> Signed-off-by: Anna, Suman <s-anna@ti.com> Signed-off-by: Gupta, Ramesh <grgupta@ti.com> Signed-off-by: Gomez Castellanos, Ivan <ivan.gomez@ti.com> Signed-off-by: Andy Shevchenko <ext-andriy.shevchenko@nokia.com> Signed-off-by: Armando Uribe De Leon <x0095078@ti.com> Signed-off-by: Deepak Chitriki <deepak.chitriki@ti.com> Signed-off-by: Menon, Nishanth <nm@ti.com> Signed-off-by: Phil Carmody <ext-phil.2.carmody@nokia.com> Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/staging/tidspbridge/rmgr/node.c')
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c3231
1 files changed, 3231 insertions, 0 deletions
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
new file mode 100644
index 00000000000..3d2cf962fd6
--- /dev/null
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -0,0 +1,3231 @@
1/*
2 * node.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP/BIOS Bridge Node Manager.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19/* ----------------------------------- Host OS */
20#include <dspbridge/host_os.h>
21
22/* ----------------------------------- DSP/BIOS Bridge */
23#include <dspbridge/std.h>
24#include <dspbridge/dbdefs.h>
25
26/* ----------------------------------- Trace & Debug */
27#include <dspbridge/dbc.h>
28
29/* ----------------------------------- OS Adaptation Layer */
30#include <dspbridge/cfg.h>
31#include <dspbridge/list.h>
32#include <dspbridge/memdefs.h>
33#include <dspbridge/proc.h>
34#include <dspbridge/strm.h>
35#include <dspbridge/sync.h>
36#include <dspbridge/ntfy.h>
37
38/* ----------------------------------- Platform Manager */
39#include <dspbridge/cmm.h>
40#include <dspbridge/cod.h>
41#include <dspbridge/dev.h>
42#include <dspbridge/msg.h>
43
44/* ----------------------------------- Resource Manager */
45#include <dspbridge/dbdcd.h>
46#include <dspbridge/disp.h>
47#include <dspbridge/rms_sh.h>
48
49/* ----------------------------------- Link Driver */
50#include <dspbridge/dspdefs.h>
51#include <dspbridge/dspioctl.h>
52
53/* ----------------------------------- Others */
54#include <dspbridge/gb.h>
55#include <dspbridge/uuidutil.h>
56
57/* ----------------------------------- This */
58#include <dspbridge/nodepriv.h>
59#include <dspbridge/node.h>
60#include <dspbridge/dmm.h>
61
62/* Static/Dynamic Loader includes */
63#include <dspbridge/dbll.h>
64#include <dspbridge/nldr.h>
65
66#include <dspbridge/drv.h>
67#include <dspbridge/drvdefs.h>
68#include <dspbridge/resourcecleanup.h>
69#include <_tiomap.h>
70
/* Device-name prefixes used when synthesizing stream device names. */
#define HOSTPREFIX	  "/host"
#define PIPEPREFIX	  "/dbpipe"

/* Max stream counts for a node, read from its DCD node database props. */
#define MAX_INPUTS(h)  \
	((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
#define MAX_OUTPUTS(h) \
	((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)

/* Accessors for a node's cached priority and state. */
#define NODE_GET_PRIORITY(h) ((h)->prio)
#define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
#define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)

#define MAXPIPES	100	/* Max # of /pipe connections (CSL limit) */
#define MAXDEVSUFFIXLEN 2	/* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */

/* sizeof(prefix) already includes the NUL, so these cover "<prefix><id>". */
#define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
#define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)

#define MAXDEVNAMELEN	32	/* dsp_ndbprops.ac_name size */
/* Node lifecycle phase identifiers used when resolving phase fn addrs. */
#define CREATEPHASE	1
#define EXECUTEPHASE	2
#define DELETEPHASE	3

/* Define default STRM parameters */
/*
 *  TBD: Put in header file, make global DSP_STRMATTRS with defaults,
 *  or make defaults configurable.
 */
#define DEFAULTBUFSIZE		32
#define DEFAULTNBUFS		2
#define DEFAULTSEGID		0
#define DEFAULTALIGNMENT	0
#define DEFAULTTIMEOUT		10000

/*
 * Indices into node_mgr.ul_fxn_addrs[] for the RMS (DSP-side Resource
 * Manager Server) entry points; the order is part of the RMS protocol.
 */
#define RMSQUERYSERVER		0
#define RMSCONFIGURESERVER	1
#define RMSCREATENODE		2
#define RMSEXECUTENODE		3
#define RMSDELETENODE		4
#define RMSCHANGENODEPRIORITY	5
#define RMSREADMEMORY		6
#define RMSWRITEMEMORY		7
#define RMSCOPY			8
#define MAXTIMEOUT		2000

#define NUMRMSFXNS		9	/* number of RMS fn indices above */

#define PWR_TIMEOUT		500	/* default PWR timeout in msec */

#define STACKSEGLABEL "L1DSRAM_HEAP"	/* Label for DSP Stack Segment Addr */
/*
 *  ======== node_mgr ========
 *  Per-device manager of all nodes allocated on one DSP: bookkeeping lists,
 *  channel/pipe allocation bitmaps, RMS entry addresses and loader hooks.
 *  One instance per dev_object; guarded by node_mgr_lock.
 */
struct node_mgr {
	struct dev_object *hdev_obj;	/* Device object */
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	struct dcd_manager *hdcd_mgr;	/* Proc/Node data manager */
	struct disp_object *disp_obj;	/* Node dispatcher */
	struct lst_list *node_list;	/* List of all allocated nodes */
	u32 num_nodes;		/* Number of nodes in node_list */
	u32 num_created;	/* Number of nodes *created* on DSP */
	struct gb_t_map *pipe_map;	/* Pipe connection bit map */
	struct gb_t_map *pipe_done_map;	/* Pipes that are half free */
	struct gb_t_map *chnl_map;	/* Channel allocation bit map */
	struct gb_t_map *dma_chnl_map;	/* DMA Channel allocation bit map */
	struct gb_t_map *zc_chnl_map;	/* Zero-Copy Channel alloc bit map */
	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
	struct mutex node_mgr_lock;	/* For critical sections */
	u32 ul_fxn_addrs[NUMRMSFXNS];	/* RMS function addresses */
	struct msg_mgr *msg_mgr_obj;

	/* Processor properties needed by Node Dispatcher */
	u32 ul_num_chnls;	/* Total number of channels */
	u32 ul_chnl_offset;	/* Offset of chnl ids rsvd for RMS */
	u32 ul_chnl_buf_size;	/* Buffer size for data to RMS */
	int proc_family;	/* eg, 5000 */
	int proc_type;		/* eg, 5510 */
	u32 udsp_word_size;	/* Size of DSP word on host bytes */
	u32 udsp_data_mau_size;	/* Size of DSP data MAU */
	u32 udsp_mau_size;	/* Size of MAU */
	s32 min_pri;		/* Minimum runtime priority for node */
	s32 max_pri;		/* Maximum runtime priority for node */

	struct strm_mgr *strm_mgr_obj;	/* STRM manager */

	/* Loader properties */
	struct nldr_object *nldr_obj;	/* Handle to loader */
	struct node_ldr_fxns nldr_fxns;	/* Handle to loader functions */
	bool loader_init;	/* Loader Init function succeeded? */
};
163
/*
 *  ======== connecttype ========
 *  Kind of endpoint a node stream is attached to (see node_connect()).
 */
enum connecttype {
	NOTCONNECTED = 0,	/* Stream not yet connected */
	NODECONNECT,		/* Node <-> node, via a DSP-side pipe */
	HOSTCONNECT,		/* Node <-> GPP, via a host channel */
	DEVICECONNECT,		/* Node <-> device node */
};
173
/*
 *  ======== stream_chnl ========
 *  One end of a stream connection: how it is connected and to what id.
 */
struct stream_chnl {
	enum connecttype type;	/* Type of stream connection */
	u32 dev_id;		/* pipe or channel id */
};
181
/*
 *  ======== node_object ========
 *  Per-node state tracked on the GPP side.  Lives on the owning
 *  node_mgr's node_list (list_elem must remain the first member, since
 *  the list code casts node_object* <-> list_head*).
 */
struct node_object {
	struct list_head list_elem;
	struct node_mgr *hnode_mgr;	/* The manager of this node */
	struct proc_object *hprocessor;	/* Back pointer to processor */
	struct dsp_uuid node_uuid;	/* Node's ID */
	s32 prio;		/* Node's current priority */
	u32 utimeout;		/* Timeout for blocking NODE calls */
	u32 heap_size;		/* Heap size, in bytes */
	u32 udsp_heap_virt_addr;	/* Heap virtual address (DSP side) */
	u32 ugpp_heap_virt_addr;	/* Heap virtual address (GPP side) */
	enum node_type ntype;	/* Type of node: message, task, etc */
	enum node_state node_state;	/* NODE_ALLOCATED, NODE_CREATED, ... */
	u32 num_inputs;		/* Current number of inputs */
	u32 num_outputs;	/* Current number of outputs */
	u32 max_input_index;	/* Current max input stream index */
	u32 max_output_index;	/* Current max output stream index */
	struct stream_chnl *inputs;	/* Node's input streams */
	struct stream_chnl *outputs;	/* Node's output streams */
	struct node_createargs create_args;	/* Args for node create func */
	nodeenv node_env;	/* Environment returned by RMS */
	struct dcd_genericobj dcd_props;	/* Node properties from DCD */
	struct dsp_cbdata *pargs;	/* Optional args to pass to node */
	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
	char *pstr_dev_name;	/* device name, if device node */
	struct sync_object *sync_done;	/* Synchronize node_terminate */
	s32 exit_status;	/* execute function return status */

	/* Information needed for node_get_attr() */
	void *device_owner;	/* If dev node, task that owns it */
	u32 num_gpp_inputs;	/* Current # of from GPP streams */
	u32 num_gpp_outputs;	/* Current # of to GPP streams */
	/* Current stream connections */
	struct dsp_streamconnect *stream_connect;

	/* Message queue */
	struct msg_queue *msg_queue_obj;

	/* These fields used for SM messaging */
	struct cmm_xlatorobject *xlator;	/* Node's SM addr translator */

	/* Handle to pass to dynamic loader */
	struct nldr_nodeobject *nldr_node_obj;
	bool loaded;		/* Code is (dynamically) loaded */
	bool phase_split;	/* Phases split in many libs or ovly */

};
231
232/* Default buffer attributes */
233static struct dsp_bufferattr node_dfltbufattrs = {
234 0, /* cb_struct */
235 1, /* segment_id */
236 0, /* buf_alignment */
237};
238
/* Forward declarations of file-local helpers. */
static void delete_node(struct node_object *hnode,
			struct process_context *pr_ctxt);
static void delete_node_mgr(struct node_mgr *hnode_mgr);
static void fill_stream_connect(struct node_object *hNode1,
				struct node_object *hNode2, u32 uStream1,
				u32 uStream2);
static void fill_stream_def(struct node_object *hnode,
			    struct node_strmdef *pstrm_def,
			    struct dsp_strmattr *pattrs);
static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
static int get_fxn_address(struct node_object *hnode, u32 * pulFxnAddr,
			   u32 uPhase);
static int get_node_props(struct dcd_manager *hdcd_mgr,
			  struct node_object *hnode,
			  CONST struct dsp_uuid *pNodeId,
			  struct dcd_genericobj *pdcdProps);
static int get_proc_props(struct node_mgr *hnode_mgr,
			  struct dev_object *hdev_obj);
static int get_rms_fxns(struct node_mgr *hnode_mgr);
static u32 ovly(void *priv_ref, u32 ulDspRunAddr, u32 ulDspLoadAddr,
		u32 ul_num_bytes, u32 nMemSpace);
static u32 mem_write(void *priv_ref, u32 ulDspAddr, void *pbuf,
		     u32 ul_num_bytes, u32 nMemSpace);

static u32 refs;		/* module reference count */

/*
 * Dynamic loader function table handed to the node manager.
 * NOTE: positional initializer -- the entry order must match the member
 * order of struct node_ldr_fxns exactly (member names not visible here,
 * so this cannot safely be converted to designated initializers).
 */
static struct node_ldr_fxns nldr_fxns = {
	nldr_allocate,
	nldr_create,
	nldr_delete,
	nldr_exit,
	nldr_get_fxn_addr,
	nldr_init,
	nldr_load,
	nldr_unload,
};
276
277enum node_state node_get_state(void *hnode)
278{
279 struct node_object *pnode = (struct node_object *)hnode;
280 if (!pnode)
281 return -1;
282 else
283 return pnode->node_state;
284}
285
/*
 *  ======== node_allocate ========
 *  Purpose:
 *      Allocate GPP resources to manage a node on the DSP.
 *  Parameters:
 *      hprocessor: Processor the node will run on.
 *      pNodeId:    UUID identifying the node in the DCD database.
 *      pargs:      Optional create-phase arguments copied for the node.
 *      attr_in:    Optional attributes (priority/timeout overrides,
 *                  user-allocated heap).
 *      ph_node:    Out: the new node handle on success, NULL on failure.
 *      pr_ctxt:    Process context for resource tracking.
 *  Returns:
 *      0 on success, negative errno-style code on failure.
 */
int node_allocate(struct proc_object *hprocessor,
		  IN CONST struct dsp_uuid *pNodeId,
		  OPTIONAL IN CONST struct dsp_cbdata *pargs,
		  OPTIONAL IN CONST struct dsp_nodeattrin *attr_in,
		  OUT struct node_object **ph_node,
		  struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct dev_object *hdev_obj;
	struct node_object *pnode = NULL;
	enum node_type node_type = NODE_TASK;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *ptask_args;
	u32 num_streams;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	struct cmm_object *hcmm_mgr = NULL;	/* Shared memory manager hndl */
	u32 proc_id;
	u32 pul_value;
	u32 dynext_base;
	u32 off_set = 0;
	u32 ul_stack_seg_addr, ul_stack_seg_val;
	u32 ul_gpp_mem_base;
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 mapped_addr = 0;
	u32 map_attrs = 0x0;
	struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif

	void *node_res;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hprocessor != NULL);
	DBC_REQUIRE(ph_node != NULL);
	DBC_REQUIRE(pNodeId != NULL);

	*ph_node = NULL;

	/* NOTE(review): status is not checked before proc_id is compared;
	 * if proc_get_processor_id() fails, proc_id may be read
	 * uninitialized -- confirm the callee always writes it. */
	status = proc_get_processor_id(hprocessor, &proc_id);

	if (proc_id != DSP_UNIT)
		goto func_end;

	status = proc_get_dev_object(hprocessor, &hdev_obj);
	if (DSP_SUCCEEDED(status)) {
		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
		if (hnode_mgr == NULL)
			status = -EPERM;

	}

	if (DSP_FAILED(status))
		goto func_end;

	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (DSP_FAILED(status))
		goto func_end;
	/* If processor is in error state then don't attempt
	   to send the message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	/* Assuming that 0 is not a valid function address */
	if (hnode_mgr->ul_fxn_addrs[0] == 0) {
		/* No RMS on target - we currently can't handle this */
		pr_err("%s: Failed, no RMS in base image\n", __func__);
		status = -EPERM;
	} else {
		/* Validate attr_in fields, if non-NULL */
		if (attr_in) {
			/* Check if attr_in->prio is within range */
			if (attr_in->prio < hnode_mgr->min_pri ||
			    attr_in->prio > hnode_mgr->max_pri)
				status = -EDOM;
		}
	}
	/* Allocate node object and fill in */
	if (DSP_FAILED(status))
		goto func_end;

	pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
	if (pnode == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	pnode->hnode_mgr = hnode_mgr;
	/* This critical section protects get_node_props */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Get dsp_ndbprops from node database */
	status = get_node_props(hnode_mgr->hdcd_mgr, pnode, pNodeId,
				&(pnode->dcd_props));
	if (DSP_FAILED(status))
		goto func_cont;

	pnode->node_uuid = *pNodeId;
	pnode->hprocessor = hprocessor;
	pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
	pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout;
	pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;

	/* Currently only C64 DSP builds support Node Dynamic * heaps */
	/* Allocate memory for node heap */
	pnode->create_args.asa.task_arg_obj.heap_size = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0;
	pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0;
	if (!attr_in)
		goto func_cont;

	/* Check if we have a user allocated node heap */
	if (!(attr_in->pgpp_virt_addr))
		goto func_cont;

	/* check for page aligned Heap size */
	if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
		pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
		       __func__, attr_in->heap_size);
		status = -EINVAL;
	} else {
		pnode->create_args.asa.task_arg_obj.heap_size =
		    attr_in->heap_size;
		pnode->create_args.asa.task_arg_obj.ugpp_heap_addr =
		    (u32) attr_in->pgpp_virt_addr;
	}
	if (DSP_FAILED(status))
		goto func_cont;

	/* Reserve one extra page beyond the heap for guard/alignment. */
	status = proc_reserve_memory(hprocessor,
				     pnode->create_args.asa.task_arg_obj.
				     heap_size + PAGE_SIZE,
				     (void **)&(pnode->create_args.asa.
						task_arg_obj.udsp_heap_res_addr),
				     pr_ctxt);
	if (DSP_FAILED(status)) {
		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
		       __func__, status);
		goto func_cont;
	}
#ifdef DSP_DMM_DEBUG
	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = DSP_EHANDLE;
		goto func_cont;
	}

	dmm_mem_map_dump(dmm_mgr);
#endif

	map_attrs |= DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPVIRTUALADDR;
	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
			  pnode->create_args.asa.task_arg_obj.heap_size,
			  (void *)pnode->create_args.asa.task_arg_obj.
			  udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
			  pr_ctxt);
	if (DSP_FAILED(status))
		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
		       __func__, status);
	else
		pnode->create_args.asa.task_arg_obj.udsp_heap_addr =
		    (u32) mapped_addr;

func_cont:
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (attr_in != NULL) {
		/* Overrides of NBD properties */
		pnode->utimeout = attr_in->utimeout;
		pnode->prio = attr_in->prio;
	}
	/* Create object to manage notifications */
	if (DSP_SUCCEEDED(status)) {
		pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
					  GFP_KERNEL);
		if (pnode->ntfy_obj)
			ntfy_init(pnode->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (DSP_SUCCEEDED(status)) {
		node_type = node_get_type(pnode);
		/*  Allocate dsp_streamconnect array for device, task, and
		 *  dais socket nodes. */
		if (node_type != NODE_MESSAGE) {
			num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
			pnode->stream_connect = kzalloc(num_streams *
					sizeof(struct dsp_streamconnect),
					GFP_KERNEL);
			if (num_streams > 0 && pnode->stream_connect == NULL)
				status = -ENOMEM;

		}
		if (DSP_SUCCEEDED(status) && (node_type == NODE_TASK ||
					      node_type == NODE_DAISSOCKET)) {
			/* Allocate arrays for maintainig stream connections */
			pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			ptask_args = &(pnode->create_args.asa.task_arg_obj);
			ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
						       ptask_args->strm_in_def
						       == NULL))
			    || (MAX_OUTPUTS(pnode) > 0
				&& (pnode->outputs == NULL
				    || ptask_args->strm_out_def == NULL)))
				status = -ENOMEM;
		}
	}
	if (DSP_SUCCEEDED(status) && (node_type != NODE_DEVICE)) {
		/* Create an event that will be posted when RMS_EXIT is
		 * received. */
		pnode->sync_done = kzalloc(sizeof(struct sync_object),
					   GFP_KERNEL);
		if (pnode->sync_done)
			sync_init_event(pnode->sync_done);
		else
			status = -ENOMEM;

		if (DSP_SUCCEEDED(status)) {
			/*Get the shared mem mgr for this nodes dev object */
			status = cmm_get_handle(hprocessor, &hcmm_mgr);
			if (DSP_SUCCEEDED(status)) {
				/* Allocate a SM addr translator for this node
				 * w/ deflt attr */
				status = cmm_xlator_create(&pnode->xlator,
							   hcmm_mgr, NULL);
			}
		}
		if (DSP_SUCCEEDED(status)) {
			/* Fill in message args */
			if ((pargs != NULL) && (pargs->cb_data > 0)) {
				pmsg_args =
				    &(pnode->create_args.asa.node_msg_args);
				pmsg_args->pdata = kzalloc(pargs->cb_data,
							   GFP_KERNEL);
				if (pmsg_args->pdata == NULL) {
					status = -ENOMEM;
				} else {
					pmsg_args->arg_length = pargs->cb_data;
					memcpy(pmsg_args->pdata,
					       pargs->node_data,
					       pargs->cb_data);
				}
			}
		}
	}

	if (DSP_SUCCEEDED(status) && node_type != NODE_DEVICE) {
		/* Create a message queue for this node */
		intf_fxns = hnode_mgr->intf_fxns;
		status =
		    (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj,
							&pnode->msg_queue_obj,
							0,
							pnode->create_args.asa.
							node_msg_args.max_msgs,
							pnode);
	}

	if (DSP_SUCCEEDED(status)) {
		/* Create object for dynamic loading */

		status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
							   (void *)pnode,
							   &pnode->dcd_props.
							   obj_data.node_obj,
							   &pnode->
							   nldr_node_obj,
							   &pnode->phase_split);
	}

	/* Compare value read from Node Properties and check if it is same as
	 * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
	 * GPP Address, Read the value in that address and override the
	 * stack_seg value in task args */
	if (DSP_SUCCEEDED(status) &&
	    (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
	    stack_seg_name != NULL) {
		if (strcmp((char *)
			   pnode->dcd_props.obj_data.node_obj.ndb_props.
			   stack_seg_name, STACKSEGLABEL) == 0) {
			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
					     &dynext_base);
			if (DSP_FAILED(status))
				pr_err("%s: Failed to get addr for DYNEXT_BEG"
				       " status = 0x%x\n", __func__, status);

			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj,
					     "L1DSRAM_HEAP", &pul_value);

			if (DSP_FAILED(status))
				pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
				       " status = 0x%x\n", __func__, status);

			host_res = pbridge_context->resources;
			if (!host_res)
				status = -EPERM;

			/* NOTE(review): this error path jumps straight to
			 * func_end without calling delete_node(), so pnode
			 * and everything allocated above leaks -- verify
			 * against later upstream fixes. */
			if (DSP_FAILED(status)) {
				pr_err("%s: Failed to get host resource, status"
				       " = 0x%x\n", __func__, status);
				goto func_end;
			}

			ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
			off_set = pul_value - dynext_base;
			ul_stack_seg_addr = ul_gpp_mem_base + off_set;
			ul_stack_seg_val = (u32) *((reg_uword32 *)
						   ((u32)
						    (ul_stack_seg_addr)));

			dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
				" 0x%x\n", __func__, ul_stack_seg_val,
				ul_stack_seg_addr);

			pnode->create_args.asa.task_arg_obj.stack_seg =
			    ul_stack_seg_val;

		}
	}

	if (DSP_SUCCEEDED(status)) {
		/* Add the node to the node manager's list of allocated
		 * nodes. */
		lst_init_elem((struct list_head *)pnode);
		NODE_SET_STATE(pnode, NODE_ALLOCATED);

		mutex_lock(&hnode_mgr->node_mgr_lock);

		lst_put_tail(hnode_mgr->node_list, (struct list_head *) pnode);
		++(hnode_mgr->num_nodes);

		/* Exit critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);

		/* Preset this to assume phases are split
		 * (for overlay and dll) */
		pnode->phase_split = true;

		/* NOTE(review): this inner DSP_SUCCEEDED() check is
		 * redundant -- the enclosing branch already guarantees
		 * success here. */
		if (DSP_SUCCEEDED(status))
			*ph_node = pnode;

		/* Notify all clients registered for DSP_NODESTATECHANGE. */
		proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
	} else {
		/* Cleanup */
		if (pnode)
			delete_node(pnode, pr_ctxt);

	}

	if (DSP_SUCCEEDED(status)) {
		drv_insert_node_res_element(*ph_node, &node_res, pr_ctxt);
		drv_proc_node_update_heap_status(node_res, true);
		drv_proc_node_update_status(node_res, true);
	}
	DBC_ENSURE((DSP_FAILED(status) && (*ph_node == NULL)) ||
		   (DSP_SUCCEEDED(status) && *ph_node));
func_end:
	dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
		"ph_node: %p status: 0x%x\n", __func__, hprocessor,
		pNodeId, pargs, attr_in, ph_node, status);
	return status;
}
681
/*
 *  ======== node_alloc_msg_buf ========
 *  Purpose:
 *      Allocates buffer for zero copy messaging.
 *  Parameters:
 *      hnode:   Node that owns the shared-memory translator.
 *      usize:   Requested buffer size (must be > 0).
 *      pattr:   Optional buffer attributes; defaults used when NULL.
 *      pbuffer: Out: allocated (or translated) buffer address.
 *  Returns:
 *      0 on success; -EFAULT/-EPERM/-EBADR/-ENOMEM on failure.
 */
DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
			 OPTIONAL IN OUT struct dsp_bufferattr *pattr,
			 OUT u8 **pbuffer)
{
	struct node_object *pnode = (struct node_object *)hnode;
	int status = 0;
	bool va_flag = false;
	bool set_info;
	u32 proc_id;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pbuffer != NULL);

	DBC_REQUIRE(usize > 0);

	/* Device nodes have no message buffers. */
	if (!pnode)
		status = -EFAULT;
	else if (node_get_type(pnode) == NODE_DEVICE)
		status = -EPERM;

	if (DSP_FAILED(status))
		goto func_end;

	if (pattr == NULL)
		pattr = &node_dfltbufattrs;	/* set defaults */

	/* NOTE(review): status from proc_get_processor_id() is not checked
	 * before proc_id is compared, and on the != DSP_UNIT path the
	 * current (possibly 0) status is returned -- confirm intended. */
	status = proc_get_processor_id(pnode->hprocessor, &proc_id);
	if (proc_id != DSP_UNIT) {
		DBC_ASSERT(NULL);
		goto func_end;
	}
	/*  If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
	 *  virt address, so set this info in this node's translator
	 *  object for future ref. If MEM_GETVIRTUALSEGID then retrieve
	 *  virtual address from node's translator. */
	if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
	    (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
		va_flag = true;
		set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
		    true : false;
		/* Clear mask bits */
		pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
		/* Set/get this node's translators virtual address base/size */
		status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
					 pattr->segment_id, set_info);
	}
	if (DSP_SUCCEEDED(status) && (!va_flag)) {
		if (pattr->segment_id != 1) {
			/* Node supports single SM segment only. */
			status = -EBADR;
		}
		/*  Arbitrary SM buffer alignment not supported for host side
		 *  allocs, but guaranteed for the following alignment
		 *  values. */
		switch (pattr->buf_alignment) {
		case 0:
		case 1:
		case 2:
		case 4:
			break;
		default:
			/* alignment value not supported */
			status = -EPERM;
			break;
		}
		if (DSP_SUCCEEDED(status)) {
			/* allocate physical buffer from seg_id in node's
			 * translator */
			(void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
						   usize);
			if (*pbuffer == NULL) {
				pr_err("%s: error - Out of shared memory\n",
				       __func__);
				status = -ENOMEM;
			}
		}
	}
func_end:
	return status;
}
767
768/*
769 * ======== node_change_priority ========
770 * Purpose:
771 * Change the priority of a node in the allocated state, or that is
772 * currently running or paused on the target.
773 */
774int node_change_priority(struct node_object *hnode, s32 prio)
775{
776 struct node_object *pnode = (struct node_object *)hnode;
777 struct node_mgr *hnode_mgr = NULL;
778 enum node_type node_type;
779 enum node_state state;
780 int status = 0;
781 u32 proc_id;
782
783 DBC_REQUIRE(refs > 0);
784
785 if (!hnode || !hnode->hnode_mgr) {
786 status = -EFAULT;
787 } else {
788 hnode_mgr = hnode->hnode_mgr;
789 node_type = node_get_type(hnode);
790 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
791 status = -EPERM;
792 else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
793 status = -EDOM;
794 }
795 if (DSP_FAILED(status))
796 goto func_end;
797
798 /* Enter critical section */
799 mutex_lock(&hnode_mgr->node_mgr_lock);
800
801 state = node_get_state(hnode);
802 if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
803 NODE_SET_PRIORITY(hnode, prio);
804 } else {
805 if (state != NODE_RUNNING) {
806 status = -EBADR;
807 goto func_cont;
808 }
809 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
810 if (proc_id == DSP_UNIT) {
811 status =
812 disp_node_change_priority(hnode_mgr->disp_obj,
813 hnode,
814 hnode_mgr->ul_fxn_addrs
815 [RMSCHANGENODEPRIORITY],
816 hnode->node_env, prio);
817 }
818 if (DSP_SUCCEEDED(status))
819 NODE_SET_PRIORITY(hnode, prio);
820
821 }
822func_cont:
823 /* Leave critical section */
824 mutex_unlock(&hnode_mgr->node_mgr_lock);
825func_end:
826 return status;
827}
828
829/*
830 * ======== node_connect ========
831 * Purpose:
832 * Connect two nodes on the DSP, or a node on the DSP to the GPP.
833 */
834int node_connect(struct node_object *hNode1, u32 uStream1,
835 struct node_object *hNode2,
836 u32 uStream2, OPTIONAL IN struct dsp_strmattr *pattrs,
837 OPTIONAL IN struct dsp_cbdata *conn_param)
838{
839 struct node_mgr *hnode_mgr;
840 char *pstr_dev_name = NULL;
841 enum node_type node1_type = NODE_TASK;
842 enum node_type node2_type = NODE_TASK;
843 struct node_strmdef *pstrm_def;
844 struct node_strmdef *input = NULL;
845 struct node_strmdef *output = NULL;
846 struct node_object *dev_node_obj;
847 struct node_object *hnode;
848 struct stream_chnl *pstream;
849 u32 pipe_id = GB_NOBITS;
850 u32 chnl_id = GB_NOBITS;
851 s8 chnl_mode;
852 u32 dw_length;
853 int status = 0;
854 DBC_REQUIRE(refs > 0);
855
856 if ((hNode1 != (struct node_object *)DSP_HGPPNODE && !hNode1) ||
857 (hNode2 != (struct node_object *)DSP_HGPPNODE && !hNode2))
858 status = -EFAULT;
859
860 if (DSP_SUCCEEDED(status)) {
861 /* The two nodes must be on the same processor */
862 if (hNode1 != (struct node_object *)DSP_HGPPNODE &&
863 hNode2 != (struct node_object *)DSP_HGPPNODE &&
864 hNode1->hnode_mgr != hNode2->hnode_mgr)
865 status = -EPERM;
866 /* Cannot connect a node to itself */
867 if (hNode1 == hNode2)
868 status = -EPERM;
869
870 }
871 if (DSP_SUCCEEDED(status)) {
872 /* node_get_type() will return NODE_GPP if hnode =
873 * DSP_HGPPNODE. */
874 node1_type = node_get_type(hNode1);
875 node2_type = node_get_type(hNode2);
876 /* Check stream indices ranges */
877 if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
878 uStream1 >= MAX_OUTPUTS(hNode1)) || (node2_type != NODE_GPP
879 && node2_type !=
880 NODE_DEVICE
881 && uStream2 >=
882 MAX_INPUTS(hNode2)))
883 status = -EINVAL;
884 }
885 if (DSP_SUCCEEDED(status)) {
886 /*
887 * Only the following types of connections are allowed:
888 * task/dais socket < == > task/dais socket
889 * task/dais socket < == > device
890 * task/dais socket < == > GPP
891 *
892 * ie, no message nodes, and at least one task or dais
893 * socket node.
894 */
895 if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
896 (node1_type != NODE_TASK && node1_type != NODE_DAISSOCKET &&
897 node2_type != NODE_TASK && node2_type != NODE_DAISSOCKET))
898 status = -EPERM;
899 }
900 /*
901 * Check stream mode. Default is STRMMODE_PROCCOPY.
902 */
903 if (DSP_SUCCEEDED(status) && pattrs) {
904 if (pattrs->strm_mode != STRMMODE_PROCCOPY)
905 status = -EPERM; /* illegal stream mode */
906
907 }
908 if (DSP_FAILED(status))
909 goto func_end;
910
911 if (node1_type != NODE_GPP) {
912 hnode_mgr = hNode1->hnode_mgr;
913 } else {
914 DBC_ASSERT(hNode2 != (struct node_object *)DSP_HGPPNODE);
915 hnode_mgr = hNode2->hnode_mgr;
916 }
917 /* Enter critical section */
918 mutex_lock(&hnode_mgr->node_mgr_lock);
919
920 /* Nodes must be in the allocated state */
921 if (node1_type != NODE_GPP && node_get_state(hNode1) != NODE_ALLOCATED)
922 status = -EBADR;
923
924 if (node2_type != NODE_GPP && node_get_state(hNode2) != NODE_ALLOCATED)
925 status = -EBADR;
926
927 if (DSP_SUCCEEDED(status)) {
928 /* Check that stream indices for task and dais socket nodes
929 * are not already be used. (Device nodes checked later) */
930 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
931 output =
932 &(hNode1->create_args.asa.
933 task_arg_obj.strm_out_def[uStream1]);
934 if (output->sz_device != NULL)
935 status = -EISCONN;
936
937 }
938 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
939 input =
940 &(hNode2->create_args.asa.
941 task_arg_obj.strm_in_def[uStream2]);
942 if (input->sz_device != NULL)
943 status = -EISCONN;
944
945 }
946 }
947 /* Connecting two task nodes? */
948 if (DSP_SUCCEEDED(status) && ((node1_type == NODE_TASK ||
949 node1_type == NODE_DAISSOCKET)
950 && (node2_type == NODE_TASK
951 || node2_type == NODE_DAISSOCKET))) {
952 /* Find available pipe */
953 pipe_id = gb_findandset(hnode_mgr->pipe_map);
954 if (pipe_id == GB_NOBITS) {
955 status = -ECONNREFUSED;
956 } else {
957 hNode1->outputs[uStream1].type = NODECONNECT;
958 hNode2->inputs[uStream2].type = NODECONNECT;
959 hNode1->outputs[uStream1].dev_id = pipe_id;
960 hNode2->inputs[uStream2].dev_id = pipe_id;
961 output->sz_device = kzalloc(PIPENAMELEN + 1,
962 GFP_KERNEL);
963 input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
964 if (output->sz_device == NULL ||
965 input->sz_device == NULL) {
966 /* Undo the connection */
967 kfree(output->sz_device);
968
969 kfree(input->sz_device);
970
971 output->sz_device = NULL;
972 input->sz_device = NULL;
973 gb_clear(hnode_mgr->pipe_map, pipe_id);
974 status = -ENOMEM;
975 } else {
976 /* Copy "/dbpipe<pipId>" name to device names */
977 sprintf(output->sz_device, "%s%d",
978 PIPEPREFIX, pipe_id);
979 strcpy(input->sz_device, output->sz_device);
980 }
981 }
982 }
983 /* Connecting task node to host? */
984 if (DSP_SUCCEEDED(status) && (node1_type == NODE_GPP ||
985 node2_type == NODE_GPP)) {
986 if (node1_type == NODE_GPP) {
987 chnl_mode = CHNL_MODETODSP;
988 } else {
989 DBC_ASSERT(node2_type == NODE_GPP);
990 chnl_mode = CHNL_MODEFROMDSP;
991 }
992 /* Reserve a channel id. We need to put the name "/host<id>"
993 * in the node's create_args, but the host
994 * side channel will not be opened until DSPStream_Open is
995 * called for this node. */
996 if (pattrs) {
997 if (pattrs->strm_mode == STRMMODE_RDMA) {
998 chnl_id =
999 gb_findandset(hnode_mgr->dma_chnl_map);
1000 /* dma chans are 2nd transport chnl set
1001 * ids(e.g. 16-31) */
1002 (chnl_id != GB_NOBITS) ?
1003 (chnl_id =
1004 chnl_id +
1005 hnode_mgr->ul_num_chnls) : chnl_id;
1006 } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
1007 chnl_id = gb_findandset(hnode_mgr->zc_chnl_map);
1008 /* zero-copy chans are 3nd transport set
1009 * (e.g. 32-47) */
1010 (chnl_id != GB_NOBITS) ? (chnl_id = chnl_id +
1011 (2 *
1012 hnode_mgr->
1013 ul_num_chnls))
1014 : chnl_id;
1015 } else { /* must be PROCCOPY */
1016 DBC_ASSERT(pattrs->strm_mode ==
1017 STRMMODE_PROCCOPY);
1018 chnl_id = gb_findandset(hnode_mgr->chnl_map);
1019 /* e.g. 0-15 */
1020 }
1021 } else {
1022 /* default to PROCCOPY */
1023 chnl_id = gb_findandset(hnode_mgr->chnl_map);
1024 }
1025 if (chnl_id == GB_NOBITS) {
1026 status = -ECONNREFUSED;
1027 goto func_cont2;
1028 }
1029 pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
1030 if (pstr_dev_name != NULL)
1031 goto func_cont2;
1032
1033 if (pattrs) {
1034 if (pattrs->strm_mode == STRMMODE_RDMA) {
1035 gb_clear(hnode_mgr->dma_chnl_map, chnl_id -
1036 hnode_mgr->ul_num_chnls);
1037 } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
1038 gb_clear(hnode_mgr->zc_chnl_map, chnl_id -
1039 (2 * hnode_mgr->ul_num_chnls));
1040 } else {
1041 DBC_ASSERT(pattrs->strm_mode ==
1042 STRMMODE_PROCCOPY);
1043 gb_clear(hnode_mgr->chnl_map, chnl_id);
1044 }
1045 } else {
1046 gb_clear(hnode_mgr->chnl_map, chnl_id);
1047 }
1048 status = -ENOMEM;
1049func_cont2:
1050 if (DSP_SUCCEEDED(status)) {
1051 if (hNode1 == (struct node_object *)DSP_HGPPNODE) {
1052 hNode2->inputs[uStream2].type = HOSTCONNECT;
1053 hNode2->inputs[uStream2].dev_id = chnl_id;
1054 input->sz_device = pstr_dev_name;
1055 } else {
1056 hNode1->outputs[uStream1].type = HOSTCONNECT;
1057 hNode1->outputs[uStream1].dev_id = chnl_id;
1058 output->sz_device = pstr_dev_name;
1059 }
1060 sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
1061 }
1062 }
1063 /* Connecting task node to device node? */
1064 if (DSP_SUCCEEDED(status) && ((node1_type == NODE_DEVICE) ||
1065 (node2_type == NODE_DEVICE))) {
1066 if (node2_type == NODE_DEVICE) {
1067 /* node1 == > device */
1068 dev_node_obj = hNode2;
1069 hnode = hNode1;
1070 pstream = &(hNode1->outputs[uStream1]);
1071 pstrm_def = output;
1072 } else {
1073 /* device == > node2 */
1074 dev_node_obj = hNode1;
1075 hnode = hNode2;
1076 pstream = &(hNode2->inputs[uStream2]);
1077 pstrm_def = input;
1078 }
1079 /* Set up create args */
1080 pstream->type = DEVICECONNECT;
1081 dw_length = strlen(dev_node_obj->pstr_dev_name);
1082 if (conn_param != NULL) {
1083 pstrm_def->sz_device = kzalloc(dw_length + 1 +
1084 conn_param->cb_data,
1085 GFP_KERNEL);
1086 } else {
1087 pstrm_def->sz_device = kzalloc(dw_length + 1,
1088 GFP_KERNEL);
1089 }
1090 if (pstrm_def->sz_device == NULL) {
1091 status = -ENOMEM;
1092 } else {
1093 /* Copy device name */
1094 strncpy(pstrm_def->sz_device,
1095 dev_node_obj->pstr_dev_name, dw_length);
1096 if (conn_param != NULL) {
1097 strncat(pstrm_def->sz_device,
1098 (char *)conn_param->node_data,
1099 (u32) conn_param->cb_data);
1100 }
1101 dev_node_obj->device_owner = hnode;
1102 }
1103 }
1104 if (DSP_SUCCEEDED(status)) {
1105 /* Fill in create args */
1106 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
1107 hNode1->create_args.asa.task_arg_obj.num_outputs++;
1108 fill_stream_def(hNode1, output, pattrs);
1109 }
1110 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
1111 hNode2->create_args.asa.task_arg_obj.num_inputs++;
1112 fill_stream_def(hNode2, input, pattrs);
1113 }
1114 /* Update hNode1 and hNode2 stream_connect */
1115 if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
1116 hNode1->num_outputs++;
1117 if (uStream1 > hNode1->max_output_index)
1118 hNode1->max_output_index = uStream1;
1119
1120 }
1121 if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
1122 hNode2->num_inputs++;
1123 if (uStream2 > hNode2->max_input_index)
1124 hNode2->max_input_index = uStream2;
1125
1126 }
1127 fill_stream_connect(hNode1, hNode2, uStream1, uStream2);
1128 }
1129 /* end of sync_enter_cs */
1130 /* Exit critical section */
1131 mutex_unlock(&hnode_mgr->node_mgr_lock);
1132func_end:
1133 dev_dbg(bridge, "%s: hNode1: %p uStream1: %d hNode2: %p uStream2: %d"
1134 "pattrs: %p status: 0x%x\n", __func__, hNode1,
1135 uStream1, hNode2, uStream2, pattrs, status);
1136 return status;
1137}
1138
/*
 * ======== node_create ========
 * Purpose:
 *      Create a node on the DSP by remotely calling the node's create
 *      function via the node dispatcher.
 * Parameters:
 *      hnode: node previously returned by node_allocate(); must be in the
 *             NODE_ALLOCATED state.
 * Returns:
 *      0 on success; -EFAULT for a NULL node; -EPERM if the processor is in
 *      the error state; -EBADR if the node is not in NODE_ALLOCATED;
 *      -ENOTCONN if declared streams are not all connected; or an error
 *      propagated from the loader/dispatcher.
 * Side effects:
 *      On success the node moves to NODE_CREATED and registered clients are
 *      notified with DSP_NODESTATECHANGE; on most failures the node is put
 *      back into NODE_ALLOCATED.
 */
int node_create(struct node_object *hnode)
{
        struct node_object *pnode = (struct node_object *)hnode;
        struct node_mgr *hnode_mgr;
        struct bridge_drv_interface *intf_fxns;
        u32 ul_create_fxn;
        enum node_type node_type;
        int status = 0;
        int status1 = 0;        /* separate status for the unload step */
        struct dsp_cbdata cb_data;
        u32 proc_id = 255;      /* sentinel: not a valid processor id */
        struct dsp_processorstate proc_state;
        struct proc_object *hprocessor;
#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
        struct dspbridge_platform_data *pdata =
            omap_dspbridge_dev->dev.platform_data;
#endif

        DBC_REQUIRE(refs > 0);
        if (!pnode) {
                status = -EFAULT;
                goto func_end;
        }
        hprocessor = hnode->hprocessor;
        status = proc_get_state(hprocessor, &proc_state,
                                sizeof(struct dsp_processorstate));
        if (DSP_FAILED(status))
                goto func_end;
        /* If processor is in error state then don't attempt to create
           new node */
        if (proc_state.proc_state == PROC_ERROR) {
                status = -EPERM;
                goto func_end;
        }
        /* create struct dsp_cbdata struct for PWR calls */
        /* NOTE(review): cb_data is assigned but never read in this
         * function as visible here — confirm it is not dead code. */
        cb_data.cb_data = PWR_TIMEOUT;
        node_type = node_get_type(hnode);
        hnode_mgr = hnode->hnode_mgr;
        intf_fxns = hnode_mgr->intf_fxns;
        /* Get access to node dispatcher */
        mutex_lock(&hnode_mgr->node_mgr_lock);

        /* Check node state: only NODE_ALLOCATED nodes may be created */
        if (node_get_state(hnode) != NODE_ALLOCATED)
                status = -EBADR;

        if (DSP_SUCCEEDED(status))
                status = proc_get_processor_id(pnode->hprocessor, &proc_id);

        if (DSP_FAILED(status))
                goto func_cont2;

        /* Remote create only applies to the DSP unit */
        if (proc_id != DSP_UNIT)
                goto func_cont2;

        /* Make sure streams are properly connected: the highest stream
         * index used must be covered by the number of connections made */
        if ((hnode->num_inputs && hnode->max_input_index >
             hnode->num_inputs - 1) ||
            (hnode->num_outputs && hnode->max_output_index >
             hnode->num_outputs - 1))
                status = -ENOTCONN;

        if (DSP_SUCCEEDED(status)) {
                /* If node's create function is not loaded, load it */
                /* Boost the OPP level to max level that DSP can be requested */
#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
                if (pdata->cpu_set_freq)
                        (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
#endif
                status = hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
                                                       NLDR_CREATE);
                /* Get address of node's create function */
                if (DSP_SUCCEEDED(status)) {
                        hnode->loaded = true;
                        if (node_type != NODE_DEVICE) {
                                status = get_fxn_address(hnode, &ul_create_fxn,
                                                         CREATEPHASE);
                        }
                } else {
                        pr_err("%s: failed to load create code: 0x%x\n",
                               __func__, status);
                }
                /* Request the lowest OPP level */
#if defined(CONFIG_BRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
                if (pdata->cpu_set_freq)
                        (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif
                /* Get address of iAlg functions, if socket node */
                if (DSP_SUCCEEDED(status)) {
                        if (node_type == NODE_DAISSOCKET) {
                                status = hnode_mgr->nldr_fxns.pfn_get_fxn_addr
                                    (hnode->nldr_node_obj,
                                     hnode->dcd_props.obj_data.node_obj.
                                     pstr_i_alg_name,
                                     &hnode->create_args.asa.
                                     task_arg_obj.ul_dais_arg);
                        }
                }
        }
        if (DSP_SUCCEEDED(status)) {
                if (node_type != NODE_DEVICE) {
                        /* Dispatch the remote create call */
                        status = disp_node_create(hnode_mgr->disp_obj, hnode,
                                                  hnode_mgr->ul_fxn_addrs
                                                  [RMSCREATENODE],
                                                  ul_create_fxn,
                                                  &(hnode->create_args),
                                                  &(hnode->node_env));
                        if (DSP_SUCCEEDED(status)) {
                                /* Set the message queue id to the node env
                                 * pointer */
                                intf_fxns = hnode_mgr->intf_fxns;
                                (*intf_fxns->pfn_msg_set_queue_id) (hnode->
                                                        msg_queue_obj,
                                                        hnode->node_env);
                        }
                }
        }
        /* Phase II/Overlays: Create, execute, delete phases possibly in
         * different files/sections. */
        if (hnode->loaded && hnode->phase_split) {
                /* If create code was dynamically loaded, we can now unload
                 * it. */
                status1 = hnode_mgr->nldr_fxns.pfn_unload(hnode->nldr_node_obj,
                                                          NLDR_CREATE);
                hnode->loaded = false;
        }
        /* Unload failure is logged but does not override 'status' */
        if (DSP_FAILED(status1))
                pr_err("%s: Failed to unload create code: 0x%x\n",
                       __func__, status1);
func_cont2:
        /* Update node state and node manager state */
        if (DSP_SUCCEEDED(status)) {
                NODE_SET_STATE(hnode, NODE_CREATED);
                hnode_mgr->num_created++;
                goto func_cont;
        }
        if (status != -EBADR) {
                /* Put back in NODE_ALLOCATED state if error occurred */
                NODE_SET_STATE(hnode, NODE_ALLOCATED);
        }
func_cont:
        /* Free access to node dispatcher */
        mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
        /* Notify clients outside the lock */
        if (DSP_SUCCEEDED(status)) {
                proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
                ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
        }

        dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
                hnode, status);
        return status;
}
1297
/*
 * ======== node_create_mgr ========
 * Purpose:
 *      Create a NODE Manager object for the given device: allocates the
 *      manager, its node list, pipe/channel bitmaps, and creates the DCD
 *      manager, node dispatcher, STRM manager and dynamic loader.
 * Parameters:
 *      phNodeMgr: out pointer; set to the new manager on success, NULL
 *                 otherwise.
 *      hdev_obj:  device this manager is bound to.
 * Returns:
 *      0 on success, -ENOMEM on allocation failure, or an error propagated
 *      from one of the sub-object constructors. On failure the partially
 *      built manager is torn down via delete_node_mgr().
 */
int node_create_mgr(OUT struct node_mgr **phNodeMgr,
                    struct dev_object *hdev_obj)
{
        u32 i;
        struct node_mgr *node_mgr_obj = NULL;
        struct disp_attr disp_attr_obj;
        char *sz_zl_file = "";
        struct nldr_attrs nldr_attrs_obj;
        int status = 0;
        u8 dev_type;
        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(phNodeMgr != NULL);
        DBC_REQUIRE(hdev_obj != NULL);

        *phNodeMgr = NULL;
        /* Allocate Node manager object */
        node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
        if (node_mgr_obj) {
                node_mgr_obj->hdev_obj = hdev_obj;
                node_mgr_obj->node_list = kzalloc(sizeof(struct lst_list),
                                                  GFP_KERNEL);
                node_mgr_obj->pipe_map = gb_create(MAXPIPES);
                node_mgr_obj->pipe_done_map = gb_create(MAXPIPES);
                if (node_mgr_obj->node_list == NULL
                    || node_mgr_obj->pipe_map == NULL
                    || node_mgr_obj->pipe_done_map == NULL) {
                        status = -ENOMEM;
                } else {
                        INIT_LIST_HEAD(&node_mgr_obj->node_list->head);
                        /* kmalloc (not kzalloc): ntfy_init() initializes it */
                        node_mgr_obj->ntfy_obj = kmalloc(
                                sizeof(struct ntfy_object), GFP_KERNEL);
                        if (node_mgr_obj->ntfy_obj)
                                ntfy_init(node_mgr_obj->ntfy_obj);
                        else
                                status = -ENOMEM;
                }
                /* redundant after kzalloc, kept for clarity */
                node_mgr_obj->num_created = 0;
        } else {
                status = -ENOMEM;
        }
        /* get devNodeType */
        if (DSP_SUCCEEDED(status))
                status = dev_get_dev_type(hdev_obj, &dev_type);

        /* Create the DCD Manager */
        if (DSP_SUCCEEDED(status)) {
                status =
                    dcd_create_manager(sz_zl_file, &node_mgr_obj->hdcd_mgr);
                if (DSP_SUCCEEDED(status))
                        status = get_proc_props(node_mgr_obj, hdev_obj);

        }
        /* Create NODE Dispatcher */
        if (DSP_SUCCEEDED(status)) {
                /* disp attrs come from the processor properties fetched by
                 * get_proc_props() above */
                disp_attr_obj.ul_chnl_offset = node_mgr_obj->ul_chnl_offset;
                disp_attr_obj.ul_chnl_buf_size = node_mgr_obj->ul_chnl_buf_size;
                disp_attr_obj.proc_family = node_mgr_obj->proc_family;
                disp_attr_obj.proc_type = node_mgr_obj->proc_type;
                status =
                    disp_create(&node_mgr_obj->disp_obj, hdev_obj,
                                &disp_attr_obj);
        }
        /* Create a STRM Manager */
        if (DSP_SUCCEEDED(status))
                status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);

        if (DSP_SUCCEEDED(status)) {
                dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
                /* Get msg_ctrl queue manager */
                dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
                mutex_init(&node_mgr_obj->node_mgr_lock);
                node_mgr_obj->chnl_map = gb_create(node_mgr_obj->ul_num_chnls);
                /* dma chnl map. ul_num_chnls is # per transport */
                node_mgr_obj->dma_chnl_map =
                    gb_create(node_mgr_obj->ul_num_chnls);
                node_mgr_obj->zc_chnl_map =
                    gb_create(node_mgr_obj->ul_num_chnls);
                if ((node_mgr_obj->chnl_map == NULL)
                    || (node_mgr_obj->dma_chnl_map == NULL)
                    || (node_mgr_obj->zc_chnl_map == NULL)) {
                        status = -ENOMEM;
                } else {
                        /* Block out reserved channels */
                        for (i = 0; i < node_mgr_obj->ul_chnl_offset; i++)
                                gb_set(node_mgr_obj->chnl_map, i);

                        /* Block out channels reserved for RMS */
                        gb_set(node_mgr_obj->chnl_map,
                               node_mgr_obj->ul_chnl_offset);
                        gb_set(node_mgr_obj->chnl_map,
                               node_mgr_obj->ul_chnl_offset + 1);
                }
        }
        if (DSP_SUCCEEDED(status)) {
                /* NO RM Server on the IVA */
                if (dev_type != IVA_UNIT) {
                        /* Get addresses of any RMS functions loaded */
                        status = get_rms_fxns(node_mgr_obj);
                }
        }

        /* Get loader functions and create loader */
        if (DSP_SUCCEEDED(status))
                node_mgr_obj->nldr_fxns = nldr_fxns;    /* Dyn loader funcs */

        if (DSP_SUCCEEDED(status)) {
                nldr_attrs_obj.pfn_ovly = ovly;
                nldr_attrs_obj.pfn_write = mem_write;
                nldr_attrs_obj.us_dsp_word_size = node_mgr_obj->udsp_word_size;
                nldr_attrs_obj.us_dsp_mau_size = node_mgr_obj->udsp_mau_size;
                node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.pfn_init();
                status =
                    node_mgr_obj->nldr_fxns.pfn_create(&node_mgr_obj->nldr_obj,
                                                       hdev_obj,
                                                       &nldr_attrs_obj);
        }
        if (DSP_SUCCEEDED(status))
                *phNodeMgr = node_mgr_obj;
        else
                /* NOTE(review): on the kzalloc-failure path this passes
                 * NULL; presumably delete_node_mgr() tolerates NULL and
                 * partially-built managers — confirm in its definition. */
                delete_node_mgr(node_mgr_obj);

        DBC_ENSURE((DSP_FAILED(status) && (*phNodeMgr == NULL)) ||
                   (DSP_SUCCEEDED(status) && *phNodeMgr));

        return status;
}
1429
1430/*
1431 * ======== node_delete ========
1432 * Purpose:
1433 * Delete a node on the DSP by remotely calling the node's delete function.
1434 * Loads the node's delete function if necessary. Free GPP side resources
1435 * after node's delete function returns.
1436 */
1437int node_delete(struct node_object *hnode,
1438 struct process_context *pr_ctxt)
1439{
1440 struct node_object *pnode = (struct node_object *)hnode;
1441 struct node_mgr *hnode_mgr;
1442 struct proc_object *hprocessor;
1443 struct disp_object *disp_obj;
1444 u32 ul_delete_fxn;
1445 enum node_type node_type;
1446 enum node_state state;
1447 int status = 0;
1448 int status1 = 0;
1449 struct dsp_cbdata cb_data;
1450 u32 proc_id;
1451 struct bridge_drv_interface *intf_fxns;
1452
1453 void *node_res;
1454
1455 struct dsp_processorstate proc_state;
1456 DBC_REQUIRE(refs > 0);
1457
1458 if (!hnode) {
1459 status = -EFAULT;
1460 goto func_end;
1461 }
1462 /* create struct dsp_cbdata struct for PWR call */
1463 cb_data.cb_data = PWR_TIMEOUT;
1464 hnode_mgr = hnode->hnode_mgr;
1465 hprocessor = hnode->hprocessor;
1466 disp_obj = hnode_mgr->disp_obj;
1467 node_type = node_get_type(hnode);
1468 intf_fxns = hnode_mgr->intf_fxns;
1469 /* Enter critical section */
1470 mutex_lock(&hnode_mgr->node_mgr_lock);
1471
1472 state = node_get_state(hnode);
1473 /* Execute delete phase code for non-device node in all cases
1474 * except when the node was only allocated. Delete phase must be
1475 * executed even if create phase was executed, but failed.
1476 * If the node environment pointer is non-NULL, the delete phase
1477 * code must be executed. */
1478 if (!(state == NODE_ALLOCATED && hnode->node_env == (u32) NULL) &&
1479 node_type != NODE_DEVICE) {
1480 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
1481 if (DSP_FAILED(status))
1482 goto func_cont1;
1483
1484 if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
1485 /* If node has terminated, execute phase code will
1486 * have already been unloaded in node_on_exit(). If the
1487 * node is PAUSED, the execute phase is loaded, and it
1488 * is now ok to unload it. If the node is running, we
1489 * will unload the execute phase only after deleting
1490 * the node. */
1491 if (state == NODE_PAUSED && hnode->loaded &&
1492 hnode->phase_split) {
1493 /* Ok to unload execute code as long as node
1494 * is not * running */
1495 status1 =
1496 hnode_mgr->nldr_fxns.
1497 pfn_unload(hnode->nldr_node_obj,
1498 NLDR_EXECUTE);
1499 hnode->loaded = false;
1500 NODE_SET_STATE(hnode, NODE_DONE);
1501 }
1502 /* Load delete phase code if not loaded or if haven't
1503 * * unloaded EXECUTE phase */
1504 if ((!(hnode->loaded) || (state == NODE_RUNNING)) &&
1505 hnode->phase_split) {
1506 status =
1507 hnode_mgr->nldr_fxns.
1508 pfn_load(hnode->nldr_node_obj, NLDR_DELETE);
1509 if (DSP_SUCCEEDED(status))
1510 hnode->loaded = true;
1511 else
1512 pr_err("%s: fail - load delete code:"
1513 " 0x%x\n", __func__, status);
1514 }
1515 }
1516func_cont1:
1517 if (DSP_SUCCEEDED(status)) {
1518 /* Unblock a thread trying to terminate the node */
1519 (void)sync_set_event(hnode->sync_done);
1520 if (proc_id == DSP_UNIT) {
1521 /* ul_delete_fxn = address of node's delete
1522 * function */
1523 status = get_fxn_address(hnode, &ul_delete_fxn,
1524 DELETEPHASE);
1525 } else if (proc_id == IVA_UNIT)
1526 ul_delete_fxn = (u32) hnode->node_env;
1527 if (DSP_SUCCEEDED(status)) {
1528 status = proc_get_state(hprocessor,
1529 &proc_state,
1530 sizeof(struct
1531 dsp_processorstate));
1532 if (proc_state.proc_state != PROC_ERROR) {
1533 status =
1534 disp_node_delete(disp_obj, hnode,
1535 hnode_mgr->
1536 ul_fxn_addrs
1537 [RMSDELETENODE],
1538 ul_delete_fxn,
1539 hnode->node_env);
1540 } else
1541 NODE_SET_STATE(hnode, NODE_DONE);
1542
1543 /* Unload execute, if not unloaded, and delete
1544 * function */
1545 if (state == NODE_RUNNING &&
1546 hnode->phase_split) {
1547 status1 =
1548 hnode_mgr->nldr_fxns.
1549 pfn_unload(hnode->nldr_node_obj,
1550 NLDR_EXECUTE);
1551 }
1552 if (DSP_FAILED(status1))
1553 pr_err("%s: fail - unload execute code:"
1554 " 0x%x\n", __func__, status1);
1555
1556 status1 =
1557 hnode_mgr->nldr_fxns.pfn_unload(hnode->
1558 nldr_node_obj,
1559 NLDR_DELETE);
1560 hnode->loaded = false;
1561 if (DSP_FAILED(status1))
1562 pr_err("%s: fail - unload delete code: "
1563 "0x%x\n", __func__, status1);
1564 }
1565 }
1566 }
1567 /* Free host side resources even if a failure occurred */
1568 /* Remove node from hnode_mgr->node_list */
1569 lst_remove_elem(hnode_mgr->node_list, (struct list_head *)hnode);
1570 hnode_mgr->num_nodes--;
1571 /* Decrement count of nodes created on DSP */
1572 if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
1573 (hnode->node_env != (u32) NULL)))
1574 hnode_mgr->num_created--;
1575 /* Free host-side resources allocated by node_create()
1576 * delete_node() fails if SM buffers not freed by client! */
1577 if (drv_get_node_res_element(hnode, &node_res, pr_ctxt) !=
1578 -ENOENT)
1579 drv_proc_node_update_status(node_res, false);
1580 delete_node(hnode, pr_ctxt);
1581
1582 drv_remove_node_res_element(node_res, pr_ctxt);
1583 /* Exit critical section */
1584 mutex_unlock(&hnode_mgr->node_mgr_lock);
1585 proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
1586func_end:
1587 dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
1588 return status;
1589}
1590
1591/*
1592 * ======== node_delete_mgr ========
1593 * Purpose:
1594 * Delete the NODE Manager.
1595 */
1596int node_delete_mgr(struct node_mgr *hnode_mgr)
1597{
1598 int status = 0;
1599
1600 DBC_REQUIRE(refs > 0);
1601
1602 if (hnode_mgr)
1603 delete_node_mgr(hnode_mgr);
1604 else
1605 status = -EFAULT;
1606
1607 return status;
1608}
1609
1610/*
1611 * ======== node_enum_nodes ========
1612 * Purpose:
1613 * Enumerate currently allocated nodes.
1614 */
1615int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
1616 u32 node_tab_size, OUT u32 *pu_num_nodes,
1617 OUT u32 *pu_allocated)
1618{
1619 struct node_object *hnode;
1620 u32 i;
1621 int status = 0;
1622 DBC_REQUIRE(refs > 0);
1623 DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
1624 DBC_REQUIRE(pu_num_nodes != NULL);
1625 DBC_REQUIRE(pu_allocated != NULL);
1626
1627 if (!hnode_mgr) {
1628 status = -EFAULT;
1629 goto func_end;
1630 }
1631 /* Enter critical section */
1632 mutex_lock(&hnode_mgr->node_mgr_lock);
1633
1634 if (hnode_mgr->num_nodes > node_tab_size) {
1635 *pu_allocated = hnode_mgr->num_nodes;
1636 *pu_num_nodes = 0;
1637 status = -EINVAL;
1638 } else {
1639 hnode = (struct node_object *)lst_first(hnode_mgr->
1640 node_list);
1641 for (i = 0; i < hnode_mgr->num_nodes; i++) {
1642 DBC_ASSERT(hnode);
1643 node_tab[i] = hnode;
1644 hnode = (struct node_object *)lst_next
1645 (hnode_mgr->node_list,
1646 (struct list_head *)hnode);
1647 }
1648 *pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
1649 }
1650 /* end of sync_enter_cs */
1651 /* Exit critical section */
1652 mutex_unlock(&hnode_mgr->node_mgr_lock);
1653func_end:
1654 return status;
1655}
1656
1657/*
1658 * ======== node_exit ========
1659 * Purpose:
1660 * Discontinue usage of NODE module.
1661 */
1662void node_exit(void)
1663{
1664 DBC_REQUIRE(refs > 0);
1665
1666 refs--;
1667
1668 DBC_ENSURE(refs >= 0);
1669}
1670
1671/*
1672 * ======== node_free_msg_buf ========
1673 * Purpose:
1674 * Frees the message buffer.
1675 */
1676int node_free_msg_buf(struct node_object *hnode, IN u8 * pbuffer,
1677 OPTIONAL struct dsp_bufferattr *pattr)
1678{
1679 struct node_object *pnode = (struct node_object *)hnode;
1680 int status = 0;
1681 u32 proc_id;
1682 DBC_REQUIRE(refs > 0);
1683 DBC_REQUIRE(pbuffer != NULL);
1684 DBC_REQUIRE(pnode != NULL);
1685 DBC_REQUIRE(pnode->xlator != NULL);
1686
1687 if (!hnode) {
1688 status = -EFAULT;
1689 goto func_end;
1690 }
1691 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
1692 if (proc_id == DSP_UNIT) {
1693 if (DSP_SUCCEEDED(status)) {
1694 if (pattr == NULL) {
1695 /* set defaults */
1696 pattr = &node_dfltbufattrs;
1697 }
1698 /* Node supports single SM segment only */
1699 if (pattr->segment_id != 1)
1700 status = -EBADR;
1701
1702 /* pbuffer is clients Va. */
1703 status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
1704 }
1705 } else {
1706 DBC_ASSERT(NULL); /* BUG */
1707 }
1708func_end:
1709 return status;
1710}
1711
1712/*
1713 * ======== node_get_attr ========
1714 * Purpose:
1715 * Copy the current attributes of the specified node into a dsp_nodeattr
1716 * structure.
1717 */
1718int node_get_attr(struct node_object *hnode,
1719 OUT struct dsp_nodeattr *pattr, u32 attr_size)
1720{
1721 struct node_mgr *hnode_mgr;
1722 int status = 0;
1723 DBC_REQUIRE(refs > 0);
1724 DBC_REQUIRE(pattr != NULL);
1725 DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));
1726
1727 if (!hnode) {
1728 status = -EFAULT;
1729 } else {
1730 hnode_mgr = hnode->hnode_mgr;
1731 /* Enter hnode_mgr critical section (since we're accessing
1732 * data that could be changed by node_change_priority() and
1733 * node_connect(). */
1734 mutex_lock(&hnode_mgr->node_mgr_lock);
1735 pattr->cb_struct = sizeof(struct dsp_nodeattr);
1736 /* dsp_nodeattrin */
1737 pattr->in_node_attr_in.cb_struct =
1738 sizeof(struct dsp_nodeattrin);
1739 pattr->in_node_attr_in.prio = hnode->prio;
1740 pattr->in_node_attr_in.utimeout = hnode->utimeout;
1741 pattr->in_node_attr_in.heap_size =
1742 hnode->create_args.asa.task_arg_obj.heap_size;
1743 pattr->in_node_attr_in.pgpp_virt_addr = (void *)
1744 hnode->create_args.asa.task_arg_obj.ugpp_heap_addr;
1745 pattr->node_attr_inputs = hnode->num_gpp_inputs;
1746 pattr->node_attr_outputs = hnode->num_gpp_outputs;
1747 /* dsp_nodeinfo */
1748 get_node_info(hnode, &(pattr->node_info));
1749 /* end of sync_enter_cs */
1750 /* Exit critical section */
1751 mutex_unlock(&hnode_mgr->node_mgr_lock);
1752 }
1753 return status;
1754}
1755
1756/*
1757 * ======== node_get_channel_id ========
1758 * Purpose:
1759 * Get the channel index reserved for a stream connection between the
1760 * host and a node.
1761 */
1762int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
1763 OUT u32 *pulId)
1764{
1765 enum node_type node_type;
1766 int status = -EINVAL;
1767 DBC_REQUIRE(refs > 0);
1768 DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
1769 DBC_REQUIRE(pulId != NULL);
1770
1771 if (!hnode) {
1772 status = -EFAULT;
1773 return status;
1774 }
1775 node_type = node_get_type(hnode);
1776 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) {
1777 status = -EPERM;
1778 return status;
1779 }
1780 if (dir == DSP_TONODE) {
1781 if (index < MAX_INPUTS(hnode)) {
1782 if (hnode->inputs[index].type == HOSTCONNECT) {
1783 *pulId = hnode->inputs[index].dev_id;
1784 status = 0;
1785 }
1786 }
1787 } else {
1788 DBC_ASSERT(dir == DSP_FROMNODE);
1789 if (index < MAX_OUTPUTS(hnode)) {
1790 if (hnode->outputs[index].type == HOSTCONNECT) {
1791 *pulId = hnode->outputs[index].dev_id;
1792 status = 0;
1793 }
1794 }
1795 }
1796 return status;
1797}
1798
/*
 * ======== node_get_message ========
 * Purpose:
 *      Retrieve a message from a node on the DSP. Blocks (up to utimeout)
 *      until a message is available. If the message carries a shared-memory
 *      buffer descriptor (DSP_RMSBUFDESC), its DSP address is translated to
 *      a GPP virtual address and its size to GPP bytes in place.
 * Returns:
 *      0 on success; -EFAULT for a NULL node; -EPERM if the processor is in
 *      the error state or the node type cannot receive messages; -ESRCH if
 *      an SM address in the message cannot be translated; or an error from
 *      the bridge msg_get function.
 */
int node_get_message(struct node_object *hnode,
                     OUT struct dsp_msg *pmsg, u32 utimeout)
{
        struct node_mgr *hnode_mgr;
        enum node_type node_type;
        struct bridge_drv_interface *intf_fxns;
        int status = 0;
        void *tmp_buf;
        struct dsp_processorstate proc_state;
        struct proc_object *hprocessor;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(pmsg != NULL);

        if (!hnode) {
                status = -EFAULT;
                goto func_end;
        }
        hprocessor = hnode->hprocessor;
        status = proc_get_state(hprocessor, &proc_state,
                                sizeof(struct dsp_processorstate));
        if (DSP_FAILED(status))
                goto func_end;
        /* If processor is in error state then don't attempt to get the
           message */
        if (proc_state.proc_state == PROC_ERROR) {
                status = -EPERM;
                goto func_end;
        }
        hnode_mgr = hnode->hnode_mgr;
        node_type = node_get_type(hnode);
        /* Only message, task and DAIS socket nodes have message queues */
        if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
            node_type != NODE_DAISSOCKET) {
                status = -EPERM;
                goto func_end;
        }
        /* This function will block unless a message is available. Since
         * DSPNode_RegisterNotify() allows notification when a message
         * is available, the system can be designed so that
         * DSPNode_GetMessage() is only called when a message is
         * available. */
        intf_fxns = hnode_mgr->intf_fxns;
        status =
            (*intf_fxns->pfn_msg_get) (hnode->msg_queue_obj, pmsg, utimeout);
        /* Check if message contains SM descriptor */
        if (DSP_FAILED(status) || !(pmsg->dw_cmd & DSP_RMSBUFDESC))
                goto func_end;

        /* Translate DSP byte addr to GPP Va: dw_arg1 holds a DSP word
         * address, so scale by the DSP word size first. */
        tmp_buf = cmm_xlator_translate(hnode->xlator,
                                       (void *)(pmsg->dw_arg1 *
                                                hnode->hnode_mgr->
                                                udsp_word_size), CMM_DSPPA2PA);
        if (tmp_buf != NULL) {
                /* now convert this GPP Pa to Va */
                tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
                                               CMM_PA2VA);
                if (tmp_buf != NULL) {
                        /* Adjust SM size in msg: rewrite the descriptor
                         * in place with the GPP address and byte size */
                        pmsg->dw_arg1 = (u32) tmp_buf;
                        pmsg->dw_arg2 *= hnode->hnode_mgr->udsp_word_size;
                } else {
                        status = -ESRCH;
                }
        } else {
                status = -ESRCH;
        }
func_end:
        dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x\n", __func__,
                hnode, pmsg, utimeout);
        return status;
}
1876
1877/*
1878 * ======== node_get_nldr_obj ========
1879 */
1880int node_get_nldr_obj(struct node_mgr *hnode_mgr,
1881 struct nldr_object **phNldrObj)
1882{
1883 int status = 0;
1884 struct node_mgr *node_mgr_obj = hnode_mgr;
1885 DBC_REQUIRE(phNldrObj != NULL);
1886
1887 if (!hnode_mgr)
1888 status = -EFAULT;
1889 else
1890 *phNldrObj = node_mgr_obj->nldr_obj;
1891
1892 DBC_ENSURE(DSP_SUCCEEDED(status) || ((phNldrObj != NULL) &&
1893 (*phNldrObj == NULL)));
1894 return status;
1895}
1896
1897/*
1898 * ======== node_get_strm_mgr ========
1899 * Purpose:
1900 * Returns the Stream manager.
1901 */
1902int node_get_strm_mgr(struct node_object *hnode,
1903 struct strm_mgr **phStrmMgr)
1904{
1905 int status = 0;
1906
1907 DBC_REQUIRE(refs > 0);
1908
1909 if (!hnode)
1910 status = -EFAULT;
1911 else
1912 *phStrmMgr = hnode->hnode_mgr->strm_mgr_obj;
1913
1914 return status;
1915}
1916
1917/*
1918 * ======== node_get_load_type ========
1919 */
1920enum nldr_loadtype node_get_load_type(struct node_object *hnode)
1921{
1922 DBC_REQUIRE(refs > 0);
1923 DBC_REQUIRE(hnode);
1924 if (!hnode) {
1925 dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
1926 return -1;
1927 } else {
1928 return hnode->dcd_props.obj_data.node_obj.us_load_type;
1929 }
1930}
1931
1932/*
1933 * ======== node_get_timeout ========
1934 * Purpose:
1935 * Returns the timeout value for this node.
1936 */
1937u32 node_get_timeout(struct node_object *hnode)
1938{
1939 DBC_REQUIRE(refs > 0);
1940 DBC_REQUIRE(hnode);
1941 if (!hnode) {
1942 dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
1943 return 0;
1944 } else {
1945 return hnode->utimeout;
1946 }
1947}
1948
1949/*
1950 * ======== node_get_type ========
1951 * Purpose:
1952 * Returns the node type.
1953 */
1954enum node_type node_get_type(struct node_object *hnode)
1955{
1956 enum node_type node_type;
1957
1958 if (hnode == (struct node_object *)DSP_HGPPNODE)
1959 node_type = NODE_GPP;
1960 else {
1961 if (!hnode)
1962 node_type = -1;
1963 else
1964 node_type = hnode->ntype;
1965 }
1966 return node_type;
1967}
1968
1969/*
1970 * ======== node_init ========
1971 * Purpose:
1972 * Initialize the NODE module.
1973 */
1974bool node_init(void)
1975{
1976 DBC_REQUIRE(refs >= 0);
1977
1978 refs++;
1979
1980 return true;
1981}
1982
/*
 * ======== node_on_exit ========
 * Purpose:
 *      Gets called when RMS_EXIT is received for a node: marks the node
 *      done, records its exit status, unloads its execute-phase code if
 *      still loaded, unblocks any waiter in node_terminate(), and notifies
 *      registered clients.
 * Parameters:
 *      hnode:   node that exited (NULL is ignored).
 *      nStatus: exit status reported by the node.
 */
void node_on_exit(struct node_object *hnode, s32 nStatus)
{
        if (!hnode)
                return;

        /* Set node state to done */
        NODE_SET_STATE(hnode, NODE_DONE);
        hnode->exit_status = nStatus;
        /* Unload execute-phase code now that the node has stopped running;
         * unload errors are deliberately ignored here. */
        if (hnode->loaded && hnode->phase_split) {
                (void)hnode->hnode_mgr->nldr_fxns.pfn_unload(hnode->
                                                             nldr_node_obj,
                                                             NLDR_EXECUTE);
                hnode->loaded = false;
        }
        /* Unblock call to node_terminate */
        (void)sync_set_event(hnode->sync_done);
        /* Notify clients */
        proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
        ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
2008
2009/*
2010 * ======== node_pause ========
2011 * Purpose:
2012 * Suspend execution of a node currently running on the DSP.
2013 */
2014int node_pause(struct node_object *hnode)
2015{
2016 struct node_object *pnode = (struct node_object *)hnode;
2017 enum node_type node_type;
2018 enum node_state state;
2019 struct node_mgr *hnode_mgr;
2020 int status = 0;
2021 u32 proc_id;
2022 struct dsp_processorstate proc_state;
2023 struct proc_object *hprocessor;
2024
2025 DBC_REQUIRE(refs > 0);
2026
2027 if (!hnode) {
2028 status = -EFAULT;
2029 } else {
2030 node_type = node_get_type(hnode);
2031 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
2032 status = -EPERM;
2033 }
2034 if (DSP_FAILED(status))
2035 goto func_end;
2036
2037 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
2038
2039 if (proc_id == IVA_UNIT)
2040 status = -ENOSYS;
2041
2042 if (DSP_SUCCEEDED(status)) {
2043 hnode_mgr = hnode->hnode_mgr;
2044
2045 /* Enter critical section */
2046 mutex_lock(&hnode_mgr->node_mgr_lock);
2047 state = node_get_state(hnode);
2048 /* Check node state */
2049 if (state != NODE_RUNNING)
2050 status = -EBADR;
2051
2052 if (DSP_FAILED(status))
2053 goto func_cont;
2054 hprocessor = hnode->hprocessor;
2055 status = proc_get_state(hprocessor, &proc_state,
2056 sizeof(struct dsp_processorstate));
2057 if (DSP_FAILED(status))
2058 goto func_cont;
2059 /* If processor is in error state then don't attempt
2060 to send the message */
2061 if (proc_state.proc_state == PROC_ERROR) {
2062 status = -EPERM;
2063 goto func_cont;
2064 }
2065
2066 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
2067 hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY],
2068 hnode->node_env, NODE_SUSPENDEDPRI);
2069
2070 /* Update state */
2071 if (DSP_SUCCEEDED(status))
2072 NODE_SET_STATE(hnode, NODE_PAUSED);
2073
2074func_cont:
2075 /* End of sync_enter_cs */
2076 /* Leave critical section */
2077 mutex_unlock(&hnode_mgr->node_mgr_lock);
2078 if (DSP_SUCCEEDED(status)) {
2079 proc_notify_clients(hnode->hprocessor,
2080 DSP_NODESTATECHANGE);
2081 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2082 }
2083 }
2084func_end:
2085 dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
2086 return status;
2087}
2088
/*
 * ======== node_put_message ========
 * Purpose:
 *      Send a message to a message node, task node, or XDAIS socket node. This
 *      function will block until the message stream can accommodate the
 *      message, or a timeout occurs.
 *      Messages whose dw_cmd carries DSP_RMSBUFDESC have their dw_arg1
 *      translated from a GPP virtual address to a DSP physical address and
 *      both dw_arg1/dw_arg2 converted to DSP MAUs before being queued.
 */
int node_put_message(struct node_object *hnode,
		     IN CONST struct dsp_msg *pmsg, u32 utimeout)
{
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	int status = 0;
	void *tmp_buf;
	struct dsp_msg new_msg;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pmsg != NULL);

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->hprocessor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (DSP_FAILED(status))
		goto func_end;
	/* If processor is in bad state then don't attempt sending the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->hnode_mgr;
	node_type = node_get_type(hnode);
	/* Only node types that own a message queue can accept messages. */
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET)
		status = -EPERM;

	if (DSP_SUCCEEDED(status)) {
		/* Check node state. Can't send messages to a node after
		 * we've sent the RMS_EXIT command. There is still the
		 * possibility that node_terminate can be called after we've
		 * checked the state. Could add another SYNC object to
		 * prevent this (can't use node_mgr_lock, since we don't
		 * want to block other NODE functions). However, the node may
		 * still exit on its own, before this message is sent. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state == NODE_TERMINATING || state == NODE_DONE)
			status = -EBADR;

		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (DSP_FAILED(status))
		goto func_end;

	/* assign pmsg values to new msg; the caller's copy is never
	 * modified, only the local copy gets the translated addresses */
	new_msg = *pmsg;
	/* Now, check if message contains a SM buffer descriptor */
	if (pmsg->dw_cmd & DSP_RMSBUFDESC) {
		/* Translate GPP Va to DSP physical buf Ptr. */
		tmp_buf = cmm_xlator_translate(hnode->xlator,
					       (void *)new_msg.dw_arg1,
					       CMM_VA2DSPPA);
		if (tmp_buf != NULL) {
			/* got translation, convert to MAUs in msg
			 * (dw_arg1 = buffer address, dw_arg2 = size) */
			if (hnode->hnode_mgr->udsp_word_size != 0) {
				new_msg.dw_arg1 =
				    (u32) tmp_buf /
				    hnode->hnode_mgr->udsp_word_size;
				/* MAUs */
				new_msg.dw_arg2 /= hnode->hnode_mgr->
				    udsp_word_size;
			} else {
				pr_err("%s: udsp_word_size is zero!\n",
				       __func__);
				status = -EPERM;	/* bad DSPWordSize */
			}
		} else {	/* failed to translate buffer address */
			status = -ESRCH;
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* May block until the queue has room or utimeout expires. */
		intf_fxns = hnode_mgr->intf_fxns;
		status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj,
						    &new_msg, utimeout);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
		"status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
	return status;
}
2188
2189/*
2190 * ======== node_register_notify ========
2191 * Purpose:
2192 * Register to be notified on specific events for this node.
2193 */
2194int node_register_notify(struct node_object *hnode, u32 event_mask,
2195 u32 notify_type,
2196 struct dsp_notification *hnotification)
2197{
2198 struct bridge_drv_interface *intf_fxns;
2199 int status = 0;
2200
2201 DBC_REQUIRE(refs > 0);
2202 DBC_REQUIRE(hnotification != NULL);
2203
2204 if (!hnode) {
2205 status = -EFAULT;
2206 } else {
2207 /* Check if event mask is a valid node related event */
2208 if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2209 status = -EINVAL;
2210
2211 /* Check if notify type is valid */
2212 if (notify_type != DSP_SIGNALEVENT)
2213 status = -EINVAL;
2214
2215 /* Only one Notification can be registered at a
2216 * time - Limitation */
2217 if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2218 status = -EINVAL;
2219 }
2220 if (DSP_SUCCEEDED(status)) {
2221 if (event_mask == DSP_NODESTATECHANGE) {
2222 status = ntfy_register(hnode->ntfy_obj, hnotification,
2223 event_mask & DSP_NODESTATECHANGE,
2224 notify_type);
2225 } else {
2226 /* Send Message part of event mask to msg_ctrl */
2227 intf_fxns = hnode->hnode_mgr->intf_fxns;
2228 status = (*intf_fxns->pfn_msg_register_notify)
2229 (hnode->msg_queue_obj,
2230 event_mask & DSP_NODEMESSAGEREADY, notify_type,
2231 hnotification);
2232 }
2233
2234 }
2235 dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
2236 "hnotification: %p status 0x%x\n", __func__, hnode,
2237 event_mask, notify_type, hnotification, status);
2238 return status;
2239}
2240
/*
 * ======== node_run ========
 * Purpose:
 *      Start execution of a node's execute phase, or resume execution of a node
 *      that has been suspended (via NODE_NodePause()) on the DSP. Load the
 *      node's execute function if necessary.
 *      Valid only from NODE_CREATED (start) or NODE_PAUSED (resume).
 */
int node_run(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	enum node_state state;
	u32 ul_execute_fxn;
	u32 ul_fxn_addr;
	int status = 0;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	DBC_REQUIRE(refs > 0);

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->hprocessor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (DSP_FAILED(status))
		goto func_end;
	/* If processor is in error state then don't attempt to run the node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* Device nodes have no execute phase to run. */
	node_type = node_get_type(hnode);
	if (node_type == NODE_DEVICE)
		status = -EPERM;
	if (DSP_FAILED(status))
		goto func_end;

	hnode_mgr = hnode->hnode_mgr;
	if (!hnode_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	state = node_get_state(hnode);
	if (state != NODE_CREATED && state != NODE_PAUSED)
		status = -EBADR;

	if (DSP_SUCCEEDED(status))
		status = proc_get_processor_id(pnode->hprocessor, &proc_id);

	if (DSP_FAILED(status))
		goto func_cont1;

	/* NOTE(review): an unrecognized proc_id skips dispatch but leaves
	 * status == 0, so the node is still marked RUNNING below — confirm
	 * this is intended for non-DSP/non-IVA processors. */
	if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
		goto func_cont1;

	if (state == NODE_CREATED) {
		/* If node's execute function is not loaded, load it */
		if (!(hnode->loaded) && hnode->phase_split) {
			status =
			    hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
							  NLDR_EXECUTE);
			if (DSP_SUCCEEDED(status)) {
				hnode->loaded = true;
			} else {
				pr_err("%s: fail - load execute code: 0x%x\n",
				       __func__, status);
			}
		}
		if (DSP_SUCCEEDED(status)) {
			/* Get address of node's execute function */
			if (proc_id == IVA_UNIT)
				ul_execute_fxn = (u32) hnode->node_env;
			else {
				status = get_fxn_address(hnode, &ul_execute_fxn,
							 EXECUTEPHASE);
			}
		}
		if (DSP_SUCCEEDED(status)) {
			/* Ask RMS on the DSP to start the execute phase. */
			ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSEXECUTENODE];
			status =
			    disp_node_run(hnode_mgr->disp_obj, hnode,
					  ul_fxn_addr, ul_execute_fxn,
					  hnode->node_env);
		}
	} else if (state == NODE_PAUSED) {
		/* Resuming: restore the node's original priority via RMS. */
		ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY];
		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
						   ul_fxn_addr, hnode->node_env,
						   NODE_GET_PRIORITY(hnode));
	} else {
		/* We should never get here */
		DBC_ASSERT(false);
	}
func_cont1:
	/* Update node state. */
	if (DSP_SUCCEEDED(status))
		NODE_SET_STATE(hnode, NODE_RUNNING);
	else		/* Set state back to previous value */
		NODE_SET_STATE(hnode, state);
	/*End of sync_enter_cs */
	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (DSP_SUCCEEDED(status)) {
		proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
	return status;
}
2361
/*
 * ======== node_terminate ========
 * Purpose:
 *      Signal a node running on the DSP that it should exit its execute phase
 *      function.
 *      Two-stage protocol: send RMS_EXIT and wait half the kill timeout; if
 *      that times out, escalate to RMS_KILLTASK and wait again; if the node
 *      still does not exit, simulate a DSP exception via the DEH manager.
 *      On success *pstatus receives the node's exit status.
 */
int node_terminate(struct node_object *hnode, OUT int *pstatus)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	struct dsp_msg msg, killmsg;
	int status = 0;
	u32 proc_id, kill_time_out;
	struct deh_mgr *hdeh_mgr;
	struct dsp_processorstate proc_state;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pstatus != NULL);

	if (!hnode || !hnode->hnode_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	if (pnode->hprocessor == NULL) {
		status = -EFAULT;
		goto func_end;
	}
	status = proc_get_processor_id(pnode->hprocessor, &proc_id);

	if (DSP_SUCCEEDED(status)) {
		/* Only task and XDAIS socket nodes can be terminated. */
		hnode_mgr = hnode->hnode_mgr;
		node_type = node_get_type(hnode);
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
	}
	if (DSP_SUCCEEDED(status)) {
		/* Check node state */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state != NODE_RUNNING) {
			status = -EBADR;
			/* Set the exit status if node terminated on
			 * its own. */
			if (state == NODE_DONE)
				*pstatus = hnode->exit_status;

		} else {
			NODE_SET_STATE(hnode, NODE_TERMINATING);
		}
		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (DSP_SUCCEEDED(status)) {
		/*
		 * Send exit message. Do not change state to NODE_DONE
		 * here. That will be done in callback.
		 */
		status = proc_get_state(pnode->hprocessor, &proc_state,
					sizeof(struct dsp_processorstate));
		if (DSP_FAILED(status))
			goto func_cont;
		/* If processor is in error state then don't attempt to send
		 * A kill task command */
		if (proc_state.proc_state == PROC_ERROR) {
			status = -EPERM;
			goto func_cont;
		}

		msg.dw_cmd = RMS_EXIT;
		msg.dw_arg1 = hnode->node_env;
		killmsg.dw_cmd = RMS_KILLTASK;
		killmsg.dw_arg1 = hnode->node_env;
		intf_fxns = hnode_mgr->intf_fxns;

		/* Overall kill budget: twice the node timeout, capped;
		 * each of the two waits below gets half of it. */
		if (hnode->utimeout > MAXTIMEOUT)
			kill_time_out = MAXTIMEOUT;
		else
			kill_time_out = (hnode->utimeout) * 2;

		status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj, &msg,
						    hnode->utimeout);
		if (DSP_FAILED(status))
			goto func_cont;

		/*
		 * Wait on synchronization object that will be
		 * posted in the callback on receiving RMS_EXIT
		 * message, or by node_delete. Check for valid hnode,
		 * in case posted by node_delete().
		 */
		/* NOTE(review): this compares against positive ETIME;
		 * verify sync_wait_on_event()'s error convention (it may
		 * return -ETIME), otherwise the kill escalation below is
		 * never reached. */
		status = sync_wait_on_event(hnode->sync_done,
					    kill_time_out / 2);
		if (status != ETIME)
			goto func_cont;

		/* RMS_EXIT timed out: escalate to a forced kill. */
		status = (*intf_fxns->pfn_msg_put)(hnode->msg_queue_obj,
						   &killmsg, hnode->utimeout);
		if (DSP_FAILED(status))
			goto func_cont;
		status = sync_wait_on_event(hnode->sync_done,
					    kill_time_out / 2);
		if (DSP_FAILED(status)) {
			/*
			 * Here it goes the part of the simulation of
			 * the DSP exception.
			 */
			dev_get_deh_mgr(hnode_mgr->hdev_obj, &hdeh_mgr);
			if (!hdeh_mgr)
				goto func_cont;

			(*intf_fxns->pfn_deh_notify)(hdeh_mgr, DSP_SYSERROR,
						     DSP_EXCEPTIONABORT);
		}
	}
func_cont:
	if (DSP_SUCCEEDED(status)) {
		/* Enter CS before getting exit status, in case node was
		 * deleted. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		/* Make sure node wasn't deleted while we blocked */
		/* NOTE(review): hnode is a local pointer and cannot become
		 * NULL here, so this check cannot actually detect deletion
		 * by node_delete() — confirm intended lifetime handling. */
		if (!hnode) {
			status = -EPERM;
		} else {
			*pstatus = hnode->exit_status;
			dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
				__func__, hnode, hnode->node_env, status);
		}
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}			/*End of sync_enter_cs */
func_end:
	return status;
}
2497
/*
 * ======== delete_node ========
 * Purpose:
 *      Free GPP resources allocated in node_allocate() or node_connect():
 *      message queue, sync object, stream tables and their pipe/channel ids,
 *      stream definition strings, mapped/reserved DSP heap memory, stream
 *      connect records, DCD phase-function name strings, the SM address
 *      translator, the loader node object, and finally the node itself.
 */
static void delete_node(struct node_object *hnode,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct cmm_xlatorobject *xlator;
	struct bridge_drv_interface *intf_fxns;
	u32 i;
	enum node_type node_type;
	struct stream_chnl stream;
	struct node_msgargs node_msg_args;
	struct node_taskargs task_arg_obj;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object =
	    (struct proc_object *)hnode->hprocessor;
#endif
	int status;
	if (!hnode)
		goto func_end;
	hnode_mgr = hnode->hnode_mgr;
	if (!hnode_mgr)
		goto func_end;
	xlator = hnode->xlator;
	node_type = node_get_type(hnode);
	/* Device nodes have no message queue, streams, or task args. */
	if (node_type != NODE_DEVICE) {
		node_msg_args = hnode->create_args.asa.node_msg_args;
		kfree(node_msg_args.pdata);

		/* Free msg_ctrl queue */
		if (hnode->msg_queue_obj) {
			intf_fxns = hnode_mgr->intf_fxns;
			(*intf_fxns->pfn_msg_delete_queue) (hnode->
							    msg_queue_obj);
			hnode->msg_queue_obj = NULL;
		}

		kfree(hnode->sync_done);

		/* Free all stream info: releases the pipe/channel ids via
		 * free_stream() before dropping the tables themselves. */
		if (hnode->inputs) {
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				stream = hnode->inputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->inputs);
			hnode->inputs = NULL;
		}
		if (hnode->outputs) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				stream = hnode->outputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->outputs);
			hnode->outputs = NULL;
		}
		/* Free the per-stream device name strings, then the
		 * definition arrays. */
		task_arg_obj = hnode->create_args.asa.task_arg_obj;
		if (task_arg_obj.strm_in_def) {
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_in_def[i].sz_device);
				task_arg_obj.strm_in_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_in_def);
			task_arg_obj.strm_in_def = NULL;
		}
		if (task_arg_obj.strm_out_def) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_out_def[i].sz_device);
				task_arg_obj.strm_out_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_out_def);
			task_arg_obj.strm_out_def = NULL;
		}
		/* Unmap, then unreserve, the node's DSP heap region.
		 * NOTE(review): both statuses are overwritten/ignored —
		 * failures here are silently dropped. */
		if (task_arg_obj.udsp_heap_res_addr) {
			status = proc_un_map(hnode->hprocessor, (void *)
					     task_arg_obj.udsp_heap_addr,
					     pr_ctxt);

			status = proc_un_reserve_memory(hnode->hprocessor,
							(void *)
							task_arg_obj.
							udsp_heap_res_addr,
							pr_ctxt);
#ifdef DSP_DMM_DEBUG
			status = dmm_get_handle(p_proc_object, &dmm_mgr);
			if (dmm_mgr)
				dmm_mem_map_dump(dmm_mgr);
			else
				status = DSP_EHANDLE;
#endif
		}
	}
	if (node_type != NODE_MESSAGE) {
		kfree(hnode->stream_connect);
		hnode->stream_connect = NULL;
	}
	kfree(hnode->pstr_dev_name);
	hnode->pstr_dev_name = NULL;

	if (hnode->ntfy_obj) {
		ntfy_delete(hnode->ntfy_obj);
		kfree(hnode->ntfy_obj);
		hnode->ntfy_obj = NULL;
	}

	/* These were allocated in dcd_get_object_def (via node_allocate) */
	kfree(hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name);
	hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name = NULL;

	/* Free all SM address translator resources */
	if (xlator) {
		(void)cmm_xlator_delete(xlator, TRUE);	/* force free */
		xlator = NULL;
	}

	kfree(hnode->nldr_node_obj);
	hnode->nldr_node_obj = NULL;
	hnode->hnode_mgr = NULL;
	kfree(hnode);
	hnode = NULL;
func_end:
	return;
}
2634
/*
 * ======== delete_node_mgr ========
 * Purpose:
 *      Frees the node manager: deletes any nodes still on its list, then
 *      tears down the DCD manager, lock, notification object, channel/pipe
 *      bitmaps, dispatcher, stream manager, and the node loader.
 */
static void delete_node_mgr(struct node_mgr *hnode_mgr)
{
	struct node_object *hnode;

	if (hnode_mgr) {
		/* Free resources */
		if (hnode_mgr->hdcd_mgr)
			dcd_destroy_manager(hnode_mgr->hdcd_mgr);

		/* Remove any elements remaining in lists */
		if (hnode_mgr->node_list) {
			while ((hnode = (struct node_object *)
				lst_get_head(hnode_mgr->node_list)))
				delete_node(hnode, NULL);

			DBC_ASSERT(LST_IS_EMPTY(hnode_mgr->node_list));
			kfree(hnode_mgr->node_list);
		}
		mutex_destroy(&hnode_mgr->node_mgr_lock);
		if (hnode_mgr->ntfy_obj) {
			ntfy_delete(hnode_mgr->ntfy_obj);
			kfree(hnode_mgr->ntfy_obj);
		}

		/* Release the pipe and channel allocation bitmaps. */
		if (hnode_mgr->pipe_map)
			gb_delete(hnode_mgr->pipe_map);

		if (hnode_mgr->pipe_done_map)
			gb_delete(hnode_mgr->pipe_done_map);

		if (hnode_mgr->chnl_map)
			gb_delete(hnode_mgr->chnl_map);

		if (hnode_mgr->dma_chnl_map)
			gb_delete(hnode_mgr->dma_chnl_map);

		if (hnode_mgr->zc_chnl_map)
			gb_delete(hnode_mgr->zc_chnl_map);

		if (hnode_mgr->disp_obj)
			disp_delete(hnode_mgr->disp_obj);

		if (hnode_mgr->strm_mgr_obj)
			strm_delete(hnode_mgr->strm_mgr_obj);

		/* Delete the loader */
		if (hnode_mgr->nldr_obj)
			hnode_mgr->nldr_fxns.pfn_delete(hnode_mgr->nldr_obj);

		/* Balance the loader's init refcount taken at creation. */
		if (hnode_mgr->loader_init)
			hnode_mgr->nldr_fxns.pfn_exit();

		kfree(hnode_mgr);
	}
}
2695
/*
 * ======== fill_stream_connect ========
 * Purpose:
 *      Fills stream information for a connection from hNode1's output
 *      uStream1 to hNode2's input uStream2. Either endpoint may be the GPP
 *      pseudo-handle (DSP_HGPPNODE); device nodes keep no connect records.
 *      NOTE(review): the record index num_inputs + num_outputs - 1 assumes
 *      the caller has already counted this stream in the node's totals —
 *      confirm against node_connect().
 */
static void fill_stream_connect(struct node_object *hNode1,
				struct node_object *hNode2,
				u32 uStream1, u32 uStream2)
{
	u32 strm_index;
	struct dsp_streamconnect *strm1 = NULL;
	struct dsp_streamconnect *strm2 = NULL;
	enum node_type node1_type = NODE_TASK;
	enum node_type node2_type = NODE_TASK;

	node1_type = node_get_type(hNode1);
	node2_type = node_get_type(hNode2);
	if (hNode1 != (struct node_object *)DSP_HGPPNODE) {

		if (node1_type != NODE_DEVICE) {
			/* Record the source side in hNode1's newest slot. */
			strm_index = hNode1->num_inputs +
			    hNode1->num_outputs - 1;
			strm1 = &(hNode1->stream_connect[strm_index]);
			strm1->cb_struct = sizeof(struct dsp_streamconnect);
			strm1->this_node_stream_index = uStream1;
		}

		if (hNode2 != (struct node_object *)DSP_HGPPNODE) {
			/* NODE == > NODE */
			if (node1_type != NODE_DEVICE) {
				strm1->connected_node = hNode2;
				strm1->ui_connected_node_id = hNode2->node_uuid;
				strm1->connected_node_stream_index = uStream2;
				strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
			}
			if (node2_type != NODE_DEVICE) {
				/* Mirror record on the receiving node. */
				strm_index = hNode2->num_inputs +
				    hNode2->num_outputs - 1;
				strm2 = &(hNode2->stream_connect[strm_index]);
				strm2->cb_struct =
				    sizeof(struct dsp_streamconnect);
				strm2->this_node_stream_index = uStream2;
				strm2->connected_node = hNode1;
				strm2->ui_connected_node_id = hNode1->node_uuid;
				strm2->connected_node_stream_index = uStream1;
				strm2->connect_type = CONNECTTYPE_NODEINPUT;
			}
		} else if (node1_type != NODE_DEVICE)
			strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
	} else {
		/* GPP == > NODE */
		DBC_ASSERT(hNode2 != (struct node_object *)DSP_HGPPNODE);
		strm_index = hNode2->num_inputs + hNode2->num_outputs - 1;
		strm2 = &(hNode2->stream_connect[strm_index]);
		strm2->cb_struct = sizeof(struct dsp_streamconnect);
		strm2->this_node_stream_index = uStream2;
		strm2->connect_type = CONNECTTYPE_GPPINPUT;
	}
}
2755
2756/*
2757 * ======== fill_stream_def ========
2758 * Purpose:
2759 * Fills Stream attributes.
2760 */
2761static void fill_stream_def(struct node_object *hnode,
2762 struct node_strmdef *pstrm_def,
2763 struct dsp_strmattr *pattrs)
2764{
2765 struct node_mgr *hnode_mgr = hnode->hnode_mgr;
2766
2767 if (pattrs != NULL) {
2768 pstrm_def->num_bufs = pattrs->num_bufs;
2769 pstrm_def->buf_size =
2770 pattrs->buf_size / hnode_mgr->udsp_data_mau_size;
2771 pstrm_def->seg_id = pattrs->seg_id;
2772 pstrm_def->buf_alignment = pattrs->buf_alignment;
2773 pstrm_def->utimeout = pattrs->utimeout;
2774 } else {
2775 pstrm_def->num_bufs = DEFAULTNBUFS;
2776 pstrm_def->buf_size =
2777 DEFAULTBUFSIZE / hnode_mgr->udsp_data_mau_size;
2778 pstrm_def->seg_id = DEFAULTSEGID;
2779 pstrm_def->buf_alignment = DEFAULTALIGNMENT;
2780 pstrm_def->utimeout = DEFAULTTIMEOUT;
2781 }
2782}
2783
2784/*
2785 * ======== free_stream ========
2786 * Purpose:
2787 * Updates the channel mask and frees the pipe id.
2788 */
2789static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
2790{
2791 /* Free up the pipe id unless other node has not yet been deleted. */
2792 if (stream.type == NODECONNECT) {
2793 if (gb_test(hnode_mgr->pipe_done_map, stream.dev_id)) {
2794 /* The other node has already been deleted */
2795 gb_clear(hnode_mgr->pipe_done_map, stream.dev_id);
2796 gb_clear(hnode_mgr->pipe_map, stream.dev_id);
2797 } else {
2798 /* The other node has not been deleted yet */
2799 gb_set(hnode_mgr->pipe_done_map, stream.dev_id);
2800 }
2801 } else if (stream.type == HOSTCONNECT) {
2802 if (stream.dev_id < hnode_mgr->ul_num_chnls) {
2803 gb_clear(hnode_mgr->chnl_map, stream.dev_id);
2804 } else if (stream.dev_id < (2 * hnode_mgr->ul_num_chnls)) {
2805 /* dsp-dma */
2806 gb_clear(hnode_mgr->dma_chnl_map, stream.dev_id -
2807 (1 * hnode_mgr->ul_num_chnls));
2808 } else if (stream.dev_id < (3 * hnode_mgr->ul_num_chnls)) {
2809 /* zero-copy */
2810 gb_clear(hnode_mgr->zc_chnl_map, stream.dev_id -
2811 (2 * hnode_mgr->ul_num_chnls));
2812 }
2813 }
2814}
2815
2816/*
2817 * ======== get_fxn_address ========
2818 * Purpose:
2819 * Retrieves the address for create, execute or delete phase for a node.
2820 */
2821static int get_fxn_address(struct node_object *hnode, u32 * pulFxnAddr,
2822 u32 uPhase)
2823{
2824 char *pstr_fxn_name = NULL;
2825 struct node_mgr *hnode_mgr = hnode->hnode_mgr;
2826 int status = 0;
2827 DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
2828 node_get_type(hnode) == NODE_DAISSOCKET ||
2829 node_get_type(hnode) == NODE_MESSAGE);
2830
2831 switch (uPhase) {
2832 case CREATEPHASE:
2833 pstr_fxn_name =
2834 hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn;
2835 break;
2836 case EXECUTEPHASE:
2837 pstr_fxn_name =
2838 hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn;
2839 break;
2840 case DELETEPHASE:
2841 pstr_fxn_name =
2842 hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn;
2843 break;
2844 default:
2845 /* Should never get here */
2846 DBC_ASSERT(false);
2847 break;
2848 }
2849
2850 status =
2851 hnode_mgr->nldr_fxns.pfn_get_fxn_addr(hnode->nldr_node_obj,
2852 pstr_fxn_name, pulFxnAddr);
2853
2854 return status;
2855}
2856
2857/*
2858 * ======== get_node_info ========
2859 * Purpose:
2860 * Retrieves the node information.
2861 */
2862void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *pNodeInfo)
2863{
2864 u32 i;
2865
2866 DBC_REQUIRE(hnode);
2867 DBC_REQUIRE(pNodeInfo != NULL);
2868
2869 pNodeInfo->cb_struct = sizeof(struct dsp_nodeinfo);
2870 pNodeInfo->nb_node_database_props =
2871 hnode->dcd_props.obj_data.node_obj.ndb_props;
2872 pNodeInfo->execution_priority = hnode->prio;
2873 pNodeInfo->device_owner = hnode->device_owner;
2874 pNodeInfo->number_streams = hnode->num_inputs + hnode->num_outputs;
2875 pNodeInfo->node_env = hnode->node_env;
2876
2877 pNodeInfo->ns_execution_state = node_get_state(hnode);
2878
2879 /* Copy stream connect data */
2880 for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
2881 pNodeInfo->sc_stream_connection[i] = hnode->stream_connect[i];
2882
2883}
2884
/*
 * ======== get_node_props ========
 * Purpose:
 *      Retrieve node properties from the DCD and cache them on the node:
 *      node type, message args (non-device nodes), device name (device
 *      nodes), and task args (task / XDAIS socket nodes).
 */
static int get_node_props(struct dcd_manager *hdcd_mgr,
			  struct node_object *hnode,
			  CONST struct dsp_uuid *pNodeId,
			  struct dcd_genericobj *pdcdProps)
{
	u32 len;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *task_arg_obj;
	enum node_type node_type = NODE_TASK;
	struct dsp_ndbprops *pndb_props =
	    &(pdcdProps->obj_data.node_obj.ndb_props);
	int status = 0;
	char sz_uuid[MAXUUIDLEN];

	status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)pNodeId,
				    DSP_DCDNODETYPE, pdcdProps);

	if (DSP_SUCCEEDED(status)) {
		hnode->ntype = node_type = pndb_props->ntype;

		/* Create UUID value to set in registry. */
		uuid_uuid_to_string((struct dsp_uuid *)pNodeId, sz_uuid,
				    MAXUUIDLEN);
		dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);

		/* Fill in message args that come from NDB */
		if (node_type != NODE_DEVICE) {
			pmsg_args = &(hnode->create_args.asa.node_msg_args);
			pmsg_args->seg_id =
			    pdcdProps->obj_data.node_obj.msg_segid;
			pmsg_args->notify_type =
			    pdcdProps->obj_data.node_obj.msg_notify_type;
			pmsg_args->max_msgs = pndb_props->message_depth;
			dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
				pmsg_args->max_msgs);
		} else {
			/* Copy device name; strncpy of len bytes is safe
			 * here because kzalloc(len + 1) zero-fills, which
			 * guarantees NUL termination. */
			DBC_REQUIRE(pndb_props->ac_name);
			len = strlen(pndb_props->ac_name);
			DBC_ASSERT(len < MAXDEVNAMELEN);
			hnode->pstr_dev_name = kzalloc(len + 1, GFP_KERNEL);
			if (hnode->pstr_dev_name == NULL) {
				status = -ENOMEM;
			} else {
				strncpy(hnode->pstr_dev_name,
					pndb_props->ac_name, len);
			}
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Fill in create args that come from NDB */
		if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
			task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
			task_arg_obj->prio = pndb_props->prio;
			task_arg_obj->stack_size = pndb_props->stack_size;
			task_arg_obj->sys_stack_size =
			    pndb_props->sys_stack_size;
			task_arg_obj->stack_seg = pndb_props->stack_seg;
			dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
				"0x%x words System Stack Size: 0x%x words "
				"Stack Segment: 0x%x profile count : 0x%x\n",
				task_arg_obj->prio, task_arg_obj->stack_size,
				task_arg_obj->sys_stack_size,
				task_arg_obj->stack_seg,
				pndb_props->count_profiles);
		}
	}

	return status;
}
2960
2961/*
2962 * ======== get_proc_props ========
2963 * Purpose:
2964 * Retrieve the processor properties.
2965 */
2966static int get_proc_props(struct node_mgr *hnode_mgr,
2967 struct dev_object *hdev_obj)
2968{
2969 struct cfg_hostres *host_res;
2970 struct bridge_dev_context *pbridge_context;
2971 int status = 0;
2972
2973 status = dev_get_bridge_context(hdev_obj, &pbridge_context);
2974 if (!pbridge_context)
2975 status = -EFAULT;
2976
2977 if (DSP_SUCCEEDED(status)) {
2978 host_res = pbridge_context->resources;
2979 if (!host_res)
2980 return -EPERM;
2981 hnode_mgr->ul_chnl_offset = host_res->dw_chnl_offset;
2982 hnode_mgr->ul_chnl_buf_size = host_res->dw_chnl_buf_size;
2983 hnode_mgr->ul_num_chnls = host_res->dw_num_chnls;
2984
2985 /*
2986 * PROC will add an API to get dsp_processorinfo.
2987 * Fill in default values for now.
2988 */
2989 /* TODO -- Instead of hard coding, take from registry */
2990 hnode_mgr->proc_family = 6000;
2991 hnode_mgr->proc_type = 6410;
2992 hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
2993 hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
2994 hnode_mgr->udsp_word_size = DSPWORDSIZE;
2995 hnode_mgr->udsp_data_mau_size = DSPWORDSIZE;
2996 hnode_mgr->udsp_mau_size = 1;
2997
2998 }
2999 return status;
3000}
3001
3002/*
3003 * ======== node_get_uuid_props ========
3004 * Purpose:
3005 * Fetch Node UUID properties from DCD/DOF file.
3006 */
3007int node_get_uuid_props(void *hprocessor,
3008 IN CONST struct dsp_uuid *pNodeId,
3009 OUT struct dsp_ndbprops *node_props)
3010{
3011 struct node_mgr *hnode_mgr = NULL;
3012 struct dev_object *hdev_obj;
3013 int status = 0;
3014 struct dcd_nodeprops dcd_node_props;
3015 struct dsp_processorstate proc_state;
3016
3017 DBC_REQUIRE(refs > 0);
3018 DBC_REQUIRE(hprocessor != NULL);
3019 DBC_REQUIRE(pNodeId != NULL);
3020
3021 if (hprocessor == NULL || pNodeId == NULL) {
3022 status = -EFAULT;
3023 goto func_end;
3024 }
3025 status = proc_get_state(hprocessor, &proc_state,
3026 sizeof(struct dsp_processorstate));
3027 if (DSP_FAILED(status))
3028 goto func_end;
3029 /* If processor is in error state then don't attempt
3030 to send the message */
3031 if (proc_state.proc_state == PROC_ERROR) {
3032 status = -EPERM;
3033 goto func_end;
3034 }
3035
3036 status = proc_get_dev_object(hprocessor, &hdev_obj);
3037 if (hdev_obj) {
3038 status = dev_get_node_manager(hdev_obj, &hnode_mgr);
3039 if (hnode_mgr == NULL) {
3040 status = -EFAULT;
3041 goto func_end;
3042 }
3043 }
3044
3045 /*
3046 * Enter the critical section. This is needed because
3047 * dcd_get_object_def will ultimately end up calling dbll_open/close,
3048 * which needs to be protected in order to not corrupt the zlib manager
3049 * (COD).
3050 */
3051 mutex_lock(&hnode_mgr->node_mgr_lock);
3052
3053 dcd_node_props.pstr_create_phase_fxn = NULL;
3054 dcd_node_props.pstr_execute_phase_fxn = NULL;
3055 dcd_node_props.pstr_delete_phase_fxn = NULL;
3056 dcd_node_props.pstr_i_alg_name = NULL;
3057
3058 status = dcd_get_object_def(hnode_mgr->hdcd_mgr,
3059 (struct dsp_uuid *)pNodeId, DSP_DCDNODETYPE,
3060 (struct dcd_genericobj *)&dcd_node_props);
3061
3062 if (DSP_SUCCEEDED(status)) {
3063 *node_props = dcd_node_props.ndb_props;
3064 kfree(dcd_node_props.pstr_create_phase_fxn);
3065
3066 kfree(dcd_node_props.pstr_execute_phase_fxn);
3067
3068 kfree(dcd_node_props.pstr_delete_phase_fxn);
3069
3070 kfree(dcd_node_props.pstr_i_alg_name);
3071 }
3072 /* Leave the critical section, we're done. */
3073 mutex_unlock(&hnode_mgr->node_mgr_lock);
3074func_end:
3075 return status;
3076}
3077
3078/*
3079 * ======== get_rms_fxns ========
3080 * Purpose:
3081 * Retrieve the RMS functions.
3082 */
3083static int get_rms_fxns(struct node_mgr *hnode_mgr)
3084{
3085 s32 i;
3086 struct dev_object *dev_obj = hnode_mgr->hdev_obj;
3087 int status = 0;
3088
3089 static char *psz_fxns[NUMRMSFXNS] = {
3090 "RMS_queryServer", /* RMSQUERYSERVER */
3091 "RMS_configureServer", /* RMSCONFIGURESERVER */
3092 "RMS_createNode", /* RMSCREATENODE */
3093 "RMS_executeNode", /* RMSEXECUTENODE */
3094 "RMS_deleteNode", /* RMSDELETENODE */
3095 "RMS_changeNodePriority", /* RMSCHANGENODEPRIORITY */
3096 "RMS_readMemory", /* RMSREADMEMORY */
3097 "RMS_writeMemory", /* RMSWRITEMEMORY */
3098 "RMS_copy", /* RMSCOPY */
3099 };
3100
3101 for (i = 0; i < NUMRMSFXNS; i++) {
3102 status = dev_get_symbol(dev_obj, psz_fxns[i],
3103 &(hnode_mgr->ul_fxn_addrs[i]));
3104 if (DSP_FAILED(status)) {
3105 if (status == -ESPIPE) {
3106 /*
3107 * May be loaded dynamically (in the future),
3108 * but return an error for now.
3109 */
3110 dev_dbg(bridge, "%s: RMS function: %s currently"
3111 " not loaded\n", __func__, psz_fxns[i]);
3112 } else {
3113 dev_dbg(bridge, "%s: Symbol not found: %s "
3114 "status = 0x%x\n", __func__,
3115 psz_fxns[i], status);
3116 break;
3117 }
3118 }
3119 }
3120
3121 return status;
3122}
3123
3124/*
3125 * ======== ovly ========
3126 * Purpose:
3127 * Called during overlay.Sends command to RMS to copy a block of data.
3128 */
3129static u32 ovly(void *priv_ref, u32 ulDspRunAddr, u32 ulDspLoadAddr,
3130 u32 ul_num_bytes, u32 nMemSpace)
3131{
3132 struct node_object *hnode = (struct node_object *)priv_ref;
3133 struct node_mgr *hnode_mgr;
3134 u32 ul_bytes = 0;
3135 u32 ul_size;
3136 u32 ul_timeout;
3137 int status = 0;
3138 struct bridge_dev_context *hbridge_context;
3139 /* Function interface to Bridge driver*/
3140 struct bridge_drv_interface *intf_fxns;
3141
3142 DBC_REQUIRE(hnode);
3143
3144 hnode_mgr = hnode->hnode_mgr;
3145
3146 ul_size = ul_num_bytes / hnode_mgr->udsp_word_size;
3147 ul_timeout = hnode->utimeout;
3148
3149 /* Call new MemCopy function */
3150 intf_fxns = hnode_mgr->intf_fxns;
3151 status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
3152 if (DSP_SUCCEEDED(status)) {
3153 status =
3154 (*intf_fxns->pfn_brd_mem_copy) (hbridge_context,
3155 ulDspRunAddr, ulDspLoadAddr,
3156 ul_num_bytes, (u32) nMemSpace);
3157 if (DSP_SUCCEEDED(status))
3158 ul_bytes = ul_num_bytes;
3159 else
3160 pr_debug("%s: failed to copy brd memory, status 0x%x\n",
3161 __func__, status);
3162 } else {
3163 pr_debug("%s: failed to get Bridge context, status 0x%x\n",
3164 __func__, status);
3165 }
3166
3167 return ul_bytes;
3168}
3169
3170/*
3171 * ======== mem_write ========
3172 */
3173static u32 mem_write(void *priv_ref, u32 ulDspAddr, void *pbuf,
3174 u32 ul_num_bytes, u32 nMemSpace)
3175{
3176 struct node_object *hnode = (struct node_object *)priv_ref;
3177 struct node_mgr *hnode_mgr;
3178 u16 mem_sect_type;
3179 u32 ul_timeout;
3180 int status = 0;
3181 struct bridge_dev_context *hbridge_context;
3182 /* Function interface to Bridge driver */
3183 struct bridge_drv_interface *intf_fxns;
3184
3185 DBC_REQUIRE(hnode);
3186 DBC_REQUIRE(nMemSpace & DBLL_CODE || nMemSpace & DBLL_DATA);
3187
3188 hnode_mgr = hnode->hnode_mgr;
3189
3190 ul_timeout = hnode->utimeout;
3191 mem_sect_type = (nMemSpace & DBLL_CODE) ? RMS_CODE : RMS_DATA;
3192
3193 /* Call new MemWrite function */
3194 intf_fxns = hnode_mgr->intf_fxns;
3195 status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
3196 status = (*intf_fxns->pfn_brd_mem_write) (hbridge_context, pbuf,
3197 ulDspAddr, ul_num_bytes, mem_sect_type);
3198
3199 return ul_num_bytes;
3200}
3201
3202/*
3203 * ======== node_find_addr ========
3204 */
3205int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
3206 u32 offset_range, void *sym_addr_output, char *sym_name)
3207{
3208 struct node_object *node_obj;
3209 int status = -ENOENT;
3210 u32 n;
3211
3212 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
3213 (unsigned int) node_mgr,
3214 sym_addr, offset_range,
3215 (unsigned int) sym_addr_output, sym_name);
3216
3217 node_obj = (struct node_object *)(node_mgr->node_list->head.next);
3218
3219 for (n = 0; n < node_mgr->num_nodes; n++) {
3220 status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
3221 offset_range, sym_addr_output, sym_name);
3222
3223 if (DSP_SUCCEEDED(status))
3224 break;
3225
3226 node_obj = (struct node_object *) (node_obj->list_elem.next);
3227 }
3228
3229 return status;
3230}
3231