aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/staging/tidspbridge/Makefile2
-rw-r--r--drivers/staging/tidspbridge/TODO1
-rw-r--r--drivers/staging/tidspbridge/core/_deh.h2
-rw-r--r--drivers/staging/tidspbridge/core/_msg_sm.h16
-rw-r--r--drivers/staging/tidspbridge/core/_tiomap.h30
-rw-r--r--drivers/staging/tidspbridge/core/chnl_sm.c676
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c52
-rw-r--r--drivers/staging/tidspbridge/core/io_sm.c672
-rw-r--r--drivers/staging/tidspbridge/core/msg_sm.c619
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c199
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430_pwr.c112
-rw-r--r--drivers/staging/tidspbridge/core/tiomap_io.c96
-rw-r--r--drivers/staging/tidspbridge/core/ue_deh.c28
-rw-r--r--drivers/staging/tidspbridge/dynload/cload.c102
-rw-r--r--drivers/staging/tidspbridge/dynload/dload_internal.h6
-rw-r--r--drivers/staging/tidspbridge/gen/gb.c166
-rw-r--r--drivers/staging/tidspbridge/gen/gh.c38
-rw-r--r--drivers/staging/tidspbridge/gen/gs.c88
-rw-r--r--drivers/staging/tidspbridge/gen/uuidutil.c22
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h26
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/brddefs.h2
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h50
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/chnl.h21
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/chnldefs.h9
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h21
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cmm.h2
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h39
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cod.h13
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h14
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dbdefs.h98
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dbldefs.h141
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dbll.h6
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dblldefs.h65
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dehdefs.h32
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dev.h65
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/disp.h15
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dispdefs.h35
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/drv.h40
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/drvdefs.h25
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h216
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspdefs.h88
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspdrv.h2
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspio.h4
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspioctl.h13
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h2
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/gb.h79
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/gs.h59
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/host_os.h9
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/io.h29
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/io_sm.h143
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/iodefs.h36
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/ldr.h29
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/list.h225
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h40
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h4
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h24
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/node.h20
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/nodepriv.h10
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/pwr.h8
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/pwr_sh.h33
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h11
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/rms_sh.h9
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/strm.h62
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/strmdefs.h6
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/sync.h14
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/utildefs.h39
-rw-r--r--drivers/staging/tidspbridge/pmgr/chnl.c4
-rw-r--r--drivers/staging/tidspbridge/pmgr/cmm.c675
-rw-r--r--drivers/staging/tidspbridge/pmgr/cod.c20
-rw-r--r--drivers/staging/tidspbridge/pmgr/dbll.c53
-rw-r--r--drivers/staging/tidspbridge/pmgr/dev.c355
-rw-r--r--drivers/staging/tidspbridge/pmgr/dspapi.c272
-rw-r--r--drivers/staging/tidspbridge/pmgr/io.c9
-rw-r--r--drivers/staging/tidspbridge/pmgr/ioobj.h4
-rw-r--r--drivers/staging/tidspbridge/pmgr/msg.c4
-rw-r--r--drivers/staging/tidspbridge/rmgr/dbdcd.c66
-rw-r--r--drivers/staging/tidspbridge/rmgr/disp.c124
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv.c202
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c1
-rw-r--r--drivers/staging/tidspbridge/rmgr/mgr.c66
-rw-r--r--drivers/staging/tidspbridge/rmgr/nldr.c117
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c1092
-rw-r--r--drivers/staging/tidspbridge/rmgr/proc.c170
-rw-r--r--drivers/staging/tidspbridge/rmgr/pwr.c8
-rw-r--r--drivers/staging/tidspbridge/rmgr/rmm.c93
-rw-r--r--drivers/staging/tidspbridge/rmgr/strm.c86
86 files changed, 3009 insertions, 5272 deletions
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index 41c644c3318..fd6a2761cc3 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -1,6 +1,6 @@
1obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o 1obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o
2 2
3libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o 3libgen = gen/gh.o gen/uuidutil.o
4libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ 4libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
5 core/tiomap3430_pwr.o core/tiomap_io.o \ 5 core/tiomap3430_pwr.o core/tiomap_io.o \
6 core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o 6 core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o
diff --git a/drivers/staging/tidspbridge/TODO b/drivers/staging/tidspbridge/TODO
index 187363f2bdc..1c51e2dc7b5 100644
--- a/drivers/staging/tidspbridge/TODO
+++ b/drivers/staging/tidspbridge/TODO
@@ -6,7 +6,6 @@
6* Eliminate general services and libraries - use or extend existing kernel 6* Eliminate general services and libraries - use or extend existing kernel
7 libraries instead (e.g. gcf/lcm in nldr.c, global helpers in gen/) 7 libraries instead (e.g. gcf/lcm in nldr.c, global helpers in gen/)
8* Eliminate direct manipulation of OMAP_SYSC_BASE 8* Eliminate direct manipulation of OMAP_SYSC_BASE
9* Eliminate list.h : seem like a redundant wrapper to existing kernel lists
10* Eliminate DSP_SUCCEEDED macros and their imposed redundant indentations 9* Eliminate DSP_SUCCEEDED macros and their imposed redundant indentations
11 (adopt the kernel way of checking for return values) 10 (adopt the kernel way of checking for return values)
12* Audit interfaces exposed to user space 11* Audit interfaces exposed to user space
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h
index 16723cd3483..025d34320e7 100644
--- a/drivers/staging/tidspbridge/core/_deh.h
+++ b/drivers/staging/tidspbridge/core/_deh.h
@@ -25,7 +25,7 @@
25 25
26/* DEH Manager: only one created per board: */ 26/* DEH Manager: only one created per board: */
27struct deh_mgr { 27struct deh_mgr {
28 struct bridge_dev_context *hbridge_context; /* Bridge context. */ 28 struct bridge_dev_context *bridge_context; /* Bridge context. */
29 struct ntfy_object *ntfy_obj; /* NTFY object */ 29 struct ntfy_object *ntfy_obj; /* NTFY object */
30 30
31 /* MMU Fault DPC */ 31 /* MMU Fault DPC */
diff --git a/drivers/staging/tidspbridge/core/_msg_sm.h b/drivers/staging/tidspbridge/core/_msg_sm.h
index 556de5c025d..f6e58e3f3b4 100644
--- a/drivers/staging/tidspbridge/core/_msg_sm.h
+++ b/drivers/staging/tidspbridge/core/_msg_sm.h
@@ -20,7 +20,7 @@
20#ifndef _MSG_SM_ 20#ifndef _MSG_SM_
21#define _MSG_SM_ 21#define _MSG_SM_
22 22
23#include <dspbridge/list.h> 23#include <linux/list.h>
24#include <dspbridge/msgdefs.h> 24#include <dspbridge/msgdefs.h>
25 25
26/* 26/*
@@ -85,13 +85,13 @@ struct msg_mgr {
85 /* Function interface to Bridge driver */ 85 /* Function interface to Bridge driver */
86 struct bridge_drv_interface *intf_fxns; 86 struct bridge_drv_interface *intf_fxns;
87 87
88 struct io_mgr *hio_mgr; /* IO manager */ 88 struct io_mgr *iomgr; /* IO manager */
89 struct lst_list *queue_list; /* List of MSG_QUEUEs */ 89 struct list_head queue_list; /* List of MSG_QUEUEs */
90 spinlock_t msg_mgr_lock; /* For critical sections */ 90 spinlock_t msg_mgr_lock; /* For critical sections */
91 /* Signalled when MsgFrame is available */ 91 /* Signalled when MsgFrame is available */
92 struct sync_object *sync_event; 92 struct sync_object *sync_event;
93 struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */ 93 struct list_head msg_free_list; /* Free MsgFrames ready to be filled */
94 struct lst_list *msg_used_list; /* MsgFrames ready to go to DSP */ 94 struct list_head msg_used_list; /* MsgFrames ready to go to DSP */
95 u32 msgs_pending; /* # of queued messages to go to DSP */ 95 u32 msgs_pending; /* # of queued messages to go to DSP */
96 u32 max_msgs; /* Max # of msgs that fit in buffer */ 96 u32 max_msgs; /* Max # of msgs that fit in buffer */
97 msg_onexit on_exit; /* called when RMS_EXIT is received */ 97 msg_onexit on_exit; /* called when RMS_EXIT is received */
@@ -108,12 +108,12 @@ struct msg_mgr {
108 */ 108 */
109struct msg_queue { 109struct msg_queue {
110 struct list_head list_elem; 110 struct list_head list_elem;
111 struct msg_mgr *hmsg_mgr; 111 struct msg_mgr *msg_mgr;
112 u32 max_msgs; /* Node message depth */ 112 u32 max_msgs; /* Node message depth */
113 u32 msgq_id; /* Node environment pointer */ 113 u32 msgq_id; /* Node environment pointer */
114 struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */ 114 struct list_head msg_free_list; /* Free MsgFrames ready to be filled */
115 /* Filled MsgFramess waiting to be read */ 115 /* Filled MsgFramess waiting to be read */
116 struct lst_list *msg_used_list; 116 struct list_head msg_used_list;
117 void *arg; /* Handle passed to mgr on_exit callback */ 117 void *arg; /* Handle passed to mgr on_exit callback */
118 struct sync_object *sync_event; /* Signalled when message is ready */ 118 struct sync_object *sync_event; /* Signalled when message is ready */
119 struct sync_object *sync_done; /* For synchronizing cleanup */ 119 struct sync_object *sync_done; /* For synchronizing cleanup */
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index 1159a500f49..1e0273e50d2 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -319,24 +319,24 @@ static const struct bpwr_clk_t bpwr_clks[] = {
319 319
320/* This Bridge driver's device context: */ 320/* This Bridge driver's device context: */
321struct bridge_dev_context { 321struct bridge_dev_context {
322 struct dev_object *hdev_obj; /* Handle to Bridge device object. */ 322 struct dev_object *dev_obj; /* Handle to Bridge device object. */
323 u32 dw_dsp_base_addr; /* Arm's API to DSP virt base addr */ 323 u32 dsp_base_addr; /* Arm's API to DSP virt base addr */
324 /* 324 /*
325 * DSP External memory prog address as seen virtually by the OS on 325 * DSP External memory prog address as seen virtually by the OS on
326 * the host side. 326 * the host side.
327 */ 327 */
328 u32 dw_dsp_ext_base_addr; /* See the comment above */ 328 u32 dsp_ext_base_addr; /* See the comment above */
329 u32 dw_api_reg_base; /* API mem map'd registers */ 329 u32 api_reg_base; /* API mem map'd registers */
330 void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */ 330 void __iomem *dsp_mmu_base; /* DSP MMU Mapped registers */
331 u32 dw_api_clk_base; /* CLK Registers */ 331 u32 api_clk_base; /* CLK Registers */
332 u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */ 332 u32 dsp_clk_m2_base; /* DSP Clock Module m2 */
333 u32 dw_public_rhea; /* Pub Rhea */ 333 u32 public_rhea; /* Pub Rhea */
334 u32 dw_int_addr; /* MB INTR reg */ 334 u32 int_addr; /* MB INTR reg */
335 u32 dw_tc_endianism; /* TC Endianism register */ 335 u32 tc_endianism; /* TC Endianism register */
336 u32 dw_test_base; /* DSP MMU Mapped registers */ 336 u32 test_base; /* DSP MMU Mapped registers */
337 u32 dw_self_loop; /* Pointer to the selfloop */ 337 u32 self_loop; /* Pointer to the selfloop */
338 u32 dw_dsp_start_add; /* API Boot vector */ 338 u32 dsp_start_add; /* API Boot vector */
339 u32 dw_internal_size; /* Internal memory size */ 339 u32 internal_size; /* Internal memory size */
340 340
341 struct omap_mbox *mbox; /* Mail box handle */ 341 struct omap_mbox *mbox; /* Mail box handle */
342 342
@@ -348,7 +348,7 @@ struct bridge_dev_context {
348 */ 348 */
349 /* DMMU TLB entries */ 349 /* DMMU TLB entries */
350 struct bridge_ioctl_extproc atlb_entry[BRDIOCTL_NUMOFMMUTLB]; 350 struct bridge_ioctl_extproc atlb_entry[BRDIOCTL_NUMOFMMUTLB];
351 u32 dw_brd_state; /* Last known board state. */ 351 u32 brd_state; /* Last known board state. */
352 352
353 /* TC Settings */ 353 /* TC Settings */
354 bool tc_word_swap_on; /* Traffic Controller Word Swap */ 354 bool tc_word_swap_on; /* Traffic Controller Word Swap */
diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c
index 662a5b5a58e..3c05d7cb9c9 100644
--- a/drivers/staging/tidspbridge/core/chnl_sm.c
+++ b/drivers/staging/tidspbridge/core/chnl_sm.c
@@ -37,9 +37,9 @@
37 * which may cause timeouts and/or failure offunction sync_wait_on_event. 37 * which may cause timeouts and/or failure offunction sync_wait_on_event.
38 * This invariant condition is: 38 * This invariant condition is:
39 * 39 *
40 * LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is reset 40 * list_empty(&pchnl->io_completions) ==> pchnl->sync_event is reset
41 * and 41 * and
42 * !LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is set. 42 * !list_empty(&pchnl->io_completions) ==> pchnl->sync_event is set.
43 */ 43 */
44 44
45#include <linux/types.h> 45#include <linux/types.h>
@@ -73,11 +73,9 @@
73#define MAILBOX_IRQ INT_MAIL_MPU_IRQ 73#define MAILBOX_IRQ INT_MAIL_MPU_IRQ
74 74
75/* ----------------------------------- Function Prototypes */ 75/* ----------------------------------- Function Prototypes */
76static struct lst_list *create_chirp_list(u32 chirps); 76static int create_chirp_list(struct list_head *list, u32 chirps);
77 77
78static void free_chirp_list(struct lst_list *chirp_list); 78static void free_chirp_list(struct list_head *list);
79
80static struct chnl_irp *make_new_chirp(void);
81 79
82static int search_free_channel(struct chnl_mgr *chnl_mgr_obj, 80static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
83 u32 *chnl); 81 u32 *chnl);
@@ -107,35 +105,31 @@ int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
107 is_eos = (byte_size == 0); 105 is_eos = (byte_size == 0);
108 106
109 /* Validate args */ 107 /* Validate args */
110 if (!host_buf || !pchnl) { 108 if (!host_buf || !pchnl)
111 status = -EFAULT; 109 return -EFAULT;
112 } else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) { 110
113 status = -EPERM; 111 if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode))
114 } else { 112 return -EPERM;
115 /* 113
116 * Check the channel state: only queue chirp if channel state 114 /*
117 * allows it. 115 * Check the channel state: only queue chirp if channel state
118 */ 116 * allows it.
119 dw_state = pchnl->dw_state; 117 */
120 if (dw_state != CHNL_STATEREADY) { 118 dw_state = pchnl->state;
121 if (dw_state & CHNL_STATECANCEL) 119 if (dw_state != CHNL_STATEREADY) {
122 status = -ECANCELED; 120 if (dw_state & CHNL_STATECANCEL)
123 else if ((dw_state & CHNL_STATEEOS) && 121 return -ECANCELED;
124 CHNL_IS_OUTPUT(pchnl->chnl_mode)) 122 if ((dw_state & CHNL_STATEEOS) &&
125 status = -EPIPE; 123 CHNL_IS_OUTPUT(pchnl->chnl_mode))
126 else 124 return -EPIPE;
127 /* No other possible states left */ 125 /* No other possible states left */
128 DBC_ASSERT(0); 126 DBC_ASSERT(0);
129 }
130 } 127 }
131 128
132 dev_obj = dev_get_first(); 129 dev_obj = dev_get_first();
133 dev_get_bridge_context(dev_obj, &dev_ctxt); 130 dev_get_bridge_context(dev_obj, &dev_ctxt);
134 if (!dev_ctxt) 131 if (!dev_ctxt)
135 status = -EFAULT; 132 return -EFAULT;
136
137 if (status)
138 goto func_end;
139 133
140 if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) { 134 if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
141 if (!(host_buf < (void *)USERMODE_ADDR)) { 135 if (!(host_buf < (void *)USERMODE_ADDR)) {
@@ -144,18 +138,16 @@ int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
144 } 138 }
145 /* if addr in user mode, then copy to kernel space */ 139 /* if addr in user mode, then copy to kernel space */
146 host_sys_buf = kmalloc(buf_size, GFP_KERNEL); 140 host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
147 if (host_sys_buf == NULL) { 141 if (host_sys_buf == NULL)
148 status = -ENOMEM; 142 return -ENOMEM;
149 goto func_end; 143
150 }
151 if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) { 144 if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
152 status = copy_from_user(host_sys_buf, host_buf, 145 status = copy_from_user(host_sys_buf, host_buf,
153 buf_size); 146 buf_size);
154 if (status) { 147 if (status) {
155 kfree(host_sys_buf); 148 kfree(host_sys_buf);
156 host_sys_buf = NULL; 149 host_sys_buf = NULL;
157 status = -EFAULT; 150 return -EFAULT;
158 goto func_end;
159 } 151 }
160 } 152 }
161 } 153 }
@@ -169,63 +161,62 @@ func_cont:
169 omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX); 161 omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
170 if (pchnl->chnl_type == CHNL_PCPY) { 162 if (pchnl->chnl_type == CHNL_PCPY) {
171 /* This is a processor-copy channel. */ 163 /* This is a processor-copy channel. */
172 if (!status && CHNL_IS_OUTPUT(pchnl->chnl_mode)) { 164 if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
173 /* Check buffer size on output channels for fit. */ 165 /* Check buffer size on output channels for fit. */
174 if (byte_size > 166 if (byte_size > io_buf_size(
175 io_buf_size(pchnl->chnl_mgr_obj->hio_mgr)) 167 pchnl->chnl_mgr_obj->iomgr)) {
176 status = -EINVAL; 168 status = -EINVAL;
177 169 goto out;
170 }
178 } 171 }
179 } 172 }
180 if (!status) {
181 /* Get a free chirp: */
182 chnl_packet_obj =
183 (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
184 if (chnl_packet_obj == NULL)
185 status = -EIO;
186
187 }
188 if (!status) {
189 /* Enqueue the chirp on the chnl's IORequest queue: */
190 chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
191 host_buf;
192 if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
193 chnl_packet_obj->host_sys_buf = host_sys_buf;
194 173
195 /* 174 /* Get a free chirp: */
196 * Note: for dma chans dw_dsp_addr contains dsp address 175 if (list_empty(&pchnl->free_packets_list)) {
197 * of SM buffer. 176 status = -EIO;
198 */ 177 goto out;
199 DBC_ASSERT(chnl_mgr_obj->word_size != 0); 178 }
200 /* DSP address */ 179 chnl_packet_obj = list_first_entry(&pchnl->free_packets_list,
201 chnl_packet_obj->dsp_tx_addr = 180 struct chnl_irp, link);
202 dw_dsp_addr / chnl_mgr_obj->word_size; 181 list_del(&chnl_packet_obj->link);
203 chnl_packet_obj->byte_size = byte_size; 182
204 chnl_packet_obj->buf_size = buf_size; 183 /* Enqueue the chirp on the chnl's IORequest queue: */
205 /* Only valid for output channel */ 184 chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
206 chnl_packet_obj->dw_arg = dw_arg; 185 host_buf;
207 chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS : 186 if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
208 CHNL_IOCSTATCOMPLETE); 187 chnl_packet_obj->host_sys_buf = host_sys_buf;
209 lst_put_tail(pchnl->pio_requests, 188
210 (struct list_head *)chnl_packet_obj); 189 /*
211 pchnl->cio_reqs++; 190 * Note: for dma chans dw_dsp_addr contains dsp address
212 DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets); 191 * of SM buffer.
213 /* 192 */
214 * If end of stream, update the channel state to prevent 193 DBC_ASSERT(chnl_mgr_obj->word_size != 0);
215 * more IOR's. 194 /* DSP address */
216 */ 195 chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size;
217 if (is_eos) 196 chnl_packet_obj->byte_size = byte_size;
218 pchnl->dw_state |= CHNL_STATEEOS; 197 chnl_packet_obj->buf_size = buf_size;
219 198 /* Only valid for output channel */
220 /* Legacy DSM Processor-Copy */ 199 chnl_packet_obj->arg = dw_arg;
221 DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY); 200 chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
222 /* Request IO from the DSP */ 201 CHNL_IOCSTATCOMPLETE);
223 io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl, 202 list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests);
224 (CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT : 203 pchnl->cio_reqs++;
225 IO_OUTPUT), &mb_val); 204 DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
226 sched_dpc = true; 205 /*
227 206 * If end of stream, update the channel state to prevent
228 } 207 * more IOR's.
208 */
209 if (is_eos)
210 pchnl->state |= CHNL_STATEEOS;
211
212 /* Legacy DSM Processor-Copy */
213 DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
214 /* Request IO from the DSP */
215 io_request_chnl(chnl_mgr_obj->iomgr, pchnl,
216 (CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
217 IO_OUTPUT), &mb_val);
218 sched_dpc = true;
219out:
229 omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX); 220 omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
230 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock); 221 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
231 if (mb_val != 0) 222 if (mb_val != 0)
@@ -233,9 +224,8 @@ func_cont:
233 224
234 /* Schedule a DPC, to do the actual data transfer */ 225 /* Schedule a DPC, to do the actual data transfer */
235 if (sched_dpc) 226 if (sched_dpc)
236 iosm_schedule(chnl_mgr_obj->hio_mgr); 227 iosm_schedule(chnl_mgr_obj->iomgr);
237 228
238func_end:
239 return status; 229 return status;
240} 230}
241 231
@@ -250,59 +240,55 @@ func_end:
250 */ 240 */
251int bridge_chnl_cancel_io(struct chnl_object *chnl_obj) 241int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
252{ 242{
253 int status = 0;
254 struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; 243 struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
255 u32 chnl_id = -1; 244 u32 chnl_id = -1;
256 s8 chnl_mode; 245 s8 chnl_mode;
257 struct chnl_irp *chnl_packet_obj; 246 struct chnl_irp *chirp, *tmp;
258 struct chnl_mgr *chnl_mgr_obj = NULL; 247 struct chnl_mgr *chnl_mgr_obj = NULL;
259 248
260 /* Check args: */ 249 /* Check args: */
261 if (pchnl && pchnl->chnl_mgr_obj) { 250 if (!pchnl || !pchnl->chnl_mgr_obj)
262 chnl_id = pchnl->chnl_id; 251 return -EFAULT;
263 chnl_mode = pchnl->chnl_mode; 252
264 chnl_mgr_obj = pchnl->chnl_mgr_obj; 253 chnl_id = pchnl->chnl_id;
265 } else { 254 chnl_mode = pchnl->chnl_mode;
266 status = -EFAULT; 255 chnl_mgr_obj = pchnl->chnl_mgr_obj;
267 }
268 if (status)
269 goto func_end;
270 256
271 /* Mark this channel as cancelled, to prevent further IORequests or 257 /* Mark this channel as cancelled, to prevent further IORequests or
272 * IORequests or dispatching. */ 258 * IORequests or dispatching. */
273 spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock); 259 spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
274 pchnl->dw_state |= CHNL_STATECANCEL; 260
275 if (LST_IS_EMPTY(pchnl->pio_requests)) 261 pchnl->state |= CHNL_STATECANCEL;
276 goto func_cont; 262
263 if (list_empty(&pchnl->io_requests)) {
264 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
265 return 0;
266 }
277 267
278 if (pchnl->chnl_type == CHNL_PCPY) { 268 if (pchnl->chnl_type == CHNL_PCPY) {
279 /* Indicate we have no more buffers available for transfer: */ 269 /* Indicate we have no more buffers available for transfer: */
280 if (CHNL_IS_INPUT(pchnl->chnl_mode)) { 270 if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
281 io_cancel_chnl(chnl_mgr_obj->hio_mgr, chnl_id); 271 io_cancel_chnl(chnl_mgr_obj->iomgr, chnl_id);
282 } else { 272 } else {
283 /* Record that we no longer have output buffers 273 /* Record that we no longer have output buffers
284 * available: */ 274 * available: */
285 chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id); 275 chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
286 } 276 }
287 } 277 }
288 /* Move all IOR's to IOC queue: */ 278 /* Move all IOR's to IOC queue: */
289 while (!LST_IS_EMPTY(pchnl->pio_requests)) { 279 list_for_each_entry_safe(chirp, tmp, &pchnl->io_requests, link) {
290 chnl_packet_obj = 280 list_del(&chirp->link);
291 (struct chnl_irp *)lst_get_head(pchnl->pio_requests); 281 chirp->byte_size = 0;
292 if (chnl_packet_obj) { 282 chirp->status |= CHNL_IOCSTATCANCEL;
293 chnl_packet_obj->byte_size = 0; 283 list_add_tail(&chirp->link, &pchnl->io_completions);
294 chnl_packet_obj->status |= CHNL_IOCSTATCANCEL; 284 pchnl->cio_cs++;
295 lst_put_tail(pchnl->pio_completions, 285 pchnl->cio_reqs--;
296 (struct list_head *)chnl_packet_obj); 286 DBC_ASSERT(pchnl->cio_reqs >= 0);
297 pchnl->cio_cs++;
298 pchnl->cio_reqs--;
299 DBC_ASSERT(pchnl->cio_reqs >= 0);
300 }
301 } 287 }
302func_cont: 288
303 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock); 289 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
304func_end: 290
305 return status; 291 return 0;
306} 292}
307 293
308/* 294/*
@@ -319,59 +305,43 @@ int bridge_chnl_close(struct chnl_object *chnl_obj)
319 struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; 305 struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
320 306
321 /* Check args: */ 307 /* Check args: */
322 if (!pchnl) { 308 if (!pchnl)
323 status = -EFAULT; 309 return -EFAULT;
324 goto func_cont; 310 /* Cancel IO: this ensures no further IO requests or notifications */
325 } 311 status = bridge_chnl_cancel_io(chnl_obj);
326 { 312 if (status)
327 /* Cancel IO: this ensures no further IO requests or 313 return status;
328 * notifications. */ 314 /* Assert I/O on this channel is now cancelled: Protects from io_dpc */
329 status = bridge_chnl_cancel_io(chnl_obj); 315 DBC_ASSERT((pchnl->state & CHNL_STATECANCEL));
316 /* Invalidate channel object: Protects from CHNL_GetIOCompletion() */
317 /* Free the slot in the channel manager: */
318 pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL;
319 spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
320 pchnl->chnl_mgr_obj->open_channels -= 1;
321 spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
322 if (pchnl->ntfy_obj) {
323 ntfy_delete(pchnl->ntfy_obj);
324 kfree(pchnl->ntfy_obj);
325 pchnl->ntfy_obj = NULL;
330 } 326 }
331func_cont: 327 /* Reset channel event: (NOTE: user_event freed in user context) */
332 if (!status) { 328 if (pchnl->sync_event) {
333 /* Assert I/O on this channel is now cancelled: Protects 329 sync_reset_event(pchnl->sync_event);
334 * from io_dpc. */ 330 kfree(pchnl->sync_event);
335 DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL)); 331 pchnl->sync_event = NULL;
336 /* Invalidate channel object: Protects from
337 * CHNL_GetIOCompletion(). */
338 /* Free the slot in the channel manager: */
339 pchnl->chnl_mgr_obj->ap_channel[pchnl->chnl_id] = NULL;
340 spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
341 pchnl->chnl_mgr_obj->open_channels -= 1;
342 spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
343 if (pchnl->ntfy_obj) {
344 ntfy_delete(pchnl->ntfy_obj);
345 kfree(pchnl->ntfy_obj);
346 pchnl->ntfy_obj = NULL;
347 }
348 /* Reset channel event: (NOTE: user_event freed in user
349 * context.). */
350 if (pchnl->sync_event) {
351 sync_reset_event(pchnl->sync_event);
352 kfree(pchnl->sync_event);
353 pchnl->sync_event = NULL;
354 }
355 /* Free I/O request and I/O completion queues: */
356 if (pchnl->pio_completions) {
357 free_chirp_list(pchnl->pio_completions);
358 pchnl->pio_completions = NULL;
359 pchnl->cio_cs = 0;
360 }
361 if (pchnl->pio_requests) {
362 free_chirp_list(pchnl->pio_requests);
363 pchnl->pio_requests = NULL;
364 pchnl->cio_reqs = 0;
365 }
366 if (pchnl->free_packets_list) {
367 free_chirp_list(pchnl->free_packets_list);
368 pchnl->free_packets_list = NULL;
369 }
370 /* Release channel object. */
371 kfree(pchnl);
372 pchnl = NULL;
373 } 332 }
374 DBC_ENSURE(status || !pchnl); 333 /* Free I/O request and I/O completion queues: */
334 free_chirp_list(&pchnl->io_completions);
335 pchnl->cio_cs = 0;
336
337 free_chirp_list(&pchnl->io_requests);
338 pchnl->cio_reqs = 0;
339
340 free_chirp_list(&pchnl->free_packets_list);
341
342 /* Release channel object. */
343 kfree(pchnl);
344
375 return status; 345 return status;
376} 346}
377 347
@@ -407,18 +377,18 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr,
407 DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS); 377 DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
408 max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY; 378 max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
409 /* Create array of channels */ 379 /* Create array of channels */
410 chnl_mgr_obj->ap_channel = kzalloc(sizeof(struct chnl_object *) 380 chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *)
411 * max_channels, GFP_KERNEL); 381 * max_channels, GFP_KERNEL);
412 if (chnl_mgr_obj->ap_channel) { 382 if (chnl_mgr_obj->channels) {
413 /* Initialize chnl_mgr object */ 383 /* Initialize chnl_mgr object */
414 chnl_mgr_obj->dw_type = CHNL_TYPESM; 384 chnl_mgr_obj->type = CHNL_TYPESM;
415 chnl_mgr_obj->word_size = mgr_attrts->word_size; 385 chnl_mgr_obj->word_size = mgr_attrts->word_size;
416 /* Total # chnls supported */ 386 /* Total # chnls supported */
417 chnl_mgr_obj->max_channels = max_channels; 387 chnl_mgr_obj->max_channels = max_channels;
418 chnl_mgr_obj->open_channels = 0; 388 chnl_mgr_obj->open_channels = 0;
419 chnl_mgr_obj->dw_output_mask = 0; 389 chnl_mgr_obj->output_mask = 0;
420 chnl_mgr_obj->dw_last_output = 0; 390 chnl_mgr_obj->last_output = 0;
421 chnl_mgr_obj->hdev_obj = hdev_obj; 391 chnl_mgr_obj->dev_obj = hdev_obj;
422 spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock); 392 spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
423 } else { 393 } else {
424 status = -ENOMEM; 394 status = -ENOMEM;
@@ -453,7 +423,7 @@ int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
453 for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels; 423 for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
454 chnl_id++) { 424 chnl_id++) {
455 status = 425 status =
456 bridge_chnl_close(chnl_mgr_obj->ap_channel 426 bridge_chnl_close(chnl_mgr_obj->channels
457 [chnl_id]); 427 [chnl_id]);
458 if (status) 428 if (status)
459 dev_dbg(bridge, "%s: Error status 0x%x\n", 429 dev_dbg(bridge, "%s: Error status 0x%x\n",
@@ -461,10 +431,10 @@ int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
461 } 431 }
462 432
463 /* Free channel manager object: */ 433 /* Free channel manager object: */
464 kfree(chnl_mgr_obj->ap_channel); 434 kfree(chnl_mgr_obj->channels);
465 435
466 /* Set hchnl_mgr to NULL in device object. */ 436 /* Set hchnl_mgr to NULL in device object. */
467 dev_set_chnl_mgr(chnl_mgr_obj->hdev_obj, NULL); 437 dev_set_chnl_mgr(chnl_mgr_obj->dev_obj, NULL);
468 /* Free this Chnl Mgr object: */ 438 /* Free this Chnl Mgr object: */
469 kfree(hchnl_mgr); 439 kfree(hchnl_mgr);
470 } else { 440 } else {
@@ -505,7 +475,7 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
505 && (pchnl->chnl_type == CHNL_PCPY)) { 475 && (pchnl->chnl_type == CHNL_PCPY)) {
506 /* Wait for IO completions, up to the specified 476 /* Wait for IO completions, up to the specified
507 * timeout: */ 477 * timeout: */
508 while (!LST_IS_EMPTY(pchnl->pio_requests) && !status) { 478 while (!list_empty(&pchnl->io_requests) && !status) {
509 status = bridge_chnl_get_ioc(chnl_obj, 479 status = bridge_chnl_get_ioc(chnl_obj,
510 timeout, &chnl_ioc_obj); 480 timeout, &chnl_ioc_obj);
511 if (status) 481 if (status)
@@ -518,10 +488,10 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
518 } else { 488 } else {
519 status = bridge_chnl_cancel_io(chnl_obj); 489 status = bridge_chnl_cancel_io(chnl_obj);
520 /* Now, leave the channel in the ready state: */ 490 /* Now, leave the channel in the ready state: */
521 pchnl->dw_state &= ~CHNL_STATECANCEL; 491 pchnl->state &= ~CHNL_STATECANCEL;
522 } 492 }
523 } 493 }
524 DBC_ENSURE(status || LST_IS_EMPTY(pchnl->pio_requests)); 494 DBC_ENSURE(status || list_empty(&pchnl->io_requests));
525 return status; 495 return status;
526} 496}
527 497
@@ -538,16 +508,16 @@ int bridge_chnl_get_info(struct chnl_object *chnl_obj,
538 if (channel_info != NULL) { 508 if (channel_info != NULL) {
539 if (pchnl) { 509 if (pchnl) {
540 /* Return the requested information: */ 510 /* Return the requested information: */
541 channel_info->hchnl_mgr = pchnl->chnl_mgr_obj; 511 channel_info->chnl_mgr = pchnl->chnl_mgr_obj;
542 channel_info->event_obj = pchnl->user_event; 512 channel_info->event_obj = pchnl->user_event;
543 channel_info->cnhl_id = pchnl->chnl_id; 513 channel_info->cnhl_id = pchnl->chnl_id;
544 channel_info->dw_mode = pchnl->chnl_mode; 514 channel_info->mode = pchnl->chnl_mode;
545 channel_info->bytes_tx = pchnl->bytes_moved; 515 channel_info->bytes_tx = pchnl->bytes_moved;
546 channel_info->process = pchnl->process; 516 channel_info->process = pchnl->process;
547 channel_info->sync_event = pchnl->sync_event; 517 channel_info->sync_event = pchnl->sync_event;
548 channel_info->cio_cs = pchnl->cio_cs; 518 channel_info->cio_cs = pchnl->cio_cs;
549 channel_info->cio_reqs = pchnl->cio_reqs; 519 channel_info->cio_reqs = pchnl->cio_reqs;
550 channel_info->dw_state = pchnl->dw_state; 520 channel_info->state = pchnl->state;
551 } else { 521 } else {
552 status = -EFAULT; 522 status = -EFAULT;
553 } 523 }
@@ -581,7 +551,7 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
581 if (!chan_ioc || !pchnl) { 551 if (!chan_ioc || !pchnl) {
582 status = -EFAULT; 552 status = -EFAULT;
583 } else if (timeout == CHNL_IOCNOWAIT) { 553 } else if (timeout == CHNL_IOCNOWAIT) {
584 if (LST_IS_EMPTY(pchnl->pio_completions)) 554 if (list_empty(&pchnl->io_completions))
585 status = -EREMOTEIO; 555 status = -EREMOTEIO;
586 556
587 } 557 }
@@ -596,7 +566,7 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
596 566
597 ioc.status = CHNL_IOCSTATCOMPLETE; 567 ioc.status = CHNL_IOCSTATCOMPLETE;
598 if (timeout != 568 if (timeout !=
599 CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) { 569 CHNL_IOCNOWAIT && list_empty(&pchnl->io_completions)) {
600 if (timeout == CHNL_IOCINFINITE) 570 if (timeout == CHNL_IOCINFINITE)
601 timeout = SYNC_INFINITE; 571 timeout = SYNC_INFINITE;
602 572
@@ -611,7 +581,7 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
611 * fails due to unkown causes. */ 581 * fails due to unkown causes. */
612 /* Even though Wait failed, there may be something in 582 /* Even though Wait failed, there may be something in
613 * the Q: */ 583 * the Q: */
614 if (LST_IS_EMPTY(pchnl->pio_completions)) { 584 if (list_empty(&pchnl->io_completions)) {
615 ioc.status |= CHNL_IOCSTATCANCEL; 585 ioc.status |= CHNL_IOCSTATCANCEL;
616 dequeue_ioc = false; 586 dequeue_ioc = false;
617 } 587 }
@@ -622,38 +592,34 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
622 omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX); 592 omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
623 if (dequeue_ioc) { 593 if (dequeue_ioc) {
624 /* Dequeue IOC and set chan_ioc; */ 594 /* Dequeue IOC and set chan_ioc; */
625 DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions)); 595 DBC_ASSERT(!list_empty(&pchnl->io_completions));
626 chnl_packet_obj = 596 chnl_packet_obj = list_first_entry(&pchnl->io_completions,
627 (struct chnl_irp *)lst_get_head(pchnl->pio_completions); 597 struct chnl_irp, link);
598 list_del(&chnl_packet_obj->link);
628 /* Update chan_ioc from channel state and chirp: */ 599 /* Update chan_ioc from channel state and chirp: */
629 if (chnl_packet_obj) { 600 pchnl->cio_cs--;
630 pchnl->cio_cs--; 601 /*
631 /* If this is a zero-copy channel, then set IOC's pbuf 602 * If this is a zero-copy channel, then set IOC's pbuf
632 * to the DSP's address. This DSP address will get 603 * to the DSP's address. This DSP address will get
633 * translated to user's virtual addr later. */ 604 * translated to user's virtual addr later.
634 { 605 */
635 host_sys_buf = chnl_packet_obj->host_sys_buf; 606 host_sys_buf = chnl_packet_obj->host_sys_buf;
636 ioc.pbuf = chnl_packet_obj->host_user_buf; 607 ioc.buf = chnl_packet_obj->host_user_buf;
637 } 608 ioc.byte_size = chnl_packet_obj->byte_size;
638 ioc.byte_size = chnl_packet_obj->byte_size; 609 ioc.buf_size = chnl_packet_obj->buf_size;
639 ioc.buf_size = chnl_packet_obj->buf_size; 610 ioc.arg = chnl_packet_obj->arg;
640 ioc.dw_arg = chnl_packet_obj->dw_arg; 611 ioc.status |= chnl_packet_obj->status;
641 ioc.status |= chnl_packet_obj->status; 612 /* Place the used chirp on the free list: */
642 /* Place the used chirp on the free list: */ 613 list_add_tail(&chnl_packet_obj->link,
643 lst_put_tail(pchnl->free_packets_list, 614 &pchnl->free_packets_list);
644 (struct list_head *)chnl_packet_obj);
645 } else {
646 ioc.pbuf = NULL;
647 ioc.byte_size = 0;
648 }
649 } else { 615 } else {
650 ioc.pbuf = NULL; 616 ioc.buf = NULL;
651 ioc.byte_size = 0; 617 ioc.byte_size = 0;
652 ioc.dw_arg = 0; 618 ioc.arg = 0;
653 ioc.buf_size = 0; 619 ioc.buf_size = 0;
654 } 620 }
655 /* Ensure invariant: If any IOC's are queued for this channel... */ 621 /* Ensure invariant: If any IOC's are queued for this channel... */
656 if (!LST_IS_EMPTY(pchnl->pio_completions)) { 622 if (!list_empty(&pchnl->io_completions)) {
657 /* Since DSPStream_Reclaim() does not take a timeout 623 /* Since DSPStream_Reclaim() does not take a timeout
658 * parameter, we pass the stream's timeout value to 624 * parameter, we pass the stream's timeout value to
659 * bridge_chnl_get_ioc. We cannot determine whether or not 625 * bridge_chnl_get_ioc. We cannot determine whether or not
@@ -674,11 +640,11 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
674 spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock); 640 spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
675 if (dequeue_ioc 641 if (dequeue_ioc
676 && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) { 642 && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
677 if (!(ioc.pbuf < (void *)USERMODE_ADDR)) 643 if (!(ioc.buf < (void *)USERMODE_ADDR))
678 goto func_cont; 644 goto func_cont;
679 645
680 /* If the addr is in user mode, then copy it */ 646 /* If the addr is in user mode, then copy it */
681 if (!host_sys_buf || !ioc.pbuf) { 647 if (!host_sys_buf || !ioc.buf) {
682 status = -EFAULT; 648 status = -EFAULT;
683 goto func_cont; 649 goto func_cont;
684 } 650 }
@@ -686,7 +652,7 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
686 goto func_cont1; 652 goto func_cont1;
687 653
688 /*host_user_buf */ 654 /*host_user_buf */
689 status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size); 655 status = copy_to_user(ioc.buf, host_sys_buf, ioc.byte_size);
690 if (status) { 656 if (status) {
691 if (current->flags & PF_EXITING) 657 if (current->flags & PF_EXITING)
692 status = 0; 658 status = 0;
@@ -710,32 +676,22 @@ func_end:
710int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id, 676int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
711 struct chnl_mgrinfo *mgr_info) 677 struct chnl_mgrinfo *mgr_info)
712{ 678{
713 int status = 0;
714 struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr; 679 struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;
715 680
716 if (mgr_info != NULL) { 681 if (!mgr_info || !hchnl_mgr)
717 if (ch_id <= CHNL_MAXCHANNELS) { 682 return -EFAULT;
718 if (hchnl_mgr) {
719 /* Return the requested information: */
720 mgr_info->chnl_obj =
721 chnl_mgr_obj->ap_channel[ch_id];
722 mgr_info->open_channels =
723 chnl_mgr_obj->open_channels;
724 mgr_info->dw_type = chnl_mgr_obj->dw_type;
725 /* total # of chnls */
726 mgr_info->max_channels =
727 chnl_mgr_obj->max_channels;
728 } else {
729 status = -EFAULT;
730 }
731 } else {
732 status = -ECHRNG;
733 }
734 } else {
735 status = -EFAULT;
736 }
737 683
738 return status; 684 if (ch_id > CHNL_MAXCHANNELS)
685 return -ECHRNG;
686
687 /* Return the requested information: */
688 mgr_info->chnl_obj = chnl_mgr_obj->channels[ch_id];
689 mgr_info->open_channels = chnl_mgr_obj->open_channels;
690 mgr_info->type = chnl_mgr_obj->type;
691 /* total # of chnls */
692 mgr_info->max_channels = chnl_mgr_obj->max_channels;
693
694 return 0;
739} 695}
740 696
741/* 697/*
@@ -762,7 +718,7 @@ int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
762 718
763 /* Reset the byte count and put channel back in ready state. */ 719 /* Reset the byte count and put channel back in ready state. */
764 chnl_obj->bytes_moved = 0; 720 chnl_obj->bytes_moved = 0;
765 chnl_obj->dw_state &= ~CHNL_STATECANCEL; 721 chnl_obj->state &= ~CHNL_STATECANCEL;
766 } 722 }
767 723
768 return status; 724 return status;
@@ -785,116 +741,103 @@ int bridge_chnl_open(struct chnl_object **chnl,
785 DBC_REQUIRE(pattrs != NULL); 741 DBC_REQUIRE(pattrs != NULL);
786 DBC_REQUIRE(hchnl_mgr != NULL); 742 DBC_REQUIRE(hchnl_mgr != NULL);
787 *chnl = NULL; 743 *chnl = NULL;
744
788 /* Validate Args: */ 745 /* Validate Args: */
789 if (pattrs->uio_reqs == 0) { 746 if (!pattrs->uio_reqs)
790 status = -EINVAL; 747 return -EINVAL;
748
749 if (!hchnl_mgr)
750 return -EFAULT;
751
752 if (ch_id != CHNL_PICKFREE) {
753 if (ch_id >= chnl_mgr_obj->max_channels)
754 return -ECHRNG;
755 if (chnl_mgr_obj->channels[ch_id] != NULL)
756 return -EALREADY;
791 } else { 757 } else {
792 if (!hchnl_mgr) { 758 /* Check for free channel */
793 status = -EFAULT; 759 status = search_free_channel(chnl_mgr_obj, &ch_id);
794 } else { 760 if (status)
795 if (ch_id != CHNL_PICKFREE) { 761 return status;
796 if (ch_id >= chnl_mgr_obj->max_channels)
797 status = -ECHRNG;
798 else if (chnl_mgr_obj->ap_channel[ch_id] !=
799 NULL)
800 status = -EALREADY;
801 } else {
802 /* Check for free channel */
803 status =
804 search_free_channel(chnl_mgr_obj, &ch_id);
805 }
806 }
807 } 762 }
808 if (status)
809 goto func_end;
810 763
811 DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels); 764 DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);
765
812 /* Create channel object: */ 766 /* Create channel object: */
813 pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL); 767 pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
814 if (!pchnl) { 768 if (!pchnl)
815 status = -ENOMEM; 769 return -ENOMEM;
816 goto func_end; 770
817 }
818 /* Protect queues from io_dpc: */ 771 /* Protect queues from io_dpc: */
819 pchnl->dw_state = CHNL_STATECANCEL; 772 pchnl->state = CHNL_STATECANCEL;
773
820 /* Allocate initial IOR and IOC queues: */ 774 /* Allocate initial IOR and IOC queues: */
821 pchnl->free_packets_list = create_chirp_list(pattrs->uio_reqs); 775 status = create_chirp_list(&pchnl->free_packets_list,
822 pchnl->pio_requests = create_chirp_list(0); 776 pattrs->uio_reqs);
823 pchnl->pio_completions = create_chirp_list(0); 777 if (status)
778 goto out_err;
779
780 INIT_LIST_HEAD(&pchnl->io_requests);
781 INIT_LIST_HEAD(&pchnl->io_completions);
782
824 pchnl->chnl_packets = pattrs->uio_reqs; 783 pchnl->chnl_packets = pattrs->uio_reqs;
825 pchnl->cio_cs = 0; 784 pchnl->cio_cs = 0;
826 pchnl->cio_reqs = 0; 785 pchnl->cio_reqs = 0;
786
827 sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL); 787 sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
828 if (sync_event) 788 if (!sync_event) {
829 sync_init_event(sync_event);
830 else
831 status = -ENOMEM; 789 status = -ENOMEM;
832 790 goto out_err;
833 if (!status) {
834 pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
835 GFP_KERNEL);
836 if (pchnl->ntfy_obj)
837 ntfy_init(pchnl->ntfy_obj);
838 else
839 status = -ENOMEM;
840 } 791 }
792 sync_init_event(sync_event);
841 793
842 if (!status) { 794 pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
843 if (pchnl->pio_completions && pchnl->pio_requests && 795 if (!pchnl->ntfy_obj) {
844 pchnl->free_packets_list) { 796 status = -ENOMEM;
845 /* Initialize CHNL object fields: */ 797 goto out_err;
846 pchnl->chnl_mgr_obj = chnl_mgr_obj; 798 }
847 pchnl->chnl_id = ch_id; 799 ntfy_init(pchnl->ntfy_obj);
848 pchnl->chnl_mode = chnl_mode; 800
849 pchnl->user_event = sync_event; 801 /* Initialize CHNL object fields: */
850 pchnl->sync_event = sync_event; 802 pchnl->chnl_mgr_obj = chnl_mgr_obj;
851 /* Get the process handle */ 803 pchnl->chnl_id = ch_id;
852 pchnl->process = current->tgid; 804 pchnl->chnl_mode = chnl_mode;
853 pchnl->pcb_arg = 0; 805 pchnl->user_event = sync_event;
854 pchnl->bytes_moved = 0; 806 pchnl->sync_event = sync_event;
855 /* Default to proc-copy */ 807 /* Get the process handle */
856 pchnl->chnl_type = CHNL_PCPY; 808 pchnl->process = current->tgid;
857 } else { 809 pchnl->cb_arg = 0;
858 status = -ENOMEM; 810 pchnl->bytes_moved = 0;
859 } 811 /* Default to proc-copy */
860 } 812 pchnl->chnl_type = CHNL_PCPY;
813
814 /* Insert channel object in channel manager: */
815 chnl_mgr_obj->channels[pchnl->chnl_id] = pchnl;
816 spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
817 chnl_mgr_obj->open_channels++;
818 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
819 /* Return result... */
820 pchnl->state = CHNL_STATEREADY;
821 *chnl = pchnl;
861 822
862 if (status) { 823 return status;
863 /* Free memory */ 824
864 if (pchnl->pio_completions) { 825out_err:
865 free_chirp_list(pchnl->pio_completions); 826 /* Free memory */
866 pchnl->pio_completions = NULL; 827 free_chirp_list(&pchnl->io_completions);
867 pchnl->cio_cs = 0; 828 free_chirp_list(&pchnl->io_requests);
868 } 829 free_chirp_list(&pchnl->free_packets_list);
869 if (pchnl->pio_requests) { 830
870 free_chirp_list(pchnl->pio_requests); 831 if (sync_event)
871 pchnl->pio_requests = NULL;
872 }
873 if (pchnl->free_packets_list) {
874 free_chirp_list(pchnl->free_packets_list);
875 pchnl->free_packets_list = NULL;
876 }
877 kfree(sync_event); 832 kfree(sync_event);
878 sync_event = NULL;
879 833
880 if (pchnl->ntfy_obj) { 834 if (pchnl->ntfy_obj) {
881 ntfy_delete(pchnl->ntfy_obj); 835 ntfy_delete(pchnl->ntfy_obj);
882 kfree(pchnl->ntfy_obj); 836 kfree(pchnl->ntfy_obj);
883 pchnl->ntfy_obj = NULL; 837 pchnl->ntfy_obj = NULL;
884 }
885 kfree(pchnl);
886 } else {
887 /* Insert channel object in channel manager: */
888 chnl_mgr_obj->ap_channel[pchnl->chnl_id] = pchnl;
889 spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
890 chnl_mgr_obj->open_channels++;
891 spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
892 /* Return result... */
893 pchnl->dw_state = CHNL_STATEREADY;
894 *chnl = pchnl;
895 } 838 }
896func_end: 839 kfree(pchnl);
897 DBC_ENSURE((!status && pchnl) || (*chnl == NULL)); 840
898 return status; 841 return status;
899} 842}
900 843
@@ -924,37 +867,35 @@ int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
924 * Purpose: 867 * Purpose:
925 * Initialize a queue of channel I/O Request/Completion packets. 868 * Initialize a queue of channel I/O Request/Completion packets.
926 * Parameters: 869 * Parameters:
870 * list: Pointer to a list_head
927 * chirps: Number of Chirps to allocate. 871 * chirps: Number of Chirps to allocate.
928 * Returns: 872 * Returns:
929 * Pointer to queue of IRPs, or NULL. 873 * 0 if successful, error code otherwise.
930 * Requires: 874 * Requires:
931 * Ensures: 875 * Ensures:
932 */ 876 */
933static struct lst_list *create_chirp_list(u32 chirps) 877static int create_chirp_list(struct list_head *list, u32 chirps)
934{ 878{
935 struct lst_list *chirp_list; 879 struct chnl_irp *chirp;
936 struct chnl_irp *chnl_packet_obj;
937 u32 i; 880 u32 i;
938 881
939 chirp_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL); 882 INIT_LIST_HEAD(list);
940 883
941 if (chirp_list) { 884 /* Make N chirps and place on queue. */
942 INIT_LIST_HEAD(&chirp_list->head); 885 for (i = 0; i < chirps; i++) {
943 /* Make N chirps and place on queue. */ 886 chirp = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
944 for (i = 0; (i < chirps) 887 if (!chirp)
945 && ((chnl_packet_obj = make_new_chirp()) != NULL); i++) { 888 break;
946 lst_put_tail(chirp_list, 889 list_add_tail(&chirp->link, list);
947 (struct list_head *)chnl_packet_obj); 890 }
948 }
949 891
950 /* If we couldn't allocate all chirps, free those allocated: */ 892 /* If we couldn't allocate all chirps, free those allocated: */
951 if (i != chirps) { 893 if (i != chirps) {
952 free_chirp_list(chirp_list); 894 free_chirp_list(list);
953 chirp_list = NULL; 895 return -ENOMEM;
954 }
955 } 896 }
956 897
957 return chirp_list; 898 return 0;
958} 899}
959 900
960/* 901/*
@@ -962,31 +903,16 @@ static struct lst_list *create_chirp_list(u32 chirps)
962 * Purpose: 903 * Purpose:
963 * Free the queue of Chirps. 904 * Free the queue of Chirps.
964 */ 905 */
965static void free_chirp_list(struct lst_list *chirp_list) 906static void free_chirp_list(struct list_head *chirp_list)
966{ 907{
967 DBC_REQUIRE(chirp_list != NULL); 908 struct chnl_irp *chirp, *tmp;
968
969 while (!LST_IS_EMPTY(chirp_list))
970 kfree(lst_get_head(chirp_list));
971 909
972 kfree(chirp_list); 910 DBC_REQUIRE(chirp_list != NULL);
973}
974
975/*
976 * ======== make_new_chirp ========
977 * Allocate the memory for a new channel IRP.
978 */
979static struct chnl_irp *make_new_chirp(void)
980{
981 struct chnl_irp *chnl_packet_obj;
982 911
983 chnl_packet_obj = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL); 912 list_for_each_entry_safe(chirp, tmp, chirp_list, link) {
984 if (chnl_packet_obj != NULL) { 913 list_del(&chirp->link);
985 /* lst_init_elem only resets the list's member values. */ 914 kfree(chirp);
986 lst_init_elem(&chnl_packet_obj->link);
987 } 915 }
988
989 return chnl_packet_obj;
990} 916}
991 917
992/* 918/*
@@ -1002,7 +928,7 @@ static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
1002 DBC_REQUIRE(chnl_mgr_obj); 928 DBC_REQUIRE(chnl_mgr_obj);
1003 929
1004 for (i = 0; i < chnl_mgr_obj->max_channels; i++) { 930 for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
1005 if (chnl_mgr_obj->ap_channel[i] == NULL) { 931 if (chnl_mgr_obj->channels[i] == NULL) {
1006 status = 0; 932 status = 0;
1007 *chnl = i; 933 *chnl = i;
1008 break; 934 break;
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 46d17c777b8..589a0554332 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -146,54 +146,6 @@ void dsp_clk_init(void)
146 ssi.sst_fck, ssi.ssr_fck, ssi.ick); 146 ssi.sst_fck, ssi.ssr_fck, ssi.ick);
147} 147}
148 148
149#ifdef CONFIG_OMAP_MCBSP
150static void mcbsp_clk_prepare(bool flag, u8 id)
151{
152 struct cfg_hostres *resources;
153 struct dev_object *hdev_object = NULL;
154 struct bridge_dev_context *bridge_context = NULL;
155 u32 val;
156
157 hdev_object = (struct dev_object *)drv_get_first_dev_object();
158 if (!hdev_object)
159 return;
160
161 dev_get_bridge_context(hdev_object, &bridge_context);
162 if (!bridge_context)
163 return;
164
165 resources = bridge_context->resources;
166 if (!resources)
167 return;
168
169 if (flag) {
170 if (id == DSP_CLK_MCBSP1) {
171 /* set MCBSP1_CLKS, on McBSP1 ON */
172 val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
173 val |= 1 << 2;
174 __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
175 } else if (id == DSP_CLK_MCBSP2) {
176 /* set MCBSP2_CLKS, on McBSP2 ON */
177 val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
178 val |= 1 << 6;
179 __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
180 }
181 } else {
182 if (id == DSP_CLK_MCBSP1) {
183 /* clear MCBSP1_CLKS, on McBSP1 OFF */
184 val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
185 val &= ~(1 << 2);
186 __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
187 } else if (id == DSP_CLK_MCBSP2) {
188 /* clear MCBSP2_CLKS, on McBSP2 OFF */
189 val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
190 val &= ~(1 << 6);
191 __raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
192 }
193 }
194}
195#endif
196
197/** 149/**
198 * dsp_gpt_wait_overflow - set gpt overflow and wait for fixed timeout 150 * dsp_gpt_wait_overflow - set gpt overflow and wait for fixed timeout
199 * @clk_id: GP Timer clock id. 151 * @clk_id: GP Timer clock id.
@@ -257,9 +209,9 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
257 break; 209 break;
258#ifdef CONFIG_OMAP_MCBSP 210#ifdef CONFIG_OMAP_MCBSP
259 case MCBSP_CLK: 211 case MCBSP_CLK:
260 mcbsp_clk_prepare(true, clk_id);
261 omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO); 212 omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO);
262 omap_mcbsp_request(MCBSP_ID(clk_id)); 213 omap_mcbsp_request(MCBSP_ID(clk_id));
214 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC);
263 break; 215 break;
264#endif 216#endif
265 case WDT_CLK: 217 case WDT_CLK:
@@ -334,7 +286,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
334 break; 286 break;
335#ifdef CONFIG_OMAP_MCBSP 287#ifdef CONFIG_OMAP_MCBSP
336 case MCBSP_CLK: 288 case MCBSP_CLK:
337 mcbsp_clk_prepare(false, clk_id); 289 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PRCM_SRC);
338 omap_mcbsp_free(MCBSP_ID(clk_id)); 290 omap_mcbsp_free(MCBSP_ID(clk_id));
339 break; 291 break;
340#endif 292#endif
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 27e0aa81a58..694c0e5e55c 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -24,6 +24,7 @@
24 * function. 24 * function.
25 */ 25 */
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/list.h>
27 28
28/* Host OS */ 29/* Host OS */
29#include <dspbridge/host_os.h> 30#include <dspbridge/host_os.h>
@@ -88,39 +89,39 @@
88struct io_mgr { 89struct io_mgr {
89 /* These four fields must be the first fields in a io_mgr_ struct */ 90 /* These four fields must be the first fields in a io_mgr_ struct */
90 /* Bridge device context */ 91 /* Bridge device context */
91 struct bridge_dev_context *hbridge_context; 92 struct bridge_dev_context *bridge_context;
92 /* Function interface to Bridge driver */ 93 /* Function interface to Bridge driver */
93 struct bridge_drv_interface *intf_fxns; 94 struct bridge_drv_interface *intf_fxns;
94 struct dev_object *hdev_obj; /* Device this board represents */ 95 struct dev_object *dev_obj; /* Device this board represents */
95 96
96 /* These fields initialized in bridge_io_create() */ 97 /* These fields initialized in bridge_io_create() */
97 struct chnl_mgr *hchnl_mgr; 98 struct chnl_mgr *chnl_mgr;
98 struct shm *shared_mem; /* Shared Memory control */ 99 struct shm *shared_mem; /* Shared Memory control */
99 u8 *input; /* Address of input channel */ 100 u8 *input; /* Address of input channel */
100 u8 *output; /* Address of output channel */ 101 u8 *output; /* Address of output channel */
101 struct msg_mgr *hmsg_mgr; /* Message manager */ 102 struct msg_mgr *msg_mgr; /* Message manager */
102 /* Msg control for from DSP messages */ 103 /* Msg control for from DSP messages */
103 struct msg_ctrl *msg_input_ctrl; 104 struct msg_ctrl *msg_input_ctrl;
104 /* Msg control for to DSP messages */ 105 /* Msg control for to DSP messages */
105 struct msg_ctrl *msg_output_ctrl; 106 struct msg_ctrl *msg_output_ctrl;
106 u8 *msg_input; /* Address of input messages */ 107 u8 *msg_input; /* Address of input messages */
107 u8 *msg_output; /* Address of output messages */ 108 u8 *msg_output; /* Address of output messages */
108 u32 usm_buf_size; /* Size of a shared memory I/O channel */ 109 u32 sm_buf_size; /* Size of a shared memory I/O channel */
109 bool shared_irq; /* Is this IRQ shared? */ 110 bool shared_irq; /* Is this IRQ shared? */
110 u32 word_size; /* Size in bytes of DSP word */ 111 u32 word_size; /* Size in bytes of DSP word */
111 u16 intr_val; /* Interrupt value */ 112 u16 intr_val; /* Interrupt value */
112 /* Private extnd proc info; mmu setup */ 113 /* Private extnd proc info; mmu setup */
113 struct mgr_processorextinfo ext_proc_info; 114 struct mgr_processorextinfo ext_proc_info;
114 struct cmm_object *hcmm_mgr; /* Shared Mem Mngr */ 115 struct cmm_object *cmm_mgr; /* Shared Mem Mngr */
115 struct work_struct io_workq; /* workqueue */ 116 struct work_struct io_workq; /* workqueue */
116#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) 117#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
117 u32 ul_trace_buffer_begin; /* Trace message start address */ 118 u32 trace_buffer_begin; /* Trace message start address */
118 u32 ul_trace_buffer_end; /* Trace message end address */ 119 u32 trace_buffer_end; /* Trace message end address */
119 u32 ul_trace_buffer_current; /* Trace message current address */ 120 u32 trace_buffer_current; /* Trace message current address */
120 u32 ul_gpp_read_pointer; /* GPP Read pointer to Trace buffer */ 121 u32 gpp_read_pointer; /* GPP Read pointer to Trace buffer */
121 u8 *pmsg; 122 u8 *msg;
122 u32 ul_gpp_va; 123 u32 gpp_va;
123 u32 ul_dsp_va; 124 u32 dsp_va;
124#endif 125#endif
125 /* IO Dpc */ 126 /* IO Dpc */
126 u32 dpc_req; /* Number of requested DPC's. */ 127 u32 dpc_req; /* Number of requested DPC's. */
@@ -167,57 +168,41 @@ int bridge_io_create(struct io_mgr **io_man,
167 struct dev_object *hdev_obj, 168 struct dev_object *hdev_obj,
168 const struct io_attrs *mgr_attrts) 169 const struct io_attrs *mgr_attrts)
169{ 170{
170 int status = 0;
171 struct io_mgr *pio_mgr = NULL; 171 struct io_mgr *pio_mgr = NULL;
172 struct shm *shared_mem = NULL;
173 struct bridge_dev_context *hbridge_context = NULL; 172 struct bridge_dev_context *hbridge_context = NULL;
174 struct cfg_devnode *dev_node_obj; 173 struct cfg_devnode *dev_node_obj;
175 struct chnl_mgr *hchnl_mgr; 174 struct chnl_mgr *hchnl_mgr;
176 u8 dev_type; 175 u8 dev_type;
177 176
178 /* Check requirements */ 177 /* Check requirements */
179 if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0) { 178 if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0)
180 status = -EFAULT; 179 return -EFAULT;
181 goto func_end; 180
182 } 181 *io_man = NULL;
182
183 dev_get_chnl_mgr(hdev_obj, &hchnl_mgr); 183 dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
184 if (!hchnl_mgr || hchnl_mgr->hio_mgr) { 184 if (!hchnl_mgr || hchnl_mgr->iomgr)
185 status = -EFAULT; 185 return -EFAULT;
186 goto func_end; 186
187 }
188 /* 187 /*
189 * Message manager will be created when a file is loaded, since 188 * Message manager will be created when a file is loaded, since
190 * size of message buffer in shared memory is configurable in 189 * size of message buffer in shared memory is configurable in
191 * the base image. 190 * the base image.
192 */ 191 */
193 dev_get_bridge_context(hdev_obj, &hbridge_context); 192 dev_get_bridge_context(hdev_obj, &hbridge_context);
194 if (!hbridge_context) { 193 if (!hbridge_context)
195 status = -EFAULT; 194 return -EFAULT;
196 goto func_end; 195
197 }
198 dev_get_dev_type(hdev_obj, &dev_type); 196 dev_get_dev_type(hdev_obj, &dev_type);
199 /*
200 * DSP shared memory area will get set properly when
201 * a program is loaded. They are unknown until a COFF file is
202 * loaded. I chose the value -1 because it was less likely to be
203 * a valid address than 0.
204 */
205 shared_mem = (struct shm *)-1;
206 197
207 /* Allocate IO manager object */ 198 /* Allocate IO manager object */
208 pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL); 199 pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
209 if (pio_mgr == NULL) { 200 if (!pio_mgr)
210 status = -ENOMEM; 201 return -ENOMEM;
211 goto func_end;
212 }
213 202
214 /* Initialize chnl_mgr object */ 203 /* Initialize chnl_mgr object */
215#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) 204 pio_mgr->chnl_mgr = hchnl_mgr;
216 pio_mgr->pmsg = NULL;
217#endif
218 pio_mgr->hchnl_mgr = hchnl_mgr;
219 pio_mgr->word_size = mgr_attrts->word_size; 205 pio_mgr->word_size = mgr_attrts->word_size;
220 pio_mgr->shared_mem = shared_mem;
221 206
222 if (dev_type == DSP_UNIT) { 207 if (dev_type == DSP_UNIT) {
223 /* Create an IO DPC */ 208 /* Create an IO DPC */
@@ -229,29 +214,24 @@ int bridge_io_create(struct io_mgr **io_man,
229 214
230 spin_lock_init(&pio_mgr->dpc_lock); 215 spin_lock_init(&pio_mgr->dpc_lock);
231 216
232 status = dev_get_dev_node(hdev_obj, &dev_node_obj); 217 if (dev_get_dev_node(hdev_obj, &dev_node_obj)) {
218 bridge_io_destroy(pio_mgr);
219 return -EIO;
220 }
233 } 221 }
234 222
235 if (!status) { 223 pio_mgr->bridge_context = hbridge_context;
236 pio_mgr->hbridge_context = hbridge_context; 224 pio_mgr->shared_irq = mgr_attrts->irq_shared;
237 pio_mgr->shared_irq = mgr_attrts->irq_shared; 225 if (dsp_wdt_init()) {
238 if (dsp_wdt_init())
239 status = -EPERM;
240 } else {
241 status = -EIO;
242 }
243func_end:
244 if (status) {
245 /* Cleanup */
246 bridge_io_destroy(pio_mgr); 226 bridge_io_destroy(pio_mgr);
247 if (io_man) 227 return -EPERM;
248 *io_man = NULL;
249 } else {
250 /* Return IO manager object to caller... */
251 hchnl_mgr->hio_mgr = pio_mgr;
252 *io_man = pio_mgr;
253 } 228 }
254 return status; 229
230 /* Return IO manager object to caller... */
231 hchnl_mgr->iomgr = pio_mgr;
232 *io_man = pio_mgr;
233
234 return 0;
255} 235}
256 236
257/* 237/*
@@ -267,7 +247,7 @@ int bridge_io_destroy(struct io_mgr *hio_mgr)
267 tasklet_kill(&hio_mgr->dpc_tasklet); 247 tasklet_kill(&hio_mgr->dpc_tasklet);
268 248
269#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) 249#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
270 kfree(hio_mgr->pmsg); 250 kfree(hio_mgr->msg);
271#endif 251#endif
272 dsp_wdt_exit(); 252 dsp_wdt_exit();
273 /* Free this IO manager object */ 253 /* Free this IO manager object */
@@ -326,7 +306,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
326 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB 306 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
327 }; 307 };
328 308
329 status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context); 309 status = dev_get_bridge_context(hio_mgr->dev_obj, &pbridge_context);
330 if (!pbridge_context) { 310 if (!pbridge_context) {
331 status = -EFAULT; 311 status = -EFAULT;
332 goto func_end; 312 goto func_end;
@@ -337,15 +317,15 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
337 status = -EFAULT; 317 status = -EFAULT;
338 goto func_end; 318 goto func_end;
339 } 319 }
340 status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man); 320 status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
341 if (!cod_man) { 321 if (!cod_man) {
342 status = -EFAULT; 322 status = -EFAULT;
343 goto func_end; 323 goto func_end;
344 } 324 }
345 hchnl_mgr = hio_mgr->hchnl_mgr; 325 hchnl_mgr = hio_mgr->chnl_mgr;
346 /* The message manager is destroyed when the board is stopped. */ 326 /* The message manager is destroyed when the board is stopped. */
347 dev_get_msg_mgr(hio_mgr->hdev_obj, &hio_mgr->hmsg_mgr); 327 dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr);
348 hmsg_mgr = hio_mgr->hmsg_mgr; 328 hmsg_mgr = hio_mgr->msg_mgr;
349 if (!hchnl_mgr || !hmsg_mgr) { 329 if (!hchnl_mgr || !hmsg_mgr) {
350 status = -EFAULT; 330 status = -EFAULT;
351 goto func_end; 331 goto func_end;
@@ -437,11 +417,11 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
437 417
438 /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */ 418 /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
439 ndx = 0; 419 ndx = 0;
440 ul_gpp_pa = host_res->dw_mem_phys[1]; 420 ul_gpp_pa = host_res->mem_phys[1];
441 ul_gpp_va = host_res->dw_mem_base[1]; 421 ul_gpp_va = host_res->mem_base[1];
442 /* This is the virtual uncached ioremapped address!!! */ 422 /* This is the virtual uncached ioremapped address!!! */
443 /* Why can't we directly take the DSPVA from the symbols? */ 423 /* Why can't we directly take the DSPVA from the symbols? */
444 ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt; 424 ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt;
445 ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size; 425 ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
446 ul_seg1_size = 426 ul_seg1_size =
447 (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size; 427 (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
@@ -461,9 +441,9 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
461 ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size); 441 ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
462 442
463 if ((ul_seg_size + ul_seg1_size + ul_pad_size) > 443 if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
464 host_res->dw_mem_length[1]) { 444 host_res->mem_length[1]) {
465 pr_err("%s: shm Error, reserved 0x%x required 0x%x\n", 445 pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
466 __func__, host_res->dw_mem_length[1], 446 __func__, host_res->mem_length[1],
467 ul_seg_size + ul_seg1_size + ul_pad_size); 447 ul_seg_size + ul_seg1_size + ul_pad_size);
468 status = -ENOMEM; 448 status = -ENOMEM;
469 } 449 }
@@ -503,7 +483,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
503 1)) == 0)) { 483 1)) == 0)) {
504 status = 484 status =
505 hio_mgr->intf_fxns-> 485 hio_mgr->intf_fxns->
506 pfn_brd_mem_map(hio_mgr->hbridge_context, 486 brd_mem_map(hio_mgr->bridge_context,
507 pa_curr, va_curr, 487 pa_curr, va_curr,
508 page_size[i], map_attrs, 488 page_size[i], map_attrs,
509 NULL); 489 NULL);
@@ -547,38 +527,38 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
547 * This is the physical address written to 527 * This is the physical address written to
548 * DSP MMU. 528 * DSP MMU.
549 */ 529 */
550 ae_proc[ndx].ul_gpp_pa = pa_curr; 530 ae_proc[ndx].gpp_pa = pa_curr;
551 /* 531 /*
552 * This is the virtual uncached ioremapped 532 * This is the virtual uncached ioremapped
553 * address!!! 533 * address!!!
554 */ 534 */
555 ae_proc[ndx].ul_gpp_va = gpp_va_curr; 535 ae_proc[ndx].gpp_va = gpp_va_curr;
556 ae_proc[ndx].ul_dsp_va = 536 ae_proc[ndx].dsp_va =
557 va_curr / hio_mgr->word_size; 537 va_curr / hio_mgr->word_size;
558 ae_proc[ndx].ul_size = page_size[i]; 538 ae_proc[ndx].size = page_size[i];
559 ae_proc[ndx].endianism = HW_LITTLE_ENDIAN; 539 ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
560 ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT; 540 ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
561 ae_proc[ndx].mixed_mode = HW_MMU_CPUES; 541 ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
562 dev_dbg(bridge, "shm MMU TLB entry PA %x" 542 dev_dbg(bridge, "shm MMU TLB entry PA %x"
563 " VA %x DSP_VA %x Size %x\n", 543 " VA %x DSP_VA %x Size %x\n",
564 ae_proc[ndx].ul_gpp_pa, 544 ae_proc[ndx].gpp_pa,
565 ae_proc[ndx].ul_gpp_va, 545 ae_proc[ndx].gpp_va,
566 ae_proc[ndx].ul_dsp_va * 546 ae_proc[ndx].dsp_va *
567 hio_mgr->word_size, page_size[i]); 547 hio_mgr->word_size, page_size[i]);
568 ndx++; 548 ndx++;
569 } else { 549 } else {
570 status = 550 status =
571 hio_mgr->intf_fxns-> 551 hio_mgr->intf_fxns->
572 pfn_brd_mem_map(hio_mgr->hbridge_context, 552 brd_mem_map(hio_mgr->bridge_context,
573 pa_curr, va_curr, 553 pa_curr, va_curr,
574 page_size[i], map_attrs, 554 page_size[i], map_attrs,
575 NULL); 555 NULL);
576 dev_dbg(bridge, 556 dev_dbg(bridge,
577 "shm MMU PTE entry PA %x" 557 "shm MMU PTE entry PA %x"
578 " VA %x DSP_VA %x Size %x\n", 558 " VA %x DSP_VA %x Size %x\n",
579 ae_proc[ndx].ul_gpp_pa, 559 ae_proc[ndx].gpp_pa,
580 ae_proc[ndx].ul_gpp_va, 560 ae_proc[ndx].gpp_va,
581 ae_proc[ndx].ul_dsp_va * 561 ae_proc[ndx].dsp_va *
582 hio_mgr->word_size, page_size[i]); 562 hio_mgr->word_size, page_size[i]);
583 if (status) 563 if (status)
584 goto func_end; 564 goto func_end;
@@ -600,47 +580,47 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
600 * should not conflict with shm entries on MPU or DSP side. 580 * should not conflict with shm entries on MPU or DSP side.
601 */ 581 */
602 for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) { 582 for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
603 if (hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys == 0) 583 if (hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys == 0)
604 continue; 584 continue;
605 585
606 if ((hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys > 586 if ((hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys >
607 ul_gpp_pa - 0x100000 587 ul_gpp_pa - 0x100000
608 && hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys <= 588 && hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys <=
609 ul_gpp_pa + ul_seg_size) 589 ul_gpp_pa + ul_seg_size)
610 || (hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt > 590 || (hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt >
611 ul_dsp_va - 0x100000 / hio_mgr->word_size 591 ul_dsp_va - 0x100000 / hio_mgr->word_size
612 && hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt <= 592 && hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt <=
613 ul_dsp_va + ul_seg_size / hio_mgr->word_size)) { 593 ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
614 dev_dbg(bridge, 594 dev_dbg(bridge,
615 "CDB MMU entry %d conflicts with " 595 "CDB MMU entry %d conflicts with "
616 "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: " 596 "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
617 "GppPa %x, DspVa %x, Bytes %x.\n", i, 597 "GppPa %x, DspVa %x, Bytes %x.\n", i,
618 hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys, 598 hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys,
619 hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt, 599 hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt,
620 ul_gpp_pa, ul_dsp_va, ul_seg_size); 600 ul_gpp_pa, ul_dsp_va, ul_seg_size);
621 status = -EPERM; 601 status = -EPERM;
622 } else { 602 } else {
623 if (ndx < MAX_LOCK_TLB_ENTRIES) { 603 if (ndx < MAX_LOCK_TLB_ENTRIES) {
624 ae_proc[ndx].ul_dsp_va = 604 ae_proc[ndx].dsp_va =
625 hio_mgr->ext_proc_info.ty_tlb[i]. 605 hio_mgr->ext_proc_info.ty_tlb[i].
626 ul_dsp_virt; 606 dsp_virt;
627 ae_proc[ndx].ul_gpp_pa = 607 ae_proc[ndx].gpp_pa =
628 hio_mgr->ext_proc_info.ty_tlb[i]. 608 hio_mgr->ext_proc_info.ty_tlb[i].
629 ul_gpp_phys; 609 gpp_phys;
630 ae_proc[ndx].ul_gpp_va = 0; 610 ae_proc[ndx].gpp_va = 0;
631 /* 1 MB */ 611 /* 1 MB */
632 ae_proc[ndx].ul_size = 0x100000; 612 ae_proc[ndx].size = 0x100000;
633 dev_dbg(bridge, "shm MMU entry PA %x " 613 dev_dbg(bridge, "shm MMU entry PA %x "
634 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, 614 "DSP_VA 0x%x\n", ae_proc[ndx].gpp_pa,
635 ae_proc[ndx].ul_dsp_va); 615 ae_proc[ndx].dsp_va);
636 ndx++; 616 ndx++;
637 } else { 617 } else {
638 status = hio_mgr->intf_fxns->pfn_brd_mem_map 618 status = hio_mgr->intf_fxns->brd_mem_map
639 (hio_mgr->hbridge_context, 619 (hio_mgr->bridge_context,
640 hio_mgr->ext_proc_info.ty_tlb[i]. 620 hio_mgr->ext_proc_info.ty_tlb[i].
641 ul_gpp_phys, 621 gpp_phys,
642 hio_mgr->ext_proc_info.ty_tlb[i]. 622 hio_mgr->ext_proc_info.ty_tlb[i].
643 ul_dsp_virt, 0x100000, map_attrs, 623 dsp_virt, 0x100000, map_attrs,
644 NULL); 624 NULL);
645 } 625 }
646 } 626 }
@@ -657,8 +637,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
657 /* Map the L4 peripherals */ 637 /* Map the L4 peripherals */
658 i = 0; 638 i = 0;
659 while (l4_peripheral_table[i].phys_addr) { 639 while (l4_peripheral_table[i].phys_addr) {
660 status = hio_mgr->intf_fxns->pfn_brd_mem_map 640 status = hio_mgr->intf_fxns->brd_mem_map
661 (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr, 641 (hio_mgr->bridge_context, l4_peripheral_table[i].phys_addr,
662 l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB, 642 l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
663 map_attrs, NULL); 643 map_attrs, NULL);
664 if (status) 644 if (status)
@@ -667,33 +647,33 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
667 } 647 }
668 648
669 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { 649 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
670 ae_proc[i].ul_dsp_va = 0; 650 ae_proc[i].dsp_va = 0;
671 ae_proc[i].ul_gpp_pa = 0; 651 ae_proc[i].gpp_pa = 0;
672 ae_proc[i].ul_gpp_va = 0; 652 ae_proc[i].gpp_va = 0;
673 ae_proc[i].ul_size = 0; 653 ae_proc[i].size = 0;
674 } 654 }
675 /* 655 /*
676 * Set the shm physical address entry (grayed out in CDB file) 656 * Set the shm physical address entry (grayed out in CDB file)
677 * to the virtual uncached ioremapped address of shm reserved 657 * to the virtual uncached ioremapped address of shm reserved
678 * on MPU. 658 * on MPU.
679 */ 659 */
680 hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys = 660 hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys =
681 (ul_gpp_va + ul_seg1_size + ul_pad_size); 661 (ul_gpp_va + ul_seg1_size + ul_pad_size);
682 662
683 /* 663 /*
684 * Need shm Phys addr. IO supports only one DSP for now: 664 * Need shm Phys addr. IO supports only one DSP for now:
685 * num_procs = 1. 665 * num_procs = 1.
686 */ 666 */
687 if (!hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys || num_procs != 1) { 667 if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys || num_procs != 1) {
688 status = -EFAULT; 668 status = -EFAULT;
689 goto func_end; 669 goto func_end;
690 } else { 670 } else {
691 if (ae_proc[0].ul_dsp_va > ul_shm_base) { 671 if (ae_proc[0].dsp_va > ul_shm_base) {
692 status = -EPERM; 672 status = -EPERM;
693 goto func_end; 673 goto func_end;
694 } 674 }
695 /* ul_shm_base may not be at ul_dsp_va address */ 675 /* ul_shm_base may not be at ul_dsp_va address */
696 ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) * 676 ul_shm_base_offset = (ul_shm_base - ae_proc[0].dsp_va) *
697 hio_mgr->word_size; 677 hio_mgr->word_size;
698 /* 678 /*
699 * bridge_dev_ctrl() will set dev context dsp-mmu info. In 679 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
@@ -703,12 +683,12 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
703 */ 683 */
704 684
705 status = 685 status =
706 hio_mgr->intf_fxns->pfn_dev_cntrl(hio_mgr->hbridge_context, 686 hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
707 BRDIOCTL_SETMMUCONFIG, 687 BRDIOCTL_SETMMUCONFIG,
708 ae_proc); 688 ae_proc);
709 if (status) 689 if (status)
710 goto func_end; 690 goto func_end;
711 ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys; 691 ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
712 ul_shm_base += ul_shm_base_offset; 692 ul_shm_base += ul_shm_base_offset;
713 ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base, 693 ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
714 ul_mem_length); 694 ul_mem_length);
@@ -718,14 +698,14 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
718 } 698 }
719 /* Register SM */ 699 /* Register SM */
720 status = 700 status =
721 register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa); 701 register_shm_segs(hio_mgr, cod_man, ae_proc[0].gpp_pa);
722 } 702 }
723 703
724 hio_mgr->shared_mem = (struct shm *)ul_shm_base; 704 hio_mgr->shared_mem = (struct shm *)ul_shm_base;
725 hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm); 705 hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
726 hio_mgr->output = hio_mgr->input + (ul_shm_length - 706 hio_mgr->output = hio_mgr->input + (ul_shm_length -
727 sizeof(struct shm)) / 2; 707 sizeof(struct shm)) / 2;
728 hio_mgr->usm_buf_size = hio_mgr->output - hio_mgr->input; 708 hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input;
729 709
730 /* Set up Shared memory addresses for messaging. */ 710 /* Set up Shared memory addresses for messaging. */
731 hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem 711 hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
@@ -754,45 +734,45 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
754#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) 734#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
755 /* Get the start address of trace buffer */ 735 /* Get the start address of trace buffer */
756 status = cod_get_sym_value(cod_man, SYS_PUTCBEG, 736 status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
757 &hio_mgr->ul_trace_buffer_begin); 737 &hio_mgr->trace_buffer_begin);
758 if (status) { 738 if (status) {
759 status = -EFAULT; 739 status = -EFAULT;
760 goto func_end; 740 goto func_end;
761 } 741 }
762 742
763 hio_mgr->ul_gpp_read_pointer = hio_mgr->ul_trace_buffer_begin = 743 hio_mgr->gpp_read_pointer = hio_mgr->trace_buffer_begin =
764 (ul_gpp_va + ul_seg1_size + ul_pad_size) + 744 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
765 (hio_mgr->ul_trace_buffer_begin - ul_dsp_va); 745 (hio_mgr->trace_buffer_begin - ul_dsp_va);
766 /* Get the end address of trace buffer */ 746 /* Get the end address of trace buffer */
767 status = cod_get_sym_value(cod_man, SYS_PUTCEND, 747 status = cod_get_sym_value(cod_man, SYS_PUTCEND,
768 &hio_mgr->ul_trace_buffer_end); 748 &hio_mgr->trace_buffer_end);
769 if (status) { 749 if (status) {
770 status = -EFAULT; 750 status = -EFAULT;
771 goto func_end; 751 goto func_end;
772 } 752 }
773 hio_mgr->ul_trace_buffer_end = 753 hio_mgr->trace_buffer_end =
774 (ul_gpp_va + ul_seg1_size + ul_pad_size) + 754 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
775 (hio_mgr->ul_trace_buffer_end - ul_dsp_va); 755 (hio_mgr->trace_buffer_end - ul_dsp_va);
776 /* Get the current address of DSP write pointer */ 756 /* Get the current address of DSP write pointer */
777 status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT, 757 status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
778 &hio_mgr->ul_trace_buffer_current); 758 &hio_mgr->trace_buffer_current);
779 if (status) { 759 if (status) {
780 status = -EFAULT; 760 status = -EFAULT;
781 goto func_end; 761 goto func_end;
782 } 762 }
783 hio_mgr->ul_trace_buffer_current = 763 hio_mgr->trace_buffer_current =
784 (ul_gpp_va + ul_seg1_size + ul_pad_size) + 764 (ul_gpp_va + ul_seg1_size + ul_pad_size) +
785 (hio_mgr->ul_trace_buffer_current - ul_dsp_va); 765 (hio_mgr->trace_buffer_current - ul_dsp_va);
786 /* Calculate the size of trace buffer */ 766 /* Calculate the size of trace buffer */
787 kfree(hio_mgr->pmsg); 767 kfree(hio_mgr->msg);
788 hio_mgr->pmsg = kmalloc(((hio_mgr->ul_trace_buffer_end - 768 hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end -
789 hio_mgr->ul_trace_buffer_begin) * 769 hio_mgr->trace_buffer_begin) *
790 hio_mgr->word_size) + 2, GFP_KERNEL); 770 hio_mgr->word_size) + 2, GFP_KERNEL);
791 if (!hio_mgr->pmsg) 771 if (!hio_mgr->msg)
792 status = -ENOMEM; 772 status = -ENOMEM;
793 773
794 hio_mgr->ul_dsp_va = ul_dsp_va; 774 hio_mgr->dsp_va = ul_dsp_va;
795 hio_mgr->ul_gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size); 775 hio_mgr->gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
796 776
797#endif 777#endif
798func_end: 778func_end:
@@ -806,7 +786,7 @@ func_end:
806u32 io_buf_size(struct io_mgr *hio_mgr) 786u32 io_buf_size(struct io_mgr *hio_mgr)
807{ 787{
808 if (hio_mgr) 788 if (hio_mgr)
809 return hio_mgr->usm_buf_size; 789 return hio_mgr->sm_buf_size;
810 else 790 else
811 return 0; 791 return 0;
812} 792}
@@ -827,7 +807,7 @@ void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl)
827 /* Inform DSP that we have no more buffers on this channel */ 807 /* Inform DSP that we have no more buffers on this channel */
828 set_chnl_free(sm, chnl); 808 set_chnl_free(sm, chnl);
829 809
830 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS); 810 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
831func_end: 811func_end:
832 return; 812 return;
833} 813}
@@ -849,7 +829,7 @@ static void io_dispatch_pm(struct io_mgr *pio_mgr)
849 if (parg[0] == MBX_PM_HIBERNATE_EN) { 829 if (parg[0] == MBX_PM_HIBERNATE_EN) {
850 dev_dbg(bridge, "PM: Hibernate command\n"); 830 dev_dbg(bridge, "PM: Hibernate command\n");
851 status = pio_mgr->intf_fxns-> 831 status = pio_mgr->intf_fxns->
852 pfn_dev_cntrl(pio_mgr->hbridge_context, 832 dev_cntrl(pio_mgr->bridge_context,
853 BRDIOCTL_PWR_HIBERNATE, parg); 833 BRDIOCTL_PWR_HIBERNATE, parg);
854 if (status) 834 if (status)
855 pr_err("%s: hibernate cmd failed 0x%x\n", 835 pr_err("%s: hibernate cmd failed 0x%x\n",
@@ -858,7 +838,7 @@ static void io_dispatch_pm(struct io_mgr *pio_mgr)
858 parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt; 838 parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
859 dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]); 839 dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
860 status = pio_mgr->intf_fxns-> 840 status = pio_mgr->intf_fxns->
861 pfn_dev_cntrl(pio_mgr->hbridge_context, 841 dev_cntrl(pio_mgr->bridge_context,
862 BRDIOCTL_CONSTRAINT_REQUEST, parg); 842 BRDIOCTL_CONSTRAINT_REQUEST, parg);
863 if (status) 843 if (status)
864 dev_dbg(bridge, "PM: Failed to set constraint " 844 dev_dbg(bridge, "PM: Failed to set constraint "
@@ -867,7 +847,7 @@ static void io_dispatch_pm(struct io_mgr *pio_mgr)
867 dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n", 847 dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
868 parg[0]); 848 parg[0]);
869 status = pio_mgr->intf_fxns-> 849 status = pio_mgr->intf_fxns->
870 pfn_dev_cntrl(pio_mgr->hbridge_context, 850 dev_cntrl(pio_mgr->bridge_context,
871 BRDIOCTL_CLK_CTRL, parg); 851 BRDIOCTL_CLK_CTRL, parg);
872 if (status) 852 if (status)
873 dev_dbg(bridge, "PM: Failed to ctrl the DSP clk" 853 dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
@@ -892,9 +872,9 @@ void io_dpc(unsigned long ref_data)
892 872
893 if (!pio_mgr) 873 if (!pio_mgr)
894 goto func_end; 874 goto func_end;
895 chnl_mgr_obj = pio_mgr->hchnl_mgr; 875 chnl_mgr_obj = pio_mgr->chnl_mgr;
896 dev_get_msg_mgr(pio_mgr->hdev_obj, &msg_mgr_obj); 876 dev_get_msg_mgr(pio_mgr->dev_obj, &msg_mgr_obj);
897 dev_get_deh_mgr(pio_mgr->hdev_obj, &hdeh_mgr); 877 dev_get_deh_mgr(pio_mgr->dev_obj, &hdeh_mgr);
898 if (!chnl_mgr_obj) 878 if (!chnl_mgr_obj)
899 goto func_end; 879 goto func_end;
900 880
@@ -990,15 +970,15 @@ void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
990 970
991 if (!pchnl || !mbx_val) 971 if (!pchnl || !mbx_val)
992 goto func_end; 972 goto func_end;
993 chnl_mgr_obj = io_manager->hchnl_mgr; 973 chnl_mgr_obj = io_manager->chnl_mgr;
994 sm = io_manager->shared_mem; 974 sm = io_manager->shared_mem;
995 if (io_mode == IO_INPUT) { 975 if (io_mode == IO_INPUT) {
996 /* 976 /*
997 * Assertion fires if CHNL_AddIOReq() called on a stream 977 * Assertion fires if CHNL_AddIOReq() called on a stream
998 * which was cancelled, or attached to a dead board. 978 * which was cancelled, or attached to a dead board.
999 */ 979 */
1000 DBC_ASSERT((pchnl->dw_state == CHNL_STATEREADY) || 980 DBC_ASSERT((pchnl->state == CHNL_STATEREADY) ||
1001 (pchnl->dw_state == CHNL_STATEEOS)); 981 (pchnl->state == CHNL_STATEEOS));
1002 /* Indicate to the DSP we have a buffer available for input */ 982 /* Indicate to the DSP we have a buffer available for input */
1003 set_chnl_busy(sm, pchnl->chnl_id); 983 set_chnl_busy(sm, pchnl->chnl_id);
1004 *mbx_val = MBX_PCPY_CLASS; 984 *mbx_val = MBX_PCPY_CLASS;
@@ -1007,13 +987,13 @@ void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
1007 * This assertion fails if CHNL_AddIOReq() was called on a 987 * This assertion fails if CHNL_AddIOReq() was called on a
1008 * stream which was cancelled, or attached to a dead board. 988 * stream which was cancelled, or attached to a dead board.
1009 */ 989 */
1010 DBC_ASSERT((pchnl->dw_state & ~CHNL_STATEEOS) == 990 DBC_ASSERT((pchnl->state & ~CHNL_STATEEOS) ==
1011 CHNL_STATEREADY); 991 CHNL_STATEREADY);
1012 /* 992 /*
1013 * Record the fact that we have a buffer available for 993 * Record the fact that we have a buffer available for
1014 * output. 994 * output.
1015 */ 995 */
1016 chnl_mgr_obj->dw_output_mask |= (1 << pchnl->chnl_id); 996 chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id);
1017 } else { 997 } else {
1018 DBC_ASSERT(io_mode); /* Shouldn't get here. */ 998 DBC_ASSERT(io_mode); /* Shouldn't get here. */
1019 } 999 }
@@ -1056,7 +1036,7 @@ static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
1056 u32 shift; 1036 u32 shift;
1057 1037
1058 id = (pchnl != 1038 id = (pchnl !=
1059 NULL ? pchnl->chnl_id : (chnl_mgr_obj->dw_last_output + 1)); 1039 NULL ? pchnl->chnl_id : (chnl_mgr_obj->last_output + 1));
1060 id = ((id == CHNL_MAXCHANNELS) ? 0 : id); 1040 id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
1061 if (id >= CHNL_MAXCHANNELS) 1041 if (id >= CHNL_MAXCHANNELS)
1062 goto func_end; 1042 goto func_end;
@@ -1067,7 +1047,7 @@ static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
1067 if (mask & shift) { 1047 if (mask & shift) {
1068 ret = id; 1048 ret = id;
1069 if (pchnl == NULL) 1049 if (pchnl == NULL)
1070 chnl_mgr_obj->dw_last_output = id; 1050 chnl_mgr_obj->last_output = id;
1071 break; 1051 break;
1072 } 1052 }
1073 id = id + 1; 1053 id = id + 1;
@@ -1096,7 +1076,7 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1096 bool notify_client = false; 1076 bool notify_client = false;
1097 1077
1098 sm = pio_mgr->shared_mem; 1078 sm = pio_mgr->shared_mem;
1099 chnl_mgr_obj = pio_mgr->hchnl_mgr; 1079 chnl_mgr_obj = pio_mgr->chnl_mgr;
1100 1080
1101 /* Attempt to perform input */ 1081 /* Attempt to perform input */
1102 if (!sm->input_full) 1082 if (!sm->input_full)
@@ -1110,18 +1090,20 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1110 DBC_ASSERT(chnl_id); 1090 DBC_ASSERT(chnl_id);
1111 goto func_end; 1091 goto func_end;
1112 } 1092 }
1113 pchnl = chnl_mgr_obj->ap_channel[chnl_id]; 1093 pchnl = chnl_mgr_obj->channels[chnl_id];
1114 if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) { 1094 if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
1115 if ((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY) { 1095 if ((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
1116 if (!pchnl->pio_requests)
1117 goto func_end;
1118 /* Get the I/O request, and attempt a transfer */ 1096 /* Get the I/O request, and attempt a transfer */
1119 chnl_packet_obj = (struct chnl_irp *) 1097 if (!list_empty(&pchnl->io_requests)) {
1120 lst_get_head(pchnl->pio_requests); 1098 if (!pchnl->cio_reqs)
1121 if (chnl_packet_obj) {
1122 pchnl->cio_reqs--;
1123 if (pchnl->cio_reqs < 0)
1124 goto func_end; 1099 goto func_end;
1100
1101 chnl_packet_obj = list_first_entry(
1102 &pchnl->io_requests,
1103 struct chnl_irp, link);
1104 list_del(&chnl_packet_obj->link);
1105 pchnl->cio_reqs--;
1106
1125 /* 1107 /*
1126 * Ensure we don't overflow the client's 1108 * Ensure we don't overflow the client's
1127 * buffer. 1109 * buffer.
@@ -1131,7 +1113,7 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1131 pio_mgr->input, bytes); 1113 pio_mgr->input, bytes);
1132 pchnl->bytes_moved += bytes; 1114 pchnl->bytes_moved += bytes;
1133 chnl_packet_obj->byte_size = bytes; 1115 chnl_packet_obj->byte_size = bytes;
1134 chnl_packet_obj->dw_arg = dw_arg; 1116 chnl_packet_obj->arg = dw_arg;
1135 chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE; 1117 chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
1136 1118
1137 if (bytes == 0) { 1119 if (bytes == 0) {
@@ -1140,7 +1122,7 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1140 * sends EOS more than once on this 1122 * sends EOS more than once on this
1141 * channel. 1123 * channel.
1142 */ 1124 */
1143 if (pchnl->dw_state & CHNL_STATEEOS) 1125 if (pchnl->state & CHNL_STATEEOS)
1144 goto func_end; 1126 goto func_end;
1145 /* 1127 /*
1146 * Zero bytes indicates EOS. Update 1128 * Zero bytes indicates EOS. Update
@@ -1148,21 +1130,18 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1148 * the channel state. 1130 * the channel state.
1149 */ 1131 */
1150 chnl_packet_obj->status |= 1132 chnl_packet_obj->status |=
1151 CHNL_IOCSTATEOS; 1133 CHNL_IOCSTATEOS;
1152 pchnl->dw_state |= CHNL_STATEEOS; 1134 pchnl->state |= CHNL_STATEEOS;
1153 /* 1135 /*
1154 * Notify that end of stream has 1136 * Notify that end of stream has
1155 * occurred. 1137 * occurred.
1156 */ 1138 */
1157 ntfy_notify(pchnl->ntfy_obj, 1139 ntfy_notify(pchnl->ntfy_obj,
1158 DSP_STREAMDONE); 1140 DSP_STREAMDONE);
1159 } 1141 }
1160 /* Tell DSP if no more I/O buffers available */ 1142 /* Tell DSP if no more I/O buffers available */
1161 if (!pchnl->pio_requests) 1143 if (list_empty(&pchnl->io_requests))
1162 goto func_end;
1163 if (LST_IS_EMPTY(pchnl->pio_requests)) {
1164 set_chnl_free(sm, pchnl->chnl_id); 1144 set_chnl_free(sm, pchnl->chnl_id);
1165 }
1166 clear_chnl = true; 1145 clear_chnl = true;
1167 notify_client = true; 1146 notify_client = true;
1168 } else { 1147 } else {
@@ -1185,7 +1164,7 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1185 if (clear_chnl) { 1164 if (clear_chnl) {
1186 /* Indicate to the DSP we have read the input */ 1165 /* Indicate to the DSP we have read the input */
1187 sm->input_full = 0; 1166 sm->input_full = 0;
1188 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS); 1167 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1189 } 1168 }
1190 if (notify_client) { 1169 if (notify_client) {
1191 /* Notify client with IO completion record */ 1170 /* Notify client with IO completion record */
@@ -1216,89 +1195,73 @@ static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1216 input_empty = msg_ctr_obj->buf_empty; 1195 input_empty = msg_ctr_obj->buf_empty;
1217 num_msgs = msg_ctr_obj->size; 1196 num_msgs = msg_ctr_obj->size;
1218 if (input_empty) 1197 if (input_empty)
1219 goto func_end; 1198 return;
1220 1199
1221 msg_input = pio_mgr->msg_input; 1200 msg_input = pio_mgr->msg_input;
1222 for (i = 0; i < num_msgs; i++) { 1201 for (i = 0; i < num_msgs; i++) {
1223 /* Read the next message */ 1202 /* Read the next message */
1224 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_cmd); 1203 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.cmd);
1225 msg.msg.dw_cmd = 1204 msg.msg.cmd =
1226 read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr); 1205 read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1227 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg1); 1206 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg1);
1228 msg.msg.dw_arg1 = 1207 msg.msg.arg1 =
1229 read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr); 1208 read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1230 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg2); 1209 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg2);
1231 msg.msg.dw_arg2 = 1210 msg.msg.arg2 =
1232 read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr); 1211 read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1233 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id); 1212 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
1234 msg.msgq_id = 1213 msg.msgq_id =
1235 read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr); 1214 read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
1236 msg_input += sizeof(struct msg_dspmsg); 1215 msg_input += sizeof(struct msg_dspmsg);
1237 if (!hmsg_mgr->queue_list)
1238 goto func_end;
1239 1216
1240 /* Determine which queue to put the message in */ 1217 /* Determine which queue to put the message in */
1241 msg_queue_obj = 1218 dev_dbg(bridge, "input msg: cmd=0x%x arg1=0x%x "
1242 (struct msg_queue *)lst_first(hmsg_mgr->queue_list); 1219 "arg2=0x%x msgq_id=0x%x\n", msg.msg.cmd,
1243 dev_dbg(bridge, "input msg: dw_cmd=0x%x dw_arg1=0x%x " 1220 msg.msg.arg1, msg.msg.arg2, msg.msgq_id);
1244 "dw_arg2=0x%x msgq_id=0x%x \n", msg.msg.dw_cmd,
1245 msg.msg.dw_arg1, msg.msg.dw_arg2, msg.msgq_id);
1246 /* 1221 /*
1247 * Interrupt may occur before shared memory and message 1222 * Interrupt may occur before shared memory and message
1248 * input locations have been set up. If all nodes were 1223 * input locations have been set up. If all nodes were
1249 * cleaned up, hmsg_mgr->max_msgs should be 0. 1224 * cleaned up, hmsg_mgr->max_msgs should be 0.
1250 */ 1225 */
1251 while (msg_queue_obj != NULL) { 1226 list_for_each_entry(msg_queue_obj, &hmsg_mgr->queue_list,
1252 if (msg.msgq_id == msg_queue_obj->msgq_id) { 1227 list_elem) {
1253 /* Found it */ 1228 if (msg.msgq_id != msg_queue_obj->msgq_id)
1254 if (msg.msg.dw_cmd == RMS_EXITACK) { 1229 continue;
1255 /* 1230 /* Found it */
1256 * Call the node exit notification. 1231 if (msg.msg.cmd == RMS_EXITACK) {
1257 * The exit message does not get 1232 /*
1258 * queued. 1233 * Call the node exit notification.
1259 */ 1234 * The exit message does not get
1260 (*hmsg_mgr->on_exit) ((void *) 1235 * queued.
1261 msg_queue_obj->arg, 1236 */
1262 msg.msg.dw_arg1); 1237 (*hmsg_mgr->on_exit)(msg_queue_obj->arg,
1263 } else { 1238 msg.msg.arg1);
1264 /* 1239 break;
1265 * Not an exit acknowledgement, queue 1240 }
1266 * the message. 1241 /*
1267 */ 1242 * Not an exit acknowledgement, queue
1268 if (!msg_queue_obj->msg_free_list) 1243 * the message.
1269 goto func_end; 1244 */
1270 pmsg = (struct msg_frame *)lst_get_head 1245 if (list_empty(&msg_queue_obj->msg_free_list)) {
1271 (msg_queue_obj->msg_free_list); 1246 /*
1272 if (msg_queue_obj->msg_used_list 1247 * No free frame to copy the
1273 && pmsg) { 1248 * message into.
1274 pmsg->msg_data = msg; 1249 */
1275 lst_put_tail 1250 pr_err("%s: no free msg frames,"
1276 (msg_queue_obj->msg_used_list, 1251 " discarding msg\n",
1277 (struct list_head *)pmsg); 1252 __func__);
1278 ntfy_notify
1279 (msg_queue_obj->ntfy_obj,
1280 DSP_NODEMESSAGEREADY);
1281 sync_set_event
1282 (msg_queue_obj->sync_event);
1283 } else {
1284 /*
1285 * No free frame to copy the
1286 * message into.
1287 */
1288 pr_err("%s: no free msg frames,"
1289 " discarding msg\n",
1290 __func__);
1291 }
1292 }
1293 break; 1253 break;
1294 } 1254 }
1295 1255
1296 if (!hmsg_mgr->queue_list || !msg_queue_obj) 1256 pmsg = list_first_entry(&msg_queue_obj->msg_free_list,
1297 goto func_end; 1257 struct msg_frame, list_elem);
1298 msg_queue_obj = 1258 list_del(&pmsg->list_elem);
1299 (struct msg_queue *)lst_next(hmsg_mgr->queue_list, 1259 pmsg->msg_data = msg;
1300 (struct list_head *) 1260 list_add_tail(&pmsg->list_elem,
1301 msg_queue_obj); 1261 &msg_queue_obj->msg_used_list);
1262 ntfy_notify(msg_queue_obj->ntfy_obj,
1263 DSP_NODEMESSAGEREADY);
1264 sync_set_event(msg_queue_obj->sync_event);
1302 } 1265 }
1303 } 1266 }
1304 /* Set the post SWI flag */ 1267 /* Set the post SWI flag */
@@ -1306,10 +1269,8 @@ static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1306 /* Tell the DSP we've read the messages */ 1269 /* Tell the DSP we've read the messages */
1307 msg_ctr_obj->buf_empty = true; 1270 msg_ctr_obj->buf_empty = true;
1308 msg_ctr_obj->post_swi = true; 1271 msg_ctr_obj->post_swi = true;
1309 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS); 1272 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1310 } 1273 }
1311func_end:
1312 return;
1313} 1274}
1314 1275
1315/* 1276/*
@@ -1322,8 +1283,7 @@ static void notify_chnl_complete(struct chnl_object *pchnl,
1322{ 1283{
1323 bool signal_event; 1284 bool signal_event;
1324 1285
1325 if (!pchnl || !pchnl->sync_event || 1286 if (!pchnl || !pchnl->sync_event || !chnl_packet_obj)
1326 !pchnl->pio_completions || !chnl_packet_obj)
1327 goto func_end; 1287 goto func_end;
1328 1288
1329 /* 1289 /*
@@ -1332,10 +1292,9 @@ static void notify_chnl_complete(struct chnl_object *pchnl,
1332 * signalled by the only IO completion list consumer: 1292 * signalled by the only IO completion list consumer:
1333 * bridge_chnl_get_ioc(). 1293 * bridge_chnl_get_ioc().
1334 */ 1294 */
1335 signal_event = LST_IS_EMPTY(pchnl->pio_completions); 1295 signal_event = list_empty(&pchnl->io_completions);
1336 /* Enqueue the IO completion info for the client */ 1296 /* Enqueue the IO completion info for the client */
1337 lst_put_tail(pchnl->pio_completions, 1297 list_add_tail(&chnl_packet_obj->link, &pchnl->io_completions);
1338 (struct list_head *)chnl_packet_obj);
1339 pchnl->cio_cs++; 1298 pchnl->cio_cs++;
1340 1299
1341 if (pchnl->cio_cs > pchnl->chnl_packets) 1300 if (pchnl->cio_cs > pchnl->chnl_packets)
@@ -1364,49 +1323,51 @@ static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1364 struct chnl_irp *chnl_packet_obj; 1323 struct chnl_irp *chnl_packet_obj;
1365 u32 dw_dsp_f_mask; 1324 u32 dw_dsp_f_mask;
1366 1325
1367 chnl_mgr_obj = pio_mgr->hchnl_mgr; 1326 chnl_mgr_obj = pio_mgr->chnl_mgr;
1368 sm = pio_mgr->shared_mem; 1327 sm = pio_mgr->shared_mem;
1369 /* Attempt to perform output */ 1328 /* Attempt to perform output */
1370 if (sm->output_full) 1329 if (sm->output_full)
1371 goto func_end; 1330 goto func_end;
1372 1331
1373 if (pchnl && !((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY)) 1332 if (pchnl && !((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
1374 goto func_end; 1333 goto func_end;
1375 1334
1376 /* Look to see if both a PC and DSP output channel are ready */ 1335 /* Look to see if both a PC and DSP output channel are ready */
1377 dw_dsp_f_mask = sm->dsp_free_mask; 1336 dw_dsp_f_mask = sm->dsp_free_mask;
1378 chnl_id = 1337 chnl_id =
1379 find_ready_output(chnl_mgr_obj, pchnl, 1338 find_ready_output(chnl_mgr_obj, pchnl,
1380 (chnl_mgr_obj->dw_output_mask & dw_dsp_f_mask)); 1339 (chnl_mgr_obj->output_mask & dw_dsp_f_mask));
1381 if (chnl_id == OUTPUTNOTREADY) 1340 if (chnl_id == OUTPUTNOTREADY)
1382 goto func_end; 1341 goto func_end;
1383 1342
1384 pchnl = chnl_mgr_obj->ap_channel[chnl_id]; 1343 pchnl = chnl_mgr_obj->channels[chnl_id];
1385 if (!pchnl || !pchnl->pio_requests) { 1344 if (!pchnl || list_empty(&pchnl->io_requests)) {
1386 /* Shouldn't get here */ 1345 /* Shouldn't get here */
1387 goto func_end; 1346 goto func_end;
1388 } 1347 }
1389 /* Get the I/O request, and attempt a transfer */ 1348
1390 chnl_packet_obj = (struct chnl_irp *)lst_get_head(pchnl->pio_requests); 1349 if (!pchnl->cio_reqs)
1391 if (!chnl_packet_obj)
1392 goto func_end; 1350 goto func_end;
1393 1351
1352 /* Get the I/O request, and attempt a transfer */
1353 chnl_packet_obj = list_first_entry(&pchnl->io_requests,
1354 struct chnl_irp, link);
1355 list_del(&chnl_packet_obj->link);
1356
1394 pchnl->cio_reqs--; 1357 pchnl->cio_reqs--;
1395 if (pchnl->cio_reqs < 0 || !pchnl->pio_requests)
1396 goto func_end;
1397 1358
1398 /* Record fact that no more I/O buffers available */ 1359 /* Record fact that no more I/O buffers available */
1399 if (LST_IS_EMPTY(pchnl->pio_requests)) 1360 if (list_empty(&pchnl->io_requests))
1400 chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id); 1361 chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
1401 1362
1402 /* Transfer buffer to DSP side */ 1363 /* Transfer buffer to DSP side */
1403 chnl_packet_obj->byte_size = min(pio_mgr->usm_buf_size, 1364 chnl_packet_obj->byte_size = min(pio_mgr->sm_buf_size,
1404 chnl_packet_obj->byte_size); 1365 chnl_packet_obj->byte_size);
1405 memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf, 1366 memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf,
1406 chnl_packet_obj->byte_size); 1367 chnl_packet_obj->byte_size);
1407 pchnl->bytes_moved += chnl_packet_obj->byte_size; 1368 pchnl->bytes_moved += chnl_packet_obj->byte_size;
1408 /* Write all 32 bits of arg */ 1369 /* Write all 32 bits of arg */
1409 sm->arg = chnl_packet_obj->dw_arg; 1370 sm->arg = chnl_packet_obj->arg;
1410#if _CHNL_WORDSIZE == 2 1371#if _CHNL_WORDSIZE == 2
1411 /* Access can be different SM access word size (e.g. 16/32 bit words) */ 1372 /* Access can be different SM access word size (e.g. 16/32 bit words) */
1412 sm->output_id = (u16) chnl_id; 1373 sm->output_id = (u16) chnl_id;
@@ -1420,7 +1381,7 @@ static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1420#endif 1381#endif
1421 sm->output_full = 1; 1382 sm->output_full = 1;
1422 /* Indicate to the DSP we have written the output */ 1383 /* Indicate to the DSP we have written the output */
1423 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS); 1384 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1424 /* Notify client with IO completion record (keep EOS) */ 1385 /* Notify client with IO completion record (keep EOS) */
1425 chnl_packet_obj->status &= CHNL_IOCSTATEOS; 1386 chnl_packet_obj->status &= CHNL_IOCSTATEOS;
1426 notify_chnl_complete(pchnl, chnl_packet_obj); 1387 notify_chnl_complete(pchnl, chnl_packet_obj);
@@ -1440,81 +1401,69 @@ static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1440{ 1401{
1441 u32 num_msgs = 0; 1402 u32 num_msgs = 0;
1442 u32 i; 1403 u32 i;
1443 u8 *msg_output; 1404 struct msg_dspmsg *msg_output;
1444 struct msg_frame *pmsg; 1405 struct msg_frame *pmsg;
1445 struct msg_ctrl *msg_ctr_obj; 1406 struct msg_ctrl *msg_ctr_obj;
1446 u32 output_empty;
1447 u32 val; 1407 u32 val;
1448 u32 addr; 1408 u32 addr;
1449 1409
1450 msg_ctr_obj = pio_mgr->msg_output_ctrl; 1410 msg_ctr_obj = pio_mgr->msg_output_ctrl;
1451 1411
1452 /* Check if output has been cleared */ 1412 /* Check if output has been cleared */
1453 output_empty = msg_ctr_obj->buf_empty; 1413 if (!msg_ctr_obj->buf_empty)
1454 if (output_empty) { 1414 return;
1455 num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ? 1415
1456 hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending; 1416 num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
1457 msg_output = pio_mgr->msg_output; 1417 hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
1458 /* Copy num_msgs messages into shared memory */ 1418 msg_output = (struct msg_dspmsg *) pio_mgr->msg_output;
1459 for (i = 0; i < num_msgs; i++) { 1419
1460 if (!hmsg_mgr->msg_used_list) { 1420 /* Copy num_msgs messages into shared memory */
1461 pmsg = NULL; 1421 for (i = 0; i < num_msgs; i++) {
1462 goto func_end; 1422 if (list_empty(&hmsg_mgr->msg_used_list))
1463 } else { 1423 continue;
1464 pmsg = (struct msg_frame *) 1424
1465 lst_get_head(hmsg_mgr->msg_used_list); 1425 pmsg = list_first_entry(&hmsg_mgr->msg_used_list,
1466 } 1426 struct msg_frame, list_elem);
1467 if (pmsg != NULL) { 1427 list_del(&pmsg->list_elem);
1468 val = (pmsg->msg_data).msgq_id; 1428
1469 addr = (u32) &(((struct msg_dspmsg *) 1429 val = (pmsg->msg_data).msgq_id;
1470 msg_output)->msgq_id); 1430 addr = (u32) &msg_output->msgq_id;
1471 write_ext32_bit_dsp_data( 1431 write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1472 pio_mgr->hbridge_context, addr, val); 1432
1473 val = (pmsg->msg_data).msg.dw_cmd; 1433 val = (pmsg->msg_data).msg.cmd;
1474 addr = (u32) &((((struct msg_dspmsg *) 1434 addr = (u32) &msg_output->msg.cmd;
1475 msg_output)->msg).dw_cmd); 1435 write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1476 write_ext32_bit_dsp_data( 1436
1477 pio_mgr->hbridge_context, addr, val); 1437 val = (pmsg->msg_data).msg.arg1;
1478 val = (pmsg->msg_data).msg.dw_arg1; 1438 addr = (u32) &msg_output->msg.arg1;
1479 addr = (u32) &((((struct msg_dspmsg *) 1439 write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1480 msg_output)->msg).dw_arg1); 1440
1481 write_ext32_bit_dsp_data( 1441 val = (pmsg->msg_data).msg.arg2;
1482 pio_mgr->hbridge_context, addr, val); 1442 addr = (u32) &msg_output->msg.arg2;
1483 val = (pmsg->msg_data).msg.dw_arg2; 1443 write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
1484 addr = (u32) &((((struct msg_dspmsg *) 1444
1485 msg_output)->msg).dw_arg2); 1445 msg_output++;
1486 write_ext32_bit_dsp_data( 1446 list_add_tail(&pmsg->list_elem, &hmsg_mgr->msg_free_list);
1487 pio_mgr->hbridge_context, addr, val); 1447 sync_set_event(hmsg_mgr->sync_event);
1488 msg_output += sizeof(struct msg_dspmsg); 1448 }
1489 if (!hmsg_mgr->msg_free_list)
1490 goto func_end;
1491 lst_put_tail(hmsg_mgr->msg_free_list,
1492 (struct list_head *)pmsg);
1493 sync_set_event(hmsg_mgr->sync_event);
1494 }
1495 }
1496 1449
1497 if (num_msgs > 0) { 1450 if (num_msgs > 0) {
1498 hmsg_mgr->msgs_pending -= num_msgs; 1451 hmsg_mgr->msgs_pending -= num_msgs;
1499#if _CHNL_WORDSIZE == 2 1452#if _CHNL_WORDSIZE == 2
1500 /* 1453 /*
1501 * Access can be different SM access word size 1454 * Access can be different SM access word size
1502 * (e.g. 16/32 bit words) 1455 * (e.g. 16/32 bit words)
1503 */ 1456 */
1504 msg_ctr_obj->size = (u16) num_msgs; 1457 msg_ctr_obj->size = (u16) num_msgs;
1505#else 1458#else
1506 msg_ctr_obj->size = num_msgs; 1459 msg_ctr_obj->size = num_msgs;
1507#endif 1460#endif
1508 msg_ctr_obj->buf_empty = false; 1461 msg_ctr_obj->buf_empty = false;
1509 /* Set the post SWI flag */ 1462 /* Set the post SWI flag */
1510 msg_ctr_obj->post_swi = true; 1463 msg_ctr_obj->post_swi = true;
1511 /* Tell the DSP we have written the output. */ 1464 /* Tell the DSP we have written the output. */
1512 sm_interrupt_dsp(pio_mgr->hbridge_context, 1465 sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
1513 MBX_PCPY_CLASS);
1514 }
1515 } 1466 }
1516func_end:
1517 return;
1518} 1467}
1519 1468
1520/* 1469/*
@@ -1569,9 +1518,9 @@ static int register_shm_segs(struct io_mgr *hio_mgr,
1569 } 1518 }
1570 /* Register with CMM */ 1519 /* Register with CMM */
1571 if (!status) { 1520 if (!status) {
1572 status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr); 1521 status = dev_get_cmm_mgr(hio_mgr->dev_obj, &hio_mgr->cmm_mgr);
1573 if (!status) { 1522 if (!status) {
1574 status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr, 1523 status = cmm_un_register_gppsm_seg(hio_mgr->cmm_mgr,
1575 CMM_ALLSEGMENTS); 1524 CMM_ALLSEGMENTS);
1576 } 1525 }
1577 } 1526 }
@@ -1592,10 +1541,10 @@ static int register_shm_segs(struct io_mgr *hio_mgr,
1592 goto func_end; 1541 goto func_end;
1593 } 1542 }
1594 /* First TLB entry reserved for Bridge SM use. */ 1543 /* First TLB entry reserved for Bridge SM use. */
1595 ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys; 1544 ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
1596 /* Get size in bytes */ 1545 /* Get size in bytes */
1597 ul_dsp_virt = 1546 ul_dsp_virt =
1598 hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt * 1547 hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt *
1599 hio_mgr->word_size; 1548 hio_mgr->word_size;
1600 /* 1549 /*
1601 * Calc byte offset used to convert GPP phys <-> DSP byte 1550 * Calc byte offset used to convert GPP phys <-> DSP byte
@@ -1626,7 +1575,7 @@ static int register_shm_segs(struct io_mgr *hio_mgr,
1626 ul_dsp_virt; 1575 ul_dsp_virt;
1627 /* Register SM Segment 0. */ 1576 /* Register SM Segment 0. */
1628 status = 1577 status =
1629 cmm_register_gppsm_seg(hio_mgr->hcmm_mgr, dw_gpp_base_pa, 1578 cmm_register_gppsm_seg(hio_mgr->cmm_mgr, dw_gpp_base_pa,
1630 ul_rsrvd_size, dw_offset, 1579 ul_rsrvd_size, dw_offset,
1631 (dw_gpp_base_pa > 1580 (dw_gpp_base_pa >
1632 ul_dsp_virt) ? CMM_ADDTODSPPA : 1581 ul_dsp_virt) ? CMM_ADDTODSPPA :
@@ -1714,6 +1663,9 @@ int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
1714int bridge_io_get_proc_load(struct io_mgr *hio_mgr, 1663int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
1715 struct dsp_procloadstat *proc_lstat) 1664 struct dsp_procloadstat *proc_lstat)
1716{ 1665{
1666 if (!hio_mgr->shared_mem)
1667 return -EFAULT;
1668
1717 proc_lstat->curr_load = 1669 proc_lstat->curr_load =
1718 hio_mgr->shared_mem->load_mon_info.curr_dsp_load; 1670 hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
1719 proc_lstat->predicted_load = 1671 proc_lstat->predicted_load =
@@ -1730,10 +1682,6 @@ int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
1730 return 0; 1682 return 0;
1731} 1683}
1732 1684
1733void io_sm_init(void)
1734{
1735 /* Do nothing */
1736}
1737 1685
1738#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) 1686#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
1739void print_dsp_debug_trace(struct io_mgr *hio_mgr) 1687void print_dsp_debug_trace(struct io_mgr *hio_mgr)
@@ -1743,54 +1691,54 @@ void print_dsp_debug_trace(struct io_mgr *hio_mgr)
1743 while (true) { 1691 while (true) {
1744 /* Get the DSP current pointer */ 1692 /* Get the DSP current pointer */
1745 ul_gpp_cur_pointer = 1693 ul_gpp_cur_pointer =
1746 *(u32 *) (hio_mgr->ul_trace_buffer_current); 1694 *(u32 *) (hio_mgr->trace_buffer_current);
1747 ul_gpp_cur_pointer = 1695 ul_gpp_cur_pointer =
1748 hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer - 1696 hio_mgr->gpp_va + (ul_gpp_cur_pointer -
1749 hio_mgr->ul_dsp_va); 1697 hio_mgr->dsp_va);
1750 1698
1751 /* No new debug messages available yet */ 1699 /* No new debug messages available yet */
1752 if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) { 1700 if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) {
1753 break; 1701 break;
1754 } else if (ul_gpp_cur_pointer > hio_mgr->ul_gpp_read_pointer) { 1702 } else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) {
1755 /* Continuous data */ 1703 /* Continuous data */
1756 ul_new_message_length = 1704 ul_new_message_length =
1757 ul_gpp_cur_pointer - hio_mgr->ul_gpp_read_pointer; 1705 ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer;
1758 1706
1759 memcpy(hio_mgr->pmsg, 1707 memcpy(hio_mgr->msg,
1760 (char *)hio_mgr->ul_gpp_read_pointer, 1708 (char *)hio_mgr->gpp_read_pointer,
1761 ul_new_message_length); 1709 ul_new_message_length);
1762 hio_mgr->pmsg[ul_new_message_length] = '\0'; 1710 hio_mgr->msg[ul_new_message_length] = '\0';
1763 /* 1711 /*
1764 * Advance the GPP trace pointer to DSP current 1712 * Advance the GPP trace pointer to DSP current
1765 * pointer. 1713 * pointer.
1766 */ 1714 */
1767 hio_mgr->ul_gpp_read_pointer += ul_new_message_length; 1715 hio_mgr->gpp_read_pointer += ul_new_message_length;
1768 /* Print the trace messages */ 1716 /* Print the trace messages */
1769 pr_info("DSPTrace: %s\n", hio_mgr->pmsg); 1717 pr_info("DSPTrace: %s\n", hio_mgr->msg);
1770 } else if (ul_gpp_cur_pointer < hio_mgr->ul_gpp_read_pointer) { 1718 } else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) {
1771 /* Handle trace buffer wraparound */ 1719 /* Handle trace buffer wraparound */
1772 memcpy(hio_mgr->pmsg, 1720 memcpy(hio_mgr->msg,
1773 (char *)hio_mgr->ul_gpp_read_pointer, 1721 (char *)hio_mgr->gpp_read_pointer,
1774 hio_mgr->ul_trace_buffer_end - 1722 hio_mgr->trace_buffer_end -
1775 hio_mgr->ul_gpp_read_pointer); 1723 hio_mgr->gpp_read_pointer);
1776 ul_new_message_length = 1724 ul_new_message_length =
1777 ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin; 1725 ul_gpp_cur_pointer - hio_mgr->trace_buffer_begin;
1778 memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end - 1726 memcpy(&hio_mgr->msg[hio_mgr->trace_buffer_end -
1779 hio_mgr->ul_gpp_read_pointer], 1727 hio_mgr->gpp_read_pointer],
1780 (char *)hio_mgr->ul_trace_buffer_begin, 1728 (char *)hio_mgr->trace_buffer_begin,
1781 ul_new_message_length); 1729 ul_new_message_length);
1782 hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end - 1730 hio_mgr->msg[hio_mgr->trace_buffer_end -
1783 hio_mgr->ul_gpp_read_pointer + 1731 hio_mgr->gpp_read_pointer +
1784 ul_new_message_length] = '\0'; 1732 ul_new_message_length] = '\0';
1785 /* 1733 /*
1786 * Advance the GPP trace pointer to DSP current 1734 * Advance the GPP trace pointer to DSP current
1787 * pointer. 1735 * pointer.
1788 */ 1736 */
1789 hio_mgr->ul_gpp_read_pointer = 1737 hio_mgr->gpp_read_pointer =
1790 hio_mgr->ul_trace_buffer_begin + 1738 hio_mgr->trace_buffer_begin +
1791 ul_new_message_length; 1739 ul_new_message_length;
1792 /* Print the trace messages */ 1740 /* Print the trace messages */
1793 pr_info("DSPTrace: %s\n", hio_mgr->pmsg); 1741 pr_info("DSPTrace: %s\n", hio_mgr->msg);
1794 } 1742 }
1795 } 1743 }
1796} 1744}
@@ -1828,7 +1776,7 @@ int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
1828 struct bridge_dev_context *pbridge_context = hbridge_context; 1776 struct bridge_dev_context *pbridge_context = hbridge_context;
1829 struct bridge_drv_interface *intf_fxns; 1777 struct bridge_drv_interface *intf_fxns;
1830 struct dev_object *dev_obj = (struct dev_object *) 1778 struct dev_object *dev_obj = (struct dev_object *)
1831 pbridge_context->hdev_obj; 1779 pbridge_context->dev_obj;
1832 1780
1833 status = dev_get_cod_mgr(dev_obj, &cod_mgr); 1781 status = dev_get_cod_mgr(dev_obj, &cod_mgr);
1834 1782
@@ -1862,7 +1810,7 @@ int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
1862 psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC); 1810 psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
1863 if (psz_buf != NULL) { 1811 if (psz_buf != NULL) {
1864 /* Read trace buffer data */ 1812 /* Read trace buffer data */
1865 status = (*intf_fxns->pfn_brd_read)(pbridge_context, 1813 status = (*intf_fxns->brd_read)(pbridge_context,
1866 (u8 *)psz_buf, (u32)ul_trace_begin, 1814 (u8 *)psz_buf, (u32)ul_trace_begin,
1867 ul_num_bytes, 0); 1815 ul_num_bytes, 0);
1868 1816
@@ -1877,7 +1825,7 @@ int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
1877 __func__, psz_buf); 1825 __func__, psz_buf);
1878 1826
1879 /* Read the value at the DSP address in trace_cur_pos. */ 1827 /* Read the value at the DSP address in trace_cur_pos. */
1880 status = (*intf_fxns->pfn_brd_read)(pbridge_context, 1828 status = (*intf_fxns->brd_read)(pbridge_context,
1881 (u8 *)&trace_cur_pos, (u32)trace_cur_pos, 1829 (u8 *)&trace_cur_pos, (u32)trace_cur_pos,
1882 4, 0); 1830 4, 0);
1883 if (status) 1831 if (status)
@@ -2001,7 +1949,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context)
2001 "ILC", "RILC", "IER", "CSR"}; 1949 "ILC", "RILC", "IER", "CSR"};
2002 const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"}; 1950 const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
2003 struct bridge_drv_interface *intf_fxns; 1951 struct bridge_drv_interface *intf_fxns;
2004 struct dev_object *dev_object = bridge_context->hdev_obj; 1952 struct dev_object *dev_object = bridge_context->dev_obj;
2005 1953
2006 status = dev_get_cod_mgr(dev_object, &code_mgr); 1954 status = dev_get_cod_mgr(dev_object, &code_mgr);
2007 if (!code_mgr) { 1955 if (!code_mgr) {
@@ -2044,7 +1992,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context)
2044 poll_cnt < POLL_MAX) { 1992 poll_cnt < POLL_MAX) {
2045 1993
2046 /* Read DSP dump size from the DSP trace buffer... */ 1994 /* Read DSP dump size from the DSP trace buffer... */
2047 status = (*intf_fxns->pfn_brd_read)(bridge_context, 1995 status = (*intf_fxns->brd_read)(bridge_context,
2048 (u8 *)&mmu_fault_dbg_info, (u32)trace_begin, 1996 (u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
2049 sizeof(mmu_fault_dbg_info), 0); 1997 sizeof(mmu_fault_dbg_info), 0);
2050 1998
@@ -2080,7 +2028,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context)
2080 buffer_end = buffer + total_size / 4; 2028 buffer_end = buffer + total_size / 4;
2081 2029
2082 /* Read bytes from the DSP trace buffer... */ 2030 /* Read bytes from the DSP trace buffer... */
2083 status = (*intf_fxns->pfn_brd_read)(bridge_context, 2031 status = (*intf_fxns->brd_read)(bridge_context,
2084 (u8 *)buffer, (u32)trace_begin, 2032 (u8 *)buffer, (u32)trace_begin,
2085 total_size, 0); 2033 total_size, 0);
2086 if (status) { 2034 if (status) {
@@ -2207,7 +2155,7 @@ void dump_dl_modules(struct bridge_dev_context *bridge_context)
2207 struct cod_manager *code_mgr; 2155 struct cod_manager *code_mgr;
2208 struct bridge_drv_interface *intf_fxns; 2156 struct bridge_drv_interface *intf_fxns;
2209 struct bridge_dev_context *bridge_ctxt = bridge_context; 2157 struct bridge_dev_context *bridge_ctxt = bridge_context;
2210 struct dev_object *dev_object = bridge_ctxt->hdev_obj; 2158 struct dev_object *dev_object = bridge_ctxt->dev_obj;
2211 struct modules_header modules_hdr; 2159 struct modules_header modules_hdr;
2212 struct dll_module *module_struct = NULL; 2160 struct dll_module *module_struct = NULL;
2213 u32 module_dsp_addr; 2161 u32 module_dsp_addr;
@@ -2241,7 +2189,7 @@ void dump_dl_modules(struct bridge_dev_context *bridge_context)
2241 pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr); 2189 pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
2242 2190
2243 /* Copy the modules_header structure from DSP memory. */ 2191 /* Copy the modules_header structure from DSP memory. */
2244 status = (*intf_fxns->pfn_brd_read)(bridge_context, (u8 *) &modules_hdr, 2192 status = (*intf_fxns->brd_read)(bridge_context, (u8 *) &modules_hdr,
2245 (u32) module_dsp_addr, sizeof(modules_hdr), 0); 2193 (u32) module_dsp_addr, sizeof(modules_hdr), 0);
2246 2194
2247 if (status) { 2195 if (status) {
@@ -2276,7 +2224,7 @@ void dump_dl_modules(struct bridge_dev_context *bridge_context)
2276 goto func_end; 2224 goto func_end;
2277 } 2225 }
2278 /* Copy the dll_module structure from DSP memory */ 2226 /* Copy the dll_module structure from DSP memory */
2279 status = (*intf_fxns->pfn_brd_read)(bridge_context, 2227 status = (*intf_fxns->brd_read)(bridge_context,
2280 (u8 *)module_struct, module_dsp_addr, module_size, 0); 2228 (u8 *)module_struct, module_dsp_addr, module_size, 0);
2281 2229
2282 if (status) { 2230 if (status) {
diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c
index 87712e24dfb..94d9e04a22f 100644
--- a/drivers/staging/tidspbridge/core/msg_sm.c
+++ b/drivers/staging/tidspbridge/core/msg_sm.c
@@ -24,7 +24,6 @@
24#include <dspbridge/dbc.h> 24#include <dspbridge/dbc.h>
25 25
26/* ----------------------------------- OS Adaptation Layer */ 26/* ----------------------------------- OS Adaptation Layer */
27#include <dspbridge/list.h>
28#include <dspbridge/sync.h> 27#include <dspbridge/sync.h>
29 28
30/* ----------------------------------- Platform Manager */ 29/* ----------------------------------- Platform Manager */
@@ -38,10 +37,10 @@
38#include <dspbridge/dspmsg.h> 37#include <dspbridge/dspmsg.h>
39 38
40/* ----------------------------------- Function Prototypes */ 39/* ----------------------------------- Function Prototypes */
41static int add_new_msg(struct lst_list *msg_list); 40static int add_new_msg(struct list_head *msg_list);
42static void delete_msg_mgr(struct msg_mgr *hmsg_mgr); 41static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
43static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp); 42static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
44static void free_msg_list(struct lst_list *msg_list); 43static void free_msg_list(struct list_head *msg_list);
45 44
46/* 45/*
47 * ======== bridge_msg_create ======== 46 * ======== bridge_msg_create ========
@@ -56,61 +55,46 @@ int bridge_msg_create(struct msg_mgr **msg_man,
56 struct io_mgr *hio_mgr; 55 struct io_mgr *hio_mgr;
57 int status = 0; 56 int status = 0;
58 57
59 if (!msg_man || !msg_callback || !hdev_obj) { 58 if (!msg_man || !msg_callback || !hdev_obj)
60 status = -EFAULT; 59 return -EFAULT;
61 goto func_end; 60
62 }
63 dev_get_io_mgr(hdev_obj, &hio_mgr); 61 dev_get_io_mgr(hdev_obj, &hio_mgr);
64 if (!hio_mgr) { 62 if (!hio_mgr)
65 status = -EFAULT; 63 return -EFAULT;
66 goto func_end; 64
67 }
68 *msg_man = NULL; 65 *msg_man = NULL;
69 /* Allocate msg_ctrl manager object */ 66 /* Allocate msg_ctrl manager object */
70 msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL); 67 msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
68 if (!msg_mgr_obj)
69 return -ENOMEM;
71 70
72 if (msg_mgr_obj) { 71 msg_mgr_obj->on_exit = msg_callback;
73 msg_mgr_obj->on_exit = msg_callback; 72 msg_mgr_obj->iomgr = hio_mgr;
74 msg_mgr_obj->hio_mgr = hio_mgr; 73 /* List of MSG_QUEUEs */
75 /* List of MSG_QUEUEs */ 74 INIT_LIST_HEAD(&msg_mgr_obj->queue_list);
76 msg_mgr_obj->queue_list = kzalloc(sizeof(struct lst_list), 75 /*
77 GFP_KERNEL); 76 * Queues of message frames for messages to the DSP. Message
78 /* Queues of message frames for messages to the DSP. Message 77 * frames will only be added to the free queue when a
79 * frames will only be added to the free queue when a 78 * msg_queue object is created.
80 * msg_queue object is created. */ 79 */
81 msg_mgr_obj->msg_free_list = kzalloc(sizeof(struct lst_list), 80 INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list);
82 GFP_KERNEL); 81 INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list);
83 msg_mgr_obj->msg_used_list = kzalloc(sizeof(struct lst_list), 82 spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
84 GFP_KERNEL);
85 if (msg_mgr_obj->queue_list == NULL ||
86 msg_mgr_obj->msg_free_list == NULL ||
87 msg_mgr_obj->msg_used_list == NULL) {
88 status = -ENOMEM;
89 } else {
90 INIT_LIST_HEAD(&msg_mgr_obj->queue_list->head);
91 INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list->head);
92 INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list->head);
93 spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
94 }
95 83
96 /* Create an event to be used by bridge_msg_put() in waiting 84 /*
97 * for an available free frame from the message manager. */ 85 * Create an event to be used by bridge_msg_put() in waiting
98 msg_mgr_obj->sync_event = 86 * for an available free frame from the message manager.
99 kzalloc(sizeof(struct sync_object), GFP_KERNEL); 87 */
100 if (!msg_mgr_obj->sync_event) 88 msg_mgr_obj->sync_event =
101 status = -ENOMEM; 89 kzalloc(sizeof(struct sync_object), GFP_KERNEL);
102 else 90 if (!msg_mgr_obj->sync_event) {
103 sync_init_event(msg_mgr_obj->sync_event); 91 kfree(msg_mgr_obj);
104 92 return -ENOMEM;
105 if (!status)
106 *msg_man = msg_mgr_obj;
107 else
108 delete_msg_mgr(msg_mgr_obj);
109
110 } else {
111 status = -ENOMEM;
112 } 93 }
113func_end: 94 sync_init_event(msg_mgr_obj->sync_event);
95
96 *msg_man = msg_mgr_obj;
97
114 return status; 98 return status;
115} 99}
116 100
@@ -119,8 +103,7 @@ func_end:
119 * Create a msg_queue for sending/receiving messages to/from a node 103 * Create a msg_queue for sending/receiving messages to/from a node
120 * on the DSP. 104 * on the DSP.
121 */ 105 */
122int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, 106int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq,
123 struct msg_queue **msgq,
124 u32 msgq_id, u32 max_msgs, void *arg) 107 u32 msgq_id, u32 max_msgs, void *arg)
125{ 108{
126 u32 i; 109 u32 i;
@@ -128,107 +111,87 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
128 struct msg_queue *msg_q; 111 struct msg_queue *msg_q;
129 int status = 0; 112 int status = 0;
130 113
131 if (!hmsg_mgr || msgq == NULL || !hmsg_mgr->msg_free_list) { 114 if (!hmsg_mgr || msgq == NULL)
132 status = -EFAULT; 115 return -EFAULT;
133 goto func_end;
134 }
135 116
136 *msgq = NULL; 117 *msgq = NULL;
137 /* Allocate msg_queue object */ 118 /* Allocate msg_queue object */
138 msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL); 119 msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
139 if (!msg_q) { 120 if (!msg_q)
140 status = -ENOMEM; 121 return -ENOMEM;
141 goto func_end; 122
142 }
143 lst_init_elem((struct list_head *)msg_q);
144 msg_q->max_msgs = max_msgs; 123 msg_q->max_msgs = max_msgs;
145 msg_q->hmsg_mgr = hmsg_mgr; 124 msg_q->msg_mgr = hmsg_mgr;
146 msg_q->arg = arg; /* Node handle */ 125 msg_q->arg = arg; /* Node handle */
147 msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */ 126 msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */
148 /* Queues of Message frames for messages from the DSP */ 127 /* Queues of Message frames for messages from the DSP */
149 msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL); 128 INIT_LIST_HEAD(&msg_q->msg_free_list);
150 msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL); 129 INIT_LIST_HEAD(&msg_q->msg_used_list);
151 if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL)
152 status = -ENOMEM;
153 else {
154 INIT_LIST_HEAD(&msg_q->msg_free_list->head);
155 INIT_LIST_HEAD(&msg_q->msg_used_list->head);
156 }
157 130
158 /* Create event that will be signalled when a message from 131 /* Create event that will be signalled when a message from
159 * the DSP is available. */ 132 * the DSP is available. */
160 if (!status) { 133 msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
161 msg_q->sync_event = kzalloc(sizeof(struct sync_object), 134 if (!msg_q->sync_event) {
162 GFP_KERNEL); 135 status = -ENOMEM;
163 if (msg_q->sync_event) 136 goto out_err;
164 sync_init_event(msg_q->sync_event); 137
165 else
166 status = -ENOMEM;
167 } 138 }
139 sync_init_event(msg_q->sync_event);
168 140
169 /* Create a notification list for message ready notification. */ 141 /* Create a notification list for message ready notification. */
170 if (!status) { 142 msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
171 msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), 143 if (!msg_q->ntfy_obj) {
172 GFP_KERNEL); 144 status = -ENOMEM;
173 if (msg_q->ntfy_obj) 145 goto out_err;
174 ntfy_init(msg_q->ntfy_obj);
175 else
176 status = -ENOMEM;
177 } 146 }
147 ntfy_init(msg_q->ntfy_obj);
178 148
179 /* Create events that will be used to synchronize cleanup 149 /* Create events that will be used to synchronize cleanup
180 * when the object is deleted. sync_done will be set to 150 * when the object is deleted. sync_done will be set to
181 * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack 151 * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
182 * will be set by the unblocked thread to signal that it 152 * will be set by the unblocked thread to signal that it
183 * is unblocked and will no longer reference the object. */ 153 * is unblocked and will no longer reference the object. */
184 if (!status) { 154 msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
185 msg_q->sync_done = kzalloc(sizeof(struct sync_object), 155 if (!msg_q->sync_done) {
186 GFP_KERNEL); 156 status = -ENOMEM;
187 if (msg_q->sync_done) 157 goto out_err;
188 sync_init_event(msg_q->sync_done);
189 else
190 status = -ENOMEM;
191 } 158 }
159 sync_init_event(msg_q->sync_done);
192 160
193 if (!status) { 161 msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
194 msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), 162 if (!msg_q->sync_done_ack) {
195 GFP_KERNEL); 163 status = -ENOMEM;
196 if (msg_q->sync_done_ack) 164 goto out_err;
197 sync_init_event(msg_q->sync_done_ack);
198 else
199 status = -ENOMEM;
200 } 165 }
166 sync_init_event(msg_q->sync_done_ack);
201 167
202 if (!status) { 168 /* Enter critical section */
203 /* Enter critical section */ 169 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
204 spin_lock_bh(&hmsg_mgr->msg_mgr_lock); 170 /* Initialize message frames and put in appropriate queues */
205 /* Initialize message frames and put in appropriate queues */ 171 for (i = 0; i < max_msgs && !status; i++) {
206 for (i = 0; i < max_msgs && !status; i++) { 172 status = add_new_msg(&hmsg_mgr->msg_free_list);
207 status = add_new_msg(hmsg_mgr->msg_free_list); 173 if (!status) {
208 if (!status) { 174 num_allocated++;
209 num_allocated++; 175 status = add_new_msg(&msg_q->msg_free_list);
210 status = add_new_msg(msg_q->msg_free_list);
211 }
212 }
213 if (status) {
214 /* Stay inside CS to prevent others from taking any
215 * of the newly allocated message frames. */
216 delete_msg_queue(msg_q, num_allocated);
217 } else {
218 lst_put_tail(hmsg_mgr->queue_list,
219 (struct list_head *)msg_q);
220 *msgq = msg_q;
221 /* Signal that free frames are now available */
222 if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
223 sync_set_event(hmsg_mgr->sync_event);
224
225 } 176 }
226 /* Exit critical section */ 177 }
178 if (status) {
227 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); 179 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
228 } else { 180 goto out_err;
229 delete_msg_queue(msg_q, 0);
230 } 181 }
231func_end: 182
183 list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list);
184 *msgq = msg_q;
185 /* Signal that free frames are now available */
186 if (!list_empty(&hmsg_mgr->msg_free_list))
187 sync_set_event(hmsg_mgr->sync_event);
188
189 /* Exit critical section */
190 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
191
192 return 0;
193out_err:
194 delete_msg_queue(msg_q, num_allocated);
232 return status; 195 return status;
233} 196}
234 197
@@ -251,10 +214,10 @@ void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
251 struct msg_mgr *hmsg_mgr; 214 struct msg_mgr *hmsg_mgr;
252 u32 io_msg_pend; 215 u32 io_msg_pend;
253 216
254 if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr) 217 if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
255 goto func_end; 218 return;
256 219
257 hmsg_mgr = msg_queue_obj->hmsg_mgr; 220 hmsg_mgr = msg_queue_obj->msg_mgr;
258 msg_queue_obj->done = true; 221 msg_queue_obj->done = true;
259 /* Unblock all threads blocked in MSG_Get() or MSG_Put(). */ 222 /* Unblock all threads blocked in MSG_Get() or MSG_Put(). */
260 io_msg_pend = msg_queue_obj->io_msg_pend; 223 io_msg_pend = msg_queue_obj->io_msg_pend;
@@ -267,18 +230,12 @@ void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
267 } 230 }
268 /* Remove message queue from hmsg_mgr->queue_list */ 231 /* Remove message queue from hmsg_mgr->queue_list */
269 spin_lock_bh(&hmsg_mgr->msg_mgr_lock); 232 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
270 lst_remove_elem(hmsg_mgr->queue_list, 233 list_del(&msg_queue_obj->list_elem);
271 (struct list_head *)msg_queue_obj);
272 /* Free the message queue object */ 234 /* Free the message queue object */
273 delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs); 235 delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
274 if (!hmsg_mgr->msg_free_list) 236 if (list_empty(&hmsg_mgr->msg_free_list))
275 goto func_cont;
276 if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
277 sync_reset_event(hmsg_mgr->sync_event); 237 sync_reset_event(hmsg_mgr->sync_event);
278func_cont:
279 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); 238 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
280func_end:
281 return;
282} 239}
283 240
284/* 241/*
@@ -290,91 +247,74 @@ int bridge_msg_get(struct msg_queue *msg_queue_obj,
290{ 247{
291 struct msg_frame *msg_frame_obj; 248 struct msg_frame *msg_frame_obj;
292 struct msg_mgr *hmsg_mgr; 249 struct msg_mgr *hmsg_mgr;
293 bool got_msg = false;
294 struct sync_object *syncs[2]; 250 struct sync_object *syncs[2];
295 u32 index; 251 u32 index;
296 int status = 0; 252 int status = 0;
297 253
298 if (!msg_queue_obj || pmsg == NULL) { 254 if (!msg_queue_obj || pmsg == NULL)
299 status = -ENOMEM; 255 return -ENOMEM;
300 goto func_end;
301 }
302 256
303 hmsg_mgr = msg_queue_obj->hmsg_mgr; 257 hmsg_mgr = msg_queue_obj->msg_mgr;
304 if (!msg_queue_obj->msg_used_list) {
305 status = -EFAULT;
306 goto func_end;
307 }
308 258
309 /* Enter critical section */
310 spin_lock_bh(&hmsg_mgr->msg_mgr_lock); 259 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
311 /* If a message is already there, get it */ 260 /* If a message is already there, get it */
312 if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) { 261 if (!list_empty(&msg_queue_obj->msg_used_list)) {
313 msg_frame_obj = (struct msg_frame *) 262 msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
314 lst_get_head(msg_queue_obj->msg_used_list); 263 struct msg_frame, list_elem);
315 if (msg_frame_obj != NULL) { 264 list_del(&msg_frame_obj->list_elem);
316 *pmsg = msg_frame_obj->msg_data.msg; 265 *pmsg = msg_frame_obj->msg_data.msg;
317 lst_put_tail(msg_queue_obj->msg_free_list, 266 list_add_tail(&msg_frame_obj->list_elem,
318 (struct list_head *)msg_frame_obj); 267 &msg_queue_obj->msg_free_list);
319 if (LST_IS_EMPTY(msg_queue_obj->msg_used_list)) 268 if (list_empty(&msg_queue_obj->msg_used_list))
320 sync_reset_event(msg_queue_obj->sync_event); 269 sync_reset_event(msg_queue_obj->sync_event);
321 270 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
322 got_msg = true; 271 return 0;
323 } 272 }
324 } else {
325 if (msg_queue_obj->done)
326 status = -EPERM;
327 else
328 msg_queue_obj->io_msg_pend++;
329 273
274 if (msg_queue_obj->done) {
275 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
276 return -EPERM;
330 } 277 }
331 /* Exit critical section */ 278 msg_queue_obj->io_msg_pend++;
332 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); 279 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
333 if (!status && !got_msg) { 280
334 /* Wait til message is available, timeout, or done. We don't 281 /*
335 * have to schedule the DPC, since the DSP will send messages 282 * Wait til message is available, timeout, or done. We don't
336 * when they are available. */ 283 * have to schedule the DPC, since the DSP will send messages
337 syncs[0] = msg_queue_obj->sync_event; 284 * when they are available.
338 syncs[1] = msg_queue_obj->sync_done; 285 */
339 status = sync_wait_on_multiple_events(syncs, 2, utimeout, 286 syncs[0] = msg_queue_obj->sync_event;
340 &index); 287 syncs[1] = msg_queue_obj->sync_done;
341 /* Enter critical section */ 288 status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);
342 spin_lock_bh(&hmsg_mgr->msg_mgr_lock); 289
343 if (msg_queue_obj->done) { 290 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
344 msg_queue_obj->io_msg_pend--; 291 if (msg_queue_obj->done) {
345 /* Exit critical section */ 292 msg_queue_obj->io_msg_pend--;
346 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); 293 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
347 /* Signal that we're not going to access msg_queue_obj 294 /*
348 * anymore, so it can be deleted. */ 295 * Signal that we're not going to access msg_queue_obj
349 (void)sync_set_event(msg_queue_obj->sync_done_ack); 296 * anymore, so it can be deleted.
350 status = -EPERM; 297 */
351 } else { 298 sync_set_event(msg_queue_obj->sync_done_ack);
352 if (!status) { 299 return -EPERM;
353 DBC_ASSERT(!LST_IS_EMPTY 300 }
354 (msg_queue_obj->msg_used_list)); 301 if (!status && !list_empty(&msg_queue_obj->msg_used_list)) {
355 /* Get msg from used list */ 302 /* Get msg from used list */
356 msg_frame_obj = (struct msg_frame *) 303 msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
357 lst_get_head(msg_queue_obj->msg_used_list); 304 struct msg_frame, list_elem);
358 /* Copy message into pmsg and put frame on the 305 list_del(&msg_frame_obj->list_elem);
359 * free list */ 306 /* Copy message into pmsg and put frame on the free list */
360 if (msg_frame_obj != NULL) { 307 *pmsg = msg_frame_obj->msg_data.msg;
361 *pmsg = msg_frame_obj->msg_data.msg; 308 list_add_tail(&msg_frame_obj->list_elem,
362 lst_put_tail 309 &msg_queue_obj->msg_free_list);
363 (msg_queue_obj->msg_free_list, 310 }
364 (struct list_head *) 311 msg_queue_obj->io_msg_pend--;
365 msg_frame_obj); 312 /* Reset the event if there are still queued messages */
366 } 313 if (!list_empty(&msg_queue_obj->msg_used_list))
367 } 314 sync_set_event(msg_queue_obj->sync_event);
368 msg_queue_obj->io_msg_pend--; 315
369 /* Reset the event if there are still queued messages */ 316 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
370 if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) 317
371 sync_set_event(msg_queue_obj->sync_event);
372
373 /* Exit critical section */
374 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
375 }
376 }
377func_end:
378 return status; 318 return status;
379} 319}
380 320
@@ -387,107 +327,100 @@ int bridge_msg_put(struct msg_queue *msg_queue_obj,
387{ 327{
388 struct msg_frame *msg_frame_obj; 328 struct msg_frame *msg_frame_obj;
389 struct msg_mgr *hmsg_mgr; 329 struct msg_mgr *hmsg_mgr;
390 bool put_msg = false;
391 struct sync_object *syncs[2]; 330 struct sync_object *syncs[2];
392 u32 index; 331 u32 index;
393 int status = 0; 332 int status;
394 333
395 if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr) { 334 if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr)
396 status = -ENOMEM; 335 return -EFAULT;
397 goto func_end; 336
398 } 337 hmsg_mgr = msg_queue_obj->msg_mgr;
399 hmsg_mgr = msg_queue_obj->hmsg_mgr;
400 if (!hmsg_mgr->msg_free_list) {
401 status = -EFAULT;
402 goto func_end;
403 }
404 338
405 spin_lock_bh(&hmsg_mgr->msg_mgr_lock); 339 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
406 340
407 /* If a message frame is available, use it */ 341 /* If a message frame is available, use it */
408 if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) { 342 if (!list_empty(&hmsg_mgr->msg_free_list)) {
409 msg_frame_obj = 343 msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
410 (struct msg_frame *)lst_get_head(hmsg_mgr->msg_free_list); 344 struct msg_frame, list_elem);
411 if (msg_frame_obj != NULL) { 345 list_del(&msg_frame_obj->list_elem);
412 msg_frame_obj->msg_data.msg = *pmsg; 346 msg_frame_obj->msg_data.msg = *pmsg;
413 msg_frame_obj->msg_data.msgq_id = 347 msg_frame_obj->msg_data.msgq_id =
414 msg_queue_obj->msgq_id; 348 msg_queue_obj->msgq_id;
415 lst_put_tail(hmsg_mgr->msg_used_list, 349 list_add_tail(&msg_frame_obj->list_elem,
416 (struct list_head *)msg_frame_obj); 350 &hmsg_mgr->msg_used_list);
417 hmsg_mgr->msgs_pending++; 351 hmsg_mgr->msgs_pending++;
418 put_msg = true; 352
419 } 353 if (list_empty(&hmsg_mgr->msg_free_list))
420 if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
421 sync_reset_event(hmsg_mgr->sync_event); 354 sync_reset_event(hmsg_mgr->sync_event);
422 355
423 /* Release critical section before scheduling DPC */ 356 /* Release critical section before scheduling DPC */
424 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); 357 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
425 /* Schedule a DPC, to do the actual data transfer: */ 358 /* Schedule a DPC, to do the actual data transfer: */
426 iosm_schedule(hmsg_mgr->hio_mgr); 359 iosm_schedule(hmsg_mgr->iomgr);
427 } else { 360 return 0;
428 if (msg_queue_obj->done) 361 }
429 status = -EPERM;
430 else
431 msg_queue_obj->io_msg_pend++;
432 362
363 if (msg_queue_obj->done) {
433 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); 364 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
365 return -EPERM;
434 } 366 }
435 if (!status && !put_msg) { 367 msg_queue_obj->io_msg_pend++;
436 /* Wait til a free message frame is available, timeout, 368
437 * or done */ 369 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
438 syncs[0] = hmsg_mgr->sync_event; 370
439 syncs[1] = msg_queue_obj->sync_done; 371 /* Wait til a free message frame is available, timeout, or done */
440 status = sync_wait_on_multiple_events(syncs, 2, utimeout, 372 syncs[0] = hmsg_mgr->sync_event;
441 &index); 373 syncs[1] = msg_queue_obj->sync_done;
442 if (status) 374 status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);
443 goto func_end; 375 if (status)
444 /* Enter critical section */ 376 return status;
445 spin_lock_bh(&hmsg_mgr->msg_mgr_lock); 377
446 if (msg_queue_obj->done) { 378 /* Enter critical section */
447 msg_queue_obj->io_msg_pend--; 379 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
448 /* Exit critical section */ 380 if (msg_queue_obj->done) {
449 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); 381 msg_queue_obj->io_msg_pend--;
450 /* Signal that we're not going to access msg_queue_obj 382 /* Exit critical section */
451 * anymore, so it can be deleted. */ 383 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
452 (void)sync_set_event(msg_queue_obj->sync_done_ack); 384 /*
453 status = -EPERM; 385 * Signal that we're not going to access msg_queue_obj
454 } else { 386 * anymore, so it can be deleted.
455 if (LST_IS_EMPTY(hmsg_mgr->msg_free_list)) { 387 */
456 status = -EFAULT; 388 sync_set_event(msg_queue_obj->sync_done_ack);
457 goto func_cont; 389 return -EPERM;
458 }
459 /* Get msg from free list */
460 msg_frame_obj = (struct msg_frame *)
461 lst_get_head(hmsg_mgr->msg_free_list);
462 /*
463 * Copy message into pmsg and put frame on the
464 * used list.
465 */
466 if (msg_frame_obj) {
467 msg_frame_obj->msg_data.msg = *pmsg;
468 msg_frame_obj->msg_data.msgq_id =
469 msg_queue_obj->msgq_id;
470 lst_put_tail(hmsg_mgr->msg_used_list,
471 (struct list_head *)msg_frame_obj);
472 hmsg_mgr->msgs_pending++;
473 /*
474 * Schedule a DPC, to do the actual
475 * data transfer.
476 */
477 iosm_schedule(hmsg_mgr->hio_mgr);
478 }
479
480 msg_queue_obj->io_msg_pend--;
481 /* Reset event if there are still frames available */
482 if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
483 sync_set_event(hmsg_mgr->sync_event);
484func_cont:
485 /* Exit critical section */
486 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
487 }
488 } 390 }
489func_end: 391
490 return status; 392 if (list_empty(&hmsg_mgr->msg_free_list)) {
393 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
394 return -EFAULT;
395 }
396
397 /* Get msg from free list */
398 msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
399 struct msg_frame, list_elem);
400 /*
401 * Copy message into pmsg and put frame on the
402 * used list.
403 */
404 list_del(&msg_frame_obj->list_elem);
405 msg_frame_obj->msg_data.msg = *pmsg;
406 msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id;
407 list_add_tail(&msg_frame_obj->list_elem, &hmsg_mgr->msg_used_list);
408 hmsg_mgr->msgs_pending++;
409 /*
410 * Schedule a DPC, to do the actual
411 * data transfer.
412 */
413 iosm_schedule(hmsg_mgr->iomgr);
414
415 msg_queue_obj->io_msg_pend--;
416 /* Reset event if there are still frames available */
417 if (!list_empty(&hmsg_mgr->msg_free_list))
418 sync_set_event(hmsg_mgr->sync_event);
419
420 /* Exit critical section */
421 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
422
423 return 0;
491} 424}
492 425
493/* 426/*
@@ -551,20 +484,17 @@ void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
551 * ======== add_new_msg ======== 484 * ======== add_new_msg ========
552 * Must be called in message manager critical section. 485 * Must be called in message manager critical section.
553 */ 486 */
554static int add_new_msg(struct lst_list *msg_list) 487static int add_new_msg(struct list_head *msg_list)
555{ 488{
556 struct msg_frame *pmsg; 489 struct msg_frame *pmsg;
557 int status = 0;
558 490
559 pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC); 491 pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
560 if (pmsg != NULL) { 492 if (!pmsg)
561 lst_init_elem((struct list_head *)pmsg); 493 return -ENOMEM;
562 lst_put_tail(msg_list, (struct list_head *)pmsg);
563 } else {
564 status = -ENOMEM;
565 }
566 494
567 return status; 495 list_add_tail(&pmsg->list_elem, msg_list);
496
497 return 0;
568} 498}
569 499
570/* 500/*
@@ -573,30 +503,13 @@ static int add_new_msg(struct lst_list *msg_list)
573static void delete_msg_mgr(struct msg_mgr *hmsg_mgr) 503static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
574{ 504{
575 if (!hmsg_mgr) 505 if (!hmsg_mgr)
576 goto func_end; 506 return;
577
578 if (hmsg_mgr->queue_list) {
579 if (LST_IS_EMPTY(hmsg_mgr->queue_list)) {
580 kfree(hmsg_mgr->queue_list);
581 hmsg_mgr->queue_list = NULL;
582 }
583 }
584
585 if (hmsg_mgr->msg_free_list) {
586 free_msg_list(hmsg_mgr->msg_free_list);
587 hmsg_mgr->msg_free_list = NULL;
588 }
589
590 if (hmsg_mgr->msg_used_list) {
591 free_msg_list(hmsg_mgr->msg_used_list);
592 hmsg_mgr->msg_used_list = NULL;
593 }
594 507
508 /* FIXME: free elements from queue_list? */
509 free_msg_list(&hmsg_mgr->msg_free_list);
510 free_msg_list(&hmsg_mgr->msg_used_list);
595 kfree(hmsg_mgr->sync_event); 511 kfree(hmsg_mgr->sync_event);
596
597 kfree(hmsg_mgr); 512 kfree(hmsg_mgr);
598func_end:
599 return;
600} 513}
601 514
602/* 515/*
@@ -605,37 +518,26 @@ func_end:
605static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp) 518static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
606{ 519{
607 struct msg_mgr *hmsg_mgr; 520 struct msg_mgr *hmsg_mgr;
608 struct msg_frame *pmsg; 521 struct msg_frame *pmsg, *tmp;
609 u32 i; 522 u32 i;
610 523
611 if (!msg_queue_obj || 524 if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
612 !msg_queue_obj->hmsg_mgr || !msg_queue_obj->hmsg_mgr->msg_free_list) 525 return;
613 goto func_end;
614 526
615 hmsg_mgr = msg_queue_obj->hmsg_mgr; 527 hmsg_mgr = msg_queue_obj->msg_mgr;
616 528
617 /* Pull off num_to_dsp message frames from Msg manager and free */ 529 /* Pull off num_to_dsp message frames from Msg manager and free */
618 for (i = 0; i < num_to_dsp; i++) { 530 i = 0;
619 531 list_for_each_entry_safe(pmsg, tmp, &hmsg_mgr->msg_free_list,
620 if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) { 532 list_elem) {
621 pmsg = (struct msg_frame *) 533 list_del(&pmsg->list_elem);
622 lst_get_head(hmsg_mgr->msg_free_list); 534 kfree(pmsg);
623 kfree(pmsg); 535 if (i++ >= num_to_dsp)
624 } else {
625 /* Cannot free all of the message frames */
626 break; 536 break;
627 }
628 }
629
630 if (msg_queue_obj->msg_free_list) {
631 free_msg_list(msg_queue_obj->msg_free_list);
632 msg_queue_obj->msg_free_list = NULL;
633 } 537 }
634 538
635 if (msg_queue_obj->msg_used_list) { 539 free_msg_list(&msg_queue_obj->msg_free_list);
636 free_msg_list(msg_queue_obj->msg_used_list); 540 free_msg_list(&msg_queue_obj->msg_used_list);
637 msg_queue_obj->msg_used_list = NULL;
638 }
639 541
640 if (msg_queue_obj->ntfy_obj) { 542 if (msg_queue_obj->ntfy_obj) {
641 ntfy_delete(msg_queue_obj->ntfy_obj); 543 ntfy_delete(msg_queue_obj->ntfy_obj);
@@ -647,27 +549,20 @@ static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
647 kfree(msg_queue_obj->sync_done_ack); 549 kfree(msg_queue_obj->sync_done_ack);
648 550
649 kfree(msg_queue_obj); 551 kfree(msg_queue_obj);
650func_end:
651 return;
652
653} 552}
654 553
655/* 554/*
656 * ======== free_msg_list ======== 555 * ======== free_msg_list ========
657 */ 556 */
658static void free_msg_list(struct lst_list *msg_list) 557static void free_msg_list(struct list_head *msg_list)
659{ 558{
660 struct msg_frame *pmsg; 559 struct msg_frame *pmsg, *tmp;
661 560
662 if (!msg_list) 561 if (!msg_list)
663 goto func_end; 562 return;
664 563
665 while ((pmsg = (struct msg_frame *)lst_get_head(msg_list)) != NULL) 564 list_for_each_entry_safe(pmsg, tmp, msg_list, list_elem) {
565 list_del(&pmsg->list_elem);
666 kfree(pmsg); 566 kfree(pmsg);
667 567 }
668 DBC_ASSERT(LST_IS_EMPTY(msg_list));
669
670 kfree(msg_list);
671func_end:
672 return;
673} 568}
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index a3f69f6f505..e1c4492a710 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -229,11 +229,11 @@ static struct notifier_block dsp_mbox_notifier = {
229 229
230static inline void flush_all(struct bridge_dev_context *dev_context) 230static inline void flush_all(struct bridge_dev_context *dev_context)
231{ 231{
232 if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION || 232 if (dev_context->brd_state == BRD_DSP_HIBERNATION ||
233 dev_context->dw_brd_state == BRD_HIBERNATION) 233 dev_context->brd_state == BRD_HIBERNATION)
234 wake_dsp(dev_context, NULL); 234 wake_dsp(dev_context, NULL);
235 235
236 hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base); 236 hw_mmu_tlb_flush_all(dev_context->dsp_mmu_base);
237} 237}
238 238
239static void bad_page_dump(u32 pa, struct page *pg) 239static void bad_page_dump(u32 pa, struct page *pg)
@@ -259,8 +259,6 @@ void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
259 259
260 DBC_REQUIRE(driver_file_name != NULL); 260 DBC_REQUIRE(driver_file_name != NULL);
261 261
262 io_sm_init(); /* Initialization of io_sm module */
263
264 if (strcmp(driver_file_name, "UMA") == 0) 262 if (strcmp(driver_file_name, "UMA") == 0)
265 *drv_intf = &drv_interface_fxns; 263 *drv_intf = &drv_interface_fxns;
266 else 264 else
@@ -308,7 +306,7 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
308 dsp_clk_enable(DSP_CLK_IVA2); 306 dsp_clk_enable(DSP_CLK_IVA2);
309 307
310 /* set the device state to IDLE */ 308 /* set the device state to IDLE */
311 dev_context->dw_brd_state = BRD_IDLE; 309 dev_context->brd_state = BRD_IDLE;
312 310
313 return 0; 311 return 0;
314} 312}
@@ -325,16 +323,16 @@ static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
325 int status = 0; 323 int status = 0;
326 struct bridge_dev_context *dev_context = dev_ctxt; 324 struct bridge_dev_context *dev_context = dev_ctxt;
327 u32 offset; 325 u32 offset;
328 u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr; 326 u32 dsp_base_addr = dev_ctxt->dsp_base_addr;
329 327
330 if (dsp_addr < dev_context->dw_dsp_start_add) { 328 if (dsp_addr < dev_context->dsp_start_add) {
331 status = -EPERM; 329 status = -EPERM;
332 return status; 330 return status;
333 } 331 }
334 /* change here to account for the 3 bands of the DSP internal memory */ 332 /* change here to account for the 3 bands of the DSP internal memory */
335 if ((dsp_addr - dev_context->dw_dsp_start_add) < 333 if ((dsp_addr - dev_context->dsp_start_add) <
336 dev_context->dw_internal_size) { 334 dev_context->internal_size) {
337 offset = dsp_addr - dev_context->dw_dsp_start_add; 335 offset = dsp_addr - dev_context->dsp_start_add;
338 } else { 336 } else {
339 status = read_ext_dsp_data(dev_context, host_buff, dsp_addr, 337 status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
340 ul_num_bytes, mem_type); 338 ul_num_bytes, mem_type);
@@ -356,7 +354,7 @@ static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
356 int status = 0; 354 int status = 0;
357 struct bridge_dev_context *dev_context = dev_ctxt; 355 struct bridge_dev_context *dev_context = dev_ctxt;
358 356
359 dev_context->dw_brd_state = brd_state; 357 dev_context->brd_state = brd_state;
360 return status; 358 return status;
361} 359}
362 360
@@ -398,17 +396,17 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
398 * last dsp base image was loaded. The first entry is always 396 * last dsp base image was loaded. The first entry is always
399 * SHMMEM base. */ 397 * SHMMEM base. */
400 /* Get SHM_BEG - convert to byte address */ 398 /* Get SHM_BEG - convert to byte address */
401 (void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME, 399 (void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
402 &ul_shm_base_virt); 400 &ul_shm_base_virt);
403 ul_shm_base_virt *= DSPWORDSIZE; 401 ul_shm_base_virt *= DSPWORDSIZE;
404 DBC_ASSERT(ul_shm_base_virt != 0); 402 DBC_ASSERT(ul_shm_base_virt != 0);
405 /* DSP Virtual address */ 403 /* DSP Virtual address */
406 ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va; 404 ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va;
407 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 405 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
408 ul_shm_offset_virt = 406 ul_shm_offset_virt =
409 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); 407 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
410 /* Kernel logical address */ 408 /* Kernel logical address */
411 ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt; 409 ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;
412 410
413 DBC_ASSERT(ul_shm_base != 0); 411 DBC_ASSERT(ul_shm_base != 0);
414 /* 2nd wd is used as sync field */ 412 /* 2nd wd is used as sync field */
@@ -454,9 +452,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
454 udelay(100); 452 udelay(100);
455 453
456 /* Disbale the DSP MMU */ 454 /* Disbale the DSP MMU */
457 hw_mmu_disable(resources->dw_dmmu_base); 455 hw_mmu_disable(resources->dmmu_base);
458 /* Disable TWL */ 456 /* Disable TWL */
459 hw_mmu_twl_disable(resources->dw_dmmu_base); 457 hw_mmu_twl_disable(resources->dmmu_base);
460 458
461 /* Only make TLB entry if both addresses are non-zero */ 459 /* Only make TLB entry if both addresses are non-zero */
462 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; 460 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
@@ -468,20 +466,20 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
468 .mixed_size = e->mixed_mode, 466 .mixed_size = e->mixed_mode,
469 }; 467 };
470 468
471 if (!e->ul_gpp_pa || !e->ul_dsp_va) 469 if (!e->gpp_pa || !e->dsp_va)
472 continue; 470 continue;
473 471
474 dev_dbg(bridge, 472 dev_dbg(bridge,
475 "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x", 473 "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
476 itmp_entry_ndx, 474 itmp_entry_ndx,
477 e->ul_gpp_pa, 475 e->gpp_pa,
478 e->ul_dsp_va, 476 e->dsp_va,
479 e->ul_size); 477 e->size);
480 478
481 hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base, 479 hw_mmu_tlb_add(dev_context->dsp_mmu_base,
482 e->ul_gpp_pa, 480 e->gpp_pa,
483 e->ul_dsp_va, 481 e->dsp_va,
484 e->ul_size, 482 e->size,
485 itmp_entry_ndx, 483 itmp_entry_ndx,
486 &map_attrs, 1, 1); 484 &map_attrs, 1, 1);
487 485
@@ -492,24 +490,24 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
492 /* Lock the above TLB entries and get the BIOS and load monitor timer 490 /* Lock the above TLB entries and get the BIOS and load monitor timer
493 * information */ 491 * information */
494 if (!status) { 492 if (!status) {
495 hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx); 493 hw_mmu_num_locked_set(resources->dmmu_base, itmp_entry_ndx);
496 hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx); 494 hw_mmu_victim_num_set(resources->dmmu_base, itmp_entry_ndx);
497 hw_mmu_ttb_set(resources->dw_dmmu_base, 495 hw_mmu_ttb_set(resources->dmmu_base,
498 dev_context->pt_attrs->l1_base_pa); 496 dev_context->pt_attrs->l1_base_pa);
499 hw_mmu_twl_enable(resources->dw_dmmu_base); 497 hw_mmu_twl_enable(resources->dmmu_base);
500 /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */ 498 /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
501 499
502 temp = __raw_readl((resources->dw_dmmu_base) + 0x10); 500 temp = __raw_readl((resources->dmmu_base) + 0x10);
503 temp = (temp & 0xFFFFFFEF) | 0x11; 501 temp = (temp & 0xFFFFFFEF) | 0x11;
504 __raw_writel(temp, (resources->dw_dmmu_base) + 0x10); 502 __raw_writel(temp, (resources->dmmu_base) + 0x10);
505 503
506 /* Let the DSP MMU run */ 504 /* Let the DSP MMU run */
507 hw_mmu_enable(resources->dw_dmmu_base); 505 hw_mmu_enable(resources->dmmu_base);
508 506
509 /* Enable the BIOS clock */ 507 /* Enable the BIOS clock */
510 (void)dev_get_symbol(dev_context->hdev_obj, 508 (void)dev_get_symbol(dev_context->dev_obj,
511 BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); 509 BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
512 (void)dev_get_symbol(dev_context->hdev_obj, 510 (void)dev_get_symbol(dev_context->dev_obj,
513 BRIDGEINIT_LOADMON_GPTIMER, 511 BRIDGEINIT_LOADMON_GPTIMER,
514 &ul_load_monitor_timer); 512 &ul_load_monitor_timer);
515 } 513 }
@@ -538,7 +536,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
538 536
539 if (!status) { 537 if (!status) {
540 /* Set the DSP clock rate */ 538 /* Set the DSP clock rate */
541 (void)dev_get_symbol(dev_context->hdev_obj, 539 (void)dev_get_symbol(dev_context->dev_obj,
542 "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); 540 "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
543 /*Set Autoidle Mode for IVA2 PLL */ 541 /*Set Autoidle Mode for IVA2 PLL */
544 (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT, 542 (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
@@ -568,18 +566,18 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
568 } 566 }
569 if (!status) { 567 if (!status) {
570/*PM_IVA2GRPSEL_PER = 0xC0;*/ 568/*PM_IVA2GRPSEL_PER = 0xC0;*/
571 temp = readl(resources->dw_per_pm_base + 0xA8); 569 temp = readl(resources->per_pm_base + 0xA8);
572 temp = (temp & 0xFFFFFF30) | 0xC0; 570 temp = (temp & 0xFFFFFF30) | 0xC0;
573 writel(temp, resources->dw_per_pm_base + 0xA8); 571 writel(temp, resources->per_pm_base + 0xA8);
574 572
575/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */ 573/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
576 temp = readl(resources->dw_per_pm_base + 0xA4); 574 temp = readl(resources->per_pm_base + 0xA4);
577 temp = (temp & 0xFFFFFF3F); 575 temp = (temp & 0xFFFFFF3F);
578 writel(temp, resources->dw_per_pm_base + 0xA4); 576 writel(temp, resources->per_pm_base + 0xA4);
579/*CM_SLEEPDEP_PER |= 0x04; */ 577/*CM_SLEEPDEP_PER |= 0x04; */
580 temp = readl(resources->dw_per_base + 0x44); 578 temp = readl(resources->per_base + 0x44);
581 temp = (temp & 0xFFFFFFFB) | 0x04; 579 temp = (temp & 0xFFFFFFFB) | 0x04;
582 writel(temp, resources->dw_per_base + 0x44); 580 writel(temp, resources->per_base + 0x44);
583 581
584/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */ 582/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */
585 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, 583 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
@@ -588,7 +586,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
588 /* Let DSP go */ 586 /* Let DSP go */
589 dev_dbg(bridge, "%s Unreset\n", __func__); 587 dev_dbg(bridge, "%s Unreset\n", __func__);
590 /* Enable DSP MMU Interrupts */ 588 /* Enable DSP MMU Interrupts */
591 hw_mmu_event_enable(resources->dw_dmmu_base, 589 hw_mmu_event_enable(resources->dmmu_base,
592 HW_MMU_ALL_INTERRUPTS); 590 HW_MMU_ALL_INTERRUPTS);
593 /* release the RST1, DSP starts executing now .. */ 591 /* release the RST1, DSP starts executing now .. */
594 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, 592 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
@@ -609,7 +607,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
609 dsp_wdt_sm_set((void *)ul_shm_base); 607 dsp_wdt_sm_set((void *)ul_shm_base);
610 dsp_wdt_enable(true); 608 dsp_wdt_enable(true);
611 609
612 status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr); 610 status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
613 if (hio_mgr) { 611 if (hio_mgr) {
614 io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL); 612 io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
615 /* Write the synchronization bit to indicate the 613 /* Write the synchronization bit to indicate the
@@ -618,10 +616,10 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
618 __raw_writel(0XCAFECAFE, dw_sync_addr); 616 __raw_writel(0XCAFECAFE, dw_sync_addr);
619 617
620 /* update board state */ 618 /* update board state */
621 dev_context->dw_brd_state = BRD_RUNNING; 619 dev_context->brd_state = BRD_RUNNING;
622 /* (void)chnlsm_enable_interrupt(dev_context); */ 620 /* (void)chnlsm_enable_interrupt(dev_context); */
623 } else { 621 } else {
624 dev_context->dw_brd_state = BRD_UNKNOWN; 622 dev_context->brd_state = BRD_UNKNOWN;
625 } 623 }
626 } 624 }
627 return status; 625 return status;
@@ -644,7 +642,7 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
644 struct omap_dsp_platform_data *pdata = 642 struct omap_dsp_platform_data *pdata =
645 omap_dspbridge_dev->dev.platform_data; 643 omap_dspbridge_dev->dev.platform_data;
646 644
647 if (dev_context->dw_brd_state == BRD_STOPPED) 645 if (dev_context->brd_state == BRD_STOPPED)
648 return status; 646 return status;
649 647
650 /* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode, 648 /* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode,
@@ -669,10 +667,10 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
669 udelay(10); 667 udelay(10);
670 /* Release the Ext Base virtual Address as the next DSP Program 668 /* Release the Ext Base virtual Address as the next DSP Program
671 * may have a different load address */ 669 * may have a different load address */
672 if (dev_context->dw_dsp_ext_base_addr) 670 if (dev_context->dsp_ext_base_addr)
673 dev_context->dw_dsp_ext_base_addr = 0; 671 dev_context->dsp_ext_base_addr = 0;
674 672
675 dev_context->dw_brd_state = BRD_STOPPED; /* update board state */ 673 dev_context->brd_state = BRD_STOPPED; /* update board state */
676 674
677 dsp_wdt_enable(false); 675 dsp_wdt_enable(false);
678 676
@@ -708,7 +706,7 @@ static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
708 int *board_state) 706 int *board_state)
709{ 707{
710 struct bridge_dev_context *dev_context = dev_ctxt; 708 struct bridge_dev_context *dev_context = dev_ctxt;
711 *board_state = dev_context->dw_brd_state; 709 *board_state = dev_context->brd_state;
712 return 0; 710 return 0;
713} 711}
714 712
@@ -723,12 +721,12 @@ static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
723 int status = 0; 721 int status = 0;
724 struct bridge_dev_context *dev_context = dev_ctxt; 722 struct bridge_dev_context *dev_context = dev_ctxt;
725 723
726 if (dsp_addr < dev_context->dw_dsp_start_add) { 724 if (dsp_addr < dev_context->dsp_start_add) {
727 status = -EPERM; 725 status = -EPERM;
728 return status; 726 return status;
729 } 727 }
730 if ((dsp_addr - dev_context->dw_dsp_start_add) < 728 if ((dsp_addr - dev_context->dsp_start_add) <
731 dev_context->dw_internal_size) { 729 dev_context->internal_size) {
732 status = write_dsp_data(dev_ctxt, host_buff, dsp_addr, 730 status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
733 ul_num_bytes, mem_type); 731 ul_num_bytes, mem_type);
734 } else { 732 } else {
@@ -766,24 +764,24 @@ static int bridge_dev_create(struct bridge_dev_context
766 goto func_end; 764 goto func_end;
767 } 765 }
768 766
769 dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE; 767 dev_context->dsp_start_add = (u32) OMAP_GEM_BASE;
770 dev_context->dw_self_loop = (u32) NULL; 768 dev_context->self_loop = (u32) NULL;
771 dev_context->dsp_per_clks = 0; 769 dev_context->dsp_per_clks = 0;
772 dev_context->dw_internal_size = OMAP_DSP_SIZE; 770 dev_context->internal_size = OMAP_DSP_SIZE;
773 /* Clear dev context MMU table entries. 771 /* Clear dev context MMU table entries.
774 * These get set on bridge_io_on_loaded() call after program loaded. */ 772 * These get set on bridge_io_on_loaded() call after program loaded. */
775 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) { 773 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
776 dev_context->atlb_entry[entry_ndx].ul_gpp_pa = 774 dev_context->atlb_entry[entry_ndx].gpp_pa =
777 dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0; 775 dev_context->atlb_entry[entry_ndx].dsp_va = 0;
778 } 776 }
779 dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *) 777 dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
780 (config_param-> 778 (config_param->
781 dw_mem_base 779 mem_base
782 [3]), 780 [3]),
783 config_param-> 781 config_param->
784 dw_mem_length 782 mem_length
785 [3]); 783 [3]);
786 if (!dev_context->dw_dsp_base_addr) 784 if (!dev_context->dsp_base_addr)
787 status = -EPERM; 785 status = -EPERM;
788 786
789 pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL); 787 pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
@@ -871,12 +869,12 @@ static int bridge_dev_create(struct bridge_dev_context
871 udelay(5); 869 udelay(5);
872 /* MMU address is obtained from the host 870 /* MMU address is obtained from the host
873 * resources struct */ 871 * resources struct */
874 dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base; 872 dev_context->dsp_mmu_base = resources->dmmu_base;
875 } 873 }
876 if (!status) { 874 if (!status) {
877 dev_context->hdev_obj = hdev_obj; 875 dev_context->dev_obj = hdev_obj;
878 /* Store current board state. */ 876 /* Store current board state. */
879 dev_context->dw_brd_state = BRD_UNKNOWN; 877 dev_context->brd_state = BRD_UNKNOWN;
880 dev_context->resources = resources; 878 dev_context->resources = resources;
881 dsp_clk_enable(DSP_CLK_IVA2); 879 dsp_clk_enable(DSP_CLK_IVA2);
882 bridge_brd_stop(dev_context); 880 bridge_brd_stop(dev_context);
@@ -1003,12 +1001,12 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
1003 host_res = dev_context->resources; 1001 host_res = dev_context->resources;
1004 shm_size = drv_datap->shm_size; 1002 shm_size = drv_datap->shm_size;
1005 if (shm_size >= 0x10000) { 1003 if (shm_size >= 0x10000) {
1006 if ((host_res->dw_mem_base[1]) && 1004 if ((host_res->mem_base[1]) &&
1007 (host_res->dw_mem_phys[1])) { 1005 (host_res->mem_phys[1])) {
1008 mem_free_phys_mem((void *) 1006 mem_free_phys_mem((void *)
1009 host_res->dw_mem_base 1007 host_res->mem_base
1010 [1], 1008 [1],
1011 host_res->dw_mem_phys 1009 host_res->mem_phys
1012 [1], shm_size); 1010 [1], shm_size);
1013 } 1011 }
1014 } else { 1012 } else {
@@ -1017,34 +1015,31 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
1017 "mem_free_phys_mem\n", __func__, 1015 "mem_free_phys_mem\n", __func__,
1018 status); 1016 status);
1019 } 1017 }
1020 host_res->dw_mem_base[1] = 0; 1018 host_res->mem_base[1] = 0;
1021 host_res->dw_mem_phys[1] = 0; 1019 host_res->mem_phys[1] = 0;
1022 1020
1023 if (host_res->dw_mem_base[0]) 1021 if (host_res->mem_base[0])
1024 iounmap((void *)host_res->dw_mem_base[0]); 1022 iounmap((void *)host_res->mem_base[0]);
1025 if (host_res->dw_mem_base[2]) 1023 if (host_res->mem_base[2])
1026 iounmap((void *)host_res->dw_mem_base[2]); 1024 iounmap((void *)host_res->mem_base[2]);
1027 if (host_res->dw_mem_base[3]) 1025 if (host_res->mem_base[3])
1028 iounmap((void *)host_res->dw_mem_base[3]); 1026 iounmap((void *)host_res->mem_base[3]);
1029 if (host_res->dw_mem_base[4]) 1027 if (host_res->mem_base[4])
1030 iounmap((void *)host_res->dw_mem_base[4]); 1028 iounmap((void *)host_res->mem_base[4]);
1031 if (host_res->dw_dmmu_base) 1029 if (host_res->dmmu_base)
1032 iounmap(host_res->dw_dmmu_base); 1030 iounmap(host_res->dmmu_base);
1033 if (host_res->dw_per_base) 1031 if (host_res->per_base)
1034 iounmap(host_res->dw_per_base); 1032 iounmap(host_res->per_base);
1035 if (host_res->dw_per_pm_base) 1033 if (host_res->per_pm_base)
1036 iounmap((void *)host_res->dw_per_pm_base); 1034 iounmap((void *)host_res->per_pm_base);
1037 if (host_res->dw_core_pm_base) 1035 if (host_res->core_pm_base)
1038 iounmap((void *)host_res->dw_core_pm_base); 1036 iounmap((void *)host_res->core_pm_base);
1039 if (host_res->dw_sys_ctrl_base) 1037
1040 iounmap(host_res->dw_sys_ctrl_base); 1038 host_res->mem_base[0] = (u32) NULL;
1041 1039 host_res->mem_base[2] = (u32) NULL;
1042 host_res->dw_mem_base[0] = (u32) NULL; 1040 host_res->mem_base[3] = (u32) NULL;
1043 host_res->dw_mem_base[2] = (u32) NULL; 1041 host_res->mem_base[4] = (u32) NULL;
1044 host_res->dw_mem_base[3] = (u32) NULL; 1042 host_res->dmmu_base = NULL;
1045 host_res->dw_mem_base[4] = (u32) NULL;
1046 host_res->dw_dmmu_base = NULL;
1047 host_res->dw_sys_ctrl_base = NULL;
1048 1043
1049 kfree(host_res); 1044 kfree(host_res);
1050 } 1045 }
@@ -1075,8 +1070,8 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
1075 status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr, 1070 status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
1076 copy_bytes, mem_type); 1071 copy_bytes, mem_type);
1077 if (!status) { 1072 if (!status) {
1078 if (dest_addr < (dev_context->dw_dsp_start_add + 1073 if (dest_addr < (dev_context->dsp_start_add +
1079 dev_context->dw_internal_size)) { 1074 dev_context->internal_size)) {
1080 /* Write to Internal memory */ 1075 /* Write to Internal memory */
1081 status = write_dsp_data(dev_ctxt, host_buf, 1076 status = write_dsp_data(dev_ctxt, host_buf,
1082 dest_addr, copy_bytes, 1077 dest_addr, copy_bytes,
@@ -1109,8 +1104,8 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
1109 while (ul_remain_bytes > 0 && !status) { 1104 while (ul_remain_bytes > 0 && !status) {
1110 ul_bytes = 1105 ul_bytes =
1111 ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes; 1106 ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
1112 if (dsp_addr < (dev_context->dw_dsp_start_add + 1107 if (dsp_addr < (dev_context->dsp_start_add +
1113 dev_context->dw_internal_size)) { 1108 dev_context->internal_size)) {
1114 status = 1109 status =
1115 write_dsp_data(dev_ctxt, host_buff, dsp_addr, 1110 write_dsp_data(dev_ctxt, host_buff, dsp_addr,
1116 ul_bytes, mem_type); 1111 ul_bytes, mem_type);
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index fb9026e1403..02dd4391309 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -29,13 +29,13 @@
29/* ----------------------------------- Platform Manager */ 29/* ----------------------------------- Platform Manager */
30#include <dspbridge/brddefs.h> 30#include <dspbridge/brddefs.h>
31#include <dspbridge/dev.h> 31#include <dspbridge/dev.h>
32#include <dspbridge/iodefs.h> 32#include <dspbridge/io.h>
33 33
34/* ------------------------------------ Hardware Abstraction Layer */ 34/* ------------------------------------ Hardware Abstraction Layer */
35#include <hw_defs.h> 35#include <hw_defs.h>
36#include <hw_mmu.h> 36#include <hw_mmu.h>
37 37
38#include <dspbridge/pwr_sh.h> 38#include <dspbridge/pwr.h>
39 39
40/* ----------------------------------- Bridge Driver */ 40/* ----------------------------------- Bridge Driver */
41#include <dspbridge/dspdeh.h> 41#include <dspbridge/dspdeh.h>
@@ -118,10 +118,10 @@ int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
118 118
119 if (!status) { 119 if (!status) {
120 /* Update the Bridger Driver state */ 120 /* Update the Bridger Driver state */
121 dev_context->dw_brd_state = BRD_DSP_HIBERNATION; 121 dev_context->brd_state = BRD_DSP_HIBERNATION;
122#ifdef CONFIG_TIDSPBRIDGE_DVFS 122#ifdef CONFIG_TIDSPBRIDGE_DVFS
123 status = 123 status =
124 dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr); 124 dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
125 if (!hio_mgr) { 125 if (!hio_mgr) {
126 status = DSP_EHANDLE; 126 status = DSP_EHANDLE;
127 return status; 127 return status;
@@ -163,7 +163,7 @@ int sleep_dsp(struct bridge_dev_context *dev_context, u32 dw_cmd,
163 if ((dw_cmd != PWR_DEEPSLEEP) && (dw_cmd != PWR_EMERGENCYDEEPSLEEP)) 163 if ((dw_cmd != PWR_DEEPSLEEP) && (dw_cmd != PWR_EMERGENCYDEEPSLEEP))
164 return -EINVAL; 164 return -EINVAL;
165 165
166 switch (dev_context->dw_brd_state) { 166 switch (dev_context->brd_state) {
167 case BRD_RUNNING: 167 case BRD_RUNNING:
168 omap_mbox_save_ctx(dev_context->mbox); 168 omap_mbox_save_ctx(dev_context->mbox);
169 if (dsp_test_sleepstate == PWRDM_POWER_OFF) { 169 if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
@@ -216,16 +216,16 @@ int sleep_dsp(struct bridge_dev_context *dev_context, u32 dw_cmd,
216 pr_err("%s: Timed out waiting for DSP off mode, state %x\n", 216 pr_err("%s: Timed out waiting for DSP off mode, state %x\n",
217 __func__, pwr_state); 217 __func__, pwr_state);
218#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR 218#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR
219 dev_get_deh_mgr(dev_context->hdev_obj, &hdeh_mgr); 219 dev_get_deh_mgr(dev_context->dev_obj, &hdeh_mgr);
220 bridge_deh_notify(hdeh_mgr, DSP_PWRERROR, 0); 220 bridge_deh_notify(hdeh_mgr, DSP_PWRERROR, 0);
221#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */ 221#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */
222 return -ETIMEDOUT; 222 return -ETIMEDOUT;
223 } else { 223 } else {
224 /* Update the Bridger Driver state */ 224 /* Update the Bridger Driver state */
225 if (dsp_test_sleepstate == PWRDM_POWER_OFF) 225 if (dsp_test_sleepstate == PWRDM_POWER_OFF)
226 dev_context->dw_brd_state = BRD_HIBERNATION; 226 dev_context->brd_state = BRD_HIBERNATION;
227 else 227 else
228 dev_context->dw_brd_state = BRD_RETENTION; 228 dev_context->brd_state = BRD_RETENTION;
229 229
230 /* Disable wdt on hibernation. */ 230 /* Disable wdt on hibernation. */
231 dsp_wdt_enable(false); 231 dsp_wdt_enable(false);
@@ -258,8 +258,8 @@ int wake_dsp(struct bridge_dev_context *dev_context, void *pargs)
258#ifdef CONFIG_PM 258#ifdef CONFIG_PM
259 259
260 /* Check the board state, if it is not 'SLEEP' then return */ 260 /* Check the board state, if it is not 'SLEEP' then return */
261 if (dev_context->dw_brd_state == BRD_RUNNING || 261 if (dev_context->brd_state == BRD_RUNNING ||
262 dev_context->dw_brd_state == BRD_STOPPED) { 262 dev_context->brd_state == BRD_STOPPED) {
263 /* The Device is in 'RET' or 'OFF' state and Bridge state is not 263 /* The Device is in 'RET' or 'OFF' state and Bridge state is not
264 * 'SLEEP', this means state inconsistency, so return */ 264 * 'SLEEP', this means state inconsistency, so return */
265 return 0; 265 return 0;
@@ -269,7 +269,7 @@ int wake_dsp(struct bridge_dev_context *dev_context, void *pargs)
269 sm_interrupt_dsp(dev_context, MBX_PM_DSPWAKEUP); 269 sm_interrupt_dsp(dev_context, MBX_PM_DSPWAKEUP);
270 270
271 /* Set the device state to RUNNIG */ 271 /* Set the device state to RUNNIG */
272 dev_context->dw_brd_state = BRD_RUNNING; 272 dev_context->brd_state = BRD_RUNNING;
273#endif /* CONFIG_PM */ 273#endif /* CONFIG_PM */
274 return status; 274 return status;
275} 275}
@@ -351,12 +351,12 @@ int pre_scale_dsp(struct bridge_dev_context *dev_context, void *pargs)
351 351
352 dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n", 352 dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
353 __func__, voltage_domain, level); 353 __func__, voltage_domain, level);
354 if ((dev_context->dw_brd_state == BRD_HIBERNATION) || 354 if ((dev_context->brd_state == BRD_HIBERNATION) ||
355 (dev_context->dw_brd_state == BRD_RETENTION) || 355 (dev_context->brd_state == BRD_RETENTION) ||
356 (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) { 356 (dev_context->brd_state == BRD_DSP_HIBERNATION)) {
357 dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n"); 357 dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n");
358 return 0; 358 return 0;
359 } else if ((dev_context->dw_brd_state == BRD_RUNNING)) { 359 } else if ((dev_context->brd_state == BRD_RUNNING)) {
360 /* Send a prenotificatio to DSP */ 360 /* Send a prenotificatio to DSP */
361 dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__); 361 dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__);
362 sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY); 362 sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY);
@@ -382,7 +382,7 @@ int post_scale_dsp(struct bridge_dev_context *dev_context,
382 u32 voltage_domain; 382 u32 voltage_domain;
383 struct io_mgr *hio_mgr; 383 struct io_mgr *hio_mgr;
384 384
385 status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr); 385 status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
386 if (!hio_mgr) 386 if (!hio_mgr)
387 return -EFAULT; 387 return -EFAULT;
388 388
@@ -390,14 +390,14 @@ int post_scale_dsp(struct bridge_dev_context *dev_context,
390 level = *((u32 *) pargs + 1); 390 level = *((u32 *) pargs + 1);
391 dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n", 391 dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
392 __func__, voltage_domain, level); 392 __func__, voltage_domain, level);
393 if ((dev_context->dw_brd_state == BRD_HIBERNATION) || 393 if ((dev_context->brd_state == BRD_HIBERNATION) ||
394 (dev_context->dw_brd_state == BRD_RETENTION) || 394 (dev_context->brd_state == BRD_RETENTION) ||
395 (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) { 395 (dev_context->brd_state == BRD_DSP_HIBERNATION)) {
396 /* Update the OPP value in shared memory */ 396 /* Update the OPP value in shared memory */
397 io_sh_msetting(hio_mgr, SHM_CURROPP, &level); 397 io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
398 dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n", 398 dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n",
399 __func__); 399 __func__);
400 } else if ((dev_context->dw_brd_state == BRD_RUNNING)) { 400 } else if ((dev_context->brd_state == BRD_RUNNING)) {
401 /* Update the OPP value in shared memory */ 401 /* Update the OPP value in shared memory */
402 io_sh_msetting(hio_mgr, SHM_CURROPP, &level); 402 io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
403 /* Send a post notification to DSP */ 403 /* Send a post notification to DSP */
@@ -434,8 +434,8 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
434 434
435 switch (clock_id) { 435 switch (clock_id) {
436 case BPWR_GP_TIMER5: 436 case BPWR_GP_TIMER5:
437 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 437 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
438 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 438 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
439 if (enable) { 439 if (enable) {
440 iva2_grpsel |= OMAP3430_GRPSEL_GPT5_MASK; 440 iva2_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
441 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK; 441 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
@@ -443,12 +443,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
443 mpu_grpsel |= OMAP3430_GRPSEL_GPT5_MASK; 443 mpu_grpsel |= OMAP3430_GRPSEL_GPT5_MASK;
444 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK; 444 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK;
445 } 445 }
446 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 446 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
447 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 447 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
448 break; 448 break;
449 case BPWR_GP_TIMER6: 449 case BPWR_GP_TIMER6:
450 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 450 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
451 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 451 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
452 if (enable) { 452 if (enable) {
453 iva2_grpsel |= OMAP3430_GRPSEL_GPT6_MASK; 453 iva2_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
454 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK; 454 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
@@ -456,12 +456,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
456 mpu_grpsel |= OMAP3430_GRPSEL_GPT6_MASK; 456 mpu_grpsel |= OMAP3430_GRPSEL_GPT6_MASK;
457 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK; 457 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK;
458 } 458 }
459 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 459 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
460 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 460 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
461 break; 461 break;
462 case BPWR_GP_TIMER7: 462 case BPWR_GP_TIMER7:
463 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 463 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
464 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 464 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
465 if (enable) { 465 if (enable) {
466 iva2_grpsel |= OMAP3430_GRPSEL_GPT7_MASK; 466 iva2_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
467 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK; 467 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
@@ -469,12 +469,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
469 mpu_grpsel |= OMAP3430_GRPSEL_GPT7_MASK; 469 mpu_grpsel |= OMAP3430_GRPSEL_GPT7_MASK;
470 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK; 470 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK;
471 } 471 }
472 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 472 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
473 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 473 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
474 break; 474 break;
475 case BPWR_GP_TIMER8: 475 case BPWR_GP_TIMER8:
476 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 476 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
477 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 477 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
478 if (enable) { 478 if (enable) {
479 iva2_grpsel |= OMAP3430_GRPSEL_GPT8_MASK; 479 iva2_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
480 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK; 480 mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
@@ -482,12 +482,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
482 mpu_grpsel |= OMAP3430_GRPSEL_GPT8_MASK; 482 mpu_grpsel |= OMAP3430_GRPSEL_GPT8_MASK;
483 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK; 483 iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK;
484 } 484 }
485 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 485 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
486 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 486 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
487 break; 487 break;
488 case BPWR_MCBSP1: 488 case BPWR_MCBSP1:
489 iva2_grpsel = readl(resources->dw_core_pm_base + 0xA8); 489 iva2_grpsel = readl(resources->core_pm_base + 0xA8);
490 mpu_grpsel = readl(resources->dw_core_pm_base + 0xA4); 490 mpu_grpsel = readl(resources->core_pm_base + 0xA4);
491 if (enable) { 491 if (enable) {
492 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK; 492 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK;
493 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK; 493 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK;
@@ -495,12 +495,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
495 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK; 495 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK;
496 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK; 496 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK;
497 } 497 }
498 writel(iva2_grpsel, resources->dw_core_pm_base + 0xA8); 498 writel(iva2_grpsel, resources->core_pm_base + 0xA8);
499 writel(mpu_grpsel, resources->dw_core_pm_base + 0xA4); 499 writel(mpu_grpsel, resources->core_pm_base + 0xA4);
500 break; 500 break;
501 case BPWR_MCBSP2: 501 case BPWR_MCBSP2:
502 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 502 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
503 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 503 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
504 if (enable) { 504 if (enable) {
505 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK; 505 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
506 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK; 506 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
@@ -508,12 +508,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
508 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK; 508 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK;
509 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK; 509 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK;
510 } 510 }
511 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 511 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
512 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 512 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
513 break; 513 break;
514 case BPWR_MCBSP3: 514 case BPWR_MCBSP3:
515 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 515 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
516 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 516 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
517 if (enable) { 517 if (enable) {
518 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK; 518 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
519 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK; 519 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
@@ -521,12 +521,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
521 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK; 521 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK;
522 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK; 522 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK;
523 } 523 }
524 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 524 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
525 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 525 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
526 break; 526 break;
527 case BPWR_MCBSP4: 527 case BPWR_MCBSP4:
528 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 528 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
529 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 529 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
530 if (enable) { 530 if (enable) {
531 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK; 531 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
532 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK; 532 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
@@ -534,12 +534,12 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
534 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK; 534 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK;
535 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK; 535 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK;
536 } 536 }
537 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 537 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
538 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 538 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
539 break; 539 break;
540 case BPWR_MCBSP5: 540 case BPWR_MCBSP5:
541 iva2_grpsel = readl(resources->dw_per_pm_base + 0xA8); 541 iva2_grpsel = readl(resources->per_pm_base + 0xA8);
542 mpu_grpsel = readl(resources->dw_per_pm_base + 0xA4); 542 mpu_grpsel = readl(resources->per_pm_base + 0xA4);
543 if (enable) { 543 if (enable) {
544 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK; 544 iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
545 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK; 545 mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
@@ -547,8 +547,8 @@ void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
547 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK; 547 mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK;
548 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK; 548 iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK;
549 } 549 }
550 writel(iva2_grpsel, resources->dw_per_pm_base + 0xA8); 550 writel(iva2_grpsel, resources->per_pm_base + 0xA8);
551 writel(mpu_grpsel, resources->dw_per_pm_base + 0xA4); 551 writel(mpu_grpsel, resources->per_pm_base + 0xA4);
552 break; 552 break;
553 } 553 }
554} 554}
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index ba2961049da..dfb356eb672 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -61,24 +61,24 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
61 u32 ul_tlb_base_virt = 0; 61 u32 ul_tlb_base_virt = 0;
62 u32 ul_shm_offset_virt = 0; 62 u32 ul_shm_offset_virt = 0;
63 u32 dw_ext_prog_virt_mem; 63 u32 dw_ext_prog_virt_mem;
64 u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr; 64 u32 dw_base_addr = dev_context->dsp_ext_base_addr;
65 bool trace_read = false; 65 bool trace_read = false;
66 66
67 if (!ul_shm_base_virt) { 67 if (!ul_shm_base_virt) {
68 status = dev_get_symbol(dev_context->hdev_obj, 68 status = dev_get_symbol(dev_context->dev_obj,
69 SHMBASENAME, &ul_shm_base_virt); 69 SHMBASENAME, &ul_shm_base_virt);
70 } 70 }
71 DBC_ASSERT(ul_shm_base_virt != 0); 71 DBC_ASSERT(ul_shm_base_virt != 0);
72 72
73 /* Check if it is a read of Trace section */ 73 /* Check if it is a read of Trace section */
74 if (!status && !ul_trace_sec_beg) { 74 if (!status && !ul_trace_sec_beg) {
75 status = dev_get_symbol(dev_context->hdev_obj, 75 status = dev_get_symbol(dev_context->dev_obj,
76 DSP_TRACESEC_BEG, &ul_trace_sec_beg); 76 DSP_TRACESEC_BEG, &ul_trace_sec_beg);
77 } 77 }
78 DBC_ASSERT(ul_trace_sec_beg != 0); 78 DBC_ASSERT(ul_trace_sec_beg != 0);
79 79
80 if (!status && !ul_trace_sec_end) { 80 if (!status && !ul_trace_sec_end) {
81 status = dev_get_symbol(dev_context->hdev_obj, 81 status = dev_get_symbol(dev_context->dev_obj,
82 DSP_TRACESEC_END, &ul_trace_sec_end); 82 DSP_TRACESEC_END, &ul_trace_sec_end);
83 } 83 }
84 DBC_ASSERT(ul_trace_sec_end != 0); 84 DBC_ASSERT(ul_trace_sec_end != 0);
@@ -92,7 +92,7 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
92 /* If reading from TRACE, force remap/unmap */ 92 /* If reading from TRACE, force remap/unmap */
93 if (trace_read && dw_base_addr) { 93 if (trace_read && dw_base_addr) {
94 dw_base_addr = 0; 94 dw_base_addr = 0;
95 dev_context->dw_dsp_ext_base_addr = 0; 95 dev_context->dsp_ext_base_addr = 0;
96 } 96 }
97 97
98 if (!dw_base_addr) { 98 if (!dw_base_addr) {
@@ -102,19 +102,19 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
102 102
103 /* Get DYNEXT_BEG, EXT_BEG and EXT_END. */ 103 /* Get DYNEXT_BEG, EXT_BEG and EXT_END. */
104 if (!status && !ul_dyn_ext_base) { 104 if (!status && !ul_dyn_ext_base) {
105 status = dev_get_symbol(dev_context->hdev_obj, 105 status = dev_get_symbol(dev_context->dev_obj,
106 DYNEXTBASE, &ul_dyn_ext_base); 106 DYNEXTBASE, &ul_dyn_ext_base);
107 } 107 }
108 DBC_ASSERT(ul_dyn_ext_base != 0); 108 DBC_ASSERT(ul_dyn_ext_base != 0);
109 109
110 if (!status) { 110 if (!status) {
111 status = dev_get_symbol(dev_context->hdev_obj, 111 status = dev_get_symbol(dev_context->dev_obj,
112 EXTBASE, &ul_ext_base); 112 EXTBASE, &ul_ext_base);
113 } 113 }
114 DBC_ASSERT(ul_ext_base != 0); 114 DBC_ASSERT(ul_ext_base != 0);
115 115
116 if (!status) { 116 if (!status) {
117 status = dev_get_symbol(dev_context->hdev_obj, 117 status = dev_get_symbol(dev_context->dev_obj,
118 EXTEND, &ul_ext_end); 118 EXTEND, &ul_ext_end);
119 } 119 }
120 DBC_ASSERT(ul_ext_end != 0); 120 DBC_ASSERT(ul_ext_end != 0);
@@ -134,10 +134,10 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
134 134
135 if (!status) { 135 if (!status) {
136 ul_tlb_base_virt = 136 ul_tlb_base_virt =
137 dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; 137 dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
139 dw_ext_prog_virt_mem = 139 dw_ext_prog_virt_mem =
140 dev_context->atlb_entry[0].ul_gpp_va; 140 dev_context->atlb_entry[0].gpp_va;
141 141
142 if (!trace_read) { 142 if (!trace_read) {
143 ul_shm_offset_virt = 143 ul_shm_offset_virt =
@@ -148,14 +148,14 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
148 dw_ext_prog_virt_mem -= ul_shm_offset_virt; 148 dw_ext_prog_virt_mem -= ul_shm_offset_virt;
149 dw_ext_prog_virt_mem += 149 dw_ext_prog_virt_mem +=
150 (ul_ext_base - ul_dyn_ext_base); 150 (ul_ext_base - ul_dyn_ext_base);
151 dev_context->dw_dsp_ext_base_addr = 151 dev_context->dsp_ext_base_addr =
152 dw_ext_prog_virt_mem; 152 dw_ext_prog_virt_mem;
153 153
154 /* 154 /*
155 * This dw_dsp_ext_base_addr will get cleared 155 * This dsp_ext_base_addr will get cleared
156 * only when the board is stopped. 156 * only when the board is stopped.
157 */ 157 */
158 if (!dev_context->dw_dsp_ext_base_addr) 158 if (!dev_context->dsp_ext_base_addr)
159 status = -EPERM; 159 status = -EPERM;
160 } 160 }
161 161
@@ -184,7 +184,7 @@ int write_dsp_data(struct bridge_dev_context *dev_context,
184 u32 mem_type) 184 u32 mem_type)
185{ 185{
186 u32 offset; 186 u32 offset;
187 u32 dw_base_addr = dev_context->dw_dsp_base_addr; 187 u32 dw_base_addr = dev_context->dsp_base_addr;
188 struct cfg_hostres *resources = dev_context->resources; 188 struct cfg_hostres *resources = dev_context->resources;
189 int status = 0; 189 int status = 0;
190 u32 base1, base2, base3; 190 u32 base1, base2, base3;
@@ -195,18 +195,18 @@ int write_dsp_data(struct bridge_dev_context *dev_context,
195 if (!resources) 195 if (!resources)
196 return -EPERM; 196 return -EPERM;
197 197
198 offset = dsp_addr - dev_context->dw_dsp_start_add; 198 offset = dsp_addr - dev_context->dsp_start_add;
199 if (offset < base1) { 199 if (offset < base1) {
200 dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[2], 200 dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[2],
201 resources->dw_mem_length[2]); 201 resources->mem_length[2]);
202 } else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) { 202 } else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) {
203 dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[3], 203 dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[3],
204 resources->dw_mem_length[3]); 204 resources->mem_length[3]);
205 offset = offset - base2; 205 offset = offset - base2;
206 } else if (offset >= base2 + OMAP_DSP_MEM2_SIZE && 206 } else if (offset >= base2 + OMAP_DSP_MEM2_SIZE &&
207 offset < base3 + OMAP_DSP_MEM3_SIZE) { 207 offset < base3 + OMAP_DSP_MEM3_SIZE) {
208 dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[4], 208 dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[4],
209 resources->dw_mem_length[4]); 209 resources->mem_length[4]);
210 offset = offset - base3; 210 offset = offset - base3;
211 } else { 211 } else {
212 return -EPERM; 212 return -EPERM;
@@ -230,7 +230,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
230 u32 ul_num_bytes, u32 mem_type, 230 u32 ul_num_bytes, u32 mem_type,
231 bool dynamic_load) 231 bool dynamic_load)
232{ 232{
233 u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr; 233 u32 dw_base_addr = dev_context->dsp_ext_base_addr;
234 u32 dw_offset = 0; 234 u32 dw_offset = 0;
235 u8 temp_byte1, temp_byte2; 235 u8 temp_byte1, temp_byte2;
236 u8 remain_byte[4]; 236 u8 remain_byte[4];
@@ -246,10 +246,10 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
246 246
247 if (symbols_reloaded) { 247 if (symbols_reloaded) {
248 /* Check if it is a load to Trace section */ 248 /* Check if it is a load to Trace section */
249 ret = dev_get_symbol(dev_context->hdev_obj, 249 ret = dev_get_symbol(dev_context->dev_obj,
250 DSP_TRACESEC_BEG, &ul_trace_sec_beg); 250 DSP_TRACESEC_BEG, &ul_trace_sec_beg);
251 if (!ret) 251 if (!ret)
252 ret = dev_get_symbol(dev_context->hdev_obj, 252 ret = dev_get_symbol(dev_context->dev_obj,
253 DSP_TRACESEC_END, 253 DSP_TRACESEC_END,
254 &ul_trace_sec_end); 254 &ul_trace_sec_end);
255 } 255 }
@@ -263,13 +263,13 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
263 if ((dynamic_load || trace_load) && dw_base_addr) { 263 if ((dynamic_load || trace_load) && dw_base_addr) {
264 dw_base_addr = 0; 264 dw_base_addr = 0;
265 MEM_UNMAP_LINEAR_ADDRESS((void *) 265 MEM_UNMAP_LINEAR_ADDRESS((void *)
266 dev_context->dw_dsp_ext_base_addr); 266 dev_context->dsp_ext_base_addr);
267 dev_context->dw_dsp_ext_base_addr = 0x0; 267 dev_context->dsp_ext_base_addr = 0x0;
268 } 268 }
269 if (!dw_base_addr) { 269 if (!dw_base_addr) {
270 if (symbols_reloaded) 270 if (symbols_reloaded)
271 /* Get SHM_BEG EXT_BEG and EXT_END. */ 271 /* Get SHM_BEG EXT_BEG and EXT_END. */
272 ret = dev_get_symbol(dev_context->hdev_obj, 272 ret = dev_get_symbol(dev_context->dev_obj,
273 SHMBASENAME, &ul_shm_base_virt); 273 SHMBASENAME, &ul_shm_base_virt);
274 DBC_ASSERT(ul_shm_base_virt != 0); 274 DBC_ASSERT(ul_shm_base_virt != 0);
275 if (dynamic_load) { 275 if (dynamic_load) {
@@ -277,7 +277,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
277 if (symbols_reloaded) 277 if (symbols_reloaded)
278 ret = 278 ret =
279 dev_get_symbol 279 dev_get_symbol
280 (dev_context->hdev_obj, DYNEXTBASE, 280 (dev_context->dev_obj, DYNEXTBASE,
281 &ul_ext_base); 281 &ul_ext_base);
282 } 282 }
283 DBC_ASSERT(ul_ext_base != 0); 283 DBC_ASSERT(ul_ext_base != 0);
@@ -289,7 +289,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
289 if (symbols_reloaded) 289 if (symbols_reloaded)
290 ret = 290 ret =
291 dev_get_symbol 291 dev_get_symbol
292 (dev_context->hdev_obj, EXTEND, 292 (dev_context->dev_obj, EXTEND,
293 &ul_ext_end); 293 &ul_ext_end);
294 } 294 }
295 } else { 295 } else {
@@ -297,13 +297,13 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
297 if (!ret) 297 if (!ret)
298 ret = 298 ret =
299 dev_get_symbol 299 dev_get_symbol
300 (dev_context->hdev_obj, EXTBASE, 300 (dev_context->dev_obj, EXTBASE,
301 &ul_ext_base); 301 &ul_ext_base);
302 DBC_ASSERT(ul_ext_base != 0); 302 DBC_ASSERT(ul_ext_base != 0);
303 if (!ret) 303 if (!ret)
304 ret = 304 ret =
305 dev_get_symbol 305 dev_get_symbol
306 (dev_context->hdev_obj, EXTEND, 306 (dev_context->dev_obj, EXTEND,
307 &ul_ext_end); 307 &ul_ext_end);
308 } 308 }
309 } 309 }
@@ -319,17 +319,17 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
319 319
320 if (!ret) { 320 if (!ret) {
321 ul_tlb_base_virt = 321 ul_tlb_base_virt =
322 dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; 322 dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
323 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 323 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
324 324
325 if (symbols_reloaded) { 325 if (symbols_reloaded) {
326 ret = dev_get_symbol 326 ret = dev_get_symbol
327 (dev_context->hdev_obj, 327 (dev_context->dev_obj,
328 DSP_TRACESEC_END, &shm0_end); 328 DSP_TRACESEC_END, &shm0_end);
329 if (!ret) { 329 if (!ret) {
330 ret = 330 ret =
331 dev_get_symbol 331 dev_get_symbol
332 (dev_context->hdev_obj, DYNEXTBASE, 332 (dev_context->dev_obj, DYNEXTBASE,
333 &ul_dyn_ext_base); 333 &ul_dyn_ext_base);
334 } 334 }
335 } 335 }
@@ -337,21 +337,21 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
337 ul_shm_base_virt - ul_tlb_base_virt; 337 ul_shm_base_virt - ul_tlb_base_virt;
338 if (trace_load) { 338 if (trace_load) {
339 dw_ext_prog_virt_mem = 339 dw_ext_prog_virt_mem =
340 dev_context->atlb_entry[0].ul_gpp_va; 340 dev_context->atlb_entry[0].gpp_va;
341 } else { 341 } else {
342 dw_ext_prog_virt_mem = host_res->dw_mem_base[1]; 342 dw_ext_prog_virt_mem = host_res->mem_base[1];
343 dw_ext_prog_virt_mem += 343 dw_ext_prog_virt_mem +=
344 (ul_ext_base - ul_dyn_ext_base); 344 (ul_ext_base - ul_dyn_ext_base);
345 } 345 }
346 346
347 dev_context->dw_dsp_ext_base_addr = 347 dev_context->dsp_ext_base_addr =
348 (u32) MEM_LINEAR_ADDRESS((void *) 348 (u32) MEM_LINEAR_ADDRESS((void *)
349 dw_ext_prog_virt_mem, 349 dw_ext_prog_virt_mem,
350 ul_ext_end - ul_ext_base); 350 ul_ext_end - ul_ext_base);
351 dw_base_addr += dev_context->dw_dsp_ext_base_addr; 351 dw_base_addr += dev_context->dsp_ext_base_addr;
352 /* This dw_dsp_ext_base_addr will get cleared only when 352 /* This dsp_ext_base_addr will get cleared only when
353 * the board is stopped. */ 353 * the board is stopped. */
354 if (!dev_context->dw_dsp_ext_base_addr) 354 if (!dev_context->dsp_ext_base_addr)
355 ret = -EPERM; 355 ret = -EPERM;
356 } 356 }
357 } 357 }
@@ -375,10 +375,10 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
375 *((u32 *) host_buff) = dw_base_addr + dw_offset; 375 *((u32 *) host_buff) = dw_base_addr + dw_offset;
376 } 376 }
377 /* Unmap here to force remap for other Ext loads */ 377 /* Unmap here to force remap for other Ext loads */
378 if ((dynamic_load || trace_load) && dev_context->dw_dsp_ext_base_addr) { 378 if ((dynamic_load || trace_load) && dev_context->dsp_ext_base_addr) {
379 MEM_UNMAP_LINEAR_ADDRESS((void *) 379 MEM_UNMAP_LINEAR_ADDRESS((void *)
380 dev_context->dw_dsp_ext_base_addr); 380 dev_context->dsp_ext_base_addr);
381 dev_context->dw_dsp_ext_base_addr = 0x0; 381 dev_context->dsp_ext_base_addr = 0x0;
382 } 382 }
383 symbols_reloaded = false; 383 symbols_reloaded = false;
384 return ret; 384 return ret;
@@ -401,8 +401,8 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
401 if (!resources) 401 if (!resources)
402 return -EPERM; 402 return -EPERM;
403 403
404 if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION || 404 if (dev_context->brd_state == BRD_DSP_HIBERNATION ||
405 dev_context->dw_brd_state == BRD_HIBERNATION) { 405 dev_context->brd_state == BRD_HIBERNATION) {
406#ifdef CONFIG_TIDSPBRIDGE_DVFS 406#ifdef CONFIG_TIDSPBRIDGE_DVFS
407 if (pdata->dsp_get_opp) 407 if (pdata->dsp_get_opp)
408 opplevel = (*pdata->dsp_get_opp) (); 408 opplevel = (*pdata->dsp_get_opp) ();
@@ -437,10 +437,10 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
437 omap_mbox_restore_ctx(dev_context->mbox); 437 omap_mbox_restore_ctx(dev_context->mbox);
438 438
439 /* Access MMU SYS CONFIG register to generate a short wakeup */ 439 /* Access MMU SYS CONFIG register to generate a short wakeup */
440 temp = readl(resources->dw_dmmu_base + 0x10); 440 temp = readl(resources->dmmu_base + 0x10);
441 441
442 dev_context->dw_brd_state = BRD_RUNNING; 442 dev_context->brd_state = BRD_RUNNING;
443 } else if (dev_context->dw_brd_state == BRD_RETENTION) { 443 } else if (dev_context->brd_state == BRD_RETENTION) {
444 /* Restart the peripheral clocks */ 444 /* Restart the peripheral clocks */
445 dsp_clock_enable_all(dev_context->dsp_per_clks); 445 dsp_clock_enable_all(dev_context->dsp_per_clks);
446 } 446 }
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
index 3430418190d..006ffd75289 100644
--- a/drivers/staging/tidspbridge/core/ue_deh.c
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -52,16 +52,16 @@ static irqreturn_t mmu_fault_isr(int irq, void *data)
52 if (!deh) 52 if (!deh)
53 return IRQ_HANDLED; 53 return IRQ_HANDLED;
54 54
55 resources = deh->hbridge_context->resources; 55 resources = deh->bridge_context->resources;
56 if (!resources) { 56 if (!resources) {
57 dev_dbg(bridge, "%s: Failed to get Host Resources\n", 57 dev_dbg(bridge, "%s: Failed to get Host Resources\n",
58 __func__); 58 __func__);
59 return IRQ_HANDLED; 59 return IRQ_HANDLED;
60 } 60 }
61 61
62 hw_mmu_event_status(resources->dw_dmmu_base, &event); 62 hw_mmu_event_status(resources->dmmu_base, &event);
63 if (event == HW_MMU_TRANSLATION_FAULT) { 63 if (event == HW_MMU_TRANSLATION_FAULT) {
64 hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr); 64 hw_mmu_fault_addr_read(resources->dmmu_base, &fault_addr);
65 dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__, 65 dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
66 event, fault_addr); 66 event, fault_addr);
67 /* 67 /*
@@ -73,10 +73,10 @@ static irqreturn_t mmu_fault_isr(int irq, void *data)
73 73
74 /* Disable the MMU events, else once we clear it will 74 /* Disable the MMU events, else once we clear it will
75 * start to raise INTs again */ 75 * start to raise INTs again */
76 hw_mmu_event_disable(resources->dw_dmmu_base, 76 hw_mmu_event_disable(resources->dmmu_base,
77 HW_MMU_TRANSLATION_FAULT); 77 HW_MMU_TRANSLATION_FAULT);
78 } else { 78 } else {
79 hw_mmu_event_disable(resources->dw_dmmu_base, 79 hw_mmu_event_disable(resources->dmmu_base,
80 HW_MMU_ALL_INTERRUPTS); 80 HW_MMU_ALL_INTERRUPTS);
81 } 81 }
82 return IRQ_HANDLED; 82 return IRQ_HANDLED;
@@ -113,7 +113,7 @@ int bridge_deh_create(struct deh_mgr **ret_deh,
113 tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh); 113 tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);
114 114
115 /* Fill in context structure */ 115 /* Fill in context structure */
116 deh->hbridge_context = hbridge_context; 116 deh->bridge_context = hbridge_context;
117 117
118 /* Install ISR function for DSP MMU fault */ 118 /* Install ISR function for DSP MMU fault */
119 status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0, 119 status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
@@ -185,10 +185,10 @@ static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
185 * access entry #0. Then add a new entry so that the DSP OS 185 * access entry #0. Then add a new entry so that the DSP OS
186 * can continue in order to dump the stack. 186 * can continue in order to dump the stack.
187 */ 187 */
188 hw_mmu_twl_disable(resources->dw_dmmu_base); 188 hw_mmu_twl_disable(resources->dmmu_base);
189 hw_mmu_tlb_flush_all(resources->dw_dmmu_base); 189 hw_mmu_tlb_flush_all(resources->dmmu_base);
190 190
191 hw_mmu_tlb_add(resources->dw_dmmu_base, 191 hw_mmu_tlb_add(resources->dmmu_base,
192 virt_to_phys(dummy_va_addr), fault_addr, 192 virt_to_phys(dummy_va_addr), fault_addr,
193 HW_PAGE_SIZE4KB, 1, 193 HW_PAGE_SIZE4KB, 1,
194 &map_attrs, HW_SET, HW_SET); 194 &map_attrs, HW_SET, HW_SET);
@@ -198,12 +198,12 @@ static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
198 dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); 198 dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
199 199
200 /* Clear MMU interrupt */ 200 /* Clear MMU interrupt */
201 hw_mmu_event_ack(resources->dw_dmmu_base, 201 hw_mmu_event_ack(resources->dmmu_base,
202 HW_MMU_TRANSLATION_FAULT); 202 HW_MMU_TRANSLATION_FAULT);
203 dump_dsp_stack(dev_context); 203 dump_dsp_stack(dev_context);
204 dsp_clk_disable(DSP_CLK_GPT8); 204 dsp_clk_disable(DSP_CLK_GPT8);
205 205
206 hw_mmu_disable(resources->dw_dmmu_base); 206 hw_mmu_disable(resources->dmmu_base);
207 free_page((unsigned long)dummy_va_addr); 207 free_page((unsigned long)dummy_va_addr);
208} 208}
209#endif 209#endif
@@ -228,7 +228,7 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
228 return; 228 return;
229 229
230 dev_dbg(bridge, "%s: device exception", __func__); 230 dev_dbg(bridge, "%s: device exception", __func__);
231 dev_context = deh->hbridge_context; 231 dev_context = deh->bridge_context;
232 232
233 switch (event) { 233 switch (event) {
234 case DSP_SYSERROR: 234 case DSP_SYSERROR:
@@ -254,7 +254,7 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
254 } 254 }
255 255
256 /* Filter subsequent notifications when an error occurs */ 256 /* Filter subsequent notifications when an error occurs */
257 if (dev_context->dw_brd_state != BRD_ERROR) { 257 if (dev_context->brd_state != BRD_ERROR) {
258 ntfy_notify(deh->ntfy_obj, event); 258 ntfy_notify(deh->ntfy_obj, event);
259#ifdef CONFIG_TIDSPBRIDGE_RECOVERY 259#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
260 bridge_recover_schedule(); 260 bridge_recover_schedule();
@@ -262,7 +262,7 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
262 } 262 }
263 263
264 /* Set the Board state as ERROR */ 264 /* Set the Board state as ERROR */
265 dev_context->dw_brd_state = BRD_ERROR; 265 dev_context->brd_state = BRD_ERROR;
266 /* Disable all the clocks that were enabled by DSP */ 266 /* Disable all the clocks that were enabled by DSP */
267 dsp_clock_disable_all(dev_context->dsp_per_clks); 267 dsp_clock_disable_all(dev_context->dsp_per_clks);
268 /* 268 /*
diff --git a/drivers/staging/tidspbridge/dynload/cload.c b/drivers/staging/tidspbridge/dynload/cload.c
index c85a5e88361..390040984e0 100644
--- a/drivers/staging/tidspbridge/dynload/cload.c
+++ b/drivers/staging/tidspbridge/dynload/cload.c
@@ -498,8 +498,8 @@ static void allocate_sections(struct dload_state *dlthis)
498 return; 498 return;
499 } 499 }
500 /* initialize the handle header */ 500 /* initialize the handle header */
501 hndl->dm.hnext = hndl->dm.hprev = hndl; /* circular list */ 501 hndl->dm.next = hndl->dm.prev = hndl; /* circular list */
502 hndl->dm.hroot = NULL; 502 hndl->dm.root = NULL;
503 hndl->dm.dbthis = 0; 503 hndl->dm.dbthis = 0;
504 dlthis->myhandle = hndl; /* save away for return */ 504 dlthis->myhandle = hndl; /* save away for return */
505 /* pointer to the section list of allocated sections */ 505 /* pointer to the section list of allocated sections */
@@ -1131,9 +1131,6 @@ static void dload_data(struct dload_state *dlthis)
1131 u16 curr_sect; 1131 u16 curr_sect;
1132 struct doff_scnhdr_t *sptr = dlthis->sect_hdrs; 1132 struct doff_scnhdr_t *sptr = dlthis->sect_hdrs;
1133 struct ldr_section_info *lptr = dlthis->ldr_sections; 1133 struct ldr_section_info *lptr = dlthis->ldr_sections;
1134#ifdef OPT_ZERO_COPY_LOADER
1135 bool zero_copy = false;
1136#endif
1137 u8 *dest; 1134 u8 *dest;
1138 1135
1139 struct { 1136 struct {
@@ -1192,17 +1189,6 @@ static void dload_data(struct dload_state *dlthis)
1192 return; 1189 return;
1193 } 1190 }
1194 dest = ibuf.bufr; 1191 dest = ibuf.bufr;
1195#ifdef OPT_ZERO_COPY_LOADER
1196 zero_copy = false;
1197 if (!dload_check_type(sptr, DLOAD_CINIT) {
1198 dlthis->myio->writemem(dlthis->myio,
1199 &dest,
1200 lptr->load_addr +
1201 image_offset,
1202 lptr, 0);
1203 zero_copy = (dest != ibuf.bufr);
1204 }
1205#endif
1206 /* End of determination */ 1192 /* End of determination */
1207 1193
1208 if (dlthis->strm->read_buffer(dlthis->strm, 1194 if (dlthis->strm->read_buffer(dlthis->strm,
@@ -1266,33 +1252,27 @@ static void dload_data(struct dload_state *dlthis)
1266 &ibuf.ipacket); 1252 &ibuf.ipacket);
1267 cinit_processed = true; 1253 cinit_processed = true;
1268 } else { 1254 } else {
1269#ifdef OPT_ZERO_COPY_LOADER 1255 /* FIXME */
1270 if (!zero_copy) { 1256 if (!dlthis->myio->
1271#endif 1257 writemem(dlthis->
1272 /* FIXME */ 1258 myio,
1273 if (!dlthis->myio-> 1259 ibuf.bufr,
1274 writemem(dlthis-> 1260 lptr->
1275 myio, 1261 load_addr +
1276 ibuf.bufr, 1262 image_offset,
1277 lptr-> 1263 lptr,
1278 load_addr + 1264 BYTE_TO_HOST
1279 image_offset, 1265 (ibuf.
1280 lptr, 1266 ipacket.
1281 BYTE_TO_HOST 1267 packet_size))) {
1282 (ibuf. 1268 DL_ERROR
1283 ipacket. 1269 ("Write to "
1284 packet_size))) { 1270 FMT_UI32
1285 DL_ERROR 1271 " failed",
1286 ("Write to " 1272 lptr->
1287 FMT_UI32 1273 load_addr +
1288 " failed", 1274 image_offset);
1289 lptr->
1290 load_addr +
1291 image_offset);
1292 }
1293#ifdef OPT_ZERO_COPY_LOADER
1294 } 1275 }
1295#endif
1296 } 1276 }
1297 } 1277 }
1298 image_offset += 1278 image_offset +=
@@ -1646,7 +1626,7 @@ static void init_module_handle(struct dload_state *dlthis)
1646 DL_ERROR(err_alloc, sizeof(struct dbg_mirror_root)); 1626 DL_ERROR(err_alloc, sizeof(struct dbg_mirror_root));
1647 return; 1627 return;
1648 } 1628 }
1649 mlst->hnext = NULL; 1629 mlst->next = NULL;
1650 mlst->changes = 0; 1630 mlst->changes = 0;
1651 mlst->refcount = 0; 1631 mlst->refcount = 0;
1652 mlst->dbthis = TDATA_TO_TADDR(dlmodsym->value); 1632 mlst->dbthis = TDATA_TO_TADDR(dlmodsym->value);
@@ -1671,7 +1651,7 @@ static void init_module_handle(struct dload_state *dlthis)
1671#else 1651#else
1672 mlist = (struct dbg_mirror_root *)&debug_list_header; 1652 mlist = (struct dbg_mirror_root *)&debug_list_header;
1673#endif 1653#endif
1674 hndl->dm.hroot = mlist; /* set pointer to root into our handle */ 1654 hndl->dm.root = mlist; /* set pointer to root into our handle */
1675 if (!dlthis->allocated_secn_count) 1655 if (!dlthis->allocated_secn_count)
1676 return; /* no load addresses to be recorded */ 1656 return; /* no load addresses to be recorded */
1677 /* reuse temporary symbol storage */ 1657 /* reuse temporary symbol storage */
@@ -1722,9 +1702,9 @@ static void init_module_handle(struct dload_state *dlthis)
1722 dllview_info.context = 0; 1702 dllview_info.context = 0;
1723 hndl->dm.context = 0; 1703 hndl->dm.context = 0;
1724 /* fill in next pointer and size */ 1704 /* fill in next pointer and size */
1725 if (mlist->hnext) { 1705 if (mlist->next) {
1726 dbmod->next_module = TADDR_TO_TDATA(mlist->hnext->dm.dbthis); 1706 dbmod->next_module = TADDR_TO_TDATA(mlist->next->dm.dbthis);
1727 dbmod->next_module_size = mlist->hnext->dm.dbsiz; 1707 dbmod->next_module_size = mlist->next->dm.dbsiz;
1728 } else { 1708 } else {
1729 dbmod->next_module_size = 0; 1709 dbmod->next_module_size = 0;
1730 dbmod->next_module = 0; 1710 dbmod->next_module = 0;
@@ -1770,11 +1750,11 @@ static void init_module_handle(struct dload_state *dlthis)
1770 } 1750 }
1771 /* Add the module handle to this processor's list 1751 /* Add the module handle to this processor's list
1772 of handles with debug info */ 1752 of handles with debug info */
1773 hndl->dm.hnext = mlist->hnext; 1753 hndl->dm.next = mlist->next;
1774 if (hndl->dm.hnext) 1754 if (hndl->dm.next)
1775 hndl->dm.hnext->dm.hprev = hndl; 1755 hndl->dm.next->dm.prev = hndl;
1776 hndl->dm.hprev = (struct my_handle *)mlist; 1756 hndl->dm.prev = (struct my_handle *)mlist;
1777 mlist->hnext = hndl; /* insert after root */ 1757 mlist->next = hndl; /* insert after root */
1778} /* init_module_handle */ 1758} /* init_module_handle */
1779 1759
1780/************************************************************************* 1760/*************************************************************************
@@ -1830,7 +1810,7 @@ int dynamic_unload_module(void *mhandle,
1830 asecs->name = NULL; 1810 asecs->name = NULL;
1831 alloc->dload_deallocate(alloc, asecs++); 1811 alloc->dload_deallocate(alloc, asecs++);
1832 } 1812 }
1833 root = hndl->dm.hroot; 1813 root = hndl->dm.root;
1834 if (!root) { 1814 if (!root) {
1835 /* there is a debug list containing this module */ 1815 /* there is a debug list containing this module */
1836 goto func_end; 1816 goto func_end;
@@ -1840,20 +1820,20 @@ int dynamic_unload_module(void *mhandle,
1840 } 1820 }
1841 /* Retrieve memory context in which .dllview was allocated */ 1821 /* Retrieve memory context in which .dllview was allocated */
1842 dllview_info.context = hndl->dm.context; 1822 dllview_info.context = hndl->dm.context;
1843 if (hndl->dm.hprev == hndl) 1823 if (hndl->dm.prev == hndl)
1844 goto exitunltgt; 1824 goto exitunltgt;
1845 1825
1846 /* target-side dllview record is in list */ 1826 /* target-side dllview record is in list */
1847 /* dequeue this record from our GPP-side mirror list */ 1827 /* dequeue this record from our GPP-side mirror list */
1848 hndl->dm.hprev->dm.hnext = hndl->dm.hnext; 1828 hndl->dm.prev->dm.next = hndl->dm.next;
1849 if (hndl->dm.hnext) 1829 if (hndl->dm.next)
1850 hndl->dm.hnext->dm.hprev = hndl->dm.hprev; 1830 hndl->dm.next->dm.prev = hndl->dm.prev;
1851 /* Update next_module of previous entry in target list 1831 /* Update next_module of previous entry in target list
1852 * We are using mhdr here as a surrogate for either a 1832 * We are using mhdr here as a surrogate for either a
1853 struct modules_header or a dll_module */ 1833 struct modules_header or a dll_module */
1854 if (hndl->dm.hnext) { 1834 if (hndl->dm.next) {
1855 mhdr.first_module = TADDR_TO_TDATA(hndl->dm.hnext->dm.dbthis); 1835 mhdr.first_module = TADDR_TO_TDATA(hndl->dm.next->dm.dbthis);
1856 mhdr.first_module_size = hndl->dm.hnext->dm.dbsiz; 1836 mhdr.first_module_size = hndl->dm.next->dm.dbsiz;
1857 } else { 1837 } else {
1858 mhdr.first_module = 0; 1838 mhdr.first_module = 0;
1859 mhdr.first_module_size = 0; 1839 mhdr.first_module_size = 0;
@@ -1871,7 +1851,7 @@ int dynamic_unload_module(void *mhandle,
1871 swap_words(&mhdr, sizeof(struct modules_header) - sizeof(u16), 1851 swap_words(&mhdr, sizeof(struct modules_header) - sizeof(u16),
1872 MODULES_HEADER_BITMAP); 1852 MODULES_HEADER_BITMAP);
1873 } 1853 }
1874 if (!init->writemem(init, &mhdr, hndl->dm.hprev->dm.dbthis, 1854 if (!init->writemem(init, &mhdr, hndl->dm.prev->dm.dbthis,
1875 &dllview_info, sizeof(struct modules_header) - 1855 &dllview_info, sizeof(struct modules_header) -
1876 sizeof(mhdr.update_flag))) { 1856 sizeof(mhdr.update_flag))) {
1877 dload_syms_error(syms, dlvwrite); 1857 dload_syms_error(syms, dlvwrite);
diff --git a/drivers/staging/tidspbridge/dynload/dload_internal.h b/drivers/staging/tidspbridge/dynload/dload_internal.h
index 302a7c53e12..7b77573fba5 100644
--- a/drivers/staging/tidspbridge/dynload/dload_internal.h
+++ b/drivers/staging/tidspbridge/dynload/dload_internal.h
@@ -78,15 +78,15 @@ struct my_handle;
78struct dbg_mirror_root { 78struct dbg_mirror_root {
79 /* must be same as dbg_mirror_list; __DLModules address on target */ 79 /* must be same as dbg_mirror_list; __DLModules address on target */
80 u32 dbthis; 80 u32 dbthis;
81 struct my_handle *hnext; /* must be same as dbg_mirror_list */ 81 struct my_handle *next; /* must be same as dbg_mirror_list */
82 u16 changes; /* change counter */ 82 u16 changes; /* change counter */
83 u16 refcount; /* number of modules referencing this root */ 83 u16 refcount; /* number of modules referencing this root */
84}; 84};
85 85
86struct dbg_mirror_list { 86struct dbg_mirror_list {
87 u32 dbthis; 87 u32 dbthis;
88 struct my_handle *hnext, *hprev; 88 struct my_handle *next, *prev;
89 struct dbg_mirror_root *hroot; 89 struct dbg_mirror_root *root;
90 u16 dbsiz; 90 u16 dbsiz;
91 u32 context; /* Save context for .dllview memory allocation */ 91 u32 context; /* Save context for .dllview memory allocation */
92}; 92};
diff --git a/drivers/staging/tidspbridge/gen/gb.c b/drivers/staging/tidspbridge/gen/gb.c
deleted file mode 100644
index 9f590230473..00000000000
--- a/drivers/staging/tidspbridge/gen/gb.c
+++ /dev/null
@@ -1,166 +0,0 @@
1/*
2 * gb.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Generic bitmap operations.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19/* ----------------------------------- DSP/BIOS Bridge */
20#include <linux/types.h>
21/* ----------------------------------- This */
22#include <dspbridge/gs.h>
23#include <dspbridge/gb.h>
24
25struct gb_t_map {
26 u32 len;
27 u32 wcnt;
28 u32 *words;
29};
30
31/*
32 * ======== gb_clear ========
33 * purpose:
34 * Clears a bit in the bit map.
35 */
36
37void gb_clear(struct gb_t_map *map, u32 bitn)
38{
39 u32 mask;
40
41 mask = 1L << (bitn % BITS_PER_LONG);
42 map->words[bitn / BITS_PER_LONG] &= ~mask;
43}
44
45/*
46 * ======== gb_create ========
47 * purpose:
48 * Creates a bit map.
49 */
50
51struct gb_t_map *gb_create(u32 len)
52{
53 struct gb_t_map *map;
54 u32 i;
55 map = (struct gb_t_map *)gs_alloc(sizeof(struct gb_t_map));
56 if (map != NULL) {
57 map->len = len;
58 map->wcnt = len / BITS_PER_LONG + 1;
59 map->words = (u32 *) gs_alloc(map->wcnt * sizeof(u32));
60 if (map->words != NULL) {
61 for (i = 0; i < map->wcnt; i++)
62 map->words[i] = 0L;
63
64 } else {
65 gs_frees(map, sizeof(struct gb_t_map));
66 map = NULL;
67 }
68 }
69
70 return map;
71}
72
73/*
74 * ======== gb_delete ========
75 * purpose:
76 * Frees a bit map.
77 */
78
79void gb_delete(struct gb_t_map *map)
80{
81 gs_frees(map->words, map->wcnt * sizeof(u32));
82 gs_frees(map, sizeof(struct gb_t_map));
83}
84
85/*
86 * ======== gb_findandset ========
87 * purpose:
88 * Finds a free bit and sets it.
89 */
90u32 gb_findandset(struct gb_t_map *map)
91{
92 u32 bitn;
93
94 bitn = gb_minclear(map);
95
96 if (bitn != GB_NOBITS)
97 gb_set(map, bitn);
98
99 return bitn;
100}
101
102/*
103 * ======== gb_minclear ========
104 * purpose:
105 * returns the location of the first unset bit in the bit map.
106 */
107u32 gb_minclear(struct gb_t_map *map)
108{
109 u32 bit_location = 0;
110 u32 bit_acc = 0;
111 u32 i;
112 u32 bit;
113 u32 *word;
114
115 for (word = map->words, i = 0; i < map->wcnt; word++, i++) {
116 if (~*word) {
117 for (bit = 0; bit < BITS_PER_LONG; bit++, bit_acc++) {
118 if (bit_acc == map->len)
119 return GB_NOBITS;
120
121 if (~*word & (1L << bit)) {
122 bit_location = i * BITS_PER_LONG + bit;
123 return bit_location;
124 }
125
126 }
127 } else {
128 bit_acc += BITS_PER_LONG;
129 }
130 }
131
132 return GB_NOBITS;
133}
134
135/*
136 * ======== gb_set ========
137 * purpose:
138 * Sets a bit in the bit map.
139 */
140
141void gb_set(struct gb_t_map *map, u32 bitn)
142{
143 u32 mask;
144
145 mask = 1L << (bitn % BITS_PER_LONG);
146 map->words[bitn / BITS_PER_LONG] |= mask;
147}
148
149/*
150 * ======== gb_test ========
151 * purpose:
152 * Returns true if the bit is set in the specified location.
153 */
154
155bool gb_test(struct gb_t_map *map, u32 bitn)
156{
157 bool state;
158 u32 mask;
159 u32 word;
160
161 mask = 1L << (bitn % BITS_PER_LONG);
162 word = map->words[bitn / BITS_PER_LONG];
163 state = word & mask ? true : false;
164
165 return state;
166}
diff --git a/drivers/staging/tidspbridge/gen/gh.c b/drivers/staging/tidspbridge/gen/gh.c
index f72d943c480..cd725033f27 100644
--- a/drivers/staging/tidspbridge/gen/gh.c
+++ b/drivers/staging/tidspbridge/gen/gh.c
@@ -17,9 +17,6 @@
17#include <linux/types.h> 17#include <linux/types.h>
18 18
19#include <dspbridge/host_os.h> 19#include <dspbridge/host_os.h>
20
21#include <dspbridge/gs.h>
22
23#include <dspbridge/gh.h> 20#include <dspbridge/gh.h>
24 21
25struct element { 22struct element {
@@ -37,8 +34,6 @@ struct gh_t_hash_tab {
37}; 34};
38 35
39static void noop(void *p); 36static void noop(void *p);
40static s32 cur_init;
41static void myfree(void *ptr, s32 size);
42 37
43/* 38/*
44 * ======== gh_create ======== 39 * ======== gh_create ========
@@ -51,8 +46,7 @@ struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
51{ 46{
52 struct gh_t_hash_tab *hash_tab; 47 struct gh_t_hash_tab *hash_tab;
53 u16 i; 48 u16 i;
54 hash_tab = 49 hash_tab = kzalloc(sizeof(struct gh_t_hash_tab), GFP_KERNEL);
55 (struct gh_t_hash_tab *)gs_alloc(sizeof(struct gh_t_hash_tab));
56 if (hash_tab == NULL) 50 if (hash_tab == NULL)
57 return NULL; 51 return NULL;
58 hash_tab->max_bucket = max_bucket; 52 hash_tab->max_bucket = max_bucket;
@@ -62,7 +56,7 @@ struct gh_t_hash_tab *gh_create(u16 max_bucket, u16 val_size,
62 hash_tab->delete = delete == NULL ? noop : delete; 56 hash_tab->delete = delete == NULL ? noop : delete;
63 57
64 hash_tab->buckets = (struct element **) 58 hash_tab->buckets = (struct element **)
65 gs_alloc(sizeof(struct element *) * max_bucket); 59 kzalloc(sizeof(struct element *) * max_bucket, GFP_KERNEL);
66 if (hash_tab->buckets == NULL) { 60 if (hash_tab->buckets == NULL) {
67 gh_delete(hash_tab); 61 gh_delete(hash_tab);
68 return NULL; 62 return NULL;
@@ -89,17 +83,14 @@ void gh_delete(struct gh_t_hash_tab *hash_tab)
89 elem = next) { 83 elem = next) {
90 next = elem->next; 84 next = elem->next;
91 (*hash_tab->delete) (elem->data); 85 (*hash_tab->delete) (elem->data);
92 myfree(elem, 86 kfree(elem);
93 sizeof(struct element) - 1 +
94 hash_tab->val_size);
95 } 87 }
96 } 88 }
97 89
98 myfree(hash_tab->buckets, sizeof(struct element *) 90 kfree(hash_tab->buckets);
99 * hash_tab->max_bucket);
100 } 91 }
101 92
102 myfree(hash_tab, sizeof(struct gh_t_hash_tab)); 93 kfree(hash_tab);
103 } 94 }
104} 95}
105 96
@@ -109,9 +100,7 @@ void gh_delete(struct gh_t_hash_tab *hash_tab)
109 100
110void gh_exit(void) 101void gh_exit(void)
111{ 102{
112 if (cur_init-- == 1) 103 /* Do nothing */
113 gs_exit();
114
115} 104}
116 105
117/* 106/*
@@ -138,8 +127,7 @@ void *gh_find(struct gh_t_hash_tab *hash_tab, void *key)
138 127
139void gh_init(void) 128void gh_init(void)
140{ 129{
141 if (cur_init++ == 0) 130 /* Do nothing */
142 gs_init();
143} 131}
144 132
145/* 133/*
@@ -152,8 +140,8 @@ void *gh_insert(struct gh_t_hash_tab *hash_tab, void *key, void *value)
152 u16 i; 140 u16 i;
153 char *src, *dst; 141 char *src, *dst;
154 142
155 elem = (struct element *)gs_alloc(sizeof(struct element) - 1 + 143 elem = kzalloc(sizeof(struct element) - 1 + hash_tab->val_size,
156 hash_tab->val_size); 144 GFP_KERNEL);
157 if (elem != NULL) { 145 if (elem != NULL) {
158 146
159 dst = (char *)elem->data; 147 dst = (char *)elem->data;
@@ -180,14 +168,6 @@ static void noop(void *p)
180 p = p; /* stifle compiler warning */ 168 p = p; /* stifle compiler warning */
181} 169}
182 170
183/*
184 * ======== myfree ========
185 */
186static void myfree(void *ptr, s32 size)
187{
188 gs_free(ptr);
189}
190
191#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE 171#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
192/** 172/**
193 * gh_iterate() - This function goes through all the elements in the hash table 173 * gh_iterate() - This function goes through all the elements in the hash table
diff --git a/drivers/staging/tidspbridge/gen/gs.c b/drivers/staging/tidspbridge/gen/gs.c
deleted file mode 100644
index 8335bf5e274..00000000000
--- a/drivers/staging/tidspbridge/gen/gs.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * gs.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * General storage memory allocator services.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <linux/types.h>
20/* ----------------------------------- DSP/BIOS Bridge */
21#include <dspbridge/dbdefs.h>
22
23/* ----------------------------------- This */
24#include <dspbridge/gs.h>
25
26#include <linux/slab.h>
27
28/* ----------------------------------- Globals */
29static u32 cumsize;
30
31/*
32 * ======== gs_alloc ========
33 * purpose:
34 * Allocates memory of the specified size.
35 */
36void *gs_alloc(u32 size)
37{
38 void *p;
39
40 p = kzalloc(size, GFP_KERNEL);
41 if (p == NULL)
42 return NULL;
43 cumsize += size;
44 return p;
45}
46
47/*
48 * ======== gs_exit ========
49 * purpose:
50 * Discontinue the usage of the GS module.
51 */
52void gs_exit(void)
53{
54 /* Do nothing */
55}
56
57/*
58 * ======== gs_free ========
59 * purpose:
60 * Frees the memory.
61 */
62void gs_free(void *ptr)
63{
64 kfree(ptr);
65 /* ack! no size info */
66 /* cumsize -= size; */
67}
68
69/*
70 * ======== gs_frees ========
71 * purpose:
72 * Frees the memory.
73 */
74void gs_frees(void *ptr, u32 size)
75{
76 kfree(ptr);
77 cumsize -= size;
78}
79
80/*
81 * ======== gs_init ========
82 * purpose:
83 * Initializes the GS module.
84 */
85void gs_init(void)
86{
87 /* Do nothing */
88}
diff --git a/drivers/staging/tidspbridge/gen/uuidutil.c b/drivers/staging/tidspbridge/gen/uuidutil.c
index da39c4fbf33..ff6ebadf98f 100644
--- a/drivers/staging/tidspbridge/gen/uuidutil.c
+++ b/drivers/staging/tidspbridge/gen/uuidutil.c
@@ -45,11 +45,11 @@ void uuid_uuid_to_string(struct dsp_uuid *uuid_obj, char *sz_uuid,
45 45
46 i = snprintf(sz_uuid, size, 46 i = snprintf(sz_uuid, size,
47 "%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X", 47 "%.8X_%.4X_%.4X_%.2X%.2X_%.2X%.2X%.2X%.2X%.2X%.2X",
48 uuid_obj->ul_data1, uuid_obj->us_data2, uuid_obj->us_data3, 48 uuid_obj->data1, uuid_obj->data2, uuid_obj->data3,
49 uuid_obj->uc_data4, uuid_obj->uc_data5, 49 uuid_obj->data4, uuid_obj->data5,
50 uuid_obj->uc_data6[0], uuid_obj->uc_data6[1], 50 uuid_obj->data6[0], uuid_obj->data6[1],
51 uuid_obj->uc_data6[2], uuid_obj->uc_data6[3], 51 uuid_obj->data6[2], uuid_obj->data6[3],
52 uuid_obj->uc_data6[4], uuid_obj->uc_data6[5]); 52 uuid_obj->data6[4], uuid_obj->data6[5]);
53 53
54 DBC_ENSURE(i != -1); 54 DBC_ENSURE(i != -1);
55} 55}
@@ -79,35 +79,35 @@ void uuid_uuid_from_string(char *sz_uuid, struct dsp_uuid *uuid_obj)
79{ 79{
80 s32 j; 80 s32 j;
81 81
82 uuid_obj->ul_data1 = uuid_hex_to_bin(sz_uuid, 8); 82 uuid_obj->data1 = uuid_hex_to_bin(sz_uuid, 8);
83 sz_uuid += 8; 83 sz_uuid += 8;
84 84
85 /* Step over underscore */ 85 /* Step over underscore */
86 sz_uuid++; 86 sz_uuid++;
87 87
88 uuid_obj->us_data2 = (u16) uuid_hex_to_bin(sz_uuid, 4); 88 uuid_obj->data2 = (u16) uuid_hex_to_bin(sz_uuid, 4);
89 sz_uuid += 4; 89 sz_uuid += 4;
90 90
91 /* Step over underscore */ 91 /* Step over underscore */
92 sz_uuid++; 92 sz_uuid++;
93 93
94 uuid_obj->us_data3 = (u16) uuid_hex_to_bin(sz_uuid, 4); 94 uuid_obj->data3 = (u16) uuid_hex_to_bin(sz_uuid, 4);
95 sz_uuid += 4; 95 sz_uuid += 4;
96 96
97 /* Step over underscore */ 97 /* Step over underscore */
98 sz_uuid++; 98 sz_uuid++;
99 99
100 uuid_obj->uc_data4 = (u8) uuid_hex_to_bin(sz_uuid, 2); 100 uuid_obj->data4 = (u8) uuid_hex_to_bin(sz_uuid, 2);
101 sz_uuid += 2; 101 sz_uuid += 2;
102 102
103 uuid_obj->uc_data5 = (u8) uuid_hex_to_bin(sz_uuid, 2); 103 uuid_obj->data5 = (u8) uuid_hex_to_bin(sz_uuid, 2);
104 sz_uuid += 2; 104 sz_uuid += 2;
105 105
106 /* Step over underscore */ 106 /* Step over underscore */
107 sz_uuid++; 107 sz_uuid++;
108 108
109 for (j = 0; j < 6; j++) { 109 for (j = 0; j < 6; j++) {
110 uuid_obj->uc_data6[j] = (u8) uuid_hex_to_bin(sz_uuid, 2); 110 uuid_obj->data6[j] = (u8) uuid_hex_to_bin(sz_uuid, 2);
111 sz_uuid += 2; 111 sz_uuid += 2;
112 } 112 }
113} 113}
diff --git a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
index 8efd1fba2f6..d60e2525802 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h
@@ -26,7 +26,7 @@
26#include <dspbridge/dspapi.h> 26#include <dspbridge/dspapi.h>
27#include <dspbridge/dspdefs.h> 27#include <dspbridge/dspdefs.h>
28 28
29#include <dspbridge/list.h> 29#include <linux/list.h>
30#include <dspbridge/ntfy.h> 30#include <dspbridge/ntfy.h>
31 31
32/* 32/*
@@ -114,20 +114,20 @@ struct shm {
114struct chnl_mgr { 114struct chnl_mgr {
115 /* Function interface to Bridge driver */ 115 /* Function interface to Bridge driver */
116 struct bridge_drv_interface *intf_fxns; 116 struct bridge_drv_interface *intf_fxns;
117 struct io_mgr *hio_mgr; /* IO manager */ 117 struct io_mgr *iomgr; /* IO manager */
118 /* Device this board represents */ 118 /* Device this board represents */
119 struct dev_object *hdev_obj; 119 struct dev_object *dev_obj;
120 120
121 /* These fields initialized in bridge_chnl_create(): */ 121 /* These fields initialized in bridge_chnl_create(): */
122 u32 dw_output_mask; /* Host output channels w/ full buffers */ 122 u32 output_mask; /* Host output channels w/ full buffers */
123 u32 dw_last_output; /* Last output channel fired from DPC */ 123 u32 last_output; /* Last output channel fired from DPC */
124 /* Critical section object handle */ 124 /* Critical section object handle */
125 spinlock_t chnl_mgr_lock; 125 spinlock_t chnl_mgr_lock;
126 u32 word_size; /* Size in bytes of DSP word */ 126 u32 word_size; /* Size in bytes of DSP word */
127 u8 max_channels; /* Total number of channels */ 127 u8 max_channels; /* Total number of channels */
128 u8 open_channels; /* Total number of open channels */ 128 u8 open_channels; /* Total number of open channels */
129 struct chnl_object **ap_channel; /* Array of channels */ 129 struct chnl_object **channels; /* Array of channels */
130 u8 dw_type; /* Type of channel class library */ 130 u8 type; /* Type of channel class library */
131 /* If no shm syms, return for CHNL_Open */ 131 /* If no shm syms, return for CHNL_Open */
132 int chnl_open_status; 132 int chnl_open_status;
133}; 133};
@@ -140,21 +140,21 @@ struct chnl_object {
140 /* Pointer back to channel manager */ 140 /* Pointer back to channel manager */
141 struct chnl_mgr *chnl_mgr_obj; 141 struct chnl_mgr *chnl_mgr_obj;
142 u32 chnl_id; /* Channel id */ 142 u32 chnl_id; /* Channel id */
143 u8 dw_state; /* Current channel state */ 143 u8 state; /* Current channel state */
144 s8 chnl_mode; /* Chnl mode and attributes */ 144 s8 chnl_mode; /* Chnl mode and attributes */
145 /* Chnl I/O completion event (user mode) */ 145 /* Chnl I/O completion event (user mode) */
146 void *user_event; 146 void *user_event;
147 /* Abstract syncronization object */ 147 /* Abstract syncronization object */
148 struct sync_object *sync_event; 148 struct sync_object *sync_event;
149 u32 process; /* Process which created this channel */ 149 u32 process; /* Process which created this channel */
150 u32 pcb_arg; /* Argument to use with callback */ 150 u32 cb_arg; /* Argument to use with callback */
151 struct lst_list *pio_requests; /* List of IOR's to driver */ 151 struct list_head io_requests; /* List of IOR's to driver */
152 s32 cio_cs; /* Number of IOC's in queue */ 152 s32 cio_cs; /* Number of IOC's in queue */
153 s32 cio_reqs; /* Number of IORequests in queue */ 153 s32 cio_reqs; /* Number of IORequests in queue */
154 s32 chnl_packets; /* Initial number of free Irps */ 154 s32 chnl_packets; /* Initial number of free Irps */
155 /* List of IOC's from driver */ 155 /* List of IOC's from driver */
156 struct lst_list *pio_completions; 156 struct list_head io_completions;
157 struct lst_list *free_packets_list; /* List of free Irps */ 157 struct list_head free_packets_list; /* List of free Irps */
158 struct ntfy_object *ntfy_obj; 158 struct ntfy_object *ntfy_obj;
159 u32 bytes_moved; /* Total number of bytes transfered */ 159 u32 bytes_moved; /* Total number of bytes transfered */
160 160
@@ -171,7 +171,7 @@ struct chnl_irp {
171 u8 *host_user_buf; 171 u8 *host_user_buf;
172 /* Buffer to be filled/emptied. (System) */ 172 /* Buffer to be filled/emptied. (System) */
173 u8 *host_sys_buf; 173 u8 *host_sys_buf;
174 u32 dw_arg; /* Issue/Reclaim argument. */ 174 u32 arg; /* Issue/Reclaim argument. */
175 u32 dsp_tx_addr; /* Transfer address on DSP side. */ 175 u32 dsp_tx_addr; /* Transfer address on DSP side. */
176 u32 byte_size; /* Bytes transferred. */ 176 u32 byte_size; /* Bytes transferred. */
177 u32 buf_size; /* Actual buffer size when allocated. */ 177 u32 buf_size; /* Actual buffer size when allocated. */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/brddefs.h b/drivers/staging/tidspbridge/include/dspbridge/brddefs.h
index f80d9a5f05a..725d7b37414 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/brddefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/brddefs.h
@@ -24,9 +24,7 @@
24#define BRD_IDLE 0x1 /* Monitor Loaded, but suspended. */ 24#define BRD_IDLE 0x1 /* Monitor Loaded, but suspended. */
25#define BRD_RUNNING 0x2 /* Monitor loaded, and executing. */ 25#define BRD_RUNNING 0x2 /* Monitor loaded, and executing. */
26#define BRD_UNKNOWN 0x3 /* Board state is indeterminate. */ 26#define BRD_UNKNOWN 0x3 /* Board state is indeterminate. */
27#define BRD_SYNCINIT 0x4
28#define BRD_LOADED 0x5 27#define BRD_LOADED 0x5
29#define BRD_LASTSTATE BRD_LOADED /* Set to highest legal board state. */
30#define BRD_SLEEP_TRANSITION 0x6 /* Sleep transition in progress */ 28#define BRD_SLEEP_TRANSITION 0x6 /* Sleep transition in progress */
31#define BRD_HIBERNATION 0x7 /* MPU initiated hibernation */ 29#define BRD_HIBERNATION 0x7 /* MPU initiated hibernation */
32#define BRD_RETENTION 0x8 /* Retention mode */ 30#define BRD_RETENTION 0x8 /* Retention mode */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
index 38122dbf877..60a278136bd 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
@@ -19,25 +19,12 @@
19#ifndef CFGDEFS_ 19#ifndef CFGDEFS_
20#define CFGDEFS_ 20#define CFGDEFS_
21 21
22/* Maximum length of module search path. */
23#define CFG_MAXSEARCHPATHLEN 255
24
25/* Maximum length of general paths. */
26#define CFG_MAXPATH 255
27
28/* Host Resources: */ 22/* Host Resources: */
29#define CFG_MAXMEMREGISTERS 9 23#define CFG_MAXMEMREGISTERS 9
30#define CFG_MAXIOPORTS 20
31#define CFG_MAXIRQS 7
32#define CFG_MAXDMACHANNELS 7
33 24
34/* IRQ flag */ 25/* IRQ flag */
35#define CFG_IRQSHARED 0x01 /* IRQ can be shared */ 26#define CFG_IRQSHARED 0x01 /* IRQ can be shared */
36 27
37/* DSP Resources: */
38#define CFG_DSPMAXMEMTYPES 10
39#define CFG_DEFAULT_NUM_WINDOWS 1 /* We support only one window. */
40
41/* A platform-related device handle: */ 28/* A platform-related device handle: */
42struct cfg_devnode; 29struct cfg_devnode;
43 30
@@ -47,35 +34,28 @@ struct cfg_devnode;
47struct cfg_hostres { 34struct cfg_hostres {
48 u32 num_mem_windows; /* Set to default */ 35 u32 num_mem_windows; /* Set to default */
49 /* This is the base.memory */ 36 /* This is the base.memory */
50 u32 dw_mem_base[CFG_MAXMEMREGISTERS]; /* shm virtual address */ 37 u32 mem_base[CFG_MAXMEMREGISTERS]; /* shm virtual address */
51 u32 dw_mem_length[CFG_MAXMEMREGISTERS]; /* Length of the Base */ 38 u32 mem_length[CFG_MAXMEMREGISTERS]; /* Length of the Base */
52 u32 dw_mem_phys[CFG_MAXMEMREGISTERS]; /* shm Physical address */ 39 u32 mem_phys[CFG_MAXMEMREGISTERS]; /* shm Physical address */
53 u8 birq_registers; /* IRQ Number */ 40 u8 birq_registers; /* IRQ Number */
54 u8 birq_attrib; /* IRQ Attribute */ 41 u8 birq_attrib; /* IRQ Attribute */
55 u32 dw_offset_for_monitor; /* The Shared memory starts from 42 u32 offset_for_monitor; /* The Shared memory starts from
56 * dw_mem_base + this offset */ 43 * mem_base + this offset */
57 /* 44 /*
58 * Info needed by NODE for allocating channels to communicate with RMS: 45 * Info needed by NODE for allocating channels to communicate with RMS:
59 * dw_chnl_offset: Offset of RMS channels. Lower channels are 46 * chnl_offset: Offset of RMS channels. Lower channels are
60 * reserved. 47 * reserved.
61 * dw_chnl_buf_size: Size of channel buffer to send to RMS 48 * chnl_buf_size: Size of channel buffer to send to RMS
62 * dw_num_chnls: Total number of channels 49 * num_chnls: Total number of channels
63 * (including reserved). 50 * (including reserved).
64 */ 51 */
65 u32 dw_chnl_offset; 52 u32 chnl_offset;
66 u32 dw_chnl_buf_size; 53 u32 chnl_buf_size;
67 u32 dw_num_chnls; 54 u32 num_chnls;
68 void __iomem *dw_per_base; 55 void __iomem *per_base;
69 u32 dw_per_pm_base; 56 u32 per_pm_base;
70 u32 dw_core_pm_base; 57 u32 core_pm_base;
71 void __iomem *dw_dmmu_base; 58 void __iomem *dmmu_base;
72 void __iomem *dw_sys_ctrl_base;
73};
74
75struct cfg_dspmemdesc {
76 u32 mem_type; /* Type of memory. */
77 u32 ul_min; /* Minimum amount of memory of this type. */
78 u32 ul_max; /* Maximum amount of memory of this type. */
79}; 59};
80 60
81#endif /* CFGDEFS_ */ 61#endif /* CFGDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnl.h b/drivers/staging/tidspbridge/include/dspbridge/chnl.h
index 8733b3b8193..92f6a13424f 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/chnl.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/chnl.h
@@ -25,27 +25,6 @@
25#include <dspbridge/chnlpriv.h> 25#include <dspbridge/chnlpriv.h>
26 26
27/* 27/*
28 * ======== chnl_close ========
29 * Purpose:
30 * Ensures all pending I/O on this channel is cancelled, discards all
31 * queued I/O completion notifications, then frees the resources allocated
32 * for this channel, and makes the corresponding logical channel id
33 * available for subsequent use.
34 * Parameters:
35 * chnl_obj: Channel object handle.
36 * Returns:
37 * 0: Success;
38 * -EFAULT: Invalid chnl_obj.
39 * Requires:
40 * chnl_init(void) called.
41 * No thread must be blocked on this channel's I/O completion event.
42 * Ensures:
43 * 0: The I/O completion event for this channel is freed.
44 * chnl_obj is no longer valid.
45 */
46extern int chnl_close(struct chnl_object *chnl_obj);
47
48/*
49 * ======== chnl_create ======== 28 * ======== chnl_create ========
50 * Purpose: 29 * Purpose:
51 * Create a channel manager object, responsible for opening new channels 30 * Create a channel manager object, responsible for opening new channels
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnldefs.h b/drivers/staging/tidspbridge/include/dspbridge/chnldefs.h
index 5bf5f6b0b7b..cb67c309b6c 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/chnldefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/chnldefs.h
@@ -22,9 +22,6 @@
22/* Channel id option. */ 22/* Channel id option. */
23#define CHNL_PICKFREE (~0UL) /* Let manager pick a free channel. */ 23#define CHNL_PICKFREE (~0UL) /* Let manager pick a free channel. */
24 24
25/* Channel manager limits: */
26#define CHNL_INITIOREQS 4 /* Default # of I/O requests. */
27
28/* Channel modes */ 25/* Channel modes */
29#define CHNL_MODETODSP 0 /* Data streaming to the DSP. */ 26#define CHNL_MODETODSP 0 /* Data streaming to the DSP. */
30#define CHNL_MODEFROMDSP 1 /* Data streaming from the DSP. */ 27#define CHNL_MODEFROMDSP 1 /* Data streaming from the DSP. */
@@ -48,7 +45,7 @@
48struct chnl_attr { 45struct chnl_attr {
49 u32 uio_reqs; /* Max # of preallocated I/O requests. */ 46 u32 uio_reqs; /* Max # of preallocated I/O requests. */
50 void *event_obj; /* User supplied auto-reset event object. */ 47 void *event_obj; /* User supplied auto-reset event object. */
51 char *pstr_event_name; /* Ptr to name of user event object. */ 48 char *str_event_name; /* Ptr to name of user event object. */
52 void *reserved1; /* Reserved for future use. */ 49 void *reserved1; /* Reserved for future use. */
53 u32 reserved2; /* Reserved for future use. */ 50 u32 reserved2; /* Reserved for future use. */
54 51
@@ -56,11 +53,11 @@ struct chnl_attr {
56 53
57/* I/O completion record: */ 54/* I/O completion record: */
58struct chnl_ioc { 55struct chnl_ioc {
59 void *pbuf; /* Buffer to be filled/emptied. */ 56 void *buf; /* Buffer to be filled/emptied. */
60 u32 byte_size; /* Bytes transferred. */ 57 u32 byte_size; /* Bytes transferred. */
61 u32 buf_size; /* Actual buffer size in bytes */ 58 u32 buf_size; /* Actual buffer size in bytes */
62 u32 status; /* Status of IO completion. */ 59 u32 status; /* Status of IO completion. */
63 u32 dw_arg; /* User argument associated with pbuf. */ 60 u32 arg; /* User argument associated with buf. */
64}; 61};
65 62
66#endif /* CHNLDEFS_ */ 63#endif /* CHNLDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h b/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h
index 9292100b1c0..4114c79e246 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h
@@ -39,12 +39,6 @@
39 */ 39 */
40#define CHNL_PCPY 0 /* Proc-copy transport 0 */ 40#define CHNL_PCPY 0 /* Proc-copy transport 0 */
41 41
42#define CHNL_MAXIRQ 0xff /* Arbitrarily large number. */
43
44/* The following modes are private: */
45#define CHNL_MODEUSEREVENT 0x1000 /* User provided the channel event. */
46#define CHNL_MODEMASK 0x1001
47
48/* Higher level channel states: */ 42/* Higher level channel states: */
49#define CHNL_STATEREADY 0 /* Channel ready for I/O. */ 43#define CHNL_STATEREADY 0 /* Channel ready for I/O. */
50#define CHNL_STATECANCEL 1 /* I/O was cancelled. */ 44#define CHNL_STATECANCEL 1 /* I/O was cancelled. */
@@ -56,23 +50,16 @@
56 50
57/* Types of channel class libraries: */ 51/* Types of channel class libraries: */
58#define CHNL_TYPESM 1 /* Shared memory driver. */ 52#define CHNL_TYPESM 1 /* Shared memory driver. */
59#define CHNL_TYPEBM 2 /* Bus Mastering driver. */
60
61/* Max string length of channel I/O completion event name - change if needed */
62#define CHNL_MAXEVTNAMELEN 32
63
64/* Max memory pages lockable in CHNL_PrepareBuffer() - change if needed */
65#define CHNL_MAXLOCKPAGES 64
66 53
67/* Channel info. */ 54/* Channel info. */
68struct chnl_info { 55struct chnl_info {
69 struct chnl_mgr *hchnl_mgr; /* Owning channel manager. */ 56 struct chnl_mgr *chnl_mgr; /* Owning channel manager. */
70 u32 cnhl_id; /* Channel ID. */ 57 u32 cnhl_id; /* Channel ID. */
71 void *event_obj; /* Channel I/O completion event. */ 58 void *event_obj; /* Channel I/O completion event. */
72 /*Abstraction of I/O completion event. */ 59 /*Abstraction of I/O completion event. */
73 struct sync_object *sync_event; 60 struct sync_object *sync_event;
74 s8 dw_mode; /* Channel mode. */ 61 s8 mode; /* Channel mode. */
75 u8 dw_state; /* Current channel state. */ 62 u8 state; /* Current channel state. */
76 u32 bytes_tx; /* Total bytes transferred. */ 63 u32 bytes_tx; /* Total bytes transferred. */
77 u32 cio_cs; /* Number of IOCs in queue. */ 64 u32 cio_cs; /* Number of IOCs in queue. */
78 u32 cio_reqs; /* Number of IO Requests in queue. */ 65 u32 cio_reqs; /* Number of IO Requests in queue. */
@@ -81,7 +68,7 @@ struct chnl_info {
81 68
82/* Channel manager info: */ 69/* Channel manager info: */
83struct chnl_mgrinfo { 70struct chnl_mgrinfo {
84 u8 dw_type; /* Type of channel class library. */ 71 u8 type; /* Type of channel class library. */
85 /* Channel handle, given the channel id. */ 72 /* Channel handle, given the channel id. */
86 struct chnl_object *chnl_obj; 73 struct chnl_object *chnl_obj;
87 u8 open_channels; /* Number of open channels. */ 74 u8 open_channels; /* Number of open channels. */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmm.h b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
index 6ad313fbc66..27a21b5f3ff 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
@@ -81,7 +81,7 @@ extern void *cmm_calloc_buf(struct cmm_object *hcmm_mgr,
81 * Requires: 81 * Requires:
82 * cmm_init(void) called. 82 * cmm_init(void) called.
83 * ph_cmm_mgr != NULL. 83 * ph_cmm_mgr != NULL.
84 * mgr_attrts->ul_min_block_size >= 4 bytes. 84 * mgr_attrts->min_block_size >= 4 bytes.
85 * Ensures: 85 * Ensures:
86 * 86 *
87 */ 87 */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
index fbff372d2f5..a264fa69a4f 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h
@@ -19,18 +19,17 @@
19#ifndef CMMDEFS_ 19#ifndef CMMDEFS_
20#define CMMDEFS_ 20#define CMMDEFS_
21 21
22#include <dspbridge/list.h>
23 22
24/* Cmm attributes used in cmm_create() */ 23/* Cmm attributes used in cmm_create() */
25struct cmm_mgrattrs { 24struct cmm_mgrattrs {
26 /* Minimum SM allocation; default 32 bytes. */ 25 /* Minimum SM allocation; default 32 bytes. */
27 u32 ul_min_block_size; 26 u32 min_block_size;
28}; 27};
29 28
30/* Attributes for CMM_AllocBuf() & CMM_AllocDesc() */ 29/* Attributes for CMM_AllocBuf() & CMM_AllocDesc() */
31struct cmm_attrs { 30struct cmm_attrs {
32 u32 ul_seg_id; /* 1,2... are SM segments. 0 is not. */ 31 u32 seg_id; /* 1,2... are SM segments. 0 is not. */
33 u32 ul_alignment; /* 0,1,2,4....ul_min_block_size */ 32 u32 alignment; /* 0,1,2,4....min_block_size */
34}; 33};
35 34
36/* 35/*
@@ -52,40 +51,40 @@ struct cmm_attrs {
52 */ 51 */
53 52
54struct cmm_seginfo { 53struct cmm_seginfo {
55 u32 dw_seg_base_pa; /* Start Phys address of SM segment */ 54 u32 seg_base_pa; /* Start Phys address of SM segment */
56 /* Total size in bytes of segment: DSP+GPP */ 55 /* Total size in bytes of segment: DSP+GPP */
57 u32 ul_total_seg_size; 56 u32 total_seg_size;
58 u32 dw_gpp_base_pa; /* Start Phys addr of Gpp SM seg */ 57 u32 gpp_base_pa; /* Start Phys addr of Gpp SM seg */
59 u32 ul_gpp_size; /* Size of Gpp SM seg in bytes */ 58 u32 gpp_size; /* Size of Gpp SM seg in bytes */
60 u32 dw_dsp_base_va; /* DSP virt base byte address */ 59 u32 dsp_base_va; /* DSP virt base byte address */
61 u32 ul_dsp_size; /* DSP seg size in bytes */ 60 u32 dsp_size; /* DSP seg size in bytes */
62 /* # of current GPP allocations from this segment */ 61 /* # of current GPP allocations from this segment */
63 u32 ul_in_use_cnt; 62 u32 in_use_cnt;
64 u32 dw_seg_base_va; /* Start Virt address of SM seg */ 63 u32 seg_base_va; /* Start Virt address of SM seg */
65 64
66}; 65};
67 66
68/* CMM useful information */ 67/* CMM useful information */
69struct cmm_info { 68struct cmm_info {
70 /* # of SM segments registered with this Cmm. */ 69 /* # of SM segments registered with this Cmm. */
71 u32 ul_num_gppsm_segs; 70 u32 num_gppsm_segs;
72 /* Total # of allocations outstanding for CMM */ 71 /* Total # of allocations outstanding for CMM */
73 u32 ul_total_in_use_cnt; 72 u32 total_in_use_cnt;
74 /* Min SM block size allocation from cmm_create() */ 73 /* Min SM block size allocation from cmm_create() */
75 u32 ul_min_block_size; 74 u32 min_block_size;
76 /* Info per registered SM segment. */ 75 /* Info per registered SM segment. */
77 struct cmm_seginfo seg_info[CMM_MAXGPPSEGS]; 76 struct cmm_seginfo seg_info[CMM_MAXGPPSEGS];
78}; 77};
79 78
80/* XlatorCreate attributes */ 79/* XlatorCreate attributes */
81struct cmm_xlatorattrs { 80struct cmm_xlatorattrs {
82 u32 ul_seg_id; /* segment Id used for SM allocations */ 81 u32 seg_id; /* segment Id used for SM allocations */
83 u32 dw_dsp_bufs; /* # of DSP-side bufs */ 82 u32 dsp_bufs; /* # of DSP-side bufs */
84 u32 dw_dsp_buf_size; /* size of DSP-side bufs in GPP bytes */ 83 u32 dsp_buf_size; /* size of DSP-side bufs in GPP bytes */
85 /* Vm base address alloc'd in client process context */ 84 /* Vm base address alloc'd in client process context */
86 void *vm_base; 85 void *vm_base;
87 /* dw_vm_size must be >= (dwMaxNumBufs * dwMaxSize) */ 86 /* vm_size must be >= (dwMaxNumBufs * dwMaxSize) */
88 u32 dw_vm_size; 87 u32 vm_size;
89}; 88};
90 89
91/* 90/*
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cod.h b/drivers/staging/tidspbridge/include/dspbridge/cod.h
index 42bce2eec80..53bd4bb8b0b 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cod.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cod.h
@@ -27,9 +27,6 @@
27#define COD_TRACEBEG "SYS_PUTCBEG" 27#define COD_TRACEBEG "SYS_PUTCBEG"
28#define COD_TRACEEND "SYS_PUTCEND" 28#define COD_TRACEEND "SYS_PUTCEND"
29#define COD_TRACECURPOS "BRIDGE_SYS_PUTC_current" 29#define COD_TRACECURPOS "BRIDGE_SYS_PUTC_current"
30#define COD_TRACESECT "trace"
31#define COD_TRACEBEGOLD "PUTCBEG"
32#define COD_TRACEENDOLD "PUTCEND"
33 30
34#define COD_NOLOAD DBLL_NOLOAD 31#define COD_NOLOAD DBLL_NOLOAD
35#define COD_SYMB DBLL_SYMB 32#define COD_SYMB DBLL_SYMB
@@ -40,11 +37,6 @@ struct cod_manager;
40/* COD library handle */ 37/* COD library handle */
41struct cod_libraryobj; 38struct cod_libraryobj;
42 39
43/* COD attributes */
44struct cod_attrs {
45 u32 ul_reserved;
46};
47
48/* 40/*
49 * Function prototypes for writing memory to a DSP system, allocating 41 * Function prototypes for writing memory to a DSP system, allocating
50 * and freeing DSP memory. 42 * and freeing DSP memory.
@@ -79,8 +71,6 @@ extern void cod_close(struct cod_libraryobj *lib);
79 * Parameters: 71 * Parameters:
80 * manager: created manager object 72 * manager: created manager object
81 * str_zl_file: ZL DLL filename, of length < COD_MAXPATHLENGTH. 73 * str_zl_file: ZL DLL filename, of length < COD_MAXPATHLENGTH.
82 * attrs: attributes to be used by this object. A NULL value
83 * will cause default attrs to be used.
84 * Returns: 74 * Returns:
85 * 0: Success. 75 * 0: Success.
86 * -ESPIPE: ZL_Create failed. 76 * -ESPIPE: ZL_Create failed.
@@ -92,8 +82,7 @@ extern void cod_close(struct cod_libraryobj *lib);
92 * Ensures: 82 * Ensures:
93 */ 83 */
94extern int cod_create(struct cod_manager **mgr, 84extern int cod_create(struct cod_manager **mgr,
95 char *str_zl_file, 85 char *str_zl_file);
96 const struct cod_attrs *attrs);
97 86
98/* 87/*
99 * ======== cod_delete ======== 88 * ======== cod_delete ========
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h b/drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h
index 1daa4b57b73..bc201b32903 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h
@@ -48,15 +48,15 @@ struct dcd_nodeprops {
48 struct dsp_ndbprops ndb_props; 48 struct dsp_ndbprops ndb_props;
49 u32 msg_segid; 49 u32 msg_segid;
50 u32 msg_notify_type; 50 u32 msg_notify_type;
51 char *pstr_create_phase_fxn; 51 char *str_create_phase_fxn;
52 char *pstr_delete_phase_fxn; 52 char *str_delete_phase_fxn;
53 char *pstr_execute_phase_fxn; 53 char *str_execute_phase_fxn;
54 char *pstr_i_alg_name; 54 char *str_i_alg_name;
55 55
56 /* Dynamic load properties */ 56 /* Dynamic load properties */
57 u16 us_load_type; /* Static, dynamic, overlay */ 57 u16 load_type; /* Static, dynamic, overlay */
58 u32 ul_data_mem_seg_mask; /* Data memory requirements */ 58 u32 data_mem_seg_mask; /* Data memory requirements */
59 u32 ul_code_mem_seg_mask; /* Code memory requirements */ 59 u32 code_mem_seg_mask; /* Code memory requirements */
60}; 60};
61 61
62/* DCD Generic Object Type */ 62/* DCD Generic Object Type */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dbdefs.h
index 5af075def87..c8f464505ef 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dbdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dbdefs.h
@@ -31,9 +31,6 @@
31/* API return value and calling convention */ 31/* API return value and calling convention */
32#define DBAPI int 32#define DBAPI int
33 33
34/* Infinite time value for the utimeout parameter to DSPStream_Select() */
35#define DSP_FOREVER (-1)
36
37/* Maximum length of node name, used in dsp_ndbprops */ 34/* Maximum length of node name, used in dsp_ndbprops */
38#define DSP_MAXNAMELEN 32 35#define DSP_MAXNAMELEN 32
39 36
@@ -74,16 +71,9 @@
74#define DSP_NODE_MIN_PRIORITY 1 71#define DSP_NODE_MIN_PRIORITY 1
75#define DSP_NODE_MAX_PRIORITY 15 72#define DSP_NODE_MAX_PRIORITY 15
76 73
77/* Pre-Defined Message Command Codes available to user: */
78#define DSP_RMSUSERCODESTART RMS_USER /* Start of RMS user cmd codes */
79/* end of user codes */
80#define DSP_RMSUSERCODEEND (RMS_USER + RMS_MAXUSERCODES);
81/* msg_ctrl contains SM buffer description */ 74/* msg_ctrl contains SM buffer description */
82#define DSP_RMSBUFDESC RMS_BUFDESC 75#define DSP_RMSBUFDESC RMS_BUFDESC
83 76
84/* Shared memory identifier for MEM segment named "SHMSEG0" */
85#define DSP_SHMSEG0 (u32)(-1)
86
87/* Processor ID numbers */ 77/* Processor ID numbers */
88#define DSP_UNIT 0 78#define DSP_UNIT 0
89#define IVA_UNIT 1 79#define IVA_UNIT 1
@@ -91,15 +81,6 @@
91#define DSPWORD unsigned char 81#define DSPWORD unsigned char
92#define DSPWORDSIZE sizeof(DSPWORD) 82#define DSPWORDSIZE sizeof(DSPWORD)
93 83
94/* Power control enumerations */
95#define PROC_PWRCONTROL 0x8070
96
97#define PROC_PWRMGT_ENABLE (PROC_PWRCONTROL + 0x3)
98#define PROC_PWRMGT_DISABLE (PROC_PWRCONTROL + 0x4)
99
100/* Bridge Code Version */
101#define BRIDGE_VERSION_CODE 333
102
103#define MAX_PROFILES 16 84#define MAX_PROFILES 16
104 85
105/* DSP chip type */ 86/* DSP chip type */
@@ -118,12 +99,12 @@ static inline bool is_valid_proc_event(u32 x)
118 99
119/* The Node UUID structure */ 100/* The Node UUID structure */
120struct dsp_uuid { 101struct dsp_uuid {
121 u32 ul_data1; 102 u32 data1;
122 u16 us_data2; 103 u16 data2;
123 u16 us_data3; 104 u16 data3;
124 u8 uc_data4; 105 u8 data4;
125 u8 uc_data5; 106 u8 data5;
126 u8 uc_data6[6]; 107 u8 data6[6];
127}; 108};
128 109
129/* DCD types */ 110/* DCD types */
@@ -227,11 +208,11 @@ enum dsp_flushtype {
227 208
228/* Memory Segment Status Values */ 209/* Memory Segment Status Values */
229struct dsp_memstat { 210struct dsp_memstat {
230 u32 ul_size; 211 u32 size;
231 u32 ul_total_free_size; 212 u32 total_free_size;
232 u32 ul_len_max_free_block; 213 u32 len_max_free_block;
233 u32 ul_num_free_blocks; 214 u32 num_free_blocks;
234 u32 ul_num_alloc_blocks; 215 u32 num_alloc_blocks;
235}; 216};
236 217
237/* Processor Load information Values */ 218/* Processor Load information Values */
@@ -248,11 +229,11 @@ struct dsp_strmattr {
248 u32 buf_size; /* Buffer size (DSP words) */ 229 u32 buf_size; /* Buffer size (DSP words) */
249 u32 num_bufs; /* Number of buffers */ 230 u32 num_bufs; /* Number of buffers */
250 u32 buf_alignment; /* Buffer alignment */ 231 u32 buf_alignment; /* Buffer alignment */
251 u32 utimeout; /* Timeout for blocking STRM calls */ 232 u32 timeout; /* Timeout for blocking STRM calls */
252 enum dsp_strmmode strm_mode; /* mode of stream when opened */ 233 enum dsp_strmmode strm_mode; /* mode of stream when opened */
253 /* DMA chnl id if dsp_strmmode is LDMA or RDMA */ 234 /* DMA chnl id if dsp_strmmode is LDMA or RDMA */
254 u32 udma_chnl_id; 235 u32 dma_chnl_id;
255 u32 udma_priority; /* DMA channel priority 0=lowest, >0=high */ 236 u32 dma_priority; /* DMA channel priority 0=lowest, >0=high */
256}; 237};
257 238
258/* The dsp_cbdata structure */ 239/* The dsp_cbdata structure */
@@ -263,9 +244,9 @@ struct dsp_cbdata {
263 244
264/* The dsp_msg structure */ 245/* The dsp_msg structure */
265struct dsp_msg { 246struct dsp_msg {
266 u32 dw_cmd; 247 u32 cmd;
267 u32 dw_arg1; 248 u32 arg1;
268 u32 dw_arg2; 249 u32 arg2;
269}; 250};
270 251
271/* The dsp_resourcereqmts structure for node's resource requirements */ 252/* The dsp_resourcereqmts structure for node's resource requirements */
@@ -274,9 +255,9 @@ struct dsp_resourcereqmts {
274 u32 static_data_size; 255 u32 static_data_size;
275 u32 global_data_size; 256 u32 global_data_size;
276 u32 program_mem_size; 257 u32 program_mem_size;
277 u32 uwc_execution_time; 258 u32 wc_execution_time;
278 u32 uwc_period; 259 u32 wc_period;
279 u32 uwc_deadline; 260 u32 wc_deadline;
280 u32 avg_exection_time; 261 u32 avg_exection_time;
281 u32 minimum_period; 262 u32 minimum_period;
282}; 263};
@@ -295,7 +276,7 @@ struct dsp_streamconnect {
295}; 276};
296 277
297struct dsp_nodeprofs { 278struct dsp_nodeprofs {
298 u32 ul_heap_size; 279 u32 heap_size;
299}; 280};
300 281
301/* The dsp_ndbprops structure reports the attributes of a node */ 282/* The dsp_ndbprops structure reports the attributes of a node */
@@ -313,7 +294,7 @@ struct dsp_ndbprops {
313 u32 message_depth; 294 u32 message_depth;
314 u32 num_input_streams; 295 u32 num_input_streams;
315 u32 num_output_streams; 296 u32 num_output_streams;
316 u32 utimeout; 297 u32 timeout;
317 u32 count_profiles; /* Number of supported profiles */ 298 u32 count_profiles; /* Number of supported profiles */
318 /* Array of profiles */ 299 /* Array of profiles */
319 struct dsp_nodeprofs node_profiles[MAX_PROFILES]; 300 struct dsp_nodeprofs node_profiles[MAX_PROFILES];
@@ -325,7 +306,7 @@ struct dsp_ndbprops {
325struct dsp_nodeattrin { 306struct dsp_nodeattrin {
326 u32 cb_struct; 307 u32 cb_struct;
327 s32 prio; 308 s32 prio;
328 u32 utimeout; 309 u32 timeout;
329 u32 profile_id; 310 u32 profile_id;
330 /* Reserved, for Bridge Internal use only */ 311 /* Reserved, for Bridge Internal use only */
331 u32 heap_size; 312 u32 heap_size;
@@ -359,14 +340,14 @@ struct dsp_nodeattr {
359 * window handle. 340 * window handle.
360 */ 341 */
361struct dsp_notification { 342struct dsp_notification {
362 char *ps_name; 343 char *name;
363 void *handle; 344 void *handle;
364}; 345};
365 346
366/* The dsp_processorattrin structure describes the attributes of a processor */ 347/* The dsp_processorattrin structure describes the attributes of a processor */
367struct dsp_processorattrin { 348struct dsp_processorattrin {
368 u32 cb_struct; 349 u32 cb_struct;
369 u32 utimeout; 350 u32 timeout;
370}; 351};
371/* 352/*
372 * The dsp_processorinfo structure describes basic capabilities of a 353 * The dsp_processorinfo structure describes basic capabilities of a
@@ -377,8 +358,8 @@ struct dsp_processorinfo {
377 int processor_family; 358 int processor_family;
378 int processor_type; 359 int processor_type;
379 u32 clock_rate; 360 u32 clock_rate;
380 u32 ul_internal_mem_size; 361 u32 internal_mem_size;
381 u32 ul_external_mem_size; 362 u32 external_mem_size;
382 u32 processor_id; 363 u32 processor_id;
383 int ty_running_rtos; 364 int ty_running_rtos;
384 s32 node_min_priority; 365 s32 node_min_priority;
@@ -387,10 +368,10 @@ struct dsp_processorinfo {
387 368
388/* Error information of last DSP exception signalled to the GPP */ 369/* Error information of last DSP exception signalled to the GPP */
389struct dsp_errorinfo { 370struct dsp_errorinfo {
390 u32 dw_err_mask; 371 u32 err_mask;
391 u32 dw_val1; 372 u32 val1;
392 u32 dw_val2; 373 u32 val2;
393 u32 dw_val3; 374 u32 val3;
394}; 375};
395 376
396/* The dsp_processorstate structure describes the state of a DSP processor */ 377/* The dsp_processorstate structure describes the state of a DSP processor */
@@ -407,7 +388,7 @@ struct dsp_resourceinfo {
407 u32 cb_struct; 388 u32 cb_struct;
408 enum dsp_resourceinfotype resource_type; 389 enum dsp_resourceinfotype resource_type;
409 union { 390 union {
410 u32 ul_resource; 391 u32 resource;
411 struct dsp_memstat mem_stat; 392 struct dsp_memstat mem_stat;
412 struct dsp_procloadstat proc_load_stat; 393 struct dsp_procloadstat proc_load_stat;
413 } result; 394 } result;
@@ -420,13 +401,13 @@ struct dsp_resourceinfo {
420 */ 401 */
421struct dsp_streamattrin { 402struct dsp_streamattrin {
422 u32 cb_struct; 403 u32 cb_struct;
423 u32 utimeout; 404 u32 timeout;
424 u32 segment_id; 405 u32 segment_id;
425 u32 buf_alignment; 406 u32 buf_alignment;
426 u32 num_bufs; 407 u32 num_bufs;
427 enum dsp_strmmode strm_mode; 408 enum dsp_strmmode strm_mode;
428 u32 udma_chnl_id; 409 u32 dma_chnl_id;
429 u32 udma_priority; 410 u32 dma_priority;
430}; 411};
431 412
432/* The dsp_bufferattr structure describes the attributes of a data buffer */ 413/* The dsp_bufferattr structure describes the attributes of a data buffer */
@@ -444,7 +425,7 @@ struct dsp_streaminfo {
444 u32 cb_struct; 425 u32 cb_struct;
445 u32 number_bufs_allowed; 426 u32 number_bufs_allowed;
446 u32 number_bufs_in_stream; 427 u32 number_bufs_in_stream;
447 u32 ul_number_bytes; 428 u32 number_bytes;
448 void *sync_object_handle; 429 void *sync_object_handle;
449 enum dsp_streamstate ss_stream_state; 430 enum dsp_streamstate ss_stream_state;
450}; 431};
@@ -501,13 +482,6 @@ bit 15 - Output (writeable) buffer
501#define DSPPROCTYPE_C64 6410 482#define DSPPROCTYPE_C64 6410
502#define IVAPROCTYPE_ARM7 470 483#define IVAPROCTYPE_ARM7 470
503 484
504#define REG_MGR_OBJECT 1
505#define REG_DRV_OBJECT 2
506
507/* registry */
508#define DRVOBJECT "DrvObject"
509#define MGROBJECT "MgrObject"
510
511/* Max registry path length. Also the max registry value length. */ 485/* Max registry path length. Also the max registry value length. */
512#define MAXREGPATHLENGTH 255 486#define MAXREGPATHLENGTH 255
513 487
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbldefs.h b/drivers/staging/tidspbridge/include/dspbridge/dbldefs.h
deleted file mode 100644
index bf4fb99529a..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dbldefs.h
+++ /dev/null
@@ -1,141 +0,0 @@
1/*
2 * dbldefs.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2005-2006 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef DBLDEFS_
18#define DBLDEFS_
19
20/*
21 * Bit masks for dbl_flags.
22 */
23#define DBL_NOLOAD 0x0 /* Don't load symbols, code, or data */
24#define DBL_SYMB 0x1 /* load symbols */
25#define DBL_CODE 0x2 /* load code */
26#define DBL_DATA 0x4 /* load data */
27#define DBL_DYNAMIC 0x8 /* dynamic load */
28#define DBL_BSS 0x20 /* Unitialized section */
29
30#define DBL_MAXPATHLENGTH 255
31
32/*
33 * ======== dbl_flags ========
34 * Specifies whether to load code, data, or symbols
35 */
36typedef s32 dbl_flags;
37
38/*
39 * ======== dbl_sect_info ========
40 * For collecting info on overlay sections
41 */
42struct dbl_sect_info {
43 const char *name; /* name of section */
44 u32 sect_run_addr; /* run address of section */
45 u32 sect_load_addr; /* load address of section */
46 u32 size; /* size of section (target MAUs) */
47 dbl_flags type; /* Code, data, or BSS */
48};
49
50/*
51 * ======== dbl_symbol ========
52 * (Needed for dynamic load library)
53 */
54struct dbl_symbol {
55 u32 value;
56};
57
58/*
59 * ======== dbl_alloc_fxn ========
60 * Allocate memory function. Allocate or reserve (if reserved == TRUE)
61 * "size" bytes of memory from segment "space" and return the address in
62 * *dsp_address (or starting at *dsp_address if reserve == TRUE). Returns 0 on
63 * success, or an error code on failure.
64 */
65typedef s32(*dbl_alloc_fxn) (void *hdl, s32 space, u32 size, u32 align,
66 u32 *dsp_address, s32 seg_id, s32 req,
67 bool reserved);
68
69/*
70 * ======== dbl_free_fxn ========
71 * Free memory function. Free, or unreserve (if reserved == TRUE) "size"
72 * bytes of memory from segment "space"
73 */
74typedef bool(*dbl_free_fxn) (void *hdl, u32 addr, s32 space, u32 size,
75 bool reserved);
76
77/*
78 * ======== dbl_log_write_fxn ========
79 * Function to call when writing data from a section, to log the info.
80 * Can be NULL if no logging is required.
81 */
82typedef int(*dbl_log_write_fxn) (void *handle,
83 struct dbl_sect_info *sect, u32 addr,
84 u32 bytes);
85
86/*
87 * ======== dbl_sym_lookup ========
88 * Symbol lookup function - Find the symbol name and return its value.
89 *
90 * Parameters:
91 * handle - Opaque handle
92 * parg - Opaque argument.
93 * name - Name of symbol to lookup.
94 * sym - Location to store address of symbol structure.
95 *
96 * Returns:
97 * TRUE: Success (symbol was found).
98 * FALSE: Failed to find symbol.
99 */
100typedef bool(*dbl_sym_lookup) (void *handle, void *parg, void *rmm_handle,
101 const char *name, struct dbl_symbol ** sym);
102
103/*
104 * ======== dbl_write_fxn ========
105 * Write memory function. Write "n" HOST bytes of memory to segment "mtype"
106 * starting at address "dsp_address" from the buffer "buf". The buffer is
107 * formatted as an array of words appropriate for the DSP.
108 */
109typedef s32(*dbl_write_fxn) (void *hdl, u32 dsp_address, void *buf,
110 u32 n, s32 mtype);
111
112/*
113 * ======== dbl_attrs ========
114 */
115struct dbl_attrs {
116 dbl_alloc_fxn alloc;
117 dbl_free_fxn free;
118 void *rmm_handle; /* Handle to pass to alloc, free functions */
119 dbl_write_fxn write;
120 void *input_params; /* Handle to pass to write, cinit function */
121
122 dbl_log_write_fxn log_write;
123 void *log_write_handle;
124
125 /* Symbol matching function and handle to pass to it */
126 dbl_sym_lookup sym_lookup;
127 void *sym_handle;
128 void *sym_arg;
129
130 /*
131 * These file manipulation functions should be compatible with the
132 * "C" run time library functions of the same name.
133 */
134 s32(*fread) (void *, size_t, size_t, void *);
135 s32(*fseek) (void *, long, int);
136 s32(*ftell) (void *);
137 s32(*fclose) (void *);
138 void *(*fopen) (const char *, const char *);
139};
140
141#endif /* DBLDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbll.h b/drivers/staging/tidspbridge/include/dspbridge/dbll.h
index b0186761466..46a9e0027ea 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dbll.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dbll.h
@@ -42,18 +42,12 @@ extern bool dbll_init(void);
42extern int dbll_load(struct dbll_library_obj *lib, 42extern int dbll_load(struct dbll_library_obj *lib,
43 dbll_flags flags, 43 dbll_flags flags,
44 struct dbll_attrs *attrs, u32 * entry); 44 struct dbll_attrs *attrs, u32 * entry);
45extern int dbll_load_sect(struct dbll_library_obj *zl_lib,
46 char *sec_name, struct dbll_attrs *attrs);
47extern int dbll_open(struct dbll_tar_obj *target, char *file, 45extern int dbll_open(struct dbll_tar_obj *target, char *file,
48 dbll_flags flags, 46 dbll_flags flags,
49 struct dbll_library_obj **lib_obj); 47 struct dbll_library_obj **lib_obj);
50extern int dbll_read_sect(struct dbll_library_obj *lib, 48extern int dbll_read_sect(struct dbll_library_obj *lib,
51 char *name, char *buf, u32 size); 49 char *name, char *buf, u32 size);
52extern void dbll_set_attrs(struct dbll_tar_obj *target,
53 struct dbll_attrs *pattrs);
54extern void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs); 50extern void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs);
55extern int dbll_unload_sect(struct dbll_library_obj *lib,
56 char *sect_name, struct dbll_attrs *attrs);
57#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE 51#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
58bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address, 52bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address,
59 u32 offset_range, u32 *sym_addr_output, char *name_output); 53 u32 offset_range, u32 *sym_addr_output, char *name_output);
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dblldefs.h b/drivers/staging/tidspbridge/include/dspbridge/dblldefs.h
index d2b4fda3429..30e0aa0540d 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dblldefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dblldefs.h
@@ -348,29 +348,6 @@ typedef bool(*dbll_init_fxn) (void);
348typedef int(*dbll_load_fxn) (struct dbll_library_obj *lib, 348typedef int(*dbll_load_fxn) (struct dbll_library_obj *lib,
349 dbll_flags flags, 349 dbll_flags flags,
350 struct dbll_attrs *attrs, u32 *entry); 350 struct dbll_attrs *attrs, u32 *entry);
351
352/*
353 * ======== dbll_load_sect ========
354 * Load a named section from an library (for overlay support).
355 * Parameters:
356 * lib - Handle returned from dbll_open().
357 * sec_name - Name of section to load.
358 * attrs - Contains write function and handle to pass to it.
359 * Returns:
360 * 0: Success.
361 * -ENXIO: Section not found.
362 * -ENOSYS: Function not implemented.
363 * Requires:
364 * Valid lib.
365 * sec_name != NULL.
366 * attrs != NULL.
367 * attrs->write != NULL.
368 * Ensures:
369 */
370typedef int(*dbll_load_sect_fxn) (struct dbll_library_obj *lib,
371 char *sz_sect_name,
372 struct dbll_attrs *attrs);
373
374/* 351/*
375 * ======== dbll_open ======== 352 * ======== dbll_open ========
376 * dbll_open() returns a library handle that can be used to load/unload 353 * dbll_open() returns a library handle that can be used to load/unload
@@ -421,23 +398,6 @@ typedef int(*dbll_open_fxn) (struct dbll_tar_obj *target, char *file,
421typedef int(*dbll_read_sect_fxn) (struct dbll_library_obj *lib, 398typedef int(*dbll_read_sect_fxn) (struct dbll_library_obj *lib,
422 char *name, char *content, 399 char *name, char *content,
423 u32 cont_size); 400 u32 cont_size);
424
425/*
426 * ======== dbll_set_attrs ========
427 * Set the attributes of the target.
428 * Parameters:
429 * target - Handle returned from dbll_create().
430 * pattrs - New attributes.
431 * Returns:
432 * Requires:
433 * DBL initialized.
434 * Valid target.
435 * pattrs != NULL.
436 * Ensures:
437 */
438typedef void (*dbll_set_attrs_fxn) (struct dbll_tar_obj *target,
439 struct dbll_attrs *attrs);
440
441/* 401/*
442 * ======== dbll_unload ======== 402 * ======== dbll_unload ========
443 * Unload library loaded with dbll_load(). 403 * Unload library loaded with dbll_load().
@@ -452,28 +412,6 @@ typedef void (*dbll_set_attrs_fxn) (struct dbll_tar_obj *target,
452 */ 412 */
453typedef void (*dbll_unload_fxn) (struct dbll_library_obj *library, 413typedef void (*dbll_unload_fxn) (struct dbll_library_obj *library,
454 struct dbll_attrs *attrs); 414 struct dbll_attrs *attrs);
455
456/*
457 * ======== dbll_unload_sect ========
458 * Unload a named section from an library (for overlay support).
459 * Parameters:
460 * lib - Handle returned from dbll_open().
461 * sec_name - Name of section to load.
462 * attrs - Contains free() function and handle to pass to it.
463 * Returns:
464 * 0: Success.
465 * -ENXIO: Named section not found.
466 * -ENOSYS
467 * Requires:
468 * DBL initialized.
469 * Valid lib.
470 * sec_name != NULL.
471 * Ensures:
472 */
473typedef int(*dbll_unload_sect_fxn) (struct dbll_library_obj *lib,
474 char *sz_sect_name,
475 struct dbll_attrs *attrs);
476
477struct dbll_fxns { 415struct dbll_fxns {
478 dbll_close_fxn close_fxn; 416 dbll_close_fxn close_fxn;
479 dbll_create_fxn create_fxn; 417 dbll_create_fxn create_fxn;
@@ -485,12 +423,9 @@ struct dbll_fxns {
485 dbll_get_sect_fxn get_sect_fxn; 423 dbll_get_sect_fxn get_sect_fxn;
486 dbll_init_fxn init_fxn; 424 dbll_init_fxn init_fxn;
487 dbll_load_fxn load_fxn; 425 dbll_load_fxn load_fxn;
488 dbll_load_sect_fxn load_sect_fxn;
489 dbll_open_fxn open_fxn; 426 dbll_open_fxn open_fxn;
490 dbll_read_sect_fxn read_sect_fxn; 427 dbll_read_sect_fxn read_sect_fxn;
491 dbll_set_attrs_fxn set_attrs_fxn;
492 dbll_unload_fxn unload_fxn; 428 dbll_unload_fxn unload_fxn;
493 dbll_unload_sect_fxn unload_sect_fxn;
494}; 429};
495 430
496#endif /* DBLDEFS_ */ 431#endif /* DBLDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dehdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dehdefs.h
deleted file mode 100644
index 09f8bf83ab0..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dehdefs.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * dehdefs.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Definition for Bridge driver module DEH.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef DEHDEFS_
20#define DEHDEFS_
21
22#include <dspbridge/mbx_sh.h> /* shared mailbox codes */
23
24/* DEH object manager */
25struct deh_mgr;
26
27/* Magic code used to determine if DSP signaled exception. */
28#define DEH_BASE MBX_DEH_BASE
29#define DEH_USERS_BASE MBX_DEH_USERS_BASE
30#define DEH_LIMIT MBX_DEH_LIMIT
31
32#endif /* _DEHDEFS_H */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h
index 357458fadd2..f41e4783157 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dev.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h
@@ -23,9 +23,9 @@
23#include <dspbridge/chnldefs.h> 23#include <dspbridge/chnldefs.h>
24#include <dspbridge/cmm.h> 24#include <dspbridge/cmm.h>
25#include <dspbridge/cod.h> 25#include <dspbridge/cod.h>
26#include <dspbridge/dehdefs.h> 26#include <dspbridge/dspdeh.h>
27#include <dspbridge/nodedefs.h> 27#include <dspbridge/nodedefs.h>
28#include <dspbridge/dispdefs.h> 28#include <dspbridge/disp.h>
29#include <dspbridge/dspdefs.h> 29#include <dspbridge/dspdefs.h>
30#include <dspbridge/dmm.h> 30#include <dspbridge/dmm.h>
31#include <dspbridge/host_os.h> 31#include <dspbridge/host_os.h>
@@ -95,43 +95,6 @@ extern int dev_create_device(struct dev_object
95 struct cfg_devnode *dev_node_obj); 95 struct cfg_devnode *dev_node_obj);
96 96
97/* 97/*
98 * ======== dev_create_iva_device ========
99 * Purpose:
100 * Called by the operating system to load the Bridge Driver for IVA.
101 * Parameters:
102 * device_obj: Ptr to location to receive the device object handle.
103 * driver_file_name: Name of Bridge driver PE DLL file to load. If the
104 * absolute path is not provided, the file is loaded
105 * through 'Bridge's module search path.
106 * host_config: Host configuration information, to be passed down
107 * to the Bridge driver when bridge_dev_create() is called.
108 * pDspConfig: DSP resources, to be passed down to the Bridge driver
109 * when bridge_dev_create() is called.
110 * dev_node_obj: Platform specific device node.
111 * Returns:
112 * 0: Module is loaded, device object has been created
113 * -ENOMEM: Insufficient memory to create needed resources.
114 * -EPERM: Unable to find Bridge driver entry point function.
115 * -ESPIPE: Unable to load ZL DLL.
116 * Requires:
117 * DEV Initialized.
118 * device_obj != NULL.
119 * driver_file_name != NULL.
120 * host_config != NULL.
121 * pDspConfig != NULL.
122 * Ensures:
123 * 0: *device_obj will contain handle to the new device object.
124 * Otherwise, does not create the device object, ensures the Bridge driver
125 * module is unloaded, and sets *device_obj to NULL.
126 */
127extern int dev_create_iva_device(struct dev_object
128 **device_obj,
129 const char *driver_file_name,
130 const struct cfg_hostres
131 *host_config,
132 struct cfg_devnode *dev_node_obj);
133
134/*
135 * ======== dev_create2 ======== 98 * ======== dev_create2 ========
136 * Purpose: 99 * Purpose:
137 * After successful loading of the image from api_init_complete2 100 * After successful loading of the image from api_init_complete2
@@ -146,8 +109,8 @@ extern int dev_create_iva_device(struct dev_object
146 * DEV Initialized 109 * DEV Initialized
147 * Valid hdev_obj 110 * Valid hdev_obj
148 * Ensures: 111 * Ensures:
149 * 0 and hdev_obj->hnode_mgr != NULL 112 * 0 and hdev_obj->node_mgr != NULL
150 * else hdev_obj->hnode_mgr == NULL 113 * else hdev_obj->node_mgr == NULL
151 */ 114 */
152extern int dev_create2(struct dev_object *hdev_obj); 115extern int dev_create2(struct dev_object *hdev_obj);
153 116
@@ -164,7 +127,7 @@ extern int dev_create2(struct dev_object *hdev_obj);
164 * DEV Initialized 127 * DEV Initialized
165 * Valid hdev_obj 128 * Valid hdev_obj
166 * Ensures: 129 * Ensures:
167 * 0 and hdev_obj->hnode_mgr == NULL 130 * 0 and hdev_obj->node_mgr == NULL
168 * else -EPERM. 131 * else -EPERM.
169 */ 132 */
170extern int dev_destroy2(struct dev_object *hdev_obj); 133extern int dev_destroy2(struct dev_object *hdev_obj);
@@ -542,24 +505,6 @@ extern void dev_exit(void);
542extern bool dev_init(void); 505extern bool dev_init(void);
543 506
544/* 507/*
545 * ======== dev_is_locked ========
546 * Purpose:
547 * Predicate function to determine if the device has been
548 * locked by a client for exclusive access.
549 * Parameters:
550 * hdev_obj: Handle to device object created with
551 * dev_create_device().
552 * Returns:
553 * 0: TRUE: device has been locked.
554 * 0: FALSE: device not locked.
555 * -EFAULT: hdev_obj was invalid.
556 * Requires:
557 * DEV Initialized.
558 * Ensures:
559 */
560extern int dev_is_locked(struct dev_object *hdev_obj);
561
562/*
563 * ======== dev_insert_proc_object ======== 508 * ======== dev_insert_proc_object ========
564 * Purpose: 509 * Purpose:
565 * Inserts the Processor Object into the List of PROC Objects 510 * Inserts the Processor Object into the List of PROC Objects
diff --git a/drivers/staging/tidspbridge/include/dspbridge/disp.h b/drivers/staging/tidspbridge/include/dspbridge/disp.h
index 82bf721447a..5dfdc8cfb93 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/disp.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/disp.h
@@ -22,7 +22,20 @@
22#include <dspbridge/dbdefs.h> 22#include <dspbridge/dbdefs.h>
23#include <dspbridge/nodedefs.h> 23#include <dspbridge/nodedefs.h>
24#include <dspbridge/nodepriv.h> 24#include <dspbridge/nodepriv.h>
25#include <dspbridge/dispdefs.h> 25
26struct disp_object;
27
28/* Node Dispatcher attributes */
29struct disp_attr {
30 u32 chnl_offset; /* Offset of channel ids reserved for RMS */
31 /* Size of buffer for sending data to RMS */
32 u32 chnl_buf_size;
33 int proc_family; /* eg, 5000 */
34 int proc_type; /* eg, 5510 */
35 void *reserved1; /* Reserved for future use. */
36 u32 reserved2; /* Reserved for future use. */
37};
38
26 39
27/* 40/*
28 * ======== disp_create ======== 41 * ======== disp_create ========
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dispdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dispdefs.h
deleted file mode 100644
index 946551a3dbb..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dispdefs.h
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * dispdefs.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Global DISP constants and types, shared by PROCESSOR, NODE, and DISP.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef DISPDEFS_
20#define DISPDEFS_
21
22struct disp_object;
23
24/* Node Dispatcher attributes */
25struct disp_attr {
26 u32 ul_chnl_offset; /* Offset of channel ids reserved for RMS */
27 /* Size of buffer for sending data to RMS */
28 u32 ul_chnl_buf_size;
29 int proc_family; /* eg, 5000 */
30 int proc_type; /* eg, 5510 */
31 void *reserved1; /* Reserved for future use. */
32 u32 reserved2; /* Reserved for future use. */
33};
34
35#endif /* DISPDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h
index c1f363ec9af..bb044097323 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/drv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h
@@ -23,11 +23,10 @@
23 23
24#include <dspbridge/devdefs.h> 24#include <dspbridge/devdefs.h>
25 25
26#include <dspbridge/drvdefs.h>
27#include <linux/idr.h> 26#include <linux/idr.h>
28 27
29#define DRV_ASSIGN 1 28/* Bridge Driver Object */
30#define DRV_RELEASE 0 29struct drv_object;
31 30
32/* Provide the DSP Internal memory windows that can be accessed from L3 address 31/* Provide the DSP Internal memory windows that can be accessed from L3 address
33 * space */ 32 * space */
@@ -38,23 +37,14 @@
38/* MEM1 is L2 RAM + L2 Cache space */ 37/* MEM1 is L2 RAM + L2 Cache space */
39#define OMAP_DSP_MEM1_BASE 0x5C7F8000 38#define OMAP_DSP_MEM1_BASE 0x5C7F8000
40#define OMAP_DSP_MEM1_SIZE 0x18000 39#define OMAP_DSP_MEM1_SIZE 0x18000
41#define OMAP_DSP_GEM1_BASE 0x107F8000
42 40
43/* MEM2 is L1P RAM/CACHE space */ 41/* MEM2 is L1P RAM/CACHE space */
44#define OMAP_DSP_MEM2_BASE 0x5CE00000 42#define OMAP_DSP_MEM2_BASE 0x5CE00000
45#define OMAP_DSP_MEM2_SIZE 0x8000 43#define OMAP_DSP_MEM2_SIZE 0x8000
46#define OMAP_DSP_GEM2_BASE 0x10E00000
47 44
48/* MEM3 is L1D RAM/CACHE space */ 45/* MEM3 is L1D RAM/CACHE space */
49#define OMAP_DSP_MEM3_BASE 0x5CF04000 46#define OMAP_DSP_MEM3_BASE 0x5CF04000
50#define OMAP_DSP_MEM3_SIZE 0x14000 47#define OMAP_DSP_MEM3_SIZE 0x14000
51#define OMAP_DSP_GEM3_BASE 0x10F04000
52
53#define OMAP_IVA2_PRM_BASE 0x48306000
54#define OMAP_IVA2_PRM_SIZE 0x1000
55
56#define OMAP_IVA2_CM_BASE 0x48004000
57#define OMAP_IVA2_CM_SIZE 0x1000
58 48
59#define OMAP_PER_CM_BASE 0x48005000 49#define OMAP_PER_CM_BASE 0x48005000
60#define OMAP_PER_CM_SIZE 0x1000 50#define OMAP_PER_CM_SIZE 0x1000
@@ -65,20 +55,14 @@
65#define OMAP_CORE_PRM_BASE 0x48306A00 55#define OMAP_CORE_PRM_BASE 0x48306A00
66#define OMAP_CORE_PRM_SIZE 0x1000 56#define OMAP_CORE_PRM_SIZE 0x1000
67 57
68#define OMAP_SYSC_BASE 0x48002000
69#define OMAP_SYSC_SIZE 0x1000
70
71#define OMAP_DMMU_BASE 0x5D000000 58#define OMAP_DMMU_BASE 0x5D000000
72#define OMAP_DMMU_SIZE 0x1000 59#define OMAP_DMMU_SIZE 0x1000
73 60
74#define OMAP_PRCM_VDD1_DOMAIN 1
75#define OMAP_PRCM_VDD2_DOMAIN 2
76
77/* GPP PROCESS CLEANUP Data structures */ 61/* GPP PROCESS CLEANUP Data structures */
78 62
79/* New structure (member of process context) abstracts NODE resource info */ 63/* New structure (member of process context) abstracts NODE resource info */
80struct node_res_object { 64struct node_res_object {
81 void *hnode; 65 void *node;
82 s32 node_allocated; /* Node status */ 66 s32 node_allocated; /* Node status */
83 s32 heap_allocated; /* Heap status */ 67 s32 heap_allocated; /* Heap status */
84 s32 streams_allocated; /* Streams status */ 68 s32 streams_allocated; /* Streams status */
@@ -114,21 +98,10 @@ struct dmm_rsv_object {
114 u32 dsp_reserved_addr; 98 u32 dsp_reserved_addr;
115}; 99};
116 100
117/* New structure (member of process context) abstracts DMM resource info */
118struct dspheap_res_object {
119 s32 heap_allocated; /* DMM status */
120 u32 ul_mpu_addr;
121 u32 ul_dsp_addr;
122 u32 ul_dsp_res_addr;
123 u32 heap_size;
124 void *hprocessor;
125 struct dspheap_res_object *next;
126};
127
128/* New structure (member of process context) abstracts stream resource info */ 101/* New structure (member of process context) abstracts stream resource info */
129struct strm_res_object { 102struct strm_res_object {
130 s32 stream_allocated; /* Stream status */ 103 s32 stream_allocated; /* Stream status */
131 void *hstream; 104 void *stream;
132 u32 num_bufs; 105 u32 num_bufs;
133 u32 dir; 106 u32 dir;
134 int id; 107 int id;
@@ -156,7 +129,7 @@ struct process_context {
156 enum gpp_proc_res_state res_state; 129 enum gpp_proc_res_state res_state;
157 130
158 /* Handle to Processor */ 131 /* Handle to Processor */
159 void *hprocessor; 132 void *processor;
160 133
161 /* DSP Node resources */ 134 /* DSP Node resources */
162 struct idr *node_id; 135 struct idr *node_id;
@@ -169,9 +142,6 @@ struct process_context {
169 struct list_head dmm_rsv_list; 142 struct list_head dmm_rsv_list;
170 spinlock_t dmm_rsv_lock; 143 spinlock_t dmm_rsv_lock;
171 144
172 /* DSP Heap resources */
173 struct dspheap_res_object *pdspheap_list;
174
175 /* Stream resources */ 145 /* Stream resources */
176 struct idr *stream_id; 146 struct idr *stream_id;
177}; 147};
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drvdefs.h b/drivers/staging/tidspbridge/include/dspbridge/drvdefs.h
deleted file mode 100644
index 2920917bbc5..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/drvdefs.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * drvdefs.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Definition of common struct between dspdefs.h and drv.h.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef DRVDEFS_
20#define DRVDEFS_
21
22/* Bridge Driver Object */
23struct drv_object;
24
25#endif /* DRVDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h
index 8da5bd8ede8..6ff808297c1 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h
@@ -29,22 +29,22 @@ union trapped_args {
29 /* MGR Module */ 29 /* MGR Module */
30 struct { 30 struct {
31 u32 node_id; 31 u32 node_id;
32 struct dsp_ndbprops __user *pndb_props; 32 struct dsp_ndbprops __user *ndb_props;
33 u32 undb_props_size; 33 u32 ndb_props_size;
34 u32 __user *pu_num_nodes; 34 u32 __user *num_nodes;
35 } args_mgr_enumnode_info; 35 } args_mgr_enumnode_info;
36 36
37 struct { 37 struct {
38 u32 processor_id; 38 u32 processor_id;
39 struct dsp_processorinfo __user *processor_info; 39 struct dsp_processorinfo __user *processor_info;
40 u32 processor_info_size; 40 u32 processor_info_size;
41 u32 __user *pu_num_procs; 41 u32 __user *num_procs;
42 } args_mgr_enumproc_info; 42 } args_mgr_enumproc_info;
43 43
44 struct { 44 struct {
45 struct dsp_uuid *uuid_obj; 45 struct dsp_uuid *uuid_obj;
46 enum dsp_dcdobjtype obj_type; 46 enum dsp_dcdobjtype obj_type;
47 char *psz_path_name; 47 char *sz_path_name;
48 } args_mgr_registerobject; 48 } args_mgr_registerobject;
49 49
50 struct { 50 struct {
@@ -55,8 +55,8 @@ union trapped_args {
55 struct { 55 struct {
56 struct dsp_notification __user *__user *anotifications; 56 struct dsp_notification __user *__user *anotifications;
57 u32 count; 57 u32 count;
58 u32 __user *pu_index; 58 u32 __user *index;
59 u32 utimeout; 59 u32 timeout;
60 } args_mgr_wait; 60 } args_mgr_wait;
61 61
62 /* PROC Module */ 62 /* PROC Module */
@@ -67,196 +67,188 @@ union trapped_args {
67 } args_proc_attach; 67 } args_proc_attach;
68 68
69 struct { 69 struct {
70 void *hprocessor; 70 void *processor;
71 u32 dw_cmd; 71 u32 cmd;
72 struct dsp_cbdata __user *pargs; 72 struct dsp_cbdata __user *args;
73 } args_proc_ctrl; 73 } args_proc_ctrl;
74 74
75 struct { 75 struct {
76 void *hprocessor; 76 void *processor;
77 } args_proc_detach; 77 } args_proc_detach;
78 78
79 struct { 79 struct {
80 void *hprocessor; 80 void *processor;
81 void *__user *node_tab; 81 void *__user *node_tab;
82 u32 node_tab_size; 82 u32 node_tab_size;
83 u32 __user *pu_num_nodes; 83 u32 __user *num_nodes;
84 u32 __user *pu_allocated; 84 u32 __user *allocated;
85 } args_proc_enumnode_info; 85 } args_proc_enumnode_info;
86 86
87 struct { 87 struct {
88 void *hprocessor; 88 void *processor;
89 u32 resource_type; 89 u32 resource_type;
90 struct dsp_resourceinfo *resource_info; 90 struct dsp_resourceinfo *resource_info;
91 u32 resource_info_size; 91 u32 resource_info_size;
92 } args_proc_enumresources; 92 } args_proc_enumresources;
93 93
94 struct { 94 struct {
95 void *hprocessor; 95 void *processor;
96 struct dsp_processorstate __user *proc_state_obj; 96 struct dsp_processorstate __user *proc_state_obj;
97 u32 state_info_size; 97 u32 state_info_size;
98 } args_proc_getstate; 98 } args_proc_getstate;
99 99
100 struct { 100 struct {
101 void *hprocessor; 101 void *processor;
102 u8 __user *pbuf; 102 u8 __user *buf;
103 u8 __user *psize; 103 u8 __user *size;
104 u32 max_size; 104 u32 max_size;
105 } args_proc_gettrace; 105 } args_proc_gettrace;
106 106
107 struct { 107 struct {
108 void *hprocessor; 108 void *processor;
109 s32 argc_index; 109 s32 argc_index;
110 char __user *__user *user_args; 110 char __user *__user *user_args;
111 char *__user *user_envp; 111 char *__user *user_envp;
112 } args_proc_load; 112 } args_proc_load;
113 113
114 struct { 114 struct {
115 void *hprocessor; 115 void *processor;
116 u32 event_mask; 116 u32 event_mask;
117 u32 notify_type; 117 u32 notify_type;
118 struct dsp_notification __user *hnotification; 118 struct dsp_notification __user *notification;
119 } args_proc_register_notify; 119 } args_proc_register_notify;
120 120
121 struct { 121 struct {
122 void *hprocessor; 122 void *processor;
123 } args_proc_start; 123 u32 size;
124 124 void *__user *rsv_addr;
125 struct {
126 void *hprocessor;
127 u32 ul_size;
128 void *__user *pp_rsv_addr;
129 } args_proc_rsvmem; 125 } args_proc_rsvmem;
130 126
131 struct { 127 struct {
132 void *hprocessor; 128 void *processor;
133 u32 ul_size; 129 u32 size;
134 void *prsv_addr; 130 void *rsv_addr;
135 } args_proc_unrsvmem; 131 } args_proc_unrsvmem;
136 132
137 struct { 133 struct {
138 void *hprocessor; 134 void *processor;
139 void *pmpu_addr; 135 void *mpu_addr;
140 u32 ul_size; 136 u32 size;
141 void *req_addr; 137 void *req_addr;
142 void *__user *pp_map_addr; 138 void *__user *map_addr;
143 u32 ul_map_attr; 139 u32 map_attr;
144 } args_proc_mapmem; 140 } args_proc_mapmem;
145 141
146 struct { 142 struct {
147 void *hprocessor; 143 void *processor;
148 u32 ul_size; 144 u32 size;
149 void *map_addr; 145 void *map_addr;
150 } args_proc_unmapmem; 146 } args_proc_unmapmem;
151 147
152 struct { 148 struct {
153 void *hprocessor; 149 void *processor;
154 void *pmpu_addr; 150 void *mpu_addr;
155 u32 ul_size; 151 u32 size;
156 u32 dir; 152 u32 dir;
157 } args_proc_dma; 153 } args_proc_dma;
158 154
159 struct { 155 struct {
160 void *hprocessor; 156 void *processor;
161 void *pmpu_addr; 157 void *mpu_addr;
162 u32 ul_size; 158 u32 size;
163 u32 ul_flags; 159 u32 flags;
164 } args_proc_flushmemory; 160 } args_proc_flushmemory;
165 161
166 struct { 162 struct {
167 void *hprocessor; 163 void *processor;
168 } args_proc_stop; 164 void *mpu_addr;
169 165 u32 size;
170 struct {
171 void *hprocessor;
172 void *pmpu_addr;
173 u32 ul_size;
174 } args_proc_invalidatememory; 166 } args_proc_invalidatememory;
175 167
176 /* NODE Module */ 168 /* NODE Module */
177 struct { 169 struct {
178 void *hprocessor; 170 void *processor;
179 struct dsp_uuid __user *node_id_ptr; 171 struct dsp_uuid __user *node_id_ptr;
180 struct dsp_cbdata __user *pargs; 172 struct dsp_cbdata __user *args;
181 struct dsp_nodeattrin __user *attr_in; 173 struct dsp_nodeattrin __user *attr_in;
182 void *__user *ph_node; 174 void *__user *node;
183 } args_node_allocate; 175 } args_node_allocate;
184 176
185 struct { 177 struct {
186 void *hnode; 178 void *node;
187 u32 usize; 179 u32 size;
188 struct dsp_bufferattr __user *pattr; 180 struct dsp_bufferattr __user *attr;
189 u8 *__user *pbuffer; 181 u8 *__user *buffer;
190 } args_node_allocmsgbuf; 182 } args_node_allocmsgbuf;
191 183
192 struct { 184 struct {
193 void *hnode; 185 void *node;
194 s32 prio; 186 s32 prio;
195 } args_node_changepriority; 187 } args_node_changepriority;
196 188
197 struct { 189 struct {
198 void *hnode; 190 void *node;
199 u32 stream_id; 191 u32 stream_id;
200 void *other_node; 192 void *other_node;
201 u32 other_stream; 193 u32 other_stream;
202 struct dsp_strmattr __user *pattrs; 194 struct dsp_strmattr __user *attrs;
203 struct dsp_cbdata __user *conn_param; 195 struct dsp_cbdata __user *conn_param;
204 } args_node_connect; 196 } args_node_connect;
205 197
206 struct { 198 struct {
207 void *hnode; 199 void *node;
208 } args_node_create; 200 } args_node_create;
209 201
210 struct { 202 struct {
211 void *hnode; 203 void *node;
212 } args_node_delete; 204 } args_node_delete;
213 205
214 struct { 206 struct {
215 void *hnode; 207 void *node;
216 struct dsp_bufferattr __user *pattr; 208 struct dsp_bufferattr __user *attr;
217 u8 *pbuffer; 209 u8 *buffer;
218 } args_node_freemsgbuf; 210 } args_node_freemsgbuf;
219 211
220 struct { 212 struct {
221 void *hnode; 213 void *node;
222 struct dsp_nodeattr __user *pattr; 214 struct dsp_nodeattr __user *attr;
223 u32 attr_size; 215 u32 attr_size;
224 } args_node_getattr; 216 } args_node_getattr;
225 217
226 struct { 218 struct {
227 void *hnode; 219 void *node;
228 struct dsp_msg __user *message; 220 struct dsp_msg __user *message;
229 u32 utimeout; 221 u32 timeout;
230 } args_node_getmessage; 222 } args_node_getmessage;
231 223
232 struct { 224 struct {
233 void *hnode; 225 void *node;
234 } args_node_pause; 226 } args_node_pause;
235 227
236 struct { 228 struct {
237 void *hnode; 229 void *node;
238 struct dsp_msg __user *message; 230 struct dsp_msg __user *message;
239 u32 utimeout; 231 u32 timeout;
240 } args_node_putmessage; 232 } args_node_putmessage;
241 233
242 struct { 234 struct {
243 void *hnode; 235 void *node;
244 u32 event_mask; 236 u32 event_mask;
245 u32 notify_type; 237 u32 notify_type;
246 struct dsp_notification __user *hnotification; 238 struct dsp_notification __user *notification;
247 } args_node_registernotify; 239 } args_node_registernotify;
248 240
249 struct { 241 struct {
250 void *hnode; 242 void *node;
251 } args_node_run; 243 } args_node_run;
252 244
253 struct { 245 struct {
254 void *hnode; 246 void *node;
255 int __user *pstatus; 247 int __user *status;
256 } args_node_terminate; 248 } args_node_terminate;
257 249
258 struct { 250 struct {
259 void *hprocessor; 251 void *processor;
260 struct dsp_uuid __user *node_id_ptr; 252 struct dsp_uuid __user *node_id_ptr;
261 struct dsp_ndbprops __user *node_props; 253 struct dsp_ndbprops __user *node_props;
262 } args_node_getuuidprops; 254 } args_node_getuuidprops;
@@ -264,104 +256,104 @@ union trapped_args {
264 /* STRM module */ 256 /* STRM module */
265 257
266 struct { 258 struct {
267 void *hstream; 259 void *stream;
268 u32 usize; 260 u32 size;
269 u8 *__user *ap_buffer; 261 u8 *__user *ap_buffer;
270 u32 num_bufs; 262 u32 num_bufs;
271 } args_strm_allocatebuffer; 263 } args_strm_allocatebuffer;
272 264
273 struct { 265 struct {
274 void *hstream; 266 void *stream;
275 } args_strm_close; 267 } args_strm_close;
276 268
277 struct { 269 struct {
278 void *hstream; 270 void *stream;
279 u8 *__user *ap_buffer; 271 u8 *__user *ap_buffer;
280 u32 num_bufs; 272 u32 num_bufs;
281 } args_strm_freebuffer; 273 } args_strm_freebuffer;
282 274
283 struct { 275 struct {
284 void *hstream; 276 void *stream;
285 void **ph_event; 277 void **event;
286 } args_strm_geteventhandle; 278 } args_strm_geteventhandle;
287 279
288 struct { 280 struct {
289 void *hstream; 281 void *stream;
290 struct stream_info __user *stream_info; 282 struct stream_info __user *stream_info;
291 u32 stream_info_size; 283 u32 stream_info_size;
292 } args_strm_getinfo; 284 } args_strm_getinfo;
293 285
294 struct { 286 struct {
295 void *hstream; 287 void *stream;
296 bool flush_flag; 288 bool flush_flag;
297 } args_strm_idle; 289 } args_strm_idle;
298 290
299 struct { 291 struct {
300 void *hstream; 292 void *stream;
301 u8 *pbuffer; 293 u8 *buffer;
302 u32 dw_bytes; 294 u32 bytes;
303 u32 dw_buf_size; 295 u32 buf_size;
304 u32 dw_arg; 296 u32 arg;
305 } args_strm_issue; 297 } args_strm_issue;
306 298
307 struct { 299 struct {
308 void *hnode; 300 void *node;
309 u32 direction; 301 u32 direction;
310 u32 index; 302 u32 index;
311 struct strm_attr __user *attr_in; 303 struct strm_attr __user *attr_in;
312 void *__user *ph_stream; 304 void *__user *stream;
313 } args_strm_open; 305 } args_strm_open;
314 306
315 struct { 307 struct {
316 void *hstream; 308 void *stream;
317 u8 *__user *buf_ptr; 309 u8 *__user *buf_ptr;
318 u32 __user *bytes; 310 u32 __user *bytes;
319 u32 __user *buf_size_ptr; 311 u32 __user *buf_size_ptr;
320 u32 __user *pdw_arg; 312 u32 __user *arg;
321 } args_strm_reclaim; 313 } args_strm_reclaim;
322 314
323 struct { 315 struct {
324 void *hstream; 316 void *stream;
325 u32 event_mask; 317 u32 event_mask;
326 u32 notify_type; 318 u32 notify_type;
327 struct dsp_notification __user *hnotification; 319 struct dsp_notification __user *notification;
328 } args_strm_registernotify; 320 } args_strm_registernotify;
329 321
330 struct { 322 struct {
331 void *__user *stream_tab; 323 void *__user *stream_tab;
332 u32 strm_num; 324 u32 strm_num;
333 u32 __user *pmask; 325 u32 __user *mask;
334 u32 utimeout; 326 u32 timeout;
335 } args_strm_select; 327 } args_strm_select;
336 328
337 /* CMM Module */ 329 /* CMM Module */
338 struct { 330 struct {
339 struct cmm_object *hcmm_mgr; 331 struct cmm_object *cmm_mgr;
340 u32 usize; 332 u32 size;
341 struct cmm_attrs *pattrs; 333 struct cmm_attrs *attrs;
342 void **pp_buf_va; 334 void **buf_va;
343 } args_cmm_allocbuf; 335 } args_cmm_allocbuf;
344 336
345 struct { 337 struct {
346 struct cmm_object *hcmm_mgr; 338 struct cmm_object *cmm_mgr;
347 void *buf_pa; 339 void *buf_pa;
348 u32 ul_seg_id; 340 u32 seg_id;
349 } args_cmm_freebuf; 341 } args_cmm_freebuf;
350 342
351 struct { 343 struct {
352 void *hprocessor; 344 void *processor;
353 struct cmm_object *__user *ph_cmm_mgr; 345 struct cmm_object *__user *cmm_mgr;
354 } args_cmm_gethandle; 346 } args_cmm_gethandle;
355 347
356 struct { 348 struct {
357 struct cmm_object *hcmm_mgr; 349 struct cmm_object *cmm_mgr;
358 struct cmm_info __user *cmm_info_obj; 350 struct cmm_info __user *cmm_info_obj;
359 } args_cmm_getinfo; 351 } args_cmm_getinfo;
360 352
361 /* UTIL module */ 353 /* UTIL module */
362 struct { 354 struct {
363 s32 util_argc; 355 s32 util_argc;
364 char **pp_argv; 356 char **argv;
365 } args_util_testdll; 357 } args_util_testdll;
366}; 358};
367 359
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
index 0ae7d1646a1..c2ba26c0930 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
@@ -32,17 +32,11 @@
32#include <dspbridge/brddefs.h> 32#include <dspbridge/brddefs.h>
33#include <dspbridge/cfgdefs.h> 33#include <dspbridge/cfgdefs.h>
34#include <dspbridge/chnlpriv.h> 34#include <dspbridge/chnlpriv.h>
35#include <dspbridge/dehdefs.h> 35#include <dspbridge/dspdeh.h>
36#include <dspbridge/devdefs.h> 36#include <dspbridge/devdefs.h>
37#include <dspbridge/iodefs.h> 37#include <dspbridge/io.h>
38#include <dspbridge/msgdefs.h> 38#include <dspbridge/msgdefs.h>
39 39
40/*
41 * Any IOCTLS at or above this value are reserved for standard Bridge driver
42 * interfaces.
43 */
44#define BRD_RESERVEDIOCTLBASE 0x8000
45
46/* Handle to Bridge driver's private device context. */ 40/* Handle to Bridge driver's private device context. */
47struct bridge_dev_context; 41struct bridge_dev_context;
48 42
@@ -306,7 +300,7 @@ typedef int(*fxn_brd_write) (struct bridge_dev_context *dev_ctxt,
306 * mgr_attrts->irq_shared: TRUE if the IRQ is shareable. 300 * mgr_attrts->irq_shared: TRUE if the IRQ is shareable.
307 * mgr_attrts->word_size: DSP Word size in equivalent PC bytes.. 301 * mgr_attrts->word_size: DSP Word size in equivalent PC bytes..
308 * mgr_attrts->shm_base: Base physical address of shared memory, if any. 302 * mgr_attrts->shm_base: Base physical address of shared memory, if any.
309 * mgr_attrts->usm_length: Bytes of shared memory block. 303 * mgr_attrts->sm_length: Bytes of shared memory block.
310 * Returns: 304 * Returns:
311 * 0: Success; 305 * 0: Success;
312 * -ENOMEM: Insufficient memory for requested resources. 306 * -ENOMEM: Insufficient memory for requested resources.
@@ -981,51 +975,51 @@ typedef void (*fxn_msg_setqueueid) (struct msg_queue *msg_queue_obj,
981struct bridge_drv_interface { 975struct bridge_drv_interface {
982 u32 brd_api_major_version; /* Set to BRD_API_MAJOR_VERSION. */ 976 u32 brd_api_major_version; /* Set to BRD_API_MAJOR_VERSION. */
983 u32 brd_api_minor_version; /* Set to BRD_API_MINOR_VERSION. */ 977 u32 brd_api_minor_version; /* Set to BRD_API_MINOR_VERSION. */
984 fxn_dev_create pfn_dev_create; /* Create device context */ 978 fxn_dev_create dev_create; /* Create device context */
985 fxn_dev_destroy pfn_dev_destroy; /* Destroy device context */ 979 fxn_dev_destroy dev_destroy; /* Destroy device context */
986 fxn_dev_ctrl pfn_dev_cntrl; /* Optional vendor interface */ 980 fxn_dev_ctrl dev_cntrl; /* Optional vendor interface */
987 fxn_brd_monitor pfn_brd_monitor; /* Load and/or start monitor */ 981 fxn_brd_monitor brd_monitor; /* Load and/or start monitor */
988 fxn_brd_start pfn_brd_start; /* Start DSP program. */ 982 fxn_brd_start brd_start; /* Start DSP program. */
989 fxn_brd_stop pfn_brd_stop; /* Stop/reset board. */ 983 fxn_brd_stop brd_stop; /* Stop/reset board. */
990 fxn_brd_status pfn_brd_status; /* Get current board status. */ 984 fxn_brd_status brd_status; /* Get current board status. */
991 fxn_brd_read pfn_brd_read; /* Read board memory */ 985 fxn_brd_read brd_read; /* Read board memory */
992 fxn_brd_write pfn_brd_write; /* Write board memory. */ 986 fxn_brd_write brd_write; /* Write board memory. */
993 fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */ 987 fxn_brd_setstate brd_set_state; /* Sets the Board State */
994 fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */ 988 fxn_brd_memcopy brd_mem_copy; /* Copies DSP Memory */
995 fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */ 989 fxn_brd_memwrite brd_mem_write; /* Write DSP Memory w/o halt */
996 fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */ 990 fxn_brd_memmap brd_mem_map; /* Maps MPU mem to DSP mem */
997 fxn_brd_memunmap pfn_brd_mem_un_map; /* Unmaps MPU mem to DSP mem */ 991 fxn_brd_memunmap brd_mem_un_map; /* Unmaps MPU mem to DSP mem */
998 fxn_chnl_create pfn_chnl_create; /* Create channel manager. */ 992 fxn_chnl_create chnl_create; /* Create channel manager. */
999 fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */ 993 fxn_chnl_destroy chnl_destroy; /* Destroy channel manager. */
1000 fxn_chnl_open pfn_chnl_open; /* Create a new channel. */ 994 fxn_chnl_open chnl_open; /* Create a new channel. */
1001 fxn_chnl_close pfn_chnl_close; /* Close a channel. */ 995 fxn_chnl_close chnl_close; /* Close a channel. */
1002 fxn_chnl_addioreq pfn_chnl_add_io_req; /* Req I/O on a channel. */ 996 fxn_chnl_addioreq chnl_add_io_req; /* Req I/O on a channel. */
1003 fxn_chnl_getioc pfn_chnl_get_ioc; /* Wait for I/O completion. */ 997 fxn_chnl_getioc chnl_get_ioc; /* Wait for I/O completion. */
1004 fxn_chnl_cancelio pfn_chnl_cancel_io; /* Cancl I/O on a channel. */ 998 fxn_chnl_cancelio chnl_cancel_io; /* Cancl I/O on a channel. */
1005 fxn_chnl_flushio pfn_chnl_flush_io; /* Flush I/O. */ 999 fxn_chnl_flushio chnl_flush_io; /* Flush I/O. */
1006 fxn_chnl_getinfo pfn_chnl_get_info; /* Get channel specific info */ 1000 fxn_chnl_getinfo chnl_get_info; /* Get channel specific info */
1007 /* Get channel manager info. */ 1001 /* Get channel manager info. */
1008 fxn_chnl_getmgrinfo pfn_chnl_get_mgr_info; 1002 fxn_chnl_getmgrinfo chnl_get_mgr_info;
1009 fxn_chnl_idle pfn_chnl_idle; /* Idle the channel */ 1003 fxn_chnl_idle chnl_idle; /* Idle the channel */
1010 /* Register for notif. */ 1004 /* Register for notif. */
1011 fxn_chnl_registernotify pfn_chnl_register_notify; 1005 fxn_chnl_registernotify chnl_register_notify;
1012 fxn_io_create pfn_io_create; /* Create IO manager */ 1006 fxn_io_create io_create; /* Create IO manager */
1013 fxn_io_destroy pfn_io_destroy; /* Destroy IO manager */ 1007 fxn_io_destroy io_destroy; /* Destroy IO manager */
1014 fxn_io_onloaded pfn_io_on_loaded; /* Notify of program loaded */ 1008 fxn_io_onloaded io_on_loaded; /* Notify of program loaded */
1015 /* Get Processor's current and predicted load */ 1009 /* Get Processor's current and predicted load */
1016 fxn_io_getprocload pfn_io_get_proc_load; 1010 fxn_io_getprocload io_get_proc_load;
1017 fxn_msg_create pfn_msg_create; /* Create message manager */ 1011 fxn_msg_create msg_create; /* Create message manager */
1018 /* Create message queue */ 1012 /* Create message queue */
1019 fxn_msg_createqueue pfn_msg_create_queue; 1013 fxn_msg_createqueue msg_create_queue;
1020 fxn_msg_delete pfn_msg_delete; /* Delete message manager */ 1014 fxn_msg_delete msg_delete; /* Delete message manager */
1021 /* Delete message queue */ 1015 /* Delete message queue */
1022 fxn_msg_deletequeue pfn_msg_delete_queue; 1016 fxn_msg_deletequeue msg_delete_queue;
1023 fxn_msg_get pfn_msg_get; /* Get a message */ 1017 fxn_msg_get msg_get; /* Get a message */
1024 fxn_msg_put pfn_msg_put; /* Send a message */ 1018 fxn_msg_put msg_put; /* Send a message */
1025 /* Register for notif. */ 1019 /* Register for notif. */
1026 fxn_msg_registernotify pfn_msg_register_notify; 1020 fxn_msg_registernotify msg_register_notify;
1027 /* Set message queue id */ 1021 /* Set message queue id */
1028 fxn_msg_setqueueid pfn_msg_set_queue_id; 1022 fxn_msg_setqueueid msg_set_queue_id;
1029}; 1023};
1030 1024
1031/* 1025/*
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdrv.h b/drivers/staging/tidspbridge/include/dspbridge/dspdrv.h
index 0bb250f95ba..7adf1e70531 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspdrv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdrv.h
@@ -20,8 +20,6 @@
20#if !defined _DSPDRV_H_ 20#if !defined _DSPDRV_H_
21#define _DSPDRV_H_ 21#define _DSPDRV_H_
22 22
23#define MAX_DEV 10 /* Max support of 10 devices */
24
25/* 23/*
26 * ======== dsp_deinit ======== 24 * ======== dsp_deinit ========
27 * Purpose: 25 * Purpose:
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspio.h b/drivers/staging/tidspbridge/include/dspbridge/dspio.h
index 88f5f90fe92..66b64fadf19 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspio.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspio.h
@@ -24,7 +24,8 @@
24#define DSPIO_ 24#define DSPIO_
25 25
26#include <dspbridge/devdefs.h> 26#include <dspbridge/devdefs.h>
27#include <dspbridge/iodefs.h> 27#include <dspbridge/io.h>
28
28 29
29extern int bridge_io_create(struct io_mgr **io_man, 30extern int bridge_io_create(struct io_mgr **io_man,
30 struct dev_object *hdev_obj, 31 struct dev_object *hdev_obj,
@@ -34,7 +35,6 @@ extern int bridge_io_destroy(struct io_mgr *hio_mgr);
34 35
35extern int bridge_io_on_loaded(struct io_mgr *hio_mgr); 36extern int bridge_io_on_loaded(struct io_mgr *hio_mgr);
36 37
37extern int iva_io_on_loaded(struct io_mgr *hio_mgr);
38extern int bridge_io_get_proc_load(struct io_mgr *hio_mgr, 38extern int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
39 struct dsp_procloadstat *proc_lstat); 39 struct dsp_procloadstat *proc_lstat);
40 40
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
index 41e0594dff3..0c7ec04448f 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
@@ -31,9 +31,6 @@
31 31
32#define BRDIOCTL_CHNLREAD (BRDIOCTL_RESERVEDBASE + 0x10) 32#define BRDIOCTL_CHNLREAD (BRDIOCTL_RESERVEDBASE + 0x10)
33#define BRDIOCTL_CHNLWRITE (BRDIOCTL_RESERVEDBASE + 0x20) 33#define BRDIOCTL_CHNLWRITE (BRDIOCTL_RESERVEDBASE + 0x20)
34#define BRDIOCTL_GETINTRCOUNT (BRDIOCTL_RESERVEDBASE + 0x30)
35#define BRDIOCTL_RESETINTRCOUNT (BRDIOCTL_RESERVEDBASE + 0x40)
36#define BRDIOCTL_INTERRUPTDSP (BRDIOCTL_RESERVEDBASE + 0x50)
37/* DMMU */ 34/* DMMU */
38#define BRDIOCTL_SETMMUCONFIG (BRDIOCTL_RESERVEDBASE + 0x60) 35#define BRDIOCTL_SETMMUCONFIG (BRDIOCTL_RESERVEDBASE + 0x60)
39/* PWR */ 36/* PWR */
@@ -47,8 +44,6 @@
47#define BRDIOCTL_DEEPSLEEP (BRDIOCTL_PWRCONTROL + 0x0) 44#define BRDIOCTL_DEEPSLEEP (BRDIOCTL_PWRCONTROL + 0x0)
48#define BRDIOCTL_EMERGENCYSLEEP (BRDIOCTL_PWRCONTROL + 0x1) 45#define BRDIOCTL_EMERGENCYSLEEP (BRDIOCTL_PWRCONTROL + 0x1)
49#define BRDIOCTL_WAKEUP (BRDIOCTL_PWRCONTROL + 0x2) 46#define BRDIOCTL_WAKEUP (BRDIOCTL_PWRCONTROL + 0x2)
50#define BRDIOCTL_PWRENABLE (BRDIOCTL_PWRCONTROL + 0x3)
51#define BRDIOCTL_PWRDISABLE (BRDIOCTL_PWRCONTROL + 0x4)
52#define BRDIOCTL_CLK_CTRL (BRDIOCTL_PWRCONTROL + 0x7) 47#define BRDIOCTL_CLK_CTRL (BRDIOCTL_PWRCONTROL + 0x7)
53/* DSP Initiated Hibernate */ 48/* DSP Initiated Hibernate */
54#define BRDIOCTL_PWR_HIBERNATE (BRDIOCTL_PWRCONTROL + 0x8) 49#define BRDIOCTL_PWR_HIBERNATE (BRDIOCTL_PWRCONTROL + 0x8)
@@ -60,11 +55,11 @@
60#define BRDIOCTL_NUMOFMMUTLB 32 55#define BRDIOCTL_NUMOFMMUTLB 32
61 56
62struct bridge_ioctl_extproc { 57struct bridge_ioctl_extproc {
63 u32 ul_dsp_va; /* DSP virtual address */ 58 u32 dsp_va; /* DSP virtual address */
64 u32 ul_gpp_pa; /* GPP physical address */ 59 u32 gpp_pa; /* GPP physical address */
65 /* GPP virtual address. __va does not work for ioremapped addresses */ 60 /* GPP virtual address. __va does not work for ioremapped addresses */
66 u32 ul_gpp_va; 61 u32 gpp_va;
67 u32 ul_size; /* Size of the mapped memory in bytes */ 62 u32 size; /* Size of the mapped memory in bytes */
68 enum hw_endianism_t endianism; 63 enum hw_endianism_t endianism;
69 enum hw_mmu_mixed_size_t mixed_mode; 64 enum hw_mmu_mixed_size_t mixed_mode;
70 enum hw_element_size_t elem_size; 65 enum hw_element_size_t elem_size;
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h b/drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h
index 4b109d173b1..052d27ee8b1 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h
@@ -46,8 +46,6 @@ struct dynamic_loader_initialize;
46 * Option flags to modify the behavior of module loading 46 * Option flags to modify the behavior of module loading
47 */ 47 */
48#define DLOAD_INITBSS 0x1 /* initialize BSS sections to zero */ 48#define DLOAD_INITBSS 0x1 /* initialize BSS sections to zero */
49#define DLOAD_BIGEND 0x2 /* require big-endian load module */
50#define DLOAD_LITTLE 0x4 /* require little-endian load module */
51 49
52/***************************************************************************** 50/*****************************************************************************
53 * Procedure dynamic_load_module 51 * Procedure dynamic_load_module
diff --git a/drivers/staging/tidspbridge/include/dspbridge/gb.h b/drivers/staging/tidspbridge/include/dspbridge/gb.h
deleted file mode 100644
index fda783aa160..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/gb.h
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * gb.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Generic bitmap manager.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef GB_
20#define GB_
21
22#define GB_NOBITS (~0)
23#include <dspbridge/host_os.h>
24
25struct gb_t_map;
26
27/*
28 * ======== gb_clear ========
29 * Clear the bit in position bitn in the bitmap map. Bit positions are
30 * zero based.
31 */
32
33extern void gb_clear(struct gb_t_map *map, u32 bitn);
34
35/*
36 * ======== gb_create ========
37 * Create a bit map with len bits. Initially all bits are cleared.
38 */
39
40extern struct gb_t_map *gb_create(u32 len);
41
42/*
43 * ======== gb_delete ========
44 * Delete previously created bit map
45 */
46
47extern void gb_delete(struct gb_t_map *map);
48
49/*
50 * ======== gb_findandset ========
51 * Finds a clear bit, sets it, and returns the position
52 */
53
54extern u32 gb_findandset(struct gb_t_map *map);
55
56/*
57 * ======== gb_minclear ========
58 * gb_minclear returns the minimum clear bit position. If no bit is
59 * clear, gb_minclear returns -1.
60 */
61extern u32 gb_minclear(struct gb_t_map *map);
62
63/*
64 * ======== gb_set ========
65 * Set the bit in position bitn in the bitmap map. Bit positions are
66 * zero based.
67 */
68
69extern void gb_set(struct gb_t_map *map, u32 bitn);
70
71/*
72 * ======== gb_test ========
73 * Returns TRUE if the bit in position bitn is set in map; otherwise
74 * gb_test returns FALSE. Bit positions are zero based.
75 */
76
77extern bool gb_test(struct gb_t_map *map, u32 bitn);
78
79#endif /*GB_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/gs.h b/drivers/staging/tidspbridge/include/dspbridge/gs.h
deleted file mode 100644
index f32d8d9af41..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/gs.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * gs.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Memory allocation/release wrappers. This module allows clients to
7 * avoid OS spacific issues related to memory allocation. It also provides
8 * simple diagnostic capabilities to assist in the detection of memory
9 * leaks.
10 *
11 * Copyright (C) 2005-2006 Texas Instruments, Inc.
12 *
13 * This package is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 *
17 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
19 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20 */
21
22#ifndef GS_
23#define GS_
24
25/*
26 * ======== gs_alloc ========
27 * Alloc size bytes of space. Returns pointer to space
28 * allocated, otherwise NULL.
29 */
30extern void *gs_alloc(u32 size);
31
32/*
33 * ======== gs_exit ========
34 * Module exit. Do not change to "#define gs_init()"; in
35 * some environments this operation must actually do some work!
36 */
37extern void gs_exit(void);
38
39/*
40 * ======== gs_free ========
41 * Free space allocated by gs_alloc() or GS_calloc().
42 */
43extern void gs_free(void *ptr);
44
45/*
46 * ======== gs_frees ========
47 * Free space allocated by gs_alloc() or GS_calloc() and assert that
48 * the size of the allocation is size bytes.
49 */
50extern void gs_frees(void *ptr, u32 size);
51
52/*
53 * ======== gs_init ========
54 * Module initialization. Do not change to "#define gs_init()"; in
55 * some environments this operation must actually do some work!
56 */
57extern void gs_init(void);
58
59#endif /*GS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/host_os.h b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
index 6549898ac63..b1b8acb5d3c 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/host_os.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
@@ -57,13 +57,4 @@
57extern struct platform_device *omap_dspbridge_dev; 57extern struct platform_device *omap_dspbridge_dev;
58extern struct device *bridge; 58extern struct device *bridge;
59 59
60#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
61extern void dspbridge_reserve_sdram(void);
62#else
63static inline void dspbridge_reserve_sdram(void)
64{
65}
66#endif
67
68extern unsigned long dspbridge_get_mempool_base(void);
69#endif 60#endif
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io.h b/drivers/staging/tidspbridge/include/dspbridge/io.h
index bc346f9a01c..500bbd71684 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io.h
@@ -22,7 +22,18 @@
22#include <dspbridge/cfgdefs.h> 22#include <dspbridge/cfgdefs.h>
23#include <dspbridge/devdefs.h> 23#include <dspbridge/devdefs.h>
24 24
25#include <dspbridge/iodefs.h> 25/* IO Objects: */
26struct io_mgr;
27
28/* IO manager attributes: */
29struct io_attrs {
30 u8 birq; /* Channel's I/O IRQ number. */
31 bool irq_shared; /* TRUE if the IRQ is shareable. */
32 u32 word_size; /* DSP Word size. */
33 u32 shm_base; /* Physical base address of shared memory. */
34 u32 sm_length; /* Size (in bytes) of shared memory. */
35};
36
26 37
27/* 38/*
28 * ======== io_create ======== 39 * ======== io_create ========
@@ -95,20 +106,4 @@ extern void io_exit(void);
95 */ 106 */
96extern bool io_init(void); 107extern bool io_init(void);
97 108
98/*
99 * ======== io_on_loaded ========
100 * Purpose:
101 * Called when a program is loaded so IO manager can update its
102 * internal state.
103 * Parameters:
104 * hio_mgr: IOmanager object.
105 * Returns:
106 * 0: Success.
107 * -EFAULT: hio_mgr was invalid.
108 * Requires:
109 * io_init(void) called.
110 * Ensures:
111 */
112extern int io_on_loaded(struct io_mgr *hio_mgr);
113
114#endif /* CHNL_ */ 109#endif /* CHNL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
index 8242c70e09d..a054dad2133 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
@@ -23,12 +23,16 @@
23#include <dspbridge/_chnl_sm.h> 23#include <dspbridge/_chnl_sm.h>
24#include <dspbridge/host_os.h> 24#include <dspbridge/host_os.h>
25 25
26#include <dspbridge/iodefs.h> 26#include <dspbridge/io.h>
27#include <dspbridge/mbx_sh.h> /* shared mailbox codes */
28
29/* Magic code used to determine if DSP signaled exception. */
30#define DEH_BASE MBX_DEH_BASE
31#define DEH_LIMIT MBX_DEH_LIMIT
27 32
28#define IO_INPUT 0 33#define IO_INPUT 0
29#define IO_OUTPUT 1 34#define IO_OUTPUT 1
30#define IO_SERVICE 2 35#define IO_SERVICE 2
31#define IO_MAXSERVICE IO_SERVICE
32 36
33#ifdef CONFIG_TIDSPBRIDGE_DVFS 37#ifdef CONFIG_TIDSPBRIDGE_DVFS
34/* The maximum number of OPPs that are supported */ 38/* The maximum number of OPPs that are supported */
@@ -116,122 +120,6 @@ extern void io_request_chnl(struct io_mgr *io_manager,
116extern void iosm_schedule(struct io_mgr *io_manager); 120extern void iosm_schedule(struct io_mgr *io_manager);
117 121
118/* 122/*
119 * DSP-DMA IO functions
120 */
121
122/*
123 * ======== io_ddma_init_chnl_desc ========
124 * Purpose:
125 * Initialize DSP DMA channel descriptor.
126 * Parameters:
127 * hio_mgr: Handle to a I/O manager.
128 * ddma_chnl_id: DDMA channel identifier.
129 * num_desc: Number of buffer descriptors(equals # of IOReqs &
130 * Chirps)
131 * dsp: Dsp address;
132 * Returns:
133 * Requires:
134 * ddma_chnl_id < DDMA_MAXDDMACHNLS
135 * num_desc > 0
136 * pVa != NULL
137 * pDspPa != NULL
138 *
139 * Ensures:
140 */
141extern void io_ddma_init_chnl_desc(struct io_mgr *hio_mgr, u32 ddma_chnl_id,
142 u32 num_desc, void *dsp);
143
144/*
145 * ======== io_ddma_clear_chnl_desc ========
146 * Purpose:
147 * Clear DSP DMA channel descriptor.
148 * Parameters:
149 * hio_mgr: Handle to a I/O manager.
150 * ddma_chnl_id: DDMA channel identifier.
151 * Returns:
152 * Requires:
153 * ddma_chnl_id < DDMA_MAXDDMACHNLS
154 * Ensures:
155 */
156extern void io_ddma_clear_chnl_desc(struct io_mgr *hio_mgr, u32 ddma_chnl_id);
157
158/*
159 * ======== io_ddma_request_chnl ========
160 * Purpose:
161 * Request channel DSP-DMA from the DSP. Sets up SM descriptors and
162 * control fields in shared memory.
163 * Parameters:
164 * hio_mgr: Handle to a I/O manager.
165 * pchnl: Ptr to channel object
166 * chnl_packet_obj: Ptr to channel i/o request packet.
167 * Returns:
168 * Requires:
169 * pchnl != NULL
170 * pchnl->cio_reqs > 0
171 * chnl_packet_obj != NULL
172 * Ensures:
173 */
174extern void io_ddma_request_chnl(struct io_mgr *hio_mgr,
175 struct chnl_object *pchnl,
176 struct chnl_irp *chnl_packet_obj,
177 u16 *mbx_val);
178
179/*
180 * Zero-copy IO functions
181 */
182
183/*
184 * ======== io_ddzc_init_chnl_desc ========
185 * Purpose:
186 * Initialize ZCPY channel descriptor.
187 * Parameters:
188 * hio_mgr: Handle to a I/O manager.
189 * zid: zero-copy channel identifier.
190 * Returns:
191 * Requires:
192 * ddma_chnl_id < DDMA_MAXZCPYCHNLS
193 * hio_mgr != Null
194 * Ensures:
195 */
196extern void io_ddzc_init_chnl_desc(struct io_mgr *hio_mgr, u32 zid);
197
198/*
199 * ======== io_ddzc_clear_chnl_desc ========
200 * Purpose:
201 * Clear DSP ZC channel descriptor.
202 * Parameters:
203 * hio_mgr: Handle to a I/O manager.
204 * ch_id: ZC channel identifier.
205 * Returns:
206 * Requires:
207 * hio_mgr is valid
208 * ch_id < DDMA_MAXZCPYCHNLS
209 * Ensures:
210 */
211extern void io_ddzc_clear_chnl_desc(struct io_mgr *hio_mgr, u32 ch_id);
212
213/*
214 * ======== io_ddzc_request_chnl ========
215 * Purpose:
216 * Request zero-copy channel transfer. Sets up SM descriptors and
217 * control fields in shared memory.
218 * Parameters:
219 * hio_mgr: Handle to a I/O manager.
220 * pchnl: Ptr to channel object
221 * chnl_packet_obj: Ptr to channel i/o request packet.
222 * Returns:
223 * Requires:
224 * pchnl != NULL
225 * pchnl->cio_reqs > 0
226 * chnl_packet_obj != NULL
227 * Ensures:
228 */
229extern void io_ddzc_request_chnl(struct io_mgr *hio_mgr,
230 struct chnl_object *pchnl,
231 struct chnl_irp *chnl_packet_obj,
232 u16 *mbx_val);
233
234/*
235 * ======== io_sh_msetting ======== 123 * ======== io_sh_msetting ========
236 * Purpose: 124 * Purpose:
237 * Sets the shared memory setting 125 * Sets the shared memory setting
@@ -254,25 +142,6 @@ extern int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs);
254/* Maximum channel bufsize that can be used. */ 142/* Maximum channel bufsize that can be used. */
255extern u32 io_buf_size(struct io_mgr *hio_mgr); 143extern u32 io_buf_size(struct io_mgr *hio_mgr);
256 144
257extern u32 io_read_value(struct bridge_dev_context *dev_ctxt, u32 dsp_addr);
258
259extern void io_write_value(struct bridge_dev_context *dev_ctxt,
260 u32 dsp_addr, u32 value);
261
262extern u32 io_read_value_long(struct bridge_dev_context *dev_ctxt,
263 u32 dsp_addr);
264
265extern void io_write_value_long(struct bridge_dev_context *dev_ctxt,
266 u32 dsp_addr, u32 value);
267
268extern void io_or_set_value(struct bridge_dev_context *dev_ctxt,
269 u32 dsp_addr, u32 value);
270
271extern void io_and_set_value(struct bridge_dev_context *dev_ctxt,
272 u32 dsp_addr, u32 value);
273
274extern void io_sm_init(void);
275
276#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE 145#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
277/* 146/*
278 * ========print_dsp_trace_buffer ======== 147 * ========print_dsp_trace_buffer ========
diff --git a/drivers/staging/tidspbridge/include/dspbridge/iodefs.h b/drivers/staging/tidspbridge/include/dspbridge/iodefs.h
deleted file mode 100644
index 8bd10a04200..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/iodefs.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * iodefs.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * System-wide channel objects and constants.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef IODEFS_
20#define IODEFS_
21
22#define IO_MAXIRQ 0xff /* Arbitrarily large number. */
23
24/* IO Objects: */
25struct io_mgr;
26
27/* IO manager attributes: */
28struct io_attrs {
29 u8 birq; /* Channel's I/O IRQ number. */
30 bool irq_shared; /* TRUE if the IRQ is shareable. */
31 u32 word_size; /* DSP Word size. */
32 u32 shm_base; /* Physical base address of shared memory. */
33 u32 usm_length; /* Size (in bytes) of shared memory. */
34};
35
36#endif /* IODEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/ldr.h b/drivers/staging/tidspbridge/include/dspbridge/ldr.h
deleted file mode 100644
index 6a0269cd07e..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/ldr.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * ldr.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Provide module loading services and symbol export services.
7 *
8 * Notes:
9 * This service is meant to be used by modules of the DSP/BIOS Bridge
10 * driver.
11 *
12 * Copyright (C) 2005-2006 Texas Instruments, Inc.
13 *
14 * This package is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 *
18 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
20 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
21 */
22
23#ifndef LDR_
24#define LDR_
25
26/* Loader objects: */
27struct ldr_module;
28
29#endif /* LDR_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/list.h b/drivers/staging/tidspbridge/include/dspbridge/list.h
deleted file mode 100644
index 6837b614073..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/list.h
+++ /dev/null
@@ -1,225 +0,0 @@
1/*
2 * list.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Declarations of list management control structures and definitions
7 * of inline list management functions.
8 *
9 * Copyright (C) 2008 Texas Instruments, Inc.
10 *
11 * This package is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
17 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 */
19
20#ifndef LIST_
21#define LIST_
22
23#include <dspbridge/host_os.h>
24#include <linux/list.h>
25
26#define LST_IS_EMPTY(l) list_empty(&(l)->head)
27
28struct lst_list {
29 struct list_head head;
30};
31
32/*
33 * ======== lst_first ========
34 * Purpose:
35 * Returns a pointer to the first element of the list, or NULL if the list
36 * is empty.
37 * Parameters:
38 * lst: Pointer to list control structure.
39 * Returns:
40 * Pointer to first list element, or NULL.
41 * Requires:
42 * - LST initialized.
43 * - lst != NULL.
44 * Ensures:
45 */
46static inline struct list_head *lst_first(struct lst_list *lst)
47{
48 if (lst && !list_empty(&lst->head))
49 return lst->head.next;
50 return NULL;
51}
52
53/*
54 * ======== lst_get_head ========
55 * Purpose:
56 * Pops the head off the list and returns a pointer to it.
57 * Details:
58 * If the list is empty, returns NULL.
59 * Else, removes the element at the head of the list, making the next
60 * element the head of the list.
61 * The head is removed by making the tail element of the list point its
62 * "next" pointer at the next element after the head, and by making the
63 * "prev" pointer of the next element after the head point at the tail
64 * element. So the next element after the head becomes the new head of
65 * the list.
66 * Parameters:
67 * lst: Pointer to list control structure of list whose head
68 * element is to be removed
69 * Returns:
70 * Pointer to element that was at the head of the list (success)
71 * NULL No elements in list
72 * Requires:
73 * - LST initialized.
74 * - lst != NULL.
75 * Ensures:
76 * Notes:
77 * Because the tail of the list points forward (its "next" pointer) to
78 * the head of the list, and the head of the list points backward (its
79 * "prev" pointer) to the tail of the list, this list is circular.
80 */
81static inline struct list_head *lst_get_head(struct lst_list *lst)
82{
83 struct list_head *elem_list;
84
85 if (!lst || list_empty(&lst->head))
86 return NULL;
87
88 elem_list = lst->head.next;
89 lst->head.next = elem_list->next;
90 elem_list->next->prev = &lst->head;
91
92 return elem_list;
93}
94
95/*
96 * ======== lst_init_elem ========
97 * Purpose:
98 * Initializes a list element to default (cleared) values
99 * Details:
100 * Parameters:
101 * elem_list: Pointer to list element to be reset
102 * Returns:
103 * Requires:
104 * LST initialized.
105 * Ensures:
106 * Notes:
107 * This function must not be called to "reset" an element in the middle
108 * of a list chain -- that would break the chain.
109 *
110 */
111static inline void lst_init_elem(struct list_head *elem_list)
112{
113 if (elem_list) {
114 elem_list->next = NULL;
115 elem_list->prev = NULL;
116 }
117}
118
119/*
120 * ======== lst_insert_before ========
121 * Purpose:
122 * Insert the element before the existing element.
123 * Parameters:
124 * lst: Pointer to list control structure.
125 * elem_list: Pointer to element in list to insert.
126 * elem_existing: Pointer to existing list element.
127 * Returns:
128 * Requires:
129 * - LST initialized.
130 * - lst != NULL.
131 * - elem_list != NULL.
132 * - elem_existing != NULL.
133 * Ensures:
134 */
135static inline void lst_insert_before(struct lst_list *lst,
136 struct list_head *elem_list,
137 struct list_head *elem_existing)
138{
139 if (lst && elem_list && elem_existing)
140 list_add_tail(elem_list, elem_existing);
141}
142
143/*
144 * ======== lst_next ========
145 * Purpose:
146 * Returns a pointer to the next element of the list, or NULL if the next
147 * element is the head of the list or the list is empty.
148 * Parameters:
149 * lst: Pointer to list control structure.
150 * cur_elem: Pointer to element in list to remove.
151 * Returns:
152 * Pointer to list element, or NULL.
153 * Requires:
154 * - LST initialized.
155 * - lst != NULL.
156 * - cur_elem != NULL.
157 * Ensures:
158 */
159static inline struct list_head *lst_next(struct lst_list *lst,
160 struct list_head *cur_elem)
161{
162 if (lst && !list_empty(&lst->head) && cur_elem &&
163 (cur_elem->next != &lst->head))
164 return cur_elem->next;
165 return NULL;
166}
167
168/*
169 * ======== lst_put_tail ========
170 * Purpose:
171 * Adds the specified element to the tail of the list
172 * Details:
173 * Sets new element's "prev" pointer to the address previously held by
174 * the head element's prev pointer. This is the previous tail member of
175 * the list.
176 * Sets the new head's prev pointer to the address of the element.
177 * Sets next pointer of the previous tail member of the list to point to
178 * the new element (rather than the head, which it had been pointing at).
179 * Sets new element's next pointer to the address of the head element.
180 * Sets head's prev pointer to the address of the new element.
181 * Parameters:
182 * lst: Pointer to list control structure to which *elem_list will be
183 * added
184 * elem_list: Pointer to list element to be added
185 * Returns:
186 * Void
187 * Requires:
188 * *elem_list and *lst must both exist.
189 * LST initialized.
190 * Ensures:
191 * Notes:
192 * Because the tail is always "just before" the head of the list (the
193 * tail's "next" pointer points at the head of the list, and the head's
194 * "prev" pointer points at the tail of the list), the list is circular.
195 */
196static inline void lst_put_tail(struct lst_list *lst,
197 struct list_head *elem_list)
198{
199 if (lst && elem_list)
200 list_add_tail(elem_list, &lst->head);
201}
202
203/*
204 * ======== lst_remove_elem ========
205 * Purpose:
206 * Removes (unlinks) the given element from the list, if the list is not
207 * empty. Does not free the list element.
208 * Parameters:
209 * lst: Pointer to list control structure.
210 * cur_elem: Pointer to element in list to remove.
211 * Returns:
212 * Requires:
213 * - LST initialized.
214 * - lst != NULL.
215 * - cur_elem != NULL.
216 * Ensures:
217 */
218static inline void lst_remove_elem(struct lst_list *lst,
219 struct list_head *cur_elem)
220{
221 if (lst && !list_empty(&lst->head) && cur_elem)
222 list_del_init(cur_elem);
223}
224
225#endif /* LIST_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h b/drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h
index 5d165cd932f..7424c888d63 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h
@@ -110,13 +110,7 @@
110#ifndef _MBX_SH_H 110#ifndef _MBX_SH_H
111#define _MBX_SH_H 111#define _MBX_SH_H
112 112
113#define MBX_CLASS_MSK 0xFC00 /* Class bits are 10 thru 15 */
114#define MBX_VALUE_MSK 0x03FF /* Value is 0 thru 9 */
115
116#define MBX_DEH_CLASS 0x0000 /* DEH owns Mbx INTR */
117#define MBX_DDMA_CLASS 0x0400 /* DSP-DMA link drvr chnls owns INTR */
118#define MBX_PCPY_CLASS 0x0800 /* PROC-COPY " */ 113#define MBX_PCPY_CLASS 0x0800 /* PROC-COPY " */
119#define MBX_ZCPY_CLASS 0x1000 /* ZERO-COPY " */
120#define MBX_PM_CLASS 0x2000 /* Power Management */ 114#define MBX_PM_CLASS 0x2000 /* Power Management */
121#define MBX_DBG_CLASS 0x4000 /* For debugging purpose */ 115#define MBX_DBG_CLASS 0x4000 /* For debugging purpose */
122 116
@@ -128,55 +122,21 @@
128#define MBX_DEH_USERS_BASE 0x100 /* 256 */ 122#define MBX_DEH_USERS_BASE 0x100 /* 256 */
129#define MBX_DEH_LIMIT 0x3FF /* 1023 */ 123#define MBX_DEH_LIMIT 0x3FF /* 1023 */
130#define MBX_DEH_RESET 0x101 /* DSP RESET (DEH) */ 124#define MBX_DEH_RESET 0x101 /* DSP RESET (DEH) */
131#define MBX_DEH_EMMU 0X103 /*DSP MMU FAULT RECOVERY */
132 125
133/* 126/*
134 * Link driver command/status codes. 127 * Link driver command/status codes.
135 */ 128 */
136/* DSP-DMA */
137#define MBX_DDMA_NUMCHNLBITS 5 /* # chnl Id: # bits available */
138#define MBX_DDMA_CHNLSHIFT 0 /* # of bits to shift */
139#define MBX_DDMA_CHNLMSK 0x01F /* bits 0 thru 4 */
140
141#define MBX_DDMA_NUMBUFBITS 5 /* buffer index: # of bits avail */
142#define MBX_DDMA_BUFSHIFT (MBX_DDMA_NUMCHNLBITS + MBX_DDMA_CHNLSHIFT)
143#define MBX_DDMA_BUFMSK 0x3E0 /* bits 5 thru 9 */
144
145/* Zero-Copy */
146#define MBX_ZCPY_NUMCHNLBITS 5 /* # chnl Id: # bits available */
147#define MBX_ZCPY_CHNLSHIFT 0 /* # of bits to shift */
148#define MBX_ZCPY_CHNLMSK 0x01F /* bits 0 thru 4 */
149 129
150/* Power Management Commands */ 130/* Power Management Commands */
151#define MBX_PM_DSPIDLE (MBX_PM_CLASS + 0x0) 131#define MBX_PM_DSPIDLE (MBX_PM_CLASS + 0x0)
152#define MBX_PM_DSPWAKEUP (MBX_PM_CLASS + 0x1) 132#define MBX_PM_DSPWAKEUP (MBX_PM_CLASS + 0x1)
153#define MBX_PM_EMERGENCYSLEEP (MBX_PM_CLASS + 0x2) 133#define MBX_PM_EMERGENCYSLEEP (MBX_PM_CLASS + 0x2)
154#define MBX_PM_SLEEPUNTILRESTART (MBX_PM_CLASS + 0x3)
155#define MBX_PM_DSPGLOBALIDLE_OFF (MBX_PM_CLASS + 0x4)
156#define MBX_PM_DSPGLOBALIDLE_ON (MBX_PM_CLASS + 0x5)
157#define MBX_PM_SETPOINT_PRENOTIFY (MBX_PM_CLASS + 0x6) 134#define MBX_PM_SETPOINT_PRENOTIFY (MBX_PM_CLASS + 0x6)
158#define MBX_PM_SETPOINT_POSTNOTIFY (MBX_PM_CLASS + 0x7) 135#define MBX_PM_SETPOINT_POSTNOTIFY (MBX_PM_CLASS + 0x7)
159#define MBX_PM_DSPRETN (MBX_PM_CLASS + 0x8)
160#define MBX_PM_DSPRETENTION (MBX_PM_CLASS + 0x8) 136#define MBX_PM_DSPRETENTION (MBX_PM_CLASS + 0x8)
161#define MBX_PM_DSPHIBERNATE (MBX_PM_CLASS + 0x9) 137#define MBX_PM_DSPHIBERNATE (MBX_PM_CLASS + 0x9)
162#define MBX_PM_HIBERNATE_EN (MBX_PM_CLASS + 0xA) 138#define MBX_PM_HIBERNATE_EN (MBX_PM_CLASS + 0xA)
163#define MBX_PM_OPP_REQ (MBX_PM_CLASS + 0xB) 139#define MBX_PM_OPP_REQ (MBX_PM_CLASS + 0xB)
164#define MBX_PM_OPP_CHG (MBX_PM_CLASS + 0xC)
165
166#define MBX_PM_TYPE_MASK 0x0300
167#define MBX_PM_TYPE_PWR_CHNG 0x0100
168#define MBX_PM_TYPE_OPP_PRECHNG 0x0200
169#define MBX_PM_TYPE_OPP_POSTCHNG 0x0300
170#define MBX_PM_TYPE_OPP_MASK 0x0300
171#define MBX_PM_OPP_PRECHNG (MBX_PM_CLASS | MBX_PM_TYPE_OPP_PRECHNG)
172/* DSP to MPU */
173#define MBX_PM_OPP_CHNG(OPP) (MBX_PM_CLASS | MBX_PM_TYPE_OPP_PRECHNG | (OPP))
174#define MBX_PM_RET (MBX_PM_CLASS | MBX_PM_TYPE_PWR_CHNG | 0x0006)
175#define MBX_PM_HIB (MBX_PM_CLASS | MBX_PM_TYPE_PWR_CHNG | 0x0002)
176#define MBX_PM_OPP1 0
177#define MBX_PM_OPP2 1
178#define MBX_PM_OPP3 2
179#define MBX_PM_OPP4 3
180 140
181/* Bridge Debug Commands */ 141/* Bridge Debug Commands */
182#define MBX_DBG_SYSPRINTF (MBX_DBG_CLASS + 0x0) 142#define MBX_DBG_SYSPRINTF (MBX_DBG_CLASS + 0x0)
diff --git a/drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h b/drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h
index bca4e103c7f..3a4e337c040 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h
@@ -28,8 +28,8 @@
28struct mgr_object; 28struct mgr_object;
29 29
30struct mgr_tlbentry { 30struct mgr_tlbentry {
31 u32 ul_dsp_virt; /* DSP virtual address */ 31 u32 dsp_virt; /* DSP virtual address */
32 u32 ul_gpp_phys; /* GPP physical address */ 32 u32 gpp_phys; /* GPP physical address */
33}; 33};
34 34
35/* 35/*
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h b/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
index c85d3da3fe2..ee3a85f08fc 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h
@@ -82,10 +82,10 @@ typedef u32(*nldr_writefxn) (void *priv_ref,
82 * Attributes passed to nldr_create function. 82 * Attributes passed to nldr_create function.
83 */ 83 */
84struct nldr_attrs { 84struct nldr_attrs {
85 nldr_ovlyfxn pfn_ovly; 85 nldr_ovlyfxn ovly;
86 nldr_writefxn pfn_write; 86 nldr_writefxn write;
87 u16 us_dsp_word_size; 87 u16 dsp_word_size;
88 u16 us_dsp_mau_size; 88 u16 dsp_mau_size;
89}; 89};
90 90
91/* 91/*
@@ -280,14 +280,14 @@ typedef int(*nldr_unloadfxn) (struct nldr_nodeobject *nldr_node_obj,
280 * ======== node_ldr_fxns ======== 280 * ======== node_ldr_fxns ========
281 */ 281 */
282struct node_ldr_fxns { 282struct node_ldr_fxns {
283 nldr_allocatefxn pfn_allocate; 283 nldr_allocatefxn allocate;
284 nldr_createfxn pfn_create; 284 nldr_createfxn create;
285 nldr_deletefxn pfn_delete; 285 nldr_deletefxn delete;
286 nldr_exitfxn pfn_exit; 286 nldr_exitfxn exit;
287 nldr_getfxnaddrfxn pfn_get_fxn_addr; 287 nldr_getfxnaddrfxn get_fxn_addr;
288 nldr_initfxn pfn_init; 288 nldr_initfxn init;
289 nldr_loadfxn pfn_load; 289 nldr_loadfxn load;
290 nldr_unloadfxn pfn_unload; 290 nldr_unloadfxn unload;
291}; 291};
292 292
293#endif /* NLDRDEFS_ */ 293#endif /* NLDRDEFS_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/node.h b/drivers/staging/tidspbridge/include/dspbridge/node.h
index 49ed5c1128e..63739c8ffe0 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/node.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/node.h
@@ -22,7 +22,7 @@
22#include <dspbridge/procpriv.h> 22#include <dspbridge/procpriv.h>
23 23
24#include <dspbridge/nodedefs.h> 24#include <dspbridge/nodedefs.h>
25#include <dspbridge/dispdefs.h> 25#include <dspbridge/disp.h>
26#include <dspbridge/nldrdefs.h> 26#include <dspbridge/nldrdefs.h>
27#include <dspbridge/drv.h> 27#include <dspbridge/drv.h>
28 28
@@ -113,24 +113,6 @@ extern int node_alloc_msg_buf(struct node_object *hnode,
113extern int node_change_priority(struct node_object *hnode, s32 prio); 113extern int node_change_priority(struct node_object *hnode, s32 prio);
114 114
115/* 115/*
116 * ======== node_close_orphans ========
117 * Purpose:
118 * Delete all nodes whose owning processor is being destroyed.
119 * Parameters:
120 * hnode_mgr: Node manager object.
121 * proc: Handle to processor object being destroyed.
122 * Returns:
123 * 0: Success.
124 * -EPERM: Unable to delete all nodes belonging to proc.
125 * Requires:
126 * Valid hnode_mgr.
127 * proc != NULL.
128 * Ensures:
129 */
130extern int node_close_orphans(struct node_mgr *hnode_mgr,
131 struct proc_object *proc);
132
133/*
134 * ======== node_connect ======== 116 * ======== node_connect ========
135 * Purpose: 117 * Purpose:
136 * Connect two nodes on the DSP, or a node on the DSP to the GPP. In the 118 * Connect two nodes on the DSP, or a node on the DSP to the GPP. In the
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h b/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
index 16b0233fc5d..9c1e06758c8 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h
@@ -43,7 +43,7 @@ struct node_strmdef {
43 u32 buf_size; /* Size of buffers for SIO stream */ 43 u32 buf_size; /* Size of buffers for SIO stream */
44 u32 num_bufs; /* max # of buffers in SIO stream at once */ 44 u32 num_bufs; /* max # of buffers in SIO stream at once */
45 u32 seg_id; /* Memory segment id to allocate buffers */ 45 u32 seg_id; /* Memory segment id to allocate buffers */
46 u32 utimeout; /* Timeout for blocking SIO calls */ 46 u32 timeout; /* Timeout for blocking SIO calls */
47 u32 buf_alignment; /* Buffer alignment */ 47 u32 buf_alignment; /* Buffer alignment */
48 char *sz_device; /* Device name for stream */ 48 char *sz_device; /* Device name for stream */
49}; 49};
@@ -55,14 +55,14 @@ struct node_taskargs {
55 u32 stack_size; 55 u32 stack_size;
56 u32 sys_stack_size; 56 u32 sys_stack_size;
57 u32 stack_seg; 57 u32 stack_seg;
58 u32 udsp_heap_res_addr; /* DSP virtual heap address */ 58 u32 dsp_heap_res_addr; /* DSP virtual heap address */
59 u32 udsp_heap_addr; /* DSP virtual heap address */ 59 u32 dsp_heap_addr; /* DSP virtual heap address */
60 u32 heap_size; /* Heap size */ 60 u32 heap_size; /* Heap size */
61 u32 ugpp_heap_addr; /* GPP virtual heap address */ 61 u32 gpp_heap_addr; /* GPP virtual heap address */
62 u32 profile_id; /* Profile ID */ 62 u32 profile_id; /* Profile ID */
63 u32 num_inputs; 63 u32 num_inputs;
64 u32 num_outputs; 64 u32 num_outputs;
65 u32 ul_dais_arg; /* Address of iAlg object */ 65 u32 dais_arg; /* Address of iAlg object */
66 struct node_strmdef *strm_in_def; 66 struct node_strmdef *strm_in_def;
67 struct node_strmdef *strm_out_def; 67 struct node_strmdef *strm_out_def;
68}; 68};
diff --git a/drivers/staging/tidspbridge/include/dspbridge/pwr.h b/drivers/staging/tidspbridge/include/dspbridge/pwr.h
index a6dc783904e..5e3ab2123aa 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/pwr.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/pwr.h
@@ -18,7 +18,13 @@
18#define PWR_ 18#define PWR_
19 19
20#include <dspbridge/dbdefs.h> 20#include <dspbridge/dbdefs.h>
21#include <dspbridge/pwr_sh.h> 21#include <dspbridge/mbx_sh.h>
22
23/* valid sleep command codes that can be sent by GPP via mailbox: */
24#define PWR_DEEPSLEEP MBX_PM_DSPIDLE
25#define PWR_EMERGENCYDEEPSLEEP MBX_PM_EMERGENCYSLEEP
26#define PWR_WAKEUP MBX_PM_DSPWAKEUP
27
22 28
23/* 29/*
24 * ======== pwr_sleep_dsp ======== 30 * ======== pwr_sleep_dsp ========
diff --git a/drivers/staging/tidspbridge/include/dspbridge/pwr_sh.h b/drivers/staging/tidspbridge/include/dspbridge/pwr_sh.h
deleted file mode 100644
index 1b4a090abe7..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/pwr_sh.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * pwr_sh.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Power Manager shared definitions (used on both GPP and DSP sides).
7 *
8 * Copyright (C) 2008 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef PWR_SH_
20#define PWR_SH_
21
22#include <dspbridge/mbx_sh.h>
23
24/* valid sleep command codes that can be sent by GPP via mailbox: */
25#define PWR_DEEPSLEEP MBX_PM_DSPIDLE
26#define PWR_EMERGENCYDEEPSLEEP MBX_PM_EMERGENCYSLEEP
27#define PWR_SLEEPUNTILRESTART MBX_PM_SLEEPUNTILRESTART
28#define PWR_WAKEUP MBX_PM_DSPWAKEUP
29#define PWR_AUTOENABLE MBX_PM_PWRENABLE
30#define PWR_AUTODISABLE MBX_PM_PWRDISABLE
31#define PWR_RETENTION MBX_PM_DSPRETN
32
33#endif /* PWR_SH_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h b/drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h
index dfaf0c6c06f..8c9c902a043 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h
@@ -17,23 +17,12 @@
17#include <dspbridge/nodepriv.h> 17#include <dspbridge/nodepriv.h>
18#include <dspbridge/drv.h> 18#include <dspbridge/drv.h>
19 19
20extern int drv_get_proc_ctxt_list(struct process_context **pctxt,
21 struct drv_object *hdrv_obj);
22
23extern int drv_insert_proc_context(struct drv_object *driver_obj,
24 void *process_ctxt);
25
26extern int drv_remove_all_dmm_res_elements(void *process_ctxt); 20extern int drv_remove_all_dmm_res_elements(void *process_ctxt);
27 21
28extern int drv_remove_all_node_res_elements(void *process_ctxt); 22extern int drv_remove_all_node_res_elements(void *process_ctxt);
29 23
30extern int drv_proc_set_pid(void *ctxt, s32 process);
31
32extern int drv_remove_all_resources(void *process_ctxt); 24extern int drv_remove_all_resources(void *process_ctxt);
33 25
34extern int drv_remove_proc_context(struct drv_object *driver_obj,
35 void *pr_ctxt);
36
37extern int drv_insert_node_res_element(void *hnode, void *node_resource, 26extern int drv_insert_node_res_element(void *hnode, void *node_resource,
38 void *process_ctxt); 27 void *process_ctxt);
39 28
diff --git a/drivers/staging/tidspbridge/include/dspbridge/rms_sh.h b/drivers/staging/tidspbridge/include/dspbridge/rms_sh.h
index 7bc5574342a..ba7f4784567 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/rms_sh.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/rms_sh.h
@@ -22,27 +22,18 @@
22 22
23#include <dspbridge/rmstypes.h> 23#include <dspbridge/rmstypes.h>
24 24
25/* Node Types: */
26#define RMS_TASK 1 /* Task node */
27#define RMS_DAIS 2 /* xDAIS socket node */
28#define RMS_MSG 3 /* Message node */
29
30/* Memory Types: */ 25/* Memory Types: */
31#define RMS_CODE 0 /* Program space */ 26#define RMS_CODE 0 /* Program space */
32#define RMS_DATA 1 /* Data space */ 27#define RMS_DATA 1 /* Data space */
33#define RMS_IO 2 /* I/O space */
34 28
35/* RM Server Command and Response Buffer Sizes: */ 29/* RM Server Command and Response Buffer Sizes: */
36#define RMS_COMMANDBUFSIZE 256 /* Size of command buffer */ 30#define RMS_COMMANDBUFSIZE 256 /* Size of command buffer */
37#define RMS_RESPONSEBUFSIZE 16 /* Size of response buffer */
38 31
39/* Pre-Defined Command/Response Codes: */ 32/* Pre-Defined Command/Response Codes: */
40#define RMS_EXIT 0x80000000 /* GPP->Node: shutdown */ 33#define RMS_EXIT 0x80000000 /* GPP->Node: shutdown */
41#define RMS_EXITACK 0x40000000 /* Node->GPP: ack shutdown */ 34#define RMS_EXITACK 0x40000000 /* Node->GPP: ack shutdown */
42#define RMS_BUFDESC 0x20000000 /* Arg1 SM buf, Arg2 SM size */ 35#define RMS_BUFDESC 0x20000000 /* Arg1 SM buf, Arg2 SM size */
43#define RMS_KILLTASK 0x10000000 /* GPP->Node: Kill Task */ 36#define RMS_KILLTASK 0x10000000 /* GPP->Node: Kill Task */
44#define RMS_USER 0x0 /* Start of user-defined msg codes */
45#define RMS_MAXUSERCODES 0xfff /* Maximum user defined C/R Codes */
46 37
47/* RM Server RPC Command Structure: */ 38/* RM Server RPC Command Structure: */
48struct rms_command { 39struct rms_command {
diff --git a/drivers/staging/tidspbridge/include/dspbridge/strm.h b/drivers/staging/tidspbridge/include/dspbridge/strm.h
index 3e4671e7f91..613fe53dd23 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/strm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/strm.h
@@ -142,25 +142,6 @@ extern int strm_free_buffer(struct strm_res_object *strmres,
142 struct process_context *pr_ctxt); 142 struct process_context *pr_ctxt);
143 143
144/* 144/*
145 * ======== strm_get_event_handle ========
146 * Purpose:
147 * Get stream's user event handle. This function is used when closing
148 * a stream, so the event can be closed.
149 * Parameter:
150 * stream_obj: Stream handle returned from strm_open().
151 * ph_event: Location to store event handle on output.
152 * Returns:
153 * 0: Success.
154 * -EFAULT: Invalid stream_obj.
155 * Requires:
156 * strm_init(void) called.
157 * ph_event != NULL.
158 * Ensures:
159 */
160extern int strm_get_event_handle(struct strm_object *stream_obj,
161 void **ph_event);
162
163/*
164 * ======== strm_get_info ======== 145 * ======== strm_get_info ========
165 * Purpose: 146 * Purpose:
166 * Get information about a stream. User's dsp_streaminfo is contained 147 * Get information about a stream. User's dsp_streaminfo is contained
@@ -276,27 +257,6 @@ extern int strm_open(struct node_object *hnode, u32 dir,
276 struct process_context *pr_ctxt); 257 struct process_context *pr_ctxt);
277 258
278/* 259/*
279 * ======== strm_prepare_buffer ========
280 * Purpose:
281 * Prepare a data buffer not allocated by DSPStream_AllocateBuffers()
282 * for use with a stream.
283 * Parameter:
284 * stream_obj: Stream handle returned from strm_open().
285 * usize: Size (GPP bytes) of the buffer.
286 * pbuffer: Buffer address.
287 * Returns:
288 * 0: Success.
289 * -EFAULT: Invalid stream_obj.
290 * -EPERM: Failure occurred, unable to prepare buffer.
291 * Requires:
292 * strm_init(void) called.
293 * pbuffer != NULL.
294 * Ensures:
295 */
296extern int strm_prepare_buffer(struct strm_object *stream_obj,
297 u32 usize, u8 *pbuffer);
298
299/*
300 * ======== strm_reclaim ======== 260 * ======== strm_reclaim ========
301 * Purpose: 261 * Purpose:
302 * Request a buffer back from a stream. 262 * Request a buffer back from a stream.
@@ -379,26 +339,4 @@ extern int strm_register_notify(struct strm_object *stream_obj,
379extern int strm_select(struct strm_object **strm_tab, 339extern int strm_select(struct strm_object **strm_tab,
380 u32 strms, u32 *pmask, u32 utimeout); 340 u32 strms, u32 *pmask, u32 utimeout);
381 341
382/*
383 * ======== strm_unprepare_buffer ========
384 * Purpose:
385 * Unprepare a data buffer that was previously prepared for a stream
386 * with DSPStream_PrepareBuffer(), and that will no longer be used with
387 * the stream.
388 * Parameter:
389 * stream_obj: Stream handle returned from strm_open().
390 * usize: Size (GPP bytes) of the buffer.
391 * pbuffer: Buffer address.
392 * Returns:
393 * 0: Success.
394 * -EFAULT: Invalid stream_obj.
395 * -EPERM: Failure occurred, unable to unprepare buffer.
396 * Requires:
397 * strm_init(void) called.
398 * pbuffer != NULL.
399 * Ensures:
400 */
401extern int strm_unprepare_buffer(struct strm_object *stream_obj,
402 u32 usize, u8 *pbuffer);
403
404#endif /* STRM_ */ 342#endif /* STRM_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/strmdefs.h b/drivers/staging/tidspbridge/include/dspbridge/strmdefs.h
index b363f794de3..4f90e6ba69e 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/strmdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/strmdefs.h
@@ -19,18 +19,16 @@
19#ifndef STRMDEFS_ 19#ifndef STRMDEFS_
20#define STRMDEFS_ 20#define STRMDEFS_
21 21
22#define STRM_MAXEVTNAMELEN 32
23
24struct strm_mgr; 22struct strm_mgr;
25 23
26struct strm_object; 24struct strm_object;
27 25
28struct strm_attr { 26struct strm_attr {
29 void *user_event; 27 void *user_event;
30 char *pstr_event_name; 28 char *str_event_name;
31 void *virt_base; /* Process virtual base address of 29 void *virt_base; /* Process virtual base address of
32 * mapped SM */ 30 * mapped SM */
33 u32 ul_virt_size; /* Size of virtual space in bytes */ 31 u32 virt_size; /* Size of virtual space in bytes */
34 struct dsp_streamattrin *stream_attr_in; 32 struct dsp_streamattrin *stream_attr_in;
35}; 33};
36 34
diff --git a/drivers/staging/tidspbridge/include/dspbridge/sync.h b/drivers/staging/tidspbridge/include/dspbridge/sync.h
index e2651e7b1c4..b1e75eb8847 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/sync.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/sync.h
@@ -20,6 +20,7 @@
20#define _SYNC_H 20#define _SYNC_H
21 21
22#include <dspbridge/dbdefs.h> 22#include <dspbridge/dbdefs.h>
23#include <dspbridge/host_os.h>
23 24
24 25
25/* Special timeout value indicating an infinite wait: */ 26/* Special timeout value indicating an infinite wait: */
@@ -80,13 +81,22 @@ void sync_set_event(struct sync_object *event);
80 * This function will wait until @event is set or until timeout. In case of 81 * This function will wait until @event is set or until timeout. In case of
81 * success the function will return 0 and 82 * success the function will return 0 and
82 * in case of timeout the function will return -ETIME 83 * in case of timeout the function will return -ETIME
84 * in case of signal the function will return -ERESTARTSYS
83 */ 85 */
84 86
85static inline int sync_wait_on_event(struct sync_object *event, 87static inline int sync_wait_on_event(struct sync_object *event,
86 unsigned timeout) 88 unsigned timeout)
87{ 89{
88 return wait_for_completion_timeout(&event->comp, 90 int res;
89 msecs_to_jiffies(timeout)) ? 0 : -ETIME; 91
92 res = wait_for_completion_interruptible_timeout(&event->comp,
93 msecs_to_jiffies(timeout));
94 if (!res)
95 res = -ETIME;
96 else if (res > 0)
97 res = 0;
98
99 return res;
90} 100}
91 101
92/** 102/**
diff --git a/drivers/staging/tidspbridge/include/dspbridge/utildefs.h b/drivers/staging/tidspbridge/include/dspbridge/utildefs.h
deleted file mode 100644
index 8fe5414824c..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/utildefs.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * utildefs.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Global UTIL constants and types, shared between DSP API and DSPSYS.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef UTILDEFS_
20#define UTILDEFS_
21
22/* constants taken from configmg.h */
23#define UTIL_MAXMEMREGS 9
24#define UTIL_MAXIOPORTS 20
25#define UTIL_MAXIRQS 7
26#define UTIL_MAXDMACHNLS 7
27
28/* misc. constants */
29#define UTIL_MAXARGVS 10
30
31/* Platform specific important info */
32struct util_sysinfo {
33 /* Granularity of page protection; usually 1k or 4k */
34 u32 dw_page_size;
35 u32 dw_allocation_granularity; /* VM granularity, usually 64K */
36 u32 dw_number_of_processors; /* Used as sanity check */
37};
38
39#endif /* UTILDEFS_ */
diff --git a/drivers/staging/tidspbridge/pmgr/chnl.c b/drivers/staging/tidspbridge/pmgr/chnl.c
index 78b0d0f303d..245de82e2d6 100644
--- a/drivers/staging/tidspbridge/pmgr/chnl.c
+++ b/drivers/staging/tidspbridge/pmgr/chnl.c
@@ -87,7 +87,7 @@ int chnl_create(struct chnl_mgr **channel_mgr,
87 struct bridge_drv_interface *intf_fxns; 87 struct bridge_drv_interface *intf_fxns;
88 dev_get_intf_fxns(hdev_obj, &intf_fxns); 88 dev_get_intf_fxns(hdev_obj, &intf_fxns);
89 /* Let Bridge channel module finish the create: */ 89 /* Let Bridge channel module finish the create: */
90 status = (*intf_fxns->pfn_chnl_create) (&hchnl_mgr, hdev_obj, 90 status = (*intf_fxns->chnl_create) (&hchnl_mgr, hdev_obj,
91 mgr_attrts); 91 mgr_attrts);
92 if (!status) { 92 if (!status) {
93 /* Fill in DSP API channel module's fields of the 93 /* Fill in DSP API channel module's fields of the
@@ -120,7 +120,7 @@ int chnl_destroy(struct chnl_mgr *hchnl_mgr)
120 if (chnl_mgr_obj) { 120 if (chnl_mgr_obj) {
121 intf_fxns = chnl_mgr_obj->intf_fxns; 121 intf_fxns = chnl_mgr_obj->intf_fxns;
122 /* Let Bridge channel module destroy the chnl_mgr: */ 122 /* Let Bridge channel module destroy the chnl_mgr: */
123 status = (*intf_fxns->pfn_chnl_destroy) (hchnl_mgr); 123 status = (*intf_fxns->chnl_destroy) (hchnl_mgr);
124 } else { 124 } else {
125 status = -EFAULT; 125 status = -EFAULT;
126 } 126 }
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c
index 93a7c4fd57e..e6b2c8962f8 100644
--- a/drivers/staging/tidspbridge/pmgr/cmm.c
+++ b/drivers/staging/tidspbridge/pmgr/cmm.c
@@ -12,7 +12,7 @@
12 * describes a block of physically contiguous shared memory used for 12 * describes a block of physically contiguous shared memory used for
13 * future allocations by CMM. 13 * future allocations by CMM.
14 * 14 *
15 * Memory is coelesced back to the appropriate heap when a buffer is 15 * Memory is coalesced back to the appropriate heap when a buffer is
16 * freed. 16 * freed.
17 * 17 *
18 * Notes: 18 * Notes:
@@ -30,6 +30,7 @@
30 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 30 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
31 */ 31 */
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/list.h>
33 34
34/* ----------------------------------- DSP/BIOS Bridge */ 35/* ----------------------------------- DSP/BIOS Bridge */
35#include <dspbridge/dbdefs.h> 36#include <dspbridge/dbdefs.h>
@@ -38,9 +39,7 @@
38#include <dspbridge/dbc.h> 39#include <dspbridge/dbc.h>
39 40
40/* ----------------------------------- OS Adaptation Layer */ 41/* ----------------------------------- OS Adaptation Layer */
41#include <dspbridge/list.h>
42#include <dspbridge/sync.h> 42#include <dspbridge/sync.h>
43#include <dspbridge/utildefs.h>
44 43
45/* ----------------------------------- Platform Manager */ 44/* ----------------------------------- Platform Manager */
46#include <dspbridge/dev.h> 45#include <dspbridge/dev.h>
@@ -50,7 +49,7 @@
50#include <dspbridge/cmm.h> 49#include <dspbridge/cmm.h>
51 50
52/* ----------------------------------- Defines, Data Structures, Typedefs */ 51/* ----------------------------------- Defines, Data Structures, Typedefs */
53#define NEXT_PA(pnode) (pnode->dw_pa + pnode->ul_size) 52#define NEXT_PA(pnode) (pnode->pa + pnode->size)
54 53
55/* Other bus/platform translations */ 54/* Other bus/platform translations */
56#define DSPPA2GPPPA(base, x, y) ((x)+(y)) 55#define DSPPA2GPPPA(base, x, y) ((x)+(y))
@@ -64,32 +63,32 @@
64 */ 63 */
65struct cmm_allocator { /* sma */ 64struct cmm_allocator { /* sma */
66 unsigned int shm_base; /* Start of physical SM block */ 65 unsigned int shm_base; /* Start of physical SM block */
67 u32 ul_sm_size; /* Size of SM block in bytes */ 66 u32 sm_size; /* Size of SM block in bytes */
68 unsigned int dw_vm_base; /* Start of VM block. (Dev driver 67 unsigned int vm_base; /* Start of VM block. (Dev driver
69 * context for 'sma') */ 68 * context for 'sma') */
70 u32 dw_dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this 69 u32 dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this
71 * SM space */ 70 * SM space */
72 s8 c_factor; /* DSPPa to GPPPa Conversion Factor */ 71 s8 c_factor; /* DSPPa to GPPPa Conversion Factor */
73 unsigned int dw_dsp_base; /* DSP virt base byte address */ 72 unsigned int dsp_base; /* DSP virt base byte address */
74 u32 ul_dsp_size; /* DSP seg size in bytes */ 73 u32 dsp_size; /* DSP seg size in bytes */
75 struct cmm_object *hcmm_mgr; /* back ref to parent mgr */ 74 struct cmm_object *cmm_mgr; /* back ref to parent mgr */
76 /* node list of available memory */ 75 /* node list of available memory */
77 struct lst_list *free_list_head; 76 struct list_head free_list;
78 /* node list of memory in use */ 77 /* node list of memory in use */
79 struct lst_list *in_use_list_head; 78 struct list_head in_use_list;
80}; 79};
81 80
82struct cmm_xlator { /* Pa<->Va translator object */ 81struct cmm_xlator { /* Pa<->Va translator object */
83 /* CMM object this translator associated */ 82 /* CMM object this translator associated */
84 struct cmm_object *hcmm_mgr; 83 struct cmm_object *cmm_mgr;
85 /* 84 /*
86 * Client process virtual base address that corresponds to phys SM 85 * Client process virtual base address that corresponds to phys SM
87 * base address for translator's ul_seg_id. 86 * base address for translator's seg_id.
88 * Only 1 segment ID currently supported. 87 * Only 1 segment ID currently supported.
89 */ 88 */
90 unsigned int dw_virt_base; /* virtual base address */ 89 unsigned int virt_base; /* virtual base address */
91 u32 ul_virt_size; /* size of virt space in bytes */ 90 u32 virt_size; /* size of virt space in bytes */
92 u32 ul_seg_id; /* Segment Id */ 91 u32 seg_id; /* Segment Id */
93}; 92};
94 93
95/* CMM Mgr */ 94/* CMM Mgr */
@@ -98,40 +97,40 @@ struct cmm_object {
98 * Cmm Lock is used to serialize access mem manager for multi-threads. 97 * Cmm Lock is used to serialize access mem manager for multi-threads.
99 */ 98 */
100 struct mutex cmm_lock; /* Lock to access cmm mgr */ 99 struct mutex cmm_lock; /* Lock to access cmm mgr */
101 struct lst_list *node_free_list_head; /* Free list of memory nodes */ 100 struct list_head node_free_list; /* Free list of memory nodes */
102 u32 ul_min_block_size; /* Min SM block; default 16 bytes */ 101 u32 min_block_size; /* Min SM block; default 16 bytes */
103 u32 dw_page_size; /* Memory Page size (1k/4k) */ 102 u32 page_size; /* Memory Page size (1k/4k) */
104 /* GPP SM segment ptrs */ 103 /* GPP SM segment ptrs */
105 struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS]; 104 struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
106}; 105};
107 106
108/* Default CMM Mgr attributes */ 107/* Default CMM Mgr attributes */
109static struct cmm_mgrattrs cmm_dfltmgrattrs = { 108static struct cmm_mgrattrs cmm_dfltmgrattrs = {
110 /* ul_min_block_size, min block size(bytes) allocated by cmm mgr */ 109 /* min_block_size, min block size(bytes) allocated by cmm mgr */
111 16 110 16
112}; 111};
113 112
114/* Default allocation attributes */ 113/* Default allocation attributes */
115static struct cmm_attrs cmm_dfltalctattrs = { 114static struct cmm_attrs cmm_dfltalctattrs = {
116 1 /* ul_seg_id, default segment Id for allocator */ 115 1 /* seg_id, default segment Id for allocator */
117}; 116};
118 117
119/* Address translator default attrs */ 118/* Address translator default attrs */
120static struct cmm_xlatorattrs cmm_dfltxlatorattrs = { 119static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
121 /* ul_seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */ 120 /* seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
122 1, 121 1,
123 0, /* dw_dsp_bufs */ 122 0, /* dsp_bufs */
124 0, /* dw_dsp_buf_size */ 123 0, /* dsp_buf_size */
125 NULL, /* vm_base */ 124 NULL, /* vm_base */
126 0, /* dw_vm_size */ 125 0, /* vm_size */
127}; 126};
128 127
129/* SM node representing a block of memory. */ 128/* SM node representing a block of memory. */
130struct cmm_mnode { 129struct cmm_mnode {
131 struct list_head link; /* must be 1st element */ 130 struct list_head link; /* must be 1st element */
132 u32 dw_pa; /* Phys addr */ 131 u32 pa; /* Phys addr */
133 u32 dw_va; /* Virtual address in device process context */ 132 u32 va; /* Virtual address in device process context */
134 u32 ul_size; /* SM block size in bytes */ 133 u32 size; /* SM block size in bytes */
135 u32 client_proc; /* Process that allocated this mem block */ 134 u32 client_proc; /* Process that allocated this mem block */
136}; 135};
137 136
@@ -181,32 +180,32 @@ void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
181 *pp_buf_va = NULL; 180 *pp_buf_va = NULL;
182 181
183 if (cmm_mgr_obj && (usize != 0)) { 182 if (cmm_mgr_obj && (usize != 0)) {
184 if (pattrs->ul_seg_id > 0) { 183 if (pattrs->seg_id > 0) {
185 /* SegId > 0 is SM */ 184 /* SegId > 0 is SM */
186 /* get the allocator object for this segment id */ 185 /* get the allocator object for this segment id */
187 allocator = 186 allocator =
188 get_allocator(cmm_mgr_obj, pattrs->ul_seg_id); 187 get_allocator(cmm_mgr_obj, pattrs->seg_id);
189 /* keep block size a multiple of ul_min_block_size */ 188 /* keep block size a multiple of min_block_size */
190 usize = 189 usize =
191 ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size - 190 ((usize - 1) & ~(cmm_mgr_obj->min_block_size -
192 1)) 191 1))
193 + cmm_mgr_obj->ul_min_block_size; 192 + cmm_mgr_obj->min_block_size;
194 mutex_lock(&cmm_mgr_obj->cmm_lock); 193 mutex_lock(&cmm_mgr_obj->cmm_lock);
195 pnode = get_free_block(allocator, usize); 194 pnode = get_free_block(allocator, usize);
196 } 195 }
197 if (pnode) { 196 if (pnode) {
198 delta_size = (pnode->ul_size - usize); 197 delta_size = (pnode->size - usize);
199 if (delta_size >= cmm_mgr_obj->ul_min_block_size) { 198 if (delta_size >= cmm_mgr_obj->min_block_size) {
200 /* create a new block with the leftovers and 199 /* create a new block with the leftovers and
201 * add to freelist */ 200 * add to freelist */
202 new_node = 201 new_node =
203 get_node(cmm_mgr_obj, pnode->dw_pa + usize, 202 get_node(cmm_mgr_obj, pnode->pa + usize,
204 pnode->dw_va + usize, 203 pnode->va + usize,
205 (u32) delta_size); 204 (u32) delta_size);
206 /* leftovers go free */ 205 /* leftovers go free */
207 add_to_free_list(allocator, new_node); 206 add_to_free_list(allocator, new_node);
208 /* adjust our node's size */ 207 /* adjust our node's size */
209 pnode->ul_size = usize; 208 pnode->size = usize;
210 } 209 }
211 /* Tag node with client process requesting allocation 210 /* Tag node with client process requesting allocation
212 * We'll need to free up a process's alloc'd SM if the 211 * We'll need to free up a process's alloc'd SM if the
@@ -216,17 +215,16 @@ void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
216 pnode->client_proc = current->tgid; 215 pnode->client_proc = current->tgid;
217 216
218 /* put our node on InUse list */ 217 /* put our node on InUse list */
219 lst_put_tail(allocator->in_use_list_head, 218 list_add_tail(&pnode->link, &allocator->in_use_list);
220 (struct list_head *)pnode); 219 buf_pa = (void *)pnode->pa; /* physical address */
221 buf_pa = (void *)pnode->dw_pa; /* physical address */
222 /* clear mem */ 220 /* clear mem */
223 pbyte = (u8 *) pnode->dw_va; 221 pbyte = (u8 *) pnode->va;
224 for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++) 222 for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
225 *pbyte = 0; 223 *pbyte = 0;
226 224
227 if (pp_buf_va != NULL) { 225 if (pp_buf_va != NULL) {
228 /* Virtual address */ 226 /* Virtual address */
229 *pp_buf_va = (void *)pnode->dw_va; 227 *pp_buf_va = (void *)pnode->va;
230 } 228 }
231 } 229 }
232 mutex_unlock(&cmm_mgr_obj->cmm_lock); 230 mutex_unlock(&cmm_mgr_obj->cmm_lock);
@@ -245,7 +243,6 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
245{ 243{
246 struct cmm_object *cmm_obj = NULL; 244 struct cmm_object *cmm_obj = NULL;
247 int status = 0; 245 int status = 0;
248 struct util_sysinfo sys_info;
249 246
250 DBC_REQUIRE(refs > 0); 247 DBC_REQUIRE(refs > 0);
251 DBC_REQUIRE(ph_cmm_mgr != NULL); 248 DBC_REQUIRE(ph_cmm_mgr != NULL);
@@ -253,40 +250,23 @@ int cmm_create(struct cmm_object **ph_cmm_mgr,
253 *ph_cmm_mgr = NULL; 250 *ph_cmm_mgr = NULL;
254 /* create, zero, and tag a cmm mgr object */ 251 /* create, zero, and tag a cmm mgr object */
255 cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL); 252 cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
256 if (cmm_obj != NULL) { 253 if (!cmm_obj)
257 if (mgr_attrts == NULL) 254 return -ENOMEM;
258 mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */ 255
259 256 if (mgr_attrts == NULL)
260 /* 4 bytes minimum */ 257 mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */
261 DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4); 258
262 /* save away smallest block allocation for this cmm mgr */ 259 /* 4 bytes minimum */
263 cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size; 260 DBC_ASSERT(mgr_attrts->min_block_size >= 4);
264 /* save away the systems memory page size */ 261 /* save away smallest block allocation for this cmm mgr */
265 sys_info.dw_page_size = PAGE_SIZE; 262 cmm_obj->min_block_size = mgr_attrts->min_block_size;
266 sys_info.dw_allocation_granularity = PAGE_SIZE; 263 cmm_obj->page_size = PAGE_SIZE;
267 sys_info.dw_number_of_processors = 1; 264
268 265 /* create node free list */
269 cmm_obj->dw_page_size = sys_info.dw_page_size; 266 INIT_LIST_HEAD(&cmm_obj->node_free_list);
270 267 mutex_init(&cmm_obj->cmm_lock);
271 /* Note: DSP SM seg table(aDSPSMSegTab[]) zero'd by 268 *ph_cmm_mgr = cmm_obj;
272 * MEM_ALLOC_OBJECT */ 269
273
274 /* create node free list */
275 cmm_obj->node_free_list_head =
276 kzalloc(sizeof(struct lst_list),
277 GFP_KERNEL);
278 if (cmm_obj->node_free_list_head == NULL) {
279 status = -ENOMEM;
280 cmm_destroy(cmm_obj, true);
281 } else {
282 INIT_LIST_HEAD(&cmm_obj->
283 node_free_list_head->head);
284 mutex_init(&cmm_obj->cmm_lock);
285 *ph_cmm_mgr = cmm_obj;
286 }
287 } else {
288 status = -ENOMEM;
289 }
290 return status; 270 return status;
291} 271}
292 272
@@ -301,7 +281,7 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
301 struct cmm_info temp_info; 281 struct cmm_info temp_info;
302 int status = 0; 282 int status = 0;
303 s32 slot_seg; 283 s32 slot_seg;
304 struct cmm_mnode *pnode; 284 struct cmm_mnode *node, *tmp;
305 285
306 DBC_REQUIRE(refs > 0); 286 DBC_REQUIRE(refs > 0);
307 if (!hcmm_mgr) { 287 if (!hcmm_mgr) {
@@ -314,7 +294,7 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
314 /* Check for outstanding memory allocations */ 294 /* Check for outstanding memory allocations */
315 status = cmm_get_info(hcmm_mgr, &temp_info); 295 status = cmm_get_info(hcmm_mgr, &temp_info);
316 if (!status) { 296 if (!status) {
317 if (temp_info.ul_total_in_use_cnt > 0) { 297 if (temp_info.total_in_use_cnt > 0) {
318 /* outstanding allocations */ 298 /* outstanding allocations */
319 status = -EPERM; 299 status = -EPERM;
320 } 300 }
@@ -331,15 +311,10 @@ int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
331 } 311 }
332 } 312 }
333 } 313 }
334 if (cmm_mgr_obj->node_free_list_head != NULL) { 314 list_for_each_entry_safe(node, tmp, &cmm_mgr_obj->node_free_list,
335 /* Free the free nodes */ 315 link) {
336 while (!LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) { 316 list_del(&node->link);
337 pnode = (struct cmm_mnode *) 317 kfree(node);
338 lst_get_head(cmm_mgr_obj->node_free_list_head);
339 kfree(pnode);
340 }
341 /* delete NodeFreeList list */
342 kfree(cmm_mgr_obj->node_free_list_head);
343 } 318 }
344 mutex_unlock(&cmm_mgr_obj->cmm_lock); 319 mutex_unlock(&cmm_mgr_obj->cmm_lock);
345 if (!status) { 320 if (!status) {
@@ -368,13 +343,12 @@ void cmm_exit(void)
368 * Purpose: 343 * Purpose:
369 * Free the given buffer. 344 * Free the given buffer.
370 */ 345 */
371int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, 346int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
372 u32 ul_seg_id)
373{ 347{
374 struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; 348 struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
375 int status = -EFAULT; 349 int status = -EFAULT;
376 struct cmm_mnode *mnode_obj = NULL; 350 struct cmm_mnode *curr, *tmp;
377 struct cmm_allocator *allocator = NULL; 351 struct cmm_allocator *allocator;
378 struct cmm_attrs *pattrs; 352 struct cmm_attrs *pattrs;
379 353
380 DBC_REQUIRE(refs > 0); 354 DBC_REQUIRE(refs > 0);
@@ -382,35 +356,28 @@ int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa,
382 356
383 if (ul_seg_id == 0) { 357 if (ul_seg_id == 0) {
384 pattrs = &cmm_dfltalctattrs; 358 pattrs = &cmm_dfltalctattrs;
385 ul_seg_id = pattrs->ul_seg_id; 359 ul_seg_id = pattrs->seg_id;
386 } 360 }
387 if (!hcmm_mgr || !(ul_seg_id > 0)) { 361 if (!hcmm_mgr || !(ul_seg_id > 0)) {
388 status = -EFAULT; 362 status = -EFAULT;
389 return status; 363 return status;
390 } 364 }
391 /* get the allocator for this segment id */ 365
392 allocator = get_allocator(cmm_mgr_obj, ul_seg_id); 366 allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
393 if (allocator != NULL) { 367 if (!allocator)
394 mutex_lock(&cmm_mgr_obj->cmm_lock); 368 return status;
395 mnode_obj = 369
396 (struct cmm_mnode *)lst_first(allocator->in_use_list_head); 370 mutex_lock(&cmm_mgr_obj->cmm_lock);
397 while (mnode_obj) { 371 list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) {
398 if ((u32) buf_pa == mnode_obj->dw_pa) { 372 if (curr->pa == (u32) buf_pa) {
399 /* Found it */ 373 list_del(&curr->link);
400 lst_remove_elem(allocator->in_use_list_head, 374 add_to_free_list(allocator, curr);
401 (struct list_head *)mnode_obj); 375 status = 0;
402 /* back to freelist */ 376 break;
403 add_to_free_list(allocator, mnode_obj);
404 status = 0; /* all right! */
405 break;
406 }
407 /* next node. */
408 mnode_obj = (struct cmm_mnode *)
409 lst_next(allocator->in_use_list_head,
410 (struct list_head *)mnode_obj);
411 } 377 }
412 mutex_unlock(&cmm_mgr_obj->cmm_lock);
413 } 378 }
379 mutex_unlock(&cmm_mgr_obj->cmm_lock);
380
414 return status; 381 return status;
415} 382}
416 383
@@ -450,7 +417,7 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
450 u32 ul_seg; 417 u32 ul_seg;
451 int status = 0; 418 int status = 0;
452 struct cmm_allocator *altr; 419 struct cmm_allocator *altr;
453 struct cmm_mnode *mnode_obj = NULL; 420 struct cmm_mnode *curr;
454 421
455 DBC_REQUIRE(cmm_info_obj != NULL); 422 DBC_REQUIRE(cmm_info_obj != NULL);
456 423
@@ -459,46 +426,39 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
459 return status; 426 return status;
460 } 427 }
461 mutex_lock(&cmm_mgr_obj->cmm_lock); 428 mutex_lock(&cmm_mgr_obj->cmm_lock);
462 cmm_info_obj->ul_num_gppsm_segs = 0; /* # of SM segments */ 429 cmm_info_obj->num_gppsm_segs = 0; /* # of SM segments */
463 /* Total # of outstanding alloc */ 430 /* Total # of outstanding alloc */
464 cmm_info_obj->ul_total_in_use_cnt = 0; 431 cmm_info_obj->total_in_use_cnt = 0;
465 /* min block size */ 432 /* min block size */
466 cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size; 433 cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
467 /* check SM memory segments */ 434 /* check SM memory segments */
468 for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) { 435 for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
469 /* get the allocator object for this segment id */ 436 /* get the allocator object for this segment id */
470 altr = get_allocator(cmm_mgr_obj, ul_seg); 437 altr = get_allocator(cmm_mgr_obj, ul_seg);
471 if (altr != NULL) { 438 if (!altr)
472 cmm_info_obj->ul_num_gppsm_segs++; 439 continue;
473 cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa = 440 cmm_info_obj->num_gppsm_segs++;
474 altr->shm_base - altr->ul_dsp_size; 441 cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
475 cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size = 442 altr->shm_base - altr->dsp_size;
476 altr->ul_dsp_size + altr->ul_sm_size; 443 cmm_info_obj->seg_info[ul_seg - 1].total_seg_size =
477 cmm_info_obj->seg_info[ul_seg - 1].dw_gpp_base_pa = 444 altr->dsp_size + altr->sm_size;
478 altr->shm_base; 445 cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
479 cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size = 446 altr->shm_base;
480 altr->ul_sm_size; 447 cmm_info_obj->seg_info[ul_seg - 1].gpp_size =
481 cmm_info_obj->seg_info[ul_seg - 1].dw_dsp_base_va = 448 altr->sm_size;
482 altr->dw_dsp_base; 449 cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
483 cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size = 450 altr->dsp_base;
484 altr->ul_dsp_size; 451 cmm_info_obj->seg_info[ul_seg - 1].dsp_size =
485 cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va = 452 altr->dsp_size;
486 altr->dw_vm_base - altr->ul_dsp_size; 453 cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
487 cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0; 454 altr->vm_base - altr->dsp_size;
488 mnode_obj = (struct cmm_mnode *) 455 cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;
489 lst_first(altr->in_use_list_head); 456
490 /* Count inUse blocks */ 457 list_for_each_entry(curr, &altr->in_use_list, link) {
491 while (mnode_obj) { 458 cmm_info_obj->total_in_use_cnt++;
492 cmm_info_obj->ul_total_in_use_cnt++; 459 cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
493 cmm_info_obj->seg_info[ul_seg -
494 1].ul_in_use_cnt++;
495 /* next node. */
496 mnode_obj = (struct cmm_mnode *)
497 lst_next(altr->in_use_list_head,
498 (struct list_head *)mnode_obj);
499 }
500 } 460 }
501 } /* end for */ 461 }
502 mutex_unlock(&cmm_mgr_obj->cmm_lock); 462 mutex_unlock(&cmm_mgr_obj->cmm_lock);
503 return status; 463 return status;
504} 464}
@@ -544,75 +504,62 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
544 DBC_REQUIRE(dw_gpp_base_pa != 0); 504 DBC_REQUIRE(dw_gpp_base_pa != 0);
545 DBC_REQUIRE(gpp_base_va != 0); 505 DBC_REQUIRE(gpp_base_va != 0);
546 DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) && 506 DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
547 (c_factor >= CMM_SUBFROMDSPPA)); 507 (c_factor >= CMM_SUBFROMDSPPA));
508
548 dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x " 509 dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
549 "dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n", __func__, 510 "dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
550 dw_gpp_base_pa, ul_size, dsp_addr_offset, dw_dsp_base, 511 __func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
551 ul_dsp_size, gpp_base_va); 512 dw_dsp_base, ul_dsp_size, gpp_base_va);
552 if (!hcmm_mgr) { 513
553 status = -EFAULT; 514 if (!hcmm_mgr)
554 return status; 515 return -EFAULT;
555 } 516
556 /* make sure we have room for another allocator */ 517 /* make sure we have room for another allocator */
557 mutex_lock(&cmm_mgr_obj->cmm_lock); 518 mutex_lock(&cmm_mgr_obj->cmm_lock);
519
558 slot_seg = get_slot(cmm_mgr_obj); 520 slot_seg = get_slot(cmm_mgr_obj);
559 if (slot_seg < 0) { 521 if (slot_seg < 0) {
560 /* get a slot number */
561 status = -EPERM; 522 status = -EPERM;
562 goto func_end; 523 goto func_end;
563 } 524 }
525
564 /* Check if input ul_size is big enough to alloc at least one block */ 526 /* Check if input ul_size is big enough to alloc at least one block */
565 if (ul_size < cmm_mgr_obj->ul_min_block_size) { 527 if (ul_size < cmm_mgr_obj->min_block_size) {
566 status = -EINVAL; 528 status = -EINVAL;
567 goto func_end; 529 goto func_end;
568 } 530 }
569 531
570 /* create, zero, and tag an SM allocator object */ 532 /* create, zero, and tag an SM allocator object */
571 psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL); 533 psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
572 if (psma != NULL) { 534 if (!psma) {
573 psma->hcmm_mgr = hcmm_mgr; /* ref to parent */ 535 status = -ENOMEM;
574 psma->shm_base = dw_gpp_base_pa; /* SM Base phys */ 536 goto func_end;
575 psma->ul_sm_size = ul_size; /* SM segment size in bytes */ 537 }
576 psma->dw_vm_base = gpp_base_va; 538
577 psma->dw_dsp_phys_addr_offset = dsp_addr_offset; 539 psma->cmm_mgr = hcmm_mgr; /* ref to parent */
578 psma->c_factor = c_factor; 540 psma->shm_base = dw_gpp_base_pa; /* SM Base phys */
579 psma->dw_dsp_base = dw_dsp_base; 541 psma->sm_size = ul_size; /* SM segment size in bytes */
580 psma->ul_dsp_size = ul_dsp_size; 542 psma->vm_base = gpp_base_va;
581 if (psma->dw_vm_base == 0) { 543 psma->dsp_phys_addr_offset = dsp_addr_offset;
582 status = -EPERM; 544 psma->c_factor = c_factor;
583 goto func_end; 545 psma->dsp_base = dw_dsp_base;
584 } 546 psma->dsp_size = ul_dsp_size;
585 /* return the actual segment identifier */ 547 if (psma->vm_base == 0) {
586 *sgmt_id = (u32) slot_seg + 1; 548 status = -EPERM;
587 /* create memory free list */ 549 goto func_end;
588 psma->free_list_head = kzalloc(sizeof(struct lst_list), 550 }
589 GFP_KERNEL); 551 /* return the actual segment identifier */
590 if (psma->free_list_head == NULL) { 552 *sgmt_id = (u32) slot_seg + 1;
591 status = -ENOMEM; 553
592 goto func_end; 554 INIT_LIST_HEAD(&psma->free_list);
593 } 555 INIT_LIST_HEAD(&psma->in_use_list);
594 INIT_LIST_HEAD(&psma->free_list_head->head); 556
595 557 /* Get a mem node for this hunk-o-memory */
596 /* create memory in-use list */ 558 new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
597 psma->in_use_list_head = kzalloc(sizeof(struct 559 psma->vm_base, ul_size);
598 lst_list), GFP_KERNEL); 560 /* Place node on the SM allocator's free list */
599 if (psma->in_use_list_head == NULL) { 561 if (new_node) {
600 status = -ENOMEM; 562 list_add_tail(&new_node->link, &psma->free_list);
601 goto func_end;
602 }
603 INIT_LIST_HEAD(&psma->in_use_list_head->head);
604
605 /* Get a mem node for this hunk-o-memory */
606 new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
607 psma->dw_vm_base, ul_size);
608 /* Place node on the SM allocator's free list */
609 if (new_node) {
610 lst_put_tail(psma->free_list_head,
611 (struct list_head *)new_node);
612 } else {
613 status = -ENOMEM;
614 goto func_end;
615 }
616 } else { 563 } else {
617 status = -ENOMEM; 564 status = -ENOMEM;
618 goto func_end; 565 goto func_end;
@@ -621,12 +568,11 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
621 cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma; 568 cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;
622 569
623func_end: 570func_end:
624 if (status && psma) { 571 /* Cleanup allocator */
625 /* Cleanup allocator */ 572 if (status && psma)
626 un_register_gppsm_seg(psma); 573 un_register_gppsm_seg(psma);
627 }
628
629 mutex_unlock(&cmm_mgr_obj->cmm_lock); 574 mutex_unlock(&cmm_mgr_obj->cmm_lock);
575
630 return status; 576 return status;
631} 577}
632 578
@@ -644,36 +590,36 @@ int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
644 u32 ul_id = ul_seg_id; 590 u32 ul_id = ul_seg_id;
645 591
646 DBC_REQUIRE(ul_seg_id > 0); 592 DBC_REQUIRE(ul_seg_id > 0);
647 if (hcmm_mgr) { 593 if (!hcmm_mgr)
648 if (ul_seg_id == CMM_ALLSEGMENTS) 594 return -EFAULT;
649 ul_id = 1; 595
650 596 if (ul_seg_id == CMM_ALLSEGMENTS)
651 if ((ul_id > 0) && (ul_id <= CMM_MAXGPPSEGS)) { 597 ul_id = 1;
652 while (ul_id <= CMM_MAXGPPSEGS) { 598
653 mutex_lock(&cmm_mgr_obj->cmm_lock); 599 if ((ul_id <= 0) || (ul_id > CMM_MAXGPPSEGS))
654 /* slot = seg_id-1 */ 600 return -EINVAL;
655 psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1]; 601
656 if (psma != NULL) { 602 /*
657 un_register_gppsm_seg(psma); 603 * FIXME: CMM_MAXGPPSEGS == 1. why use a while cycle? Seems to me like
658 /* Set alctr ptr to NULL for future 604 * the ul_seg_id is not needed here. It must be always 1.
659 * reuse */ 605 */
660 cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 606 while (ul_id <= CMM_MAXGPPSEGS) {
661 1] = NULL; 607 mutex_lock(&cmm_mgr_obj->cmm_lock);
662 } else if (ul_seg_id != CMM_ALLSEGMENTS) { 608 /* slot = seg_id-1 */
663 status = -EPERM; 609 psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
664 } 610 if (psma != NULL) {
665 mutex_unlock(&cmm_mgr_obj->cmm_lock); 611 un_register_gppsm_seg(psma);
666 if (ul_seg_id != CMM_ALLSEGMENTS) 612 /* Set alctr ptr to NULL for future reuse */
667 break; 613 cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL;
668 614 } else if (ul_seg_id != CMM_ALLSEGMENTS) {
669 ul_id++; 615 status = -EPERM;
670 } /* end while */
671 } else {
672 status = -EINVAL;
673 } 616 }
674 } else { 617 mutex_unlock(&cmm_mgr_obj->cmm_lock);
675 status = -EFAULT; 618 if (ul_seg_id != CMM_ALLSEGMENTS)
676 } 619 break;
620
621 ul_id++;
622 } /* end while */
677 return status; 623 return status;
678} 624}
679 625
@@ -687,43 +633,24 @@ int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
687 */ 633 */
688static void un_register_gppsm_seg(struct cmm_allocator *psma) 634static void un_register_gppsm_seg(struct cmm_allocator *psma)
689{ 635{
690 struct cmm_mnode *mnode_obj = NULL; 636 struct cmm_mnode *curr, *tmp;
691 struct cmm_mnode *next_node = NULL;
692 637
693 DBC_REQUIRE(psma != NULL); 638 DBC_REQUIRE(psma != NULL);
694 if (psma->free_list_head != NULL) { 639
695 /* free nodes on free list */ 640 /* free nodes on free list */
696 mnode_obj = (struct cmm_mnode *)lst_first(psma->free_list_head); 641 list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
697 while (mnode_obj) { 642 list_del(&curr->link);
698 next_node = 643 kfree(curr);
699 (struct cmm_mnode *)lst_next(psma->free_list_head,
700 (struct list_head *)
701 mnode_obj);
702 lst_remove_elem(psma->free_list_head,
703 (struct list_head *)mnode_obj);
704 kfree((void *)mnode_obj);
705 /* next node. */
706 mnode_obj = next_node;
707 }
708 kfree(psma->free_list_head); /* delete freelist */
709 /* free nodes on InUse list */
710 mnode_obj =
711 (struct cmm_mnode *)lst_first(psma->in_use_list_head);
712 while (mnode_obj) {
713 next_node =
714 (struct cmm_mnode *)lst_next(psma->in_use_list_head,
715 (struct list_head *)
716 mnode_obj);
717 lst_remove_elem(psma->in_use_list_head,
718 (struct list_head *)mnode_obj);
719 kfree((void *)mnode_obj);
720 /* next node. */
721 mnode_obj = next_node;
722 }
723 kfree(psma->in_use_list_head); /* delete InUse list */
724 } 644 }
725 if ((void *)psma->dw_vm_base != NULL) 645
726 MEM_UNMAP_LINEAR_ADDRESS((void *)psma->dw_vm_base); 646 /* free nodes on InUse list */
647 list_for_each_entry_safe(curr, tmp, &psma->in_use_list, link) {
648 list_del(&curr->link);
649 kfree(curr);
650 }
651
652 if ((void *)psma->vm_base != NULL)
653 MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base);
727 654
728 /* Free allocator itself */ 655 /* Free allocator itself */
729 kfree(psma); 656 kfree(psma);
@@ -758,26 +685,29 @@ static s32 get_slot(struct cmm_object *cmm_mgr_obj)
758static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa, 685static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
759 u32 dw_va, u32 ul_size) 686 u32 dw_va, u32 ul_size)
760{ 687{
761 struct cmm_mnode *pnode = NULL; 688 struct cmm_mnode *pnode;
762 689
763 DBC_REQUIRE(cmm_mgr_obj != NULL); 690 DBC_REQUIRE(cmm_mgr_obj != NULL);
764 DBC_REQUIRE(dw_pa != 0); 691 DBC_REQUIRE(dw_pa != 0);
765 DBC_REQUIRE(dw_va != 0); 692 DBC_REQUIRE(dw_va != 0);
766 DBC_REQUIRE(ul_size != 0); 693 DBC_REQUIRE(ul_size != 0);
694
767 /* Check cmm mgr's node freelist */ 695 /* Check cmm mgr's node freelist */
768 if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) { 696 if (list_empty(&cmm_mgr_obj->node_free_list)) {
769 pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL); 697 pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
698 if (!pnode)
699 return NULL;
770 } else { 700 } else {
771 /* surely a valid element */ 701 /* surely a valid element */
772 pnode = (struct cmm_mnode *) 702 pnode = list_first_entry(&cmm_mgr_obj->node_free_list,
773 lst_get_head(cmm_mgr_obj->node_free_list_head); 703 struct cmm_mnode, link);
774 } 704 list_del_init(&pnode->link);
775 if (pnode) {
776 lst_init_elem((struct list_head *)pnode); /* set self */
777 pnode->dw_pa = dw_pa; /* Physical addr of start of block */
778 pnode->dw_va = dw_va; /* Virtual " " */
779 pnode->ul_size = ul_size; /* Size of block */
780 } 705 }
706
707 pnode->pa = dw_pa;
708 pnode->va = dw_va;
709 pnode->size = ul_size;
710
781 return pnode; 711 return pnode;
782} 712}
783 713
@@ -790,9 +720,7 @@ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
790static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode) 720static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
791{ 721{
792 DBC_REQUIRE(pnode != NULL); 722 DBC_REQUIRE(pnode != NULL);
793 lst_init_elem((struct list_head *)pnode); /* init .self ptr */ 723 list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
794 lst_put_tail(cmm_mgr_obj->node_free_list_head,
795 (struct list_head *)pnode);
796} 724}
797 725
798/* 726/*
@@ -804,103 +732,57 @@ static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
804static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator, 732static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
805 u32 usize) 733 u32 usize)
806{ 734{
807 if (allocator) { 735 struct cmm_mnode *node, *tmp;
808 struct cmm_mnode *mnode_obj = (struct cmm_mnode *) 736
809 lst_first(allocator->free_list_head); 737 if (!allocator)
810 while (mnode_obj) { 738 return NULL;
811 if (usize <= (u32) mnode_obj->ul_size) { 739
812 lst_remove_elem(allocator->free_list_head, 740 list_for_each_entry_safe(node, tmp, &allocator->free_list, link) {
813 (struct list_head *)mnode_obj); 741 if (usize <= node->size) {
814 return mnode_obj; 742 list_del(&node->link);
815 } 743 return node;
816 /* next node. */
817 mnode_obj = (struct cmm_mnode *)
818 lst_next(allocator->free_list_head,
819 (struct list_head *)mnode_obj);
820 } 744 }
821 } 745 }
746
822 return NULL; 747 return NULL;
823} 748}
824 749
825/* 750/*
826 * ======== add_to_free_list ======== 751 * ======== add_to_free_list ========
827 * Purpose: 752 * Purpose:
828 * Coelesce node into the freelist in ascending size order. 753 * Coalesce node into the freelist in ascending size order.
829 */ 754 */
830static void add_to_free_list(struct cmm_allocator *allocator, 755static void add_to_free_list(struct cmm_allocator *allocator,
831 struct cmm_mnode *pnode) 756 struct cmm_mnode *node)
832{ 757{
833 struct cmm_mnode *node_prev = NULL; 758 struct cmm_mnode *curr;
834 struct cmm_mnode *node_next = NULL;
835 struct cmm_mnode *mnode_obj;
836 u32 dw_this_pa;
837 u32 dw_next_pa;
838 759
839 DBC_REQUIRE(pnode != NULL); 760 if (!node) {
840 DBC_REQUIRE(allocator != NULL); 761 pr_err("%s: failed - node is NULL\n", __func__);
841 dw_this_pa = pnode->dw_pa; 762 return;
842 dw_next_pa = NEXT_PA(pnode);
843 mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
844 while (mnode_obj) {
845 if (dw_this_pa == NEXT_PA(mnode_obj)) {
846 /* found the block ahead of this one */
847 node_prev = mnode_obj;
848 } else if (dw_next_pa == mnode_obj->dw_pa) {
849 node_next = mnode_obj;
850 }
851 if ((node_prev == NULL) || (node_next == NULL)) {
852 /* next node. */
853 mnode_obj = (struct cmm_mnode *)
854 lst_next(allocator->free_list_head,
855 (struct list_head *)mnode_obj);
856 } else {
857 /* got 'em */
858 break;
859 }
860 } /* while */
861 if (node_prev != NULL) {
862 /* combine with previous block */
863 lst_remove_elem(allocator->free_list_head,
864 (struct list_head *)node_prev);
865 /* grow node to hold both */
866 pnode->ul_size += node_prev->ul_size;
867 pnode->dw_pa = node_prev->dw_pa;
868 pnode->dw_va = node_prev->dw_va;
869 /* place node on mgr nodeFreeList */
870 delete_node((struct cmm_object *)allocator->hcmm_mgr,
871 node_prev);
872 }
873 if (node_next != NULL) {
874 /* combine with next block */
875 lst_remove_elem(allocator->free_list_head,
876 (struct list_head *)node_next);
877 /* grow da node */
878 pnode->ul_size += node_next->ul_size;
879 /* place node on mgr nodeFreeList */
880 delete_node((struct cmm_object *)allocator->hcmm_mgr,
881 node_next);
882 } 763 }
883 /* Now, let's add to freelist in increasing size order */
884 mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
885 while (mnode_obj) {
886 if (pnode->ul_size <= mnode_obj->ul_size)
887 break;
888 764
889 /* next node. */ 765 list_for_each_entry(curr, &allocator->free_list, link) {
890 mnode_obj = 766 if (NEXT_PA(curr) == node->pa) {
891 (struct cmm_mnode *)lst_next(allocator->free_list_head, 767 curr->size += node->size;
892 (struct list_head *)mnode_obj); 768 delete_node(allocator->cmm_mgr, node);
769 return;
770 }
771 if (curr->pa == NEXT_PA(node)) {
772 curr->pa = node->pa;
773 curr->va = node->va;
774 curr->size += node->size;
775 delete_node(allocator->cmm_mgr, node);
776 return;
777 }
893 } 778 }
894 /* if mnode_obj is NULL then add our pnode to the end of the freelist */ 779 list_for_each_entry(curr, &allocator->free_list, link) {
895 if (mnode_obj == NULL) { 780 if (curr->size >= node->size) {
896 lst_put_tail(allocator->free_list_head, 781 list_add_tail(&node->link, &curr->link);
897 (struct list_head *)pnode); 782 return;
898 } else { 783 }
899 /* insert our node before the current traversed node */
900 lst_insert_before(allocator->free_list_head,
901 (struct list_head *)pnode,
902 (struct list_head *)mnode_obj);
903 } 784 }
785 list_add_tail(&node->link, &allocator->free_list);
904} 786}
905 787
906/* 788/*
@@ -912,19 +794,10 @@ static void add_to_free_list(struct cmm_allocator *allocator,
912static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj, 794static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
913 u32 ul_seg_id) 795 u32 ul_seg_id)
914{ 796{
915 struct cmm_allocator *allocator = NULL;
916
917 DBC_REQUIRE(cmm_mgr_obj != NULL); 797 DBC_REQUIRE(cmm_mgr_obj != NULL);
918 DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS)); 798 DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));
919 allocator = cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1]; 799
920 if (allocator != NULL) { 800 return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
921 /* make sure it's for real */
922 if (!allocator) {
923 allocator = NULL;
924 DBC_ASSERT(false);
925 }
926 }
927 return allocator;
928} 801}
929 802
930/* 803/*
@@ -955,9 +828,9 @@ int cmm_xlator_create(struct cmm_xlatorobject **xlator,
955 828
956 xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL); 829 xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
957 if (xlator_object != NULL) { 830 if (xlator_object != NULL) {
958 xlator_object->hcmm_mgr = hcmm_mgr; /* ref back to CMM */ 831 xlator_object->cmm_mgr = hcmm_mgr; /* ref back to CMM */
959 /* SM seg_id */ 832 /* SM seg_id */
960 xlator_object->ul_seg_id = xlator_attrs->ul_seg_id; 833 xlator_object->seg_id = xlator_attrs->seg_id;
961 } else { 834 } else {
962 status = -ENOMEM; 835 status = -ENOMEM;
963 } 836 }
@@ -980,17 +853,17 @@ void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
980 853
981 DBC_REQUIRE(refs > 0); 854 DBC_REQUIRE(refs > 0);
982 DBC_REQUIRE(xlator != NULL); 855 DBC_REQUIRE(xlator != NULL);
983 DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL); 856 DBC_REQUIRE(xlator_obj->cmm_mgr != NULL);
984 DBC_REQUIRE(va_buf != NULL); 857 DBC_REQUIRE(va_buf != NULL);
985 DBC_REQUIRE(pa_size > 0); 858 DBC_REQUIRE(pa_size > 0);
986 DBC_REQUIRE(xlator_obj->ul_seg_id > 0); 859 DBC_REQUIRE(xlator_obj->seg_id > 0);
987 860
988 if (xlator_obj) { 861 if (xlator_obj) {
989 attrs.ul_seg_id = xlator_obj->ul_seg_id; 862 attrs.seg_id = xlator_obj->seg_id;
990 __raw_writel(0, va_buf); 863 __raw_writel(0, va_buf);
991 /* Alloc SM */ 864 /* Alloc SM */
992 pbuf = 865 pbuf =
993 cmm_calloc_buf(xlator_obj->hcmm_mgr, pa_size, &attrs, NULL); 866 cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs, NULL);
994 if (pbuf) { 867 if (pbuf) {
995 /* convert to translator(node/strm) process Virtual 868 /* convert to translator(node/strm) process Virtual
996 * address */ 869 * address */
@@ -1016,14 +889,14 @@ int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
1016 889
1017 DBC_REQUIRE(refs > 0); 890 DBC_REQUIRE(refs > 0);
1018 DBC_REQUIRE(buf_va != NULL); 891 DBC_REQUIRE(buf_va != NULL);
1019 DBC_REQUIRE(xlator_obj->ul_seg_id > 0); 892 DBC_REQUIRE(xlator_obj->seg_id > 0);
1020 893
1021 if (xlator_obj) { 894 if (xlator_obj) {
1022 /* convert Va to Pa so we can free it. */ 895 /* convert Va to Pa so we can free it. */
1023 buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA); 896 buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
1024 if (buf_pa) { 897 if (buf_pa) {
1025 status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa, 898 status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa,
1026 xlator_obj->ul_seg_id); 899 xlator_obj->seg_id);
1027 if (status) { 900 if (status) {
1028 /* Uh oh, this shouldn't happen. Descriptor 901 /* Uh oh, this shouldn't happen. Descriptor
1029 * gone! */ 902 * gone! */
@@ -1052,10 +925,10 @@ int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 ** paddr,
1052 if (xlator_obj) { 925 if (xlator_obj) {
1053 if (set_info) { 926 if (set_info) {
1054 /* set translators virtual address range */ 927 /* set translators virtual address range */
1055 xlator_obj->dw_virt_base = (u32) *paddr; 928 xlator_obj->virt_base = (u32) *paddr;
1056 xlator_obj->ul_virt_size = ul_size; 929 xlator_obj->virt_size = ul_size;
1057 } else { /* return virt base address */ 930 } else { /* return virt base address */
1058 *paddr = (u8 *) xlator_obj->dw_virt_base; 931 *paddr = (u8 *) xlator_obj->virt_base;
1059 } 932 }
1060 } else { 933 } else {
1061 status = -EFAULT; 934 status = -EFAULT;
@@ -1082,10 +955,10 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
1082 if (!xlator_obj) 955 if (!xlator_obj)
1083 goto loop_cont; 956 goto loop_cont;
1084 957
1085 cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr; 958 cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
1086 /* get this translator's default SM allocator */ 959 /* get this translator's default SM allocator */
1087 DBC_ASSERT(xlator_obj->ul_seg_id > 0); 960 DBC_ASSERT(xlator_obj->seg_id > 0);
1088 allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1]; 961 allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
1089 if (!allocator) 962 if (!allocator)
1090 goto loop_cont; 963 goto loop_cont;
1091 964
@@ -1095,21 +968,21 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
1095 /* Gpp Va = Va Base + offset */ 968 /* Gpp Va = Va Base + offset */
1096 dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base - 969 dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
1097 allocator-> 970 allocator->
1098 ul_dsp_size); 971 dsp_size);
1099 dw_addr_xlate = xlator_obj->dw_virt_base + dw_offset; 972 dw_addr_xlate = xlator_obj->virt_base + dw_offset;
1100 /* Check if translated Va base is in range */ 973 /* Check if translated Va base is in range */
1101 if ((dw_addr_xlate < xlator_obj->dw_virt_base) || 974 if ((dw_addr_xlate < xlator_obj->virt_base) ||
1102 (dw_addr_xlate >= 975 (dw_addr_xlate >=
1103 (xlator_obj->dw_virt_base + 976 (xlator_obj->virt_base +
1104 xlator_obj->ul_virt_size))) { 977 xlator_obj->virt_size))) {
1105 dw_addr_xlate = 0; /* bad address */ 978 dw_addr_xlate = 0; /* bad address */
1106 } 979 }
1107 } else { 980 } else {
1108 /* Gpp PA = Gpp Base + offset */ 981 /* Gpp PA = Gpp Base + offset */
1109 dw_offset = 982 dw_offset =
1110 (u8 *) paddr - (u8 *) xlator_obj->dw_virt_base; 983 (u8 *) paddr - (u8 *) xlator_obj->virt_base;
1111 dw_addr_xlate = 984 dw_addr_xlate =
1112 allocator->shm_base - allocator->ul_dsp_size + 985 allocator->shm_base - allocator->dsp_size +
1113 dw_offset; 986 dw_offset;
1114 } 987 }
1115 } else { 988 } else {
@@ -1119,16 +992,16 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
1119 if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) { 992 if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
1120 /* Got Gpp Pa now, convert to DSP Pa */ 993 /* Got Gpp Pa now, convert to DSP Pa */
1121 dw_addr_xlate = 994 dw_addr_xlate =
1122 GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size), 995 GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size),
1123 dw_addr_xlate, 996 dw_addr_xlate,
1124 allocator->dw_dsp_phys_addr_offset * 997 allocator->dsp_phys_addr_offset *
1125 allocator->c_factor); 998 allocator->c_factor);
1126 } else if (xtype == CMM_DSPPA2PA) { 999 } else if (xtype == CMM_DSPPA2PA) {
1127 /* Got DSP Pa, convert to GPP Pa */ 1000 /* Got DSP Pa, convert to GPP Pa */
1128 dw_addr_xlate = 1001 dw_addr_xlate =
1129 DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size, 1002 DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size,
1130 dw_addr_xlate, 1003 dw_addr_xlate,
1131 allocator->dw_dsp_phys_addr_offset * 1004 allocator->dsp_phys_addr_offset *
1132 allocator->c_factor); 1005 allocator->c_factor);
1133 } 1006 }
1134loop_cont: 1007loop_cont:
diff --git a/drivers/staging/tidspbridge/pmgr/cod.c b/drivers/staging/tidspbridge/pmgr/cod.c
index 52989ab67cf..1a29264b585 100644
--- a/drivers/staging/tidspbridge/pmgr/cod.c
+++ b/drivers/staging/tidspbridge/pmgr/cod.c
@@ -33,9 +33,6 @@
33/* ----------------------------------- Trace & Debug */ 33/* ----------------------------------- Trace & Debug */
34#include <dspbridge/dbc.h> 34#include <dspbridge/dbc.h>
35 35
36/* ----------------------------------- OS Adaptation Layer */
37#include <dspbridge/ldr.h>
38
39/* ----------------------------------- Platform Manager */ 36/* ----------------------------------- Platform Manager */
40/* Include appropriate loader header file */ 37/* Include appropriate loader header file */
41#include <dspbridge/dbll.h> 38#include <dspbridge/dbll.h>
@@ -50,8 +47,7 @@ struct cod_manager {
50 struct dbll_tar_obj *target; 47 struct dbll_tar_obj *target;
51 struct dbll_library_obj *base_lib; 48 struct dbll_library_obj *base_lib;
52 bool loaded; /* Base library loaded? */ 49 bool loaded; /* Base library loaded? */
53 u32 ul_entry; 50 u32 entry;
54 struct ldr_module *dll_obj;
55 struct dbll_fxns fxns; 51 struct dbll_fxns fxns;
56 struct dbll_attrs attrs; 52 struct dbll_attrs attrs;
57 char sz_zl_file[COD_MAXPATHLENGTH]; 53 char sz_zl_file[COD_MAXPATHLENGTH];
@@ -78,12 +74,9 @@ static struct dbll_fxns ldr_fxns = {
78 (dbll_get_sect_fxn) dbll_get_sect, 74 (dbll_get_sect_fxn) dbll_get_sect,
79 (dbll_init_fxn) dbll_init, 75 (dbll_init_fxn) dbll_init,
80 (dbll_load_fxn) dbll_load, 76 (dbll_load_fxn) dbll_load,
81 (dbll_load_sect_fxn) dbll_load_sect,
82 (dbll_open_fxn) dbll_open, 77 (dbll_open_fxn) dbll_open,
83 (dbll_read_sect_fxn) dbll_read_sect, 78 (dbll_read_sect_fxn) dbll_read_sect,
84 (dbll_set_attrs_fxn) dbll_set_attrs,
85 (dbll_unload_fxn) dbll_unload, 79 (dbll_unload_fxn) dbll_unload,
86 (dbll_unload_sect_fxn) dbll_unload_sect,
87}; 80};
88 81
89static bool no_op(void); 82static bool no_op(void);
@@ -209,8 +202,7 @@ void cod_close(struct cod_libraryobj *lib)
209 * dynamically loaded object files. 202 * dynamically loaded object files.
210 * 203 *
211 */ 204 */
212int cod_create(struct cod_manager **mgr, char *str_zl_file, 205int cod_create(struct cod_manager **mgr, char *str_zl_file)
213 const struct cod_attrs *attrs)
214{ 206{
215 struct cod_manager *mgr_new; 207 struct cod_manager *mgr_new;
216 struct dbll_attrs zl_attrs; 208 struct dbll_attrs zl_attrs;
@@ -222,10 +214,6 @@ int cod_create(struct cod_manager **mgr, char *str_zl_file,
222 /* assume failure */ 214 /* assume failure */
223 *mgr = NULL; 215 *mgr = NULL;
224 216
225 /* we don't support non-default attrs yet */
226 if (attrs != NULL)
227 return -ENOSYS;
228
229 mgr_new = kzalloc(sizeof(struct cod_manager), GFP_KERNEL); 217 mgr_new = kzalloc(sizeof(struct cod_manager), GFP_KERNEL);
230 if (mgr_new == NULL) 218 if (mgr_new == NULL)
231 return -ENOMEM; 219 return -ENOMEM;
@@ -358,7 +346,7 @@ int cod_get_entry(struct cod_manager *cod_mgr_obj, u32 *entry_pt)
358 DBC_REQUIRE(cod_mgr_obj); 346 DBC_REQUIRE(cod_mgr_obj);
359 DBC_REQUIRE(entry_pt != NULL); 347 DBC_REQUIRE(entry_pt != NULL);
360 348
361 *entry_pt = cod_mgr_obj->ul_entry; 349 *entry_pt = cod_mgr_obj->entry;
362 350
363 return 0; 351 return 0;
364} 352}
@@ -528,7 +516,7 @@ int cod_load_base(struct cod_manager *cod_mgr_obj, u32 num_argc, char *args[],
528 flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB; 516 flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
529 status = cod_mgr_obj->fxns.load_fxn(cod_mgr_obj->base_lib, flags, 517 status = cod_mgr_obj->fxns.load_fxn(cod_mgr_obj->base_lib, flags,
530 &new_attrs, 518 &new_attrs,
531 &cod_mgr_obj->ul_entry); 519 &cod_mgr_obj->entry);
532 if (status) 520 if (status)
533 cod_mgr_obj->fxns.close_fxn(cod_mgr_obj->base_lib); 521 cod_mgr_obj->fxns.close_fxn(cod_mgr_obj->base_lib);
534 522
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index 878aa50718e..2e20f78e2c3 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -123,7 +123,7 @@ struct dbll_library_obj {
123 u32 open_ref; /* Number of times opened */ 123 u32 open_ref; /* Number of times opened */
124 u32 load_ref; /* Number of times loaded */ 124 u32 load_ref; /* Number of times loaded */
125 struct gh_t_hash_tab *sym_tab; /* Hash table of symbols */ 125 struct gh_t_hash_tab *sym_tab; /* Hash table of symbols */
126 u32 ul_pos; 126 u32 pos;
127}; 127};
128 128
129/* 129/*
@@ -398,7 +398,7 @@ int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr,
398 398
399 } else { 399 } else {
400 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, 400 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
401 zl_lib->ul_pos, 401 zl_lib->pos,
402 SEEK_SET); 402 SEEK_SET);
403 } 403 }
404 } else { 404 } else {
@@ -522,7 +522,7 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
522 522
523 } 523 }
524 if (!status) { 524 if (!status) {
525 zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell)) 525 zl_lib->pos = (*(zl_lib->target_obj->attrs.ftell))
526 (zl_lib->fp); 526 (zl_lib->fp);
527 /* Reset file cursor */ 527 /* Reset file cursor */
528 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, 528 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
@@ -568,18 +568,6 @@ int dbll_load(struct dbll_library_obj *lib, dbll_flags flags,
568} 568}
569 569
570/* 570/*
571 * ======== dbll_load_sect ========
572 * Not supported for COFF.
573 */
574int dbll_load_sect(struct dbll_library_obj *zl_lib, char *sec_name,
575 struct dbll_attrs *attrs)
576{
577 DBC_REQUIRE(zl_lib);
578
579 return -ENOSYS;
580}
581
582/*
583 * ======== dbll_open ======== 571 * ======== dbll_open ========
584 */ 572 */
585int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags, 573int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
@@ -611,7 +599,7 @@ int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
611 if (zl_lib == NULL) { 599 if (zl_lib == NULL) {
612 status = -ENOMEM; 600 status = -ENOMEM;
613 } else { 601 } else {
614 zl_lib->ul_pos = 0; 602 zl_lib->pos = 0;
615 /* Increment ref count to allow close on failure 603 /* Increment ref count to allow close on failure
616 * later on */ 604 * later on */
617 zl_lib->open_ref++; 605 zl_lib->open_ref++;
@@ -661,7 +649,7 @@ int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags,
661 if (!status && zl_lib->fp == NULL) 649 if (!status && zl_lib->fp == NULL)
662 status = dof_open(zl_lib); 650 status = dof_open(zl_lib);
663 651
664 zl_lib->ul_pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp); 652 zl_lib->pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp);
665 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, SEEK_SET); 653 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, SEEK_SET);
666 /* Create a hash table for symbols if flag is set */ 654 /* Create a hash table for symbols if flag is set */
667 if (zl_lib->sym_tab != NULL || !(flags & DBLL_SYMB)) 655 if (zl_lib->sym_tab != NULL || !(flags & DBLL_SYMB))
@@ -750,7 +738,7 @@ int dbll_read_sect(struct dbll_library_obj *lib, char *name,
750 738
751 } else { 739 } else {
752 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, 740 (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp,
753 zl_lib->ul_pos, 741 zl_lib->pos,
754 SEEK_SET); 742 SEEK_SET);
755 } 743 }
756 } else { 744 } else {
@@ -794,22 +782,6 @@ func_cont:
794} 782}
795 783
796/* 784/*
797 * ======== dbll_set_attrs ========
798 * Set the attributes of the target.
799 */
800void dbll_set_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs)
801{
802 struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target;
803 DBC_REQUIRE(refs > 0);
804 DBC_REQUIRE(zl_target);
805 DBC_REQUIRE(pattrs != NULL);
806
807 if ((pattrs != NULL) && (zl_target != NULL))
808 zl_target->attrs = *pattrs;
809
810}
811
812/*
813 * ======== dbll_unload ======== 785 * ======== dbll_unload ========
814 */ 786 */
815void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs) 787void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs)
@@ -848,19 +820,6 @@ func_end:
848} 820}
849 821
850/* 822/*
851 * ======== dbll_unload_sect ========
852 * Not supported for COFF.
853 */
854int dbll_unload_sect(struct dbll_library_obj *lib, char *sec_name,
855 struct dbll_attrs *attrs)
856{
857 DBC_REQUIRE(refs > 0);
858 DBC_REQUIRE(sec_name != NULL);
859
860 return -ENOSYS;
861}
862
863/*
864 * ======== dof_close ======== 823 * ======== dof_close ========
865 */ 824 */
866static void dof_close(struct dbll_library_obj *zl_lib) 825static void dof_close(struct dbll_library_obj *zl_lib)
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 132e960967b..9a38d86a84a 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -16,6 +16,7 @@
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */ 17 */
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/list.h>
19 20
20/* ----------------------------------- Host OS */ 21/* ----------------------------------- Host OS */
21#include <dspbridge/host_os.h> 22#include <dspbridge/host_os.h>
@@ -26,10 +27,6 @@
26/* ----------------------------------- Trace & Debug */ 27/* ----------------------------------- Trace & Debug */
27#include <dspbridge/dbc.h> 28#include <dspbridge/dbc.h>
28 29
29/* ----------------------------------- OS Adaptation Layer */
30#include <dspbridge/ldr.h>
31#include <dspbridge/list.h>
32
33/* ----------------------------------- Platform Manager */ 30/* ----------------------------------- Platform Manager */
34#include <dspbridge/cod.h> 31#include <dspbridge/cod.h>
35#include <dspbridge/drv.h> 32#include <dspbridge/drv.h>
@@ -60,28 +57,26 @@
60 57
61/* The Bridge device object: */ 58/* The Bridge device object: */
62struct dev_object { 59struct dev_object {
63 /* LST requires "link" to be first field! */
64 struct list_head link; /* Link to next dev_object. */ 60 struct list_head link; /* Link to next dev_object. */
65 u8 dev_type; /* Device Type */ 61 u8 dev_type; /* Device Type */
66 struct cfg_devnode *dev_node_obj; /* Platform specific dev id */ 62 struct cfg_devnode *dev_node_obj; /* Platform specific dev id */
67 /* Bridge Context Handle */ 63 /* Bridge Context Handle */
68 struct bridge_dev_context *hbridge_context; 64 struct bridge_dev_context *bridge_context;
69 /* Function interface to Bridge driver. */ 65 /* Function interface to Bridge driver. */
70 struct bridge_drv_interface bridge_interface; 66 struct bridge_drv_interface bridge_interface;
71 struct brd_object *lock_owner; /* Client with exclusive access. */ 67 struct brd_object *lock_owner; /* Client with exclusive access. */
72 struct cod_manager *cod_mgr; /* Code manager handle. */ 68 struct cod_manager *cod_mgr; /* Code manager handle. */
73 struct chnl_mgr *hchnl_mgr; /* Channel manager. */ 69 struct chnl_mgr *chnl_mgr; /* Channel manager. */
74 struct deh_mgr *hdeh_mgr; /* DEH manager. */ 70 struct deh_mgr *deh_mgr; /* DEH manager. */
75 struct msg_mgr *hmsg_mgr; /* Message manager. */ 71 struct msg_mgr *msg_mgr; /* Message manager. */
76 struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ 72 struct io_mgr *iomgr; /* IO manager (CHNL, msg_ctrl) */
77 struct cmm_object *hcmm_mgr; /* SM memory manager. */ 73 struct cmm_object *cmm_mgr; /* SM memory manager. */
78 struct dmm_object *dmm_mgr; /* Dynamic memory manager. */ 74 struct dmm_object *dmm_mgr; /* Dynamic memory manager. */
79 struct ldr_module *module_obj; /* Bridge Module handle. */
80 u32 word_size; /* DSP word size: quick access. */ 75 u32 word_size; /* DSP word size: quick access. */
81 struct drv_object *hdrv_obj; /* Driver Object */ 76 struct drv_object *drv_obj; /* Driver Object */
82 struct lst_list *proc_list; /* List of Proceeosr attached to 77 /* List of Processors attached to this device */
83 * this device */ 78 struct list_head proc_list;
84 struct node_mgr *hnode_mgr; 79 struct node_mgr *node_mgr;
85}; 80};
86 81
87struct drv_ext { 82struct drv_ext {
@@ -115,9 +110,9 @@ u32 dev_brd_write_fxn(void *arb, u32 dsp_add, void *host_buf,
115 DBC_REQUIRE(host_buf != NULL); /* Required of BrdWrite(). */ 110 DBC_REQUIRE(host_buf != NULL); /* Required of BrdWrite(). */
116 if (dev_obj) { 111 if (dev_obj) {
117 /* Require of BrdWrite() */ 112 /* Require of BrdWrite() */
118 DBC_ASSERT(dev_obj->hbridge_context != NULL); 113 DBC_ASSERT(dev_obj->bridge_context != NULL);
119 status = (*dev_obj->bridge_interface.pfn_brd_write) ( 114 status = (*dev_obj->bridge_interface.brd_write) (
120 dev_obj->hbridge_context, host_buf, 115 dev_obj->bridge_context, host_buf,
121 dsp_add, ul_num_bytes, mem_space); 116 dsp_add, ul_num_bytes, mem_space);
122 /* Special case of getting the address only */ 117 /* Special case of getting the address only */
123 if (ul_num_bytes == 0) 118 if (ul_num_bytes == 0)
@@ -140,7 +135,6 @@ int dev_create_device(struct dev_object **device_obj,
140 struct cfg_devnode *dev_node_obj) 135 struct cfg_devnode *dev_node_obj)
141{ 136{
142 struct cfg_hostres *host_res; 137 struct cfg_hostres *host_res;
143 struct ldr_module *module_obj = NULL;
144 struct bridge_drv_interface *drv_fxns = NULL; 138 struct bridge_drv_interface *drv_fxns = NULL;
145 struct dev_object *dev_obj = NULL; 139 struct dev_object *dev_obj = NULL;
146 struct chnl_mgrattrs mgr_attrs; 140 struct chnl_mgrattrs mgr_attrs;
@@ -180,13 +174,12 @@ int dev_create_device(struct dev_object **device_obj,
180 if (dev_obj) { 174 if (dev_obj) {
181 /* Fill out the rest of the Dev Object structure: */ 175 /* Fill out the rest of the Dev Object structure: */
182 dev_obj->dev_node_obj = dev_node_obj; 176 dev_obj->dev_node_obj = dev_node_obj;
183 dev_obj->module_obj = module_obj;
184 dev_obj->cod_mgr = NULL; 177 dev_obj->cod_mgr = NULL;
185 dev_obj->hchnl_mgr = NULL; 178 dev_obj->chnl_mgr = NULL;
186 dev_obj->hdeh_mgr = NULL; 179 dev_obj->deh_mgr = NULL;
187 dev_obj->lock_owner = NULL; 180 dev_obj->lock_owner = NULL;
188 dev_obj->word_size = DSPWORDSIZE; 181 dev_obj->word_size = DSPWORDSIZE;
189 dev_obj->hdrv_obj = hdrv_obj; 182 dev_obj->drv_obj = hdrv_obj;
190 dev_obj->dev_type = DSP_UNIT; 183 dev_obj->dev_type = DSP_UNIT;
191 /* Store this Bridge's interface functions, based on its 184 /* Store this Bridge's interface functions, based on its
192 * version. */ 185 * version. */
@@ -195,12 +188,12 @@ int dev_create_device(struct dev_object **device_obj,
195 188
196 /* Call fxn_dev_create() to get the Bridge's device 189 /* Call fxn_dev_create() to get the Bridge's device
197 * context handle. */ 190 * context handle. */
198 status = (dev_obj->bridge_interface.pfn_dev_create) 191 status = (dev_obj->bridge_interface.dev_create)
199 (&dev_obj->hbridge_context, dev_obj, 192 (&dev_obj->bridge_context, dev_obj,
200 host_res); 193 host_res);
201 /* Assert bridge_dev_create()'s ensure clause: */ 194 /* Assert bridge_dev_create()'s ensure clause: */
202 DBC_ASSERT(status 195 DBC_ASSERT(status
203 || (dev_obj->hbridge_context != NULL)); 196 || (dev_obj->bridge_context != NULL));
204 } else { 197 } else {
205 status = -ENOMEM; 198 status = -ENOMEM;
206 } 199 }
@@ -220,54 +213,47 @@ int dev_create_device(struct dev_object **device_obj,
220 num_windows = host_res->num_mem_windows; 213 num_windows = host_res->num_mem_windows;
221 if (num_windows) { 214 if (num_windows) {
222 /* Assume last memory window is for CHNL */ 215 /* Assume last memory window is for CHNL */
223 io_mgr_attrs.shm_base = host_res->dw_mem_base[1] + 216 io_mgr_attrs.shm_base = host_res->mem_base[1] +
224 host_res->dw_offset_for_monitor; 217 host_res->offset_for_monitor;
225 io_mgr_attrs.usm_length = 218 io_mgr_attrs.sm_length =
226 host_res->dw_mem_length[1] - 219 host_res->mem_length[1] -
227 host_res->dw_offset_for_monitor; 220 host_res->offset_for_monitor;
228 } else { 221 } else {
229 io_mgr_attrs.shm_base = 0; 222 io_mgr_attrs.shm_base = 0;
230 io_mgr_attrs.usm_length = 0; 223 io_mgr_attrs.sm_length = 0;
231 pr_err("%s: No memory reserved for shared structures\n", 224 pr_err("%s: No memory reserved for shared structures\n",
232 __func__); 225 __func__);
233 } 226 }
234 status = chnl_create(&dev_obj->hchnl_mgr, dev_obj, &mgr_attrs); 227 status = chnl_create(&dev_obj->chnl_mgr, dev_obj, &mgr_attrs);
235 if (status == -ENOSYS) { 228 if (status == -ENOSYS) {
236 /* It's OK for a device not to have a channel 229 /* It's OK for a device not to have a channel
237 * manager: */ 230 * manager: */
238 status = 0; 231 status = 0;
239 } 232 }
240 /* Create CMM mgr even if Msg Mgr not impl. */ 233 /* Create CMM mgr even if Msg Mgr not impl. */
241 status = cmm_create(&dev_obj->hcmm_mgr, 234 status = cmm_create(&dev_obj->cmm_mgr,
242 (struct dev_object *)dev_obj, NULL); 235 (struct dev_object *)dev_obj, NULL);
243 /* Only create IO manager if we have a channel manager */ 236 /* Only create IO manager if we have a channel manager */
244 if (!status && dev_obj->hchnl_mgr) { 237 if (!status && dev_obj->chnl_mgr) {
245 status = io_create(&dev_obj->hio_mgr, dev_obj, 238 status = io_create(&dev_obj->iomgr, dev_obj,
246 &io_mgr_attrs); 239 &io_mgr_attrs);
247 } 240 }
248 /* Only create DEH manager if we have an IO manager */ 241 /* Only create DEH manager if we have an IO manager */
249 if (!status) { 242 if (!status) {
250 /* Instantiate the DEH module */ 243 /* Instantiate the DEH module */
251 status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj); 244 status = bridge_deh_create(&dev_obj->deh_mgr, dev_obj);
252 } 245 }
253 /* Create DMM mgr . */ 246 /* Create DMM mgr . */
254 status = dmm_create(&dev_obj->dmm_mgr, 247 status = dmm_create(&dev_obj->dmm_mgr,
255 (struct dev_object *)dev_obj, NULL); 248 (struct dev_object *)dev_obj, NULL);
256 } 249 }
257 /* Add the new DEV_Object to the global list: */ 250 /* Add the new DEV_Object to the global list: */
258 if (!status) { 251 if (!status)
259 lst_init_elem(&dev_obj->link);
260 status = drv_insert_dev_object(hdrv_obj, dev_obj); 252 status = drv_insert_dev_object(hdrv_obj, dev_obj);
261 } 253
262 /* Create the Processor List */ 254 /* Create the Processor List */
263 if (!status) { 255 if (!status)
264 dev_obj->proc_list = kzalloc(sizeof(struct lst_list), 256 INIT_LIST_HEAD(&dev_obj->proc_list);
265 GFP_KERNEL);
266 if (!(dev_obj->proc_list))
267 status = -EPERM;
268 else
269 INIT_LIST_HEAD(&dev_obj->proc_list->head);
270 }
271leave: 257leave:
272 /* If all went well, return a handle to the dev object; 258 /* If all went well, return a handle to the dev object;
273 * else, cleanup and return NULL in the OUT parameter. */ 259 * else, cleanup and return NULL in the OUT parameter. */
@@ -275,7 +261,6 @@ leave:
275 *device_obj = dev_obj; 261 *device_obj = dev_obj;
276 } else { 262 } else {
277 if (dev_obj) { 263 if (dev_obj) {
278 kfree(dev_obj->proc_list);
279 if (dev_obj->cod_mgr) 264 if (dev_obj->cod_mgr)
280 cod_delete(dev_obj->cod_mgr); 265 cod_delete(dev_obj->cod_mgr);
281 if (dev_obj->dmm_mgr) 266 if (dev_obj->dmm_mgr)
@@ -306,13 +291,13 @@ int dev_create2(struct dev_object *hdev_obj)
306 DBC_REQUIRE(hdev_obj); 291 DBC_REQUIRE(hdev_obj);
307 292
308 /* There can be only one Node Manager per DEV object */ 293 /* There can be only one Node Manager per DEV object */
309 DBC_ASSERT(!dev_obj->hnode_mgr); 294 DBC_ASSERT(!dev_obj->node_mgr);
310 status = node_create_mgr(&dev_obj->hnode_mgr, hdev_obj); 295 status = node_create_mgr(&dev_obj->node_mgr, hdev_obj);
311 if (status) 296 if (status)
312 dev_obj->hnode_mgr = NULL; 297 dev_obj->node_mgr = NULL;
313 298
314 DBC_ENSURE((!status && dev_obj->hnode_mgr != NULL) 299 DBC_ENSURE((!status && dev_obj->node_mgr != NULL)
315 || (status && dev_obj->hnode_mgr == NULL)); 300 || (status && dev_obj->node_mgr == NULL));
316 return status; 301 return status;
317} 302}
318 303
@@ -329,15 +314,15 @@ int dev_destroy2(struct dev_object *hdev_obj)
329 DBC_REQUIRE(refs > 0); 314 DBC_REQUIRE(refs > 0);
330 DBC_REQUIRE(hdev_obj); 315 DBC_REQUIRE(hdev_obj);
331 316
332 if (dev_obj->hnode_mgr) { 317 if (dev_obj->node_mgr) {
333 if (node_delete_mgr(dev_obj->hnode_mgr)) 318 if (node_delete_mgr(dev_obj->node_mgr))
334 status = -EPERM; 319 status = -EPERM;
335 else 320 else
336 dev_obj->hnode_mgr = NULL; 321 dev_obj->node_mgr = NULL;
337 322
338 } 323 }
339 324
340 DBC_ENSURE((!status && dev_obj->hnode_mgr == NULL) || status); 325 DBC_ENSURE((!status && dev_obj->node_mgr == NULL) || status);
341 return status; 326 return status;
342} 327}
343 328
@@ -360,33 +345,33 @@ int dev_destroy_device(struct dev_object *hdev_obj)
360 dev_obj->cod_mgr = NULL; 345 dev_obj->cod_mgr = NULL;
361 } 346 }
362 347
363 if (dev_obj->hnode_mgr) { 348 if (dev_obj->node_mgr) {
364 node_delete_mgr(dev_obj->hnode_mgr); 349 node_delete_mgr(dev_obj->node_mgr);
365 dev_obj->hnode_mgr = NULL; 350 dev_obj->node_mgr = NULL;
366 } 351 }
367 352
368 /* Free the io, channel, and message managers for this board: */ 353 /* Free the io, channel, and message managers for this board: */
369 if (dev_obj->hio_mgr) { 354 if (dev_obj->iomgr) {
370 io_destroy(dev_obj->hio_mgr); 355 io_destroy(dev_obj->iomgr);
371 dev_obj->hio_mgr = NULL; 356 dev_obj->iomgr = NULL;
372 } 357 }
373 if (dev_obj->hchnl_mgr) { 358 if (dev_obj->chnl_mgr) {
374 chnl_destroy(dev_obj->hchnl_mgr); 359 chnl_destroy(dev_obj->chnl_mgr);
375 dev_obj->hchnl_mgr = NULL; 360 dev_obj->chnl_mgr = NULL;
376 } 361 }
377 if (dev_obj->hmsg_mgr) { 362 if (dev_obj->msg_mgr) {
378 msg_delete(dev_obj->hmsg_mgr); 363 msg_delete(dev_obj->msg_mgr);
379 dev_obj->hmsg_mgr = NULL; 364 dev_obj->msg_mgr = NULL;
380 } 365 }
381 366
382 if (dev_obj->hdeh_mgr) { 367 if (dev_obj->deh_mgr) {
383 /* Uninitialize DEH module. */ 368 /* Uninitialize DEH module. */
384 bridge_deh_destroy(dev_obj->hdeh_mgr); 369 bridge_deh_destroy(dev_obj->deh_mgr);
385 dev_obj->hdeh_mgr = NULL; 370 dev_obj->deh_mgr = NULL;
386 } 371 }
387 if (dev_obj->hcmm_mgr) { 372 if (dev_obj->cmm_mgr) {
388 cmm_destroy(dev_obj->hcmm_mgr, true); 373 cmm_destroy(dev_obj->cmm_mgr, true);
389 dev_obj->hcmm_mgr = NULL; 374 dev_obj->cmm_mgr = NULL;
390 } 375 }
391 376
392 if (dev_obj->dmm_mgr) { 377 if (dev_obj->dmm_mgr) {
@@ -396,18 +381,15 @@ int dev_destroy_device(struct dev_object *hdev_obj)
396 381
397 /* Call the driver's bridge_dev_destroy() function: */ 382 /* Call the driver's bridge_dev_destroy() function: */
398 /* Require of DevDestroy */ 383 /* Require of DevDestroy */
399 if (dev_obj->hbridge_context) { 384 if (dev_obj->bridge_context) {
400 status = (*dev_obj->bridge_interface.pfn_dev_destroy) 385 status = (*dev_obj->bridge_interface.dev_destroy)
401 (dev_obj->hbridge_context); 386 (dev_obj->bridge_context);
402 dev_obj->hbridge_context = NULL; 387 dev_obj->bridge_context = NULL;
403 } else 388 } else
404 status = -EPERM; 389 status = -EPERM;
405 if (!status) { 390 if (!status) {
406 kfree(dev_obj->proc_list);
407 dev_obj->proc_list = NULL;
408
409 /* Remove this DEV_Object from the global list: */ 391 /* Remove this DEV_Object from the global list: */
410 drv_remove_dev_object(dev_obj->hdrv_obj, dev_obj); 392 drv_remove_dev_object(dev_obj->drv_obj, dev_obj);
411 /* Free The library * LDR_FreeModule 393 /* Free The library * LDR_FreeModule
412 * (dev_obj->module_obj); */ 394 * (dev_obj->module_obj); */
413 /* Free this dev object: */ 395 /* Free this dev object: */
@@ -437,7 +419,7 @@ int dev_get_chnl_mgr(struct dev_object *hdev_obj,
437 DBC_REQUIRE(mgr != NULL); 419 DBC_REQUIRE(mgr != NULL);
438 420
439 if (hdev_obj) { 421 if (hdev_obj) {
440 *mgr = dev_obj->hchnl_mgr; 422 *mgr = dev_obj->chnl_mgr;
441 } else { 423 } else {
442 *mgr = NULL; 424 *mgr = NULL;
443 status = -EFAULT; 425 status = -EFAULT;
@@ -463,7 +445,7 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
463 DBC_REQUIRE(mgr != NULL); 445 DBC_REQUIRE(mgr != NULL);
464 446
465 if (hdev_obj) { 447 if (hdev_obj) {
466 *mgr = dev_obj->hcmm_mgr; 448 *mgr = dev_obj->cmm_mgr;
467 } else { 449 } else {
468 *mgr = NULL; 450 *mgr = NULL;
469 status = -EFAULT; 451 status = -EFAULT;
@@ -536,7 +518,7 @@ int dev_get_deh_mgr(struct dev_object *hdev_obj,
536 DBC_REQUIRE(deh_manager != NULL); 518 DBC_REQUIRE(deh_manager != NULL);
537 DBC_REQUIRE(hdev_obj); 519 DBC_REQUIRE(hdev_obj);
538 if (hdev_obj) { 520 if (hdev_obj) {
539 *deh_manager = hdev_obj->hdeh_mgr; 521 *deh_manager = hdev_obj->deh_mgr;
540 } else { 522 } else {
541 *deh_manager = NULL; 523 *deh_manager = NULL;
542 status = -EFAULT; 524 status = -EFAULT;
@@ -623,7 +605,7 @@ int dev_get_io_mgr(struct dev_object *hdev_obj,
623 DBC_REQUIRE(hdev_obj); 605 DBC_REQUIRE(hdev_obj);
624 606
625 if (hdev_obj) { 607 if (hdev_obj) {
626 *io_man = hdev_obj->hio_mgr; 608 *io_man = hdev_obj->iomgr;
627 } else { 609 } else {
628 *io_man = NULL; 610 *io_man = NULL;
629 status = -EFAULT; 611 status = -EFAULT;
@@ -660,7 +642,7 @@ void dev_get_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr **msg_man)
660 DBC_REQUIRE(msg_man != NULL); 642 DBC_REQUIRE(msg_man != NULL);
661 DBC_REQUIRE(hdev_obj); 643 DBC_REQUIRE(hdev_obj);
662 644
663 *msg_man = hdev_obj->hmsg_mgr; 645 *msg_man = hdev_obj->msg_mgr;
664} 646}
665 647
666/* 648/*
@@ -678,7 +660,7 @@ int dev_get_node_manager(struct dev_object *hdev_obj,
678 DBC_REQUIRE(node_man != NULL); 660 DBC_REQUIRE(node_man != NULL);
679 661
680 if (hdev_obj) { 662 if (hdev_obj) {
681 *node_man = dev_obj->hnode_mgr; 663 *node_man = dev_obj->node_mgr;
682 } else { 664 } else {
683 *node_man = NULL; 665 *node_man = NULL;
684 status = -EFAULT; 666 status = -EFAULT;
@@ -728,7 +710,7 @@ int dev_get_bridge_context(struct dev_object *hdev_obj,
728 DBC_REQUIRE(phbridge_context != NULL); 710 DBC_REQUIRE(phbridge_context != NULL);
729 711
730 if (hdev_obj) { 712 if (hdev_obj) {
731 *phbridge_context = dev_obj->hbridge_context; 713 *phbridge_context = dev_obj->bridge_context;
732 } else { 714 } else {
733 *phbridge_context = NULL; 715 *phbridge_context = NULL;
734 status = -EFAULT; 716 status = -EFAULT;
@@ -799,20 +781,18 @@ bool dev_init(void)
799 * Purpose: 781 * Purpose:
800 * Notify all clients of this device of a change in device status. 782 * Notify all clients of this device of a change in device status.
801 */ 783 */
802int dev_notify_clients(struct dev_object *hdev_obj, u32 ret) 784int dev_notify_clients(struct dev_object *dev_obj, u32 ret)
803{ 785{
804 int status = 0; 786 struct list_head *curr;
805
806 struct dev_object *dev_obj = hdev_obj;
807 void *proc_obj;
808 787
809 for (proc_obj = (void *)lst_first(dev_obj->proc_list); 788 /*
810 proc_obj != NULL; 789 * FIXME: this code needs struct proc_object to have a list_head
811 proc_obj = (void *)lst_next(dev_obj->proc_list, 790 * at the begining. If not, this can go horribly wrong.
812 (struct list_head *)proc_obj)) 791 */
813 proc_notify_clients(proc_obj, (u32) ret); 792 list_for_each(curr, &dev_obj->proc_list)
793 proc_notify_clients((void *)curr, ret);
814 794
815 return status; 795 return 0;
816} 796}
817 797
818/* 798/*
@@ -864,11 +844,11 @@ int dev_set_chnl_mgr(struct dev_object *hdev_obj,
864 DBC_REQUIRE(refs > 0); 844 DBC_REQUIRE(refs > 0);
865 845
866 if (hdev_obj) 846 if (hdev_obj)
867 dev_obj->hchnl_mgr = hmgr; 847 dev_obj->chnl_mgr = hmgr;
868 else 848 else
869 status = -EFAULT; 849 status = -EFAULT;
870 850
871 DBC_ENSURE(status || (dev_obj->hchnl_mgr == hmgr)); 851 DBC_ENSURE(status || (dev_obj->chnl_mgr == hmgr));
872 return status; 852 return status;
873} 853}
874 854
@@ -882,7 +862,7 @@ void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr)
882 DBC_REQUIRE(refs > 0); 862 DBC_REQUIRE(refs > 0);
883 DBC_REQUIRE(hdev_obj); 863 DBC_REQUIRE(hdev_obj);
884 864
885 hdev_obj->hmsg_mgr = hmgr; 865 hdev_obj->msg_mgr = hmgr;
886} 866}
887 867
888/* 868/*
@@ -894,7 +874,7 @@ int dev_start_device(struct cfg_devnode *dev_node_obj)
894{ 874{
895 struct dev_object *hdev_obj = NULL; /* handle to 'Bridge Device */ 875 struct dev_object *hdev_obj = NULL; /* handle to 'Bridge Device */
896 /* Bridge driver filename */ 876 /* Bridge driver filename */
897 char bridge_file_name[CFG_MAXSEARCHPATHLEN] = "UMA"; 877 char *bridge_file_name = "UMA";
898 int status; 878 int status;
899 struct mgr_object *hmgr_obj = NULL; 879 struct mgr_object *hmgr_obj = NULL;
900 struct drv_data *drv_datap = dev_get_drvdata(bridge); 880 struct drv_data *drv_datap = dev_get_drvdata(bridge);
@@ -967,7 +947,7 @@ static int init_cod_mgr(struct dev_object *dev_obj)
967 DBC_REQUIRE(refs > 0); 947 DBC_REQUIRE(refs > 0);
968 DBC_REQUIRE(!dev_obj || (dev_obj->cod_mgr == NULL)); 948 DBC_REQUIRE(!dev_obj || (dev_obj->cod_mgr == NULL));
969 949
970 status = cod_create(&dev_obj->cod_mgr, sz_dummy_file, NULL); 950 status = cod_create(&dev_obj->cod_mgr, sz_dummy_file);
971 951
972 return status; 952 return status;
973} 953}
@@ -994,23 +974,23 @@ static int init_cod_mgr(struct dev_object *dev_obj)
994int dev_insert_proc_object(struct dev_object *hdev_obj, 974int dev_insert_proc_object(struct dev_object *hdev_obj,
995 u32 proc_obj, bool *already_attached) 975 u32 proc_obj, bool *already_attached)
996{ 976{
997 int status = 0;
998 struct dev_object *dev_obj = (struct dev_object *)hdev_obj; 977 struct dev_object *dev_obj = (struct dev_object *)hdev_obj;
999 978
1000 DBC_REQUIRE(refs > 0); 979 DBC_REQUIRE(refs > 0);
1001 DBC_REQUIRE(dev_obj); 980 DBC_REQUIRE(dev_obj);
1002 DBC_REQUIRE(proc_obj != 0); 981 DBC_REQUIRE(proc_obj != 0);
1003 DBC_REQUIRE(dev_obj->proc_list != NULL);
1004 DBC_REQUIRE(already_attached != NULL); 982 DBC_REQUIRE(already_attached != NULL);
1005 if (!LST_IS_EMPTY(dev_obj->proc_list)) 983 if (!list_empty(&dev_obj->proc_list))
1006 *already_attached = true; 984 *already_attached = true;
1007 985
1008 /* Add DevObject to tail. */ 986 /* Add DevObject to tail. */
1009 lst_put_tail(dev_obj->proc_list, (struct list_head *)proc_obj); 987 /*
1010 988 * FIXME: this code needs struct proc_object to have a list_head
1011 DBC_ENSURE(!status && !LST_IS_EMPTY(dev_obj->proc_list)); 989 * at the begining. If not, this can go horribly wrong.
990 */
991 list_add_tail((struct list_head *)proc_obj, &dev_obj->proc_list);
1012 992
1013 return status; 993 return 0;
1014} 994}
1015 995
1016/* 996/*
@@ -1039,15 +1019,12 @@ int dev_remove_proc_object(struct dev_object *hdev_obj, u32 proc_obj)
1039 1019
1040 DBC_REQUIRE(dev_obj); 1020 DBC_REQUIRE(dev_obj);
1041 DBC_REQUIRE(proc_obj != 0); 1021 DBC_REQUIRE(proc_obj != 0);
1042 DBC_REQUIRE(dev_obj->proc_list != NULL); 1022 DBC_REQUIRE(!list_empty(&dev_obj->proc_list));
1043 DBC_REQUIRE(!LST_IS_EMPTY(dev_obj->proc_list));
1044 1023
1045 /* Search list for dev_obj: */ 1024 /* Search list for dev_obj: */
1046 for (cur_elem = lst_first(dev_obj->proc_list); cur_elem != NULL; 1025 list_for_each(cur_elem, &dev_obj->proc_list) {
1047 cur_elem = lst_next(dev_obj->proc_list, cur_elem)) {
1048 /* If found, remove it. */
1049 if ((u32) cur_elem == proc_obj) { 1026 if ((u32) cur_elem == proc_obj) {
1050 lst_remove_elem(dev_obj->proc_list, cur_elem); 1027 list_del(cur_elem);
1051 status = 0; 1028 status = 0;
1052 break; 1029 break;
1053 } 1030 }
@@ -1056,14 +1033,10 @@ int dev_remove_proc_object(struct dev_object *hdev_obj, u32 proc_obj)
1056 return status; 1033 return status;
1057} 1034}
1058 1035
1059int dev_get_dev_type(struct dev_object *device_obj, u8 *dev_type) 1036int dev_get_dev_type(struct dev_object *dev_obj, u8 *dev_type)
1060{ 1037{
1061 int status = 0;
1062 struct dev_object *dev_obj = (struct dev_object *)device_obj;
1063
1064 *dev_type = dev_obj->dev_type; 1038 *dev_type = dev_obj->dev_type;
1065 1039 return 0;
1066 return status;
1067} 1040}
1068 1041
1069/* 1042/*
@@ -1106,73 +1079,73 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
1106 intf_fxns->brd_api_minor_version = drv_fxns->brd_api_minor_version; 1079 intf_fxns->brd_api_minor_version = drv_fxns->brd_api_minor_version;
1107 /* Install functions up to DSP API version .80 (first alpha): */ 1080 /* Install functions up to DSP API version .80 (first alpha): */
1108 if (bridge_version > 0) { 1081 if (bridge_version > 0) {
1109 STORE_FXN(fxn_dev_create, pfn_dev_create); 1082 STORE_FXN(fxn_dev_create, dev_create);
1110 STORE_FXN(fxn_dev_destroy, pfn_dev_destroy); 1083 STORE_FXN(fxn_dev_destroy, dev_destroy);
1111 STORE_FXN(fxn_dev_ctrl, pfn_dev_cntrl); 1084 STORE_FXN(fxn_dev_ctrl, dev_cntrl);
1112 STORE_FXN(fxn_brd_monitor, pfn_brd_monitor); 1085 STORE_FXN(fxn_brd_monitor, brd_monitor);
1113 STORE_FXN(fxn_brd_start, pfn_brd_start); 1086 STORE_FXN(fxn_brd_start, brd_start);
1114 STORE_FXN(fxn_brd_stop, pfn_brd_stop); 1087 STORE_FXN(fxn_brd_stop, brd_stop);
1115 STORE_FXN(fxn_brd_status, pfn_brd_status); 1088 STORE_FXN(fxn_brd_status, brd_status);
1116 STORE_FXN(fxn_brd_read, pfn_brd_read); 1089 STORE_FXN(fxn_brd_read, brd_read);
1117 STORE_FXN(fxn_brd_write, pfn_brd_write); 1090 STORE_FXN(fxn_brd_write, brd_write);
1118 STORE_FXN(fxn_brd_setstate, pfn_brd_set_state); 1091 STORE_FXN(fxn_brd_setstate, brd_set_state);
1119 STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy); 1092 STORE_FXN(fxn_brd_memcopy, brd_mem_copy);
1120 STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write); 1093 STORE_FXN(fxn_brd_memwrite, brd_mem_write);
1121 STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map); 1094 STORE_FXN(fxn_brd_memmap, brd_mem_map);
1122 STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map); 1095 STORE_FXN(fxn_brd_memunmap, brd_mem_un_map);
1123 STORE_FXN(fxn_chnl_create, pfn_chnl_create); 1096 STORE_FXN(fxn_chnl_create, chnl_create);
1124 STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy); 1097 STORE_FXN(fxn_chnl_destroy, chnl_destroy);
1125 STORE_FXN(fxn_chnl_open, pfn_chnl_open); 1098 STORE_FXN(fxn_chnl_open, chnl_open);
1126 STORE_FXN(fxn_chnl_close, pfn_chnl_close); 1099 STORE_FXN(fxn_chnl_close, chnl_close);
1127 STORE_FXN(fxn_chnl_addioreq, pfn_chnl_add_io_req); 1100 STORE_FXN(fxn_chnl_addioreq, chnl_add_io_req);
1128 STORE_FXN(fxn_chnl_getioc, pfn_chnl_get_ioc); 1101 STORE_FXN(fxn_chnl_getioc, chnl_get_ioc);
1129 STORE_FXN(fxn_chnl_cancelio, pfn_chnl_cancel_io); 1102 STORE_FXN(fxn_chnl_cancelio, chnl_cancel_io);
1130 STORE_FXN(fxn_chnl_flushio, pfn_chnl_flush_io); 1103 STORE_FXN(fxn_chnl_flushio, chnl_flush_io);
1131 STORE_FXN(fxn_chnl_getinfo, pfn_chnl_get_info); 1104 STORE_FXN(fxn_chnl_getinfo, chnl_get_info);
1132 STORE_FXN(fxn_chnl_getmgrinfo, pfn_chnl_get_mgr_info); 1105 STORE_FXN(fxn_chnl_getmgrinfo, chnl_get_mgr_info);
1133 STORE_FXN(fxn_chnl_idle, pfn_chnl_idle); 1106 STORE_FXN(fxn_chnl_idle, chnl_idle);
1134 STORE_FXN(fxn_chnl_registernotify, pfn_chnl_register_notify); 1107 STORE_FXN(fxn_chnl_registernotify, chnl_register_notify);
1135 STORE_FXN(fxn_io_create, pfn_io_create); 1108 STORE_FXN(fxn_io_create, io_create);
1136 STORE_FXN(fxn_io_destroy, pfn_io_destroy); 1109 STORE_FXN(fxn_io_destroy, io_destroy);
1137 STORE_FXN(fxn_io_onloaded, pfn_io_on_loaded); 1110 STORE_FXN(fxn_io_onloaded, io_on_loaded);
1138 STORE_FXN(fxn_io_getprocload, pfn_io_get_proc_load); 1111 STORE_FXN(fxn_io_getprocload, io_get_proc_load);
1139 STORE_FXN(fxn_msg_create, pfn_msg_create); 1112 STORE_FXN(fxn_msg_create, msg_create);
1140 STORE_FXN(fxn_msg_createqueue, pfn_msg_create_queue); 1113 STORE_FXN(fxn_msg_createqueue, msg_create_queue);
1141 STORE_FXN(fxn_msg_delete, pfn_msg_delete); 1114 STORE_FXN(fxn_msg_delete, msg_delete);
1142 STORE_FXN(fxn_msg_deletequeue, pfn_msg_delete_queue); 1115 STORE_FXN(fxn_msg_deletequeue, msg_delete_queue);
1143 STORE_FXN(fxn_msg_get, pfn_msg_get); 1116 STORE_FXN(fxn_msg_get, msg_get);
1144 STORE_FXN(fxn_msg_put, pfn_msg_put); 1117 STORE_FXN(fxn_msg_put, msg_put);
1145 STORE_FXN(fxn_msg_registernotify, pfn_msg_register_notify); 1118 STORE_FXN(fxn_msg_registernotify, msg_register_notify);
1146 STORE_FXN(fxn_msg_setqueueid, pfn_msg_set_queue_id); 1119 STORE_FXN(fxn_msg_setqueueid, msg_set_queue_id);
1147 } 1120 }
1148 /* Add code for any additional functions in newerBridge versions here */ 1121 /* Add code for any additional functions in newerBridge versions here */
1149 /* Ensure postcondition: */ 1122 /* Ensure postcondition: */
1150 DBC_ENSURE(intf_fxns->pfn_dev_create != NULL); 1123 DBC_ENSURE(intf_fxns->dev_create != NULL);
1151 DBC_ENSURE(intf_fxns->pfn_dev_destroy != NULL); 1124 DBC_ENSURE(intf_fxns->dev_destroy != NULL);
1152 DBC_ENSURE(intf_fxns->pfn_dev_cntrl != NULL); 1125 DBC_ENSURE(intf_fxns->dev_cntrl != NULL);
1153 DBC_ENSURE(intf_fxns->pfn_brd_monitor != NULL); 1126 DBC_ENSURE(intf_fxns->brd_monitor != NULL);
1154 DBC_ENSURE(intf_fxns->pfn_brd_start != NULL); 1127 DBC_ENSURE(intf_fxns->brd_start != NULL);
1155 DBC_ENSURE(intf_fxns->pfn_brd_stop != NULL); 1128 DBC_ENSURE(intf_fxns->brd_stop != NULL);
1156 DBC_ENSURE(intf_fxns->pfn_brd_status != NULL); 1129 DBC_ENSURE(intf_fxns->brd_status != NULL);
1157 DBC_ENSURE(intf_fxns->pfn_brd_read != NULL); 1130 DBC_ENSURE(intf_fxns->brd_read != NULL);
1158 DBC_ENSURE(intf_fxns->pfn_brd_write != NULL); 1131 DBC_ENSURE(intf_fxns->brd_write != NULL);
1159 DBC_ENSURE(intf_fxns->pfn_chnl_create != NULL); 1132 DBC_ENSURE(intf_fxns->chnl_create != NULL);
1160 DBC_ENSURE(intf_fxns->pfn_chnl_destroy != NULL); 1133 DBC_ENSURE(intf_fxns->chnl_destroy != NULL);
1161 DBC_ENSURE(intf_fxns->pfn_chnl_open != NULL); 1134 DBC_ENSURE(intf_fxns->chnl_open != NULL);
1162 DBC_ENSURE(intf_fxns->pfn_chnl_close != NULL); 1135 DBC_ENSURE(intf_fxns->chnl_close != NULL);
1163 DBC_ENSURE(intf_fxns->pfn_chnl_add_io_req != NULL); 1136 DBC_ENSURE(intf_fxns->chnl_add_io_req != NULL);
1164 DBC_ENSURE(intf_fxns->pfn_chnl_get_ioc != NULL); 1137 DBC_ENSURE(intf_fxns->chnl_get_ioc != NULL);
1165 DBC_ENSURE(intf_fxns->pfn_chnl_cancel_io != NULL); 1138 DBC_ENSURE(intf_fxns->chnl_cancel_io != NULL);
1166 DBC_ENSURE(intf_fxns->pfn_chnl_flush_io != NULL); 1139 DBC_ENSURE(intf_fxns->chnl_flush_io != NULL);
1167 DBC_ENSURE(intf_fxns->pfn_chnl_get_info != NULL); 1140 DBC_ENSURE(intf_fxns->chnl_get_info != NULL);
1168 DBC_ENSURE(intf_fxns->pfn_chnl_get_mgr_info != NULL); 1141 DBC_ENSURE(intf_fxns->chnl_get_mgr_info != NULL);
1169 DBC_ENSURE(intf_fxns->pfn_chnl_idle != NULL); 1142 DBC_ENSURE(intf_fxns->chnl_idle != NULL);
1170 DBC_ENSURE(intf_fxns->pfn_chnl_register_notify != NULL); 1143 DBC_ENSURE(intf_fxns->chnl_register_notify != NULL);
1171 DBC_ENSURE(intf_fxns->pfn_io_create != NULL); 1144 DBC_ENSURE(intf_fxns->io_create != NULL);
1172 DBC_ENSURE(intf_fxns->pfn_io_destroy != NULL); 1145 DBC_ENSURE(intf_fxns->io_destroy != NULL);
1173 DBC_ENSURE(intf_fxns->pfn_io_on_loaded != NULL); 1146 DBC_ENSURE(intf_fxns->io_on_loaded != NULL);
1174 DBC_ENSURE(intf_fxns->pfn_io_get_proc_load != NULL); 1147 DBC_ENSURE(intf_fxns->io_get_proc_load != NULL);
1175 DBC_ENSURE(intf_fxns->pfn_msg_set_queue_id != NULL); 1148 DBC_ENSURE(intf_fxns->msg_set_queue_id != NULL);
1176 1149
1177#undef STORE_FXN 1150#undef STORE_FXN
1178} 1151}
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 86ca785f191..767ffe270ed 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -68,7 +68,7 @@
68/* Device IOCtl function pointer */ 68/* Device IOCtl function pointer */
69struct api_cmd { 69struct api_cmd {
70 u32(*fxn) (union trapped_args *args, void *pr_ctxt); 70 u32(*fxn) (union trapped_args *args, void *pr_ctxt);
71 u32 dw_index; 71 u32 index;
72}; 72};
73 73
74/* ----------------------------------- Globals */ 74/* ----------------------------------- Globals */
@@ -416,7 +416,7 @@ u32 mgrwrap_enum_node_info(union trapped_args *args, void *pr_ctxt)
416 u8 *pndb_props; 416 u8 *pndb_props;
417 u32 num_nodes; 417 u32 num_nodes;
418 int status = 0; 418 int status = 0;
419 u32 size = args->args_mgr_enumnode_info.undb_props_size; 419 u32 size = args->args_mgr_enumnode_info.ndb_props_size;
420 420
421 if (size < sizeof(struct dsp_ndbprops)) 421 if (size < sizeof(struct dsp_ndbprops))
422 return -EINVAL; 422 return -EINVAL;
@@ -431,9 +431,9 @@ u32 mgrwrap_enum_node_info(union trapped_args *args, void *pr_ctxt)
431 (struct dsp_ndbprops *)pndb_props, size, 431 (struct dsp_ndbprops *)pndb_props, size,
432 &num_nodes); 432 &num_nodes);
433 } 433 }
434 CP_TO_USR(args->args_mgr_enumnode_info.pndb_props, pndb_props, status, 434 CP_TO_USR(args->args_mgr_enumnode_info.ndb_props, pndb_props, status,
435 size); 435 size);
436 CP_TO_USR(args->args_mgr_enumnode_info.pu_num_nodes, &num_nodes, status, 436 CP_TO_USR(args->args_mgr_enumnode_info.num_nodes, &num_nodes, status,
437 1); 437 1);
438 kfree(pndb_props); 438 kfree(pndb_props);
439 439
@@ -466,7 +466,7 @@ u32 mgrwrap_enum_proc_info(union trapped_args *args, void *pr_ctxt)
466 } 466 }
467 CP_TO_USR(args->args_mgr_enumproc_info.processor_info, processor_info, 467 CP_TO_USR(args->args_mgr_enumproc_info.processor_info, processor_info,
468 status, size); 468 status, size);
469 CP_TO_USR(args->args_mgr_enumproc_info.pu_num_procs, &num_procs, 469 CP_TO_USR(args->args_mgr_enumproc_info.num_procs, &num_procs,
470 status, 1); 470 status, 1);
471 kfree(processor_info); 471 kfree(processor_info);
472 472
@@ -490,7 +490,7 @@ u32 mgrwrap_register_object(union trapped_args *args, void *pr_ctxt)
490 goto func_end; 490 goto func_end;
491 /* path_size is increased by 1 to accommodate NULL */ 491 /* path_size is increased by 1 to accommodate NULL */
492 path_size = strlen_user((char *) 492 path_size = strlen_user((char *)
493 args->args_mgr_registerobject.psz_path_name) + 493 args->args_mgr_registerobject.sz_path_name) +
494 1; 494 1;
495 psz_path_name = kmalloc(path_size, GFP_KERNEL); 495 psz_path_name = kmalloc(path_size, GFP_KERNEL);
496 if (!psz_path_name) { 496 if (!psz_path_name) {
@@ -499,7 +499,7 @@ u32 mgrwrap_register_object(union trapped_args *args, void *pr_ctxt)
499 } 499 }
500 ret = strncpy_from_user(psz_path_name, 500 ret = strncpy_from_user(psz_path_name,
501 (char *)args->args_mgr_registerobject. 501 (char *)args->args_mgr_registerobject.
502 psz_path_name, path_size); 502 sz_path_name, path_size);
503 if (!ret) { 503 if (!ret) {
504 status = -EFAULT; 504 status = -EFAULT;
505 goto func_end; 505 goto func_end;
@@ -569,9 +569,9 @@ u32 mgrwrap_wait_for_bridge_events(union trapped_args *args, void *pr_ctxt)
569 status = mgr_wait_for_bridge_events(anotifications, count, 569 status = mgr_wait_for_bridge_events(anotifications, count,
570 &index, 570 &index,
571 args->args_mgr_wait. 571 args->args_mgr_wait.
572 utimeout); 572 timeout);
573 } 573 }
574 CP_TO_USR(args->args_mgr_wait.pu_index, &index, status, 1); 574 CP_TO_USR(args->args_mgr_wait.index, &index, status, 1);
575 return status; 575 return status;
576} 576}
577 577
@@ -617,10 +617,10 @@ func_end:
617u32 procwrap_ctrl(union trapped_args *args, void *pr_ctxt) 617u32 procwrap_ctrl(union trapped_args *args, void *pr_ctxt)
618{ 618{
619 u32 cb_data_size, __user * psize = (u32 __user *) 619 u32 cb_data_size, __user * psize = (u32 __user *)
620 args->args_proc_ctrl.pargs; 620 args->args_proc_ctrl.args;
621 u8 *pargs = NULL; 621 u8 *pargs = NULL;
622 int status = 0; 622 int status = 0;
623 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 623 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
624 624
625 if (psize) { 625 if (psize) {
626 if (get_user(cb_data_size, psize)) { 626 if (get_user(cb_data_size, psize)) {
@@ -634,16 +634,16 @@ u32 procwrap_ctrl(union trapped_args *args, void *pr_ctxt)
634 goto func_end; 634 goto func_end;
635 } 635 }
636 636
637 CP_FM_USR(pargs, args->args_proc_ctrl.pargs, status, 637 CP_FM_USR(pargs, args->args_proc_ctrl.args, status,
638 cb_data_size); 638 cb_data_size);
639 } 639 }
640 if (!status) { 640 if (!status) {
641 status = proc_ctrl(hprocessor, 641 status = proc_ctrl(hprocessor,
642 args->args_proc_ctrl.dw_cmd, 642 args->args_proc_ctrl.cmd,
643 (struct dsp_cbdata *)pargs); 643 (struct dsp_cbdata *)pargs);
644 } 644 }
645 645
646 /* CP_TO_USR(args->args_proc_ctrl.pargs, pargs, status, 1); */ 646 /* CP_TO_USR(args->args_proc_ctrl.args, pargs, status, 1); */
647 kfree(pargs); 647 kfree(pargs);
648func_end: 648func_end:
649 return status; 649 return status;
@@ -668,7 +668,7 @@ u32 procwrap_enum_node_info(union trapped_args *args, void *pr_ctxt)
668 void *node_tab[MAX_NODES]; 668 void *node_tab[MAX_NODES];
669 u32 num_nodes; 669 u32 num_nodes;
670 u32 alloc_cnt; 670 u32 alloc_cnt;
671 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 671 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
672 672
673 if (!args->args_proc_enumnode_info.node_tab_size) 673 if (!args->args_proc_enumnode_info.node_tab_size)
674 return -EINVAL; 674 return -EINVAL;
@@ -679,9 +679,9 @@ u32 procwrap_enum_node_info(union trapped_args *args, void *pr_ctxt)
679 &num_nodes, &alloc_cnt); 679 &num_nodes, &alloc_cnt);
680 CP_TO_USR(args->args_proc_enumnode_info.node_tab, node_tab, status, 680 CP_TO_USR(args->args_proc_enumnode_info.node_tab, node_tab, status,
681 num_nodes); 681 num_nodes);
682 CP_TO_USR(args->args_proc_enumnode_info.pu_num_nodes, &num_nodes, 682 CP_TO_USR(args->args_proc_enumnode_info.num_nodes, &num_nodes,
683 status, 1); 683 status, 1);
684 CP_TO_USR(args->args_proc_enumnode_info.pu_allocated, &alloc_cnt, 684 CP_TO_USR(args->args_proc_enumnode_info.allocated, &alloc_cnt,
685 status, 1); 685 status, 1);
686 return status; 686 return status;
687} 687}
@@ -694,8 +694,8 @@ u32 procwrap_end_dma(union trapped_args *args, void *pr_ctxt)
694 return -EINVAL; 694 return -EINVAL;
695 695
696 status = proc_end_dma(pr_ctxt, 696 status = proc_end_dma(pr_ctxt,
697 args->args_proc_dma.pmpu_addr, 697 args->args_proc_dma.mpu_addr,
698 args->args_proc_dma.ul_size, 698 args->args_proc_dma.size,
699 args->args_proc_dma.dir); 699 args->args_proc_dma.dir);
700 return status; 700 return status;
701} 701}
@@ -708,8 +708,8 @@ u32 procwrap_begin_dma(union trapped_args *args, void *pr_ctxt)
708 return -EINVAL; 708 return -EINVAL;
709 709
710 status = proc_begin_dma(pr_ctxt, 710 status = proc_begin_dma(pr_ctxt,
711 args->args_proc_dma.pmpu_addr, 711 args->args_proc_dma.mpu_addr,
712 args->args_proc_dma.ul_size, 712 args->args_proc_dma.size,
713 args->args_proc_dma.dir); 713 args->args_proc_dma.dir);
714 return status; 714 return status;
715} 715}
@@ -721,14 +721,14 @@ u32 procwrap_flush_memory(union trapped_args *args, void *pr_ctxt)
721{ 721{
722 int status; 722 int status;
723 723
724 if (args->args_proc_flushmemory.ul_flags > 724 if (args->args_proc_flushmemory.flags >
725 PROC_WRITEBACK_INVALIDATE_MEM) 725 PROC_WRITEBACK_INVALIDATE_MEM)
726 return -EINVAL; 726 return -EINVAL;
727 727
728 status = proc_flush_memory(pr_ctxt, 728 status = proc_flush_memory(pr_ctxt,
729 args->args_proc_flushmemory.pmpu_addr, 729 args->args_proc_flushmemory.mpu_addr,
730 args->args_proc_flushmemory.ul_size, 730 args->args_proc_flushmemory.size,
731 args->args_proc_flushmemory.ul_flags); 731 args->args_proc_flushmemory.flags);
732 return status; 732 return status;
733} 733}
734 734
@@ -741,8 +741,8 @@ u32 procwrap_invalidate_memory(union trapped_args *args, void *pr_ctxt)
741 741
742 status = 742 status =
743 proc_invalidate_memory(pr_ctxt, 743 proc_invalidate_memory(pr_ctxt,
744 args->args_proc_invalidatememory.pmpu_addr, 744 args->args_proc_invalidatememory.mpu_addr,
745 args->args_proc_invalidatememory.ul_size); 745 args->args_proc_invalidatememory.size);
746 return status; 746 return status;
747} 747}
748 748
@@ -753,7 +753,7 @@ u32 procwrap_enum_resources(union trapped_args *args, void *pr_ctxt)
753{ 753{
754 int status = 0; 754 int status = 0;
755 struct dsp_resourceinfo resource_info; 755 struct dsp_resourceinfo resource_info;
756 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 756 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
757 757
758 if (args->args_proc_enumresources.resource_info_size < 758 if (args->args_proc_enumresources.resource_info_size <
759 sizeof(struct dsp_resourceinfo)) 759 sizeof(struct dsp_resourceinfo))
@@ -780,7 +780,7 @@ u32 procwrap_get_state(union trapped_args *args, void *pr_ctxt)
780{ 780{
781 int status; 781 int status;
782 struct dsp_processorstate proc_state; 782 struct dsp_processorstate proc_state;
783 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 783 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
784 784
785 if (args->args_proc_getstate.state_info_size < 785 if (args->args_proc_getstate.state_info_size <
786 sizeof(struct dsp_processorstate)) 786 sizeof(struct dsp_processorstate))
@@ -801,7 +801,7 @@ u32 procwrap_get_trace(union trapped_args *args, void *pr_ctxt)
801{ 801{
802 int status; 802 int status;
803 u8 *pbuf; 803 u8 *pbuf;
804 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 804 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
805 805
806 if (args->args_proc_gettrace.max_size > MAX_TRACEBUFLEN) 806 if (args->args_proc_gettrace.max_size > MAX_TRACEBUFLEN)
807 return -EINVAL; 807 return -EINVAL;
@@ -813,7 +813,7 @@ u32 procwrap_get_trace(union trapped_args *args, void *pr_ctxt)
813 } else { 813 } else {
814 status = -ENOMEM; 814 status = -ENOMEM;
815 } 815 }
816 CP_TO_USR(args->args_proc_gettrace.pbuf, pbuf, status, 816 CP_TO_USR(args->args_proc_gettrace.buf, pbuf, status,
817 args->args_proc_gettrace.max_size); 817 args->args_proc_gettrace.max_size);
818 kfree(pbuf); 818 kfree(pbuf);
819 819
@@ -830,7 +830,7 @@ u32 procwrap_load(union trapped_args *args, void *pr_ctxt)
830 char *temp; 830 char *temp;
831 s32 count = args->args_proc_load.argc_index; 831 s32 count = args->args_proc_load.argc_index;
832 u8 **argv = NULL, **envp = NULL; 832 u8 **argv = NULL, **envp = NULL;
833 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 833 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
834 834
835 if (count <= 0 || count > MAX_LOADARGS) { 835 if (count <= 0 || count > MAX_LOADARGS) {
836 status = -EINVAL; 836 status = -EINVAL;
@@ -948,18 +948,18 @@ u32 procwrap_map(union trapped_args *args, void *pr_ctxt)
948{ 948{
949 int status; 949 int status;
950 void *map_addr; 950 void *map_addr;
951 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 951 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
952 952
953 if (!args->args_proc_mapmem.ul_size) 953 if (!args->args_proc_mapmem.size)
954 return -EINVAL; 954 return -EINVAL;
955 955
956 status = proc_map(args->args_proc_mapmem.hprocessor, 956 status = proc_map(args->args_proc_mapmem.processor,
957 args->args_proc_mapmem.pmpu_addr, 957 args->args_proc_mapmem.mpu_addr,
958 args->args_proc_mapmem.ul_size, 958 args->args_proc_mapmem.size,
959 args->args_proc_mapmem.req_addr, &map_addr, 959 args->args_proc_mapmem.req_addr, &map_addr,
960 args->args_proc_mapmem.ul_map_attr, pr_ctxt); 960 args->args_proc_mapmem.map_attr, pr_ctxt);
961 if (!status) { 961 if (!status) {
962 if (put_user(map_addr, args->args_proc_mapmem.pp_map_addr)) { 962 if (put_user(map_addr, args->args_proc_mapmem.map_addr)) {
963 status = -EINVAL; 963 status = -EINVAL;
964 proc_un_map(hprocessor, map_addr, pr_ctxt); 964 proc_un_map(hprocessor, map_addr, pr_ctxt);
965 } 965 }
@@ -975,17 +975,17 @@ u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt)
975{ 975{
976 int status; 976 int status;
977 struct dsp_notification notification; 977 struct dsp_notification notification;
978 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 978 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
979 979
980 /* Initialize the notification data structure */ 980 /* Initialize the notification data structure */
981 notification.ps_name = NULL; 981 notification.name = NULL;
982 notification.handle = NULL; 982 notification.handle = NULL;
983 983
984 status = proc_register_notify(hprocessor, 984 status = proc_register_notify(hprocessor,
985 args->args_proc_register_notify.event_mask, 985 args->args_proc_register_notify.event_mask,
986 args->args_proc_register_notify.notify_type, 986 args->args_proc_register_notify.notify_type,
987 &notification); 987 &notification);
988 CP_TO_USR(args->args_proc_register_notify.hnotification, &notification, 988 CP_TO_USR(args->args_proc_register_notify.notification, &notification,
989 status, 1); 989 status, 1);
990 return status; 990 return status;
991} 991}
@@ -997,20 +997,20 @@ u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt)
997{ 997{
998 int status; 998 int status;
999 void *prsv_addr; 999 void *prsv_addr;
1000 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 1000 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
1001 1001
1002 if ((args->args_proc_rsvmem.ul_size <= 0) || 1002 if ((args->args_proc_rsvmem.size <= 0) ||
1003 (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0) 1003 (args->args_proc_rsvmem.size & (PG_SIZE4K - 1)) != 0)
1004 return -EINVAL; 1004 return -EINVAL;
1005 1005
1006 status = proc_reserve_memory(hprocessor, 1006 status = proc_reserve_memory(hprocessor,
1007 args->args_proc_rsvmem.ul_size, &prsv_addr, 1007 args->args_proc_rsvmem.size, &prsv_addr,
1008 pr_ctxt); 1008 pr_ctxt);
1009 if (!status) { 1009 if (!status) {
1010 if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) { 1010 if (put_user(prsv_addr, args->args_proc_rsvmem.rsv_addr)) {
1011 status = -EINVAL; 1011 status = -EINVAL;
1012 proc_un_reserve_memory(args->args_proc_rsvmem. 1012 proc_un_reserve_memory(args->args_proc_rsvmem.
1013 hprocessor, prsv_addr, pr_ctxt); 1013 processor, prsv_addr, pr_ctxt);
1014 } 1014 }
1015 } 1015 }
1016 return status; 1016 return status;
@@ -1023,7 +1023,7 @@ u32 procwrap_start(union trapped_args *args, void *pr_ctxt)
1023{ 1023{
1024 u32 ret; 1024 u32 ret;
1025 1025
1026 ret = proc_start(((struct process_context *)pr_ctxt)->hprocessor); 1026 ret = proc_start(((struct process_context *)pr_ctxt)->processor);
1027 return ret; 1027 return ret;
1028} 1028}
1029 1029
@@ -1034,7 +1034,7 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt)
1034{ 1034{
1035 int status; 1035 int status;
1036 1036
1037 status = proc_un_map(((struct process_context *)pr_ctxt)->hprocessor, 1037 status = proc_un_map(((struct process_context *)pr_ctxt)->processor,
1038 args->args_proc_unmapmem.map_addr, pr_ctxt); 1038 args->args_proc_unmapmem.map_addr, pr_ctxt);
1039 return status; 1039 return status;
1040} 1040}
@@ -1045,10 +1045,10 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt)
1045u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt) 1045u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt)
1046{ 1046{
1047 int status; 1047 int status;
1048 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 1048 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
1049 1049
1050 status = proc_un_reserve_memory(hprocessor, 1050 status = proc_un_reserve_memory(hprocessor,
1051 args->args_proc_unrsvmem.prsv_addr, 1051 args->args_proc_unrsvmem.rsv_addr,
1052 pr_ctxt); 1052 pr_ctxt);
1053 return status; 1053 return status;
1054} 1054}
@@ -1060,7 +1060,7 @@ u32 procwrap_stop(union trapped_args *args, void *pr_ctxt)
1060{ 1060{
1061 u32 ret; 1061 u32 ret;
1062 1062
1063 ret = proc_stop(((struct process_context *)pr_ctxt)->hprocessor); 1063 ret = proc_stop(((struct process_context *)pr_ctxt)->processor);
1064 1064
1065 return ret; 1065 return ret;
1066} 1066}
@@ -1087,12 +1087,12 @@ u32 nodewrap_allocate(union trapped_args *args, void *pr_ctxt)
1087 int status = 0; 1087 int status = 0;
1088 struct dsp_uuid node_uuid; 1088 struct dsp_uuid node_uuid;
1089 u32 cb_data_size = 0; 1089 u32 cb_data_size = 0;
1090 u32 __user *psize = (u32 __user *) args->args_node_allocate.pargs; 1090 u32 __user *psize = (u32 __user *) args->args_node_allocate.args;
1091 u8 *pargs = NULL; 1091 u8 *pargs = NULL;
1092 struct dsp_nodeattrin proc_attr_in, *attr_in = NULL; 1092 struct dsp_nodeattrin proc_attr_in, *attr_in = NULL;
1093 struct node_res_object *node_res; 1093 struct node_res_object *node_res;
1094 int nodeid; 1094 int nodeid;
1095 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 1095 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
1096 1096
1097 /* Optional argument */ 1097 /* Optional argument */
1098 if (psize) { 1098 if (psize) {
@@ -1106,7 +1106,7 @@ u32 nodewrap_allocate(union trapped_args *args, void *pr_ctxt)
1106 status = -ENOMEM; 1106 status = -ENOMEM;
1107 1107
1108 } 1108 }
1109 CP_FM_USR(pargs, args->args_node_allocate.pargs, status, 1109 CP_FM_USR(pargs, args->args_node_allocate.args, status,
1110 cb_data_size); 1110 cb_data_size);
1111 } 1111 }
1112 CP_FM_USR(&node_uuid, args->args_node_allocate.node_id_ptr, status, 1); 1112 CP_FM_USR(&node_uuid, args->args_node_allocate.node_id_ptr, status, 1);
@@ -1129,7 +1129,7 @@ u32 nodewrap_allocate(union trapped_args *args, void *pr_ctxt)
1129 } 1129 }
1130 if (!status) { 1130 if (!status) {
1131 nodeid = node_res->id + 1; 1131 nodeid = node_res->id + 1;
1132 CP_TO_USR(args->args_node_allocate.ph_node, &nodeid, 1132 CP_TO_USR(args->args_node_allocate.node, &nodeid,
1133 status, 1); 1133 status, 1);
1134 if (status) { 1134 if (status) {
1135 status = -EFAULT; 1135 status = -EFAULT;
@@ -1154,28 +1154,28 @@ u32 nodewrap_alloc_msg_buf(union trapped_args *args, void *pr_ctxt)
1154 struct node_res_object *node_res; 1154 struct node_res_object *node_res;
1155 1155
1156 find_node_handle(&node_res, pr_ctxt, 1156 find_node_handle(&node_res, pr_ctxt,
1157 args->args_node_allocmsgbuf.hnode); 1157 args->args_node_allocmsgbuf.node);
1158 1158
1159 if (!node_res) 1159 if (!node_res)
1160 return -EFAULT; 1160 return -EFAULT;
1161 1161
1162 if (!args->args_node_allocmsgbuf.usize) 1162 if (!args->args_node_allocmsgbuf.size)
1163 return -EINVAL; 1163 return -EINVAL;
1164 1164
1165 if (args->args_node_allocmsgbuf.pattr) { /* Optional argument */ 1165 if (args->args_node_allocmsgbuf.attr) { /* Optional argument */
1166 CP_FM_USR(&attr, args->args_node_allocmsgbuf.pattr, status, 1); 1166 CP_FM_USR(&attr, args->args_node_allocmsgbuf.attr, status, 1);
1167 if (!status) 1167 if (!status)
1168 pattr = &attr; 1168 pattr = &attr;
1169 1169
1170 } 1170 }
1171 /* argument */ 1171 /* argument */
1172 CP_FM_USR(&pbuffer, args->args_node_allocmsgbuf.pbuffer, status, 1); 1172 CP_FM_USR(&pbuffer, args->args_node_allocmsgbuf.buffer, status, 1);
1173 if (!status) { 1173 if (!status) {
1174 status = node_alloc_msg_buf(node_res->hnode, 1174 status = node_alloc_msg_buf(node_res->node,
1175 args->args_node_allocmsgbuf.usize, 1175 args->args_node_allocmsgbuf.size,
1176 pattr, &pbuffer); 1176 pattr, &pbuffer);
1177 } 1177 }
1178 CP_TO_USR(args->args_node_allocmsgbuf.pbuffer, &pbuffer, status, 1); 1178 CP_TO_USR(args->args_node_allocmsgbuf.buffer, &pbuffer, status, 1);
1179 return status; 1179 return status;
1180} 1180}
1181 1181
@@ -1188,12 +1188,12 @@ u32 nodewrap_change_priority(union trapped_args *args, void *pr_ctxt)
1188 struct node_res_object *node_res; 1188 struct node_res_object *node_res;
1189 1189
1190 find_node_handle(&node_res, pr_ctxt, 1190 find_node_handle(&node_res, pr_ctxt,
1191 args->args_node_changepriority.hnode); 1191 args->args_node_changepriority.node);
1192 1192
1193 if (!node_res) 1193 if (!node_res)
1194 return -EFAULT; 1194 return -EFAULT;
1195 1195
1196 ret = node_change_priority(node_res->hnode, 1196 ret = node_change_priority(node_res->node,
1197 args->args_node_changepriority.prio); 1197 args->args_node_changepriority.prio);
1198 1198
1199 return ret; 1199 return ret;
@@ -1213,20 +1213,20 @@ u32 nodewrap_connect(union trapped_args *args, void *pr_ctxt)
1213 struct node_res_object *node_res1, *node_res2; 1213 struct node_res_object *node_res1, *node_res2;
1214 struct node_object *node1 = NULL, *node2 = NULL; 1214 struct node_object *node1 = NULL, *node2 = NULL;
1215 1215
1216 if ((int)args->args_node_connect.hnode != DSP_HGPPNODE) { 1216 if ((int)args->args_node_connect.node != DSP_HGPPNODE) {
1217 find_node_handle(&node_res1, pr_ctxt, 1217 find_node_handle(&node_res1, pr_ctxt,
1218 args->args_node_connect.hnode); 1218 args->args_node_connect.node);
1219 if (node_res1) 1219 if (node_res1)
1220 node1 = node_res1->hnode; 1220 node1 = node_res1->node;
1221 } else { 1221 } else {
1222 node1 = args->args_node_connect.hnode; 1222 node1 = args->args_node_connect.node;
1223 } 1223 }
1224 1224
1225 if ((int)args->args_node_connect.other_node != DSP_HGPPNODE) { 1225 if ((int)args->args_node_connect.other_node != DSP_HGPPNODE) {
1226 find_node_handle(&node_res2, pr_ctxt, 1226 find_node_handle(&node_res2, pr_ctxt,
1227 args->args_node_connect.other_node); 1227 args->args_node_connect.other_node);
1228 if (node_res2) 1228 if (node_res2)
1229 node2 = node_res2->hnode; 1229 node2 = node_res2->node;
1230 } else { 1230 } else {
1231 node2 = args->args_node_connect.other_node; 1231 node2 = args->args_node_connect.other_node;
1232 } 1232 }
@@ -1253,8 +1253,8 @@ u32 nodewrap_connect(union trapped_args *args, void *pr_ctxt)
1253 if (status) 1253 if (status)
1254 goto func_cont; 1254 goto func_cont;
1255 } 1255 }
1256 if (args->args_node_connect.pattrs) { /* Optional argument */ 1256 if (args->args_node_connect.attrs) { /* Optional argument */
1257 CP_FM_USR(&attrs, args->args_node_connect.pattrs, status, 1); 1257 CP_FM_USR(&attrs, args->args_node_connect.attrs, status, 1);
1258 if (!status) 1258 if (!status)
1259 pattrs = &attrs; 1259 pattrs = &attrs;
1260 1260
@@ -1280,12 +1280,12 @@ u32 nodewrap_create(union trapped_args *args, void *pr_ctxt)
1280 u32 ret; 1280 u32 ret;
1281 struct node_res_object *node_res; 1281 struct node_res_object *node_res;
1282 1282
1283 find_node_handle(&node_res, pr_ctxt, args->args_node_create.hnode); 1283 find_node_handle(&node_res, pr_ctxt, args->args_node_create.node);
1284 1284
1285 if (!node_res) 1285 if (!node_res)
1286 return -EFAULT; 1286 return -EFAULT;
1287 1287
1288 ret = node_create(node_res->hnode); 1288 ret = node_create(node_res->node);
1289 1289
1290 return ret; 1290 return ret;
1291} 1291}
@@ -1298,7 +1298,7 @@ u32 nodewrap_delete(union trapped_args *args, void *pr_ctxt)
1298 u32 ret; 1298 u32 ret;
1299 struct node_res_object *node_res; 1299 struct node_res_object *node_res;
1300 1300
1301 find_node_handle(&node_res, pr_ctxt, args->args_node_delete.hnode); 1301 find_node_handle(&node_res, pr_ctxt, args->args_node_delete.node);
1302 1302
1303 if (!node_res) 1303 if (!node_res)
1304 return -EFAULT; 1304 return -EFAULT;
@@ -1318,24 +1318,24 @@ u32 nodewrap_free_msg_buf(union trapped_args *args, void *pr_ctxt)
1318 struct dsp_bufferattr attr; 1318 struct dsp_bufferattr attr;
1319 struct node_res_object *node_res; 1319 struct node_res_object *node_res;
1320 1320
1321 find_node_handle(&node_res, pr_ctxt, args->args_node_freemsgbuf.hnode); 1321 find_node_handle(&node_res, pr_ctxt, args->args_node_freemsgbuf.node);
1322 1322
1323 if (!node_res) 1323 if (!node_res)
1324 return -EFAULT; 1324 return -EFAULT;
1325 1325
1326 if (args->args_node_freemsgbuf.pattr) { /* Optional argument */ 1326 if (args->args_node_freemsgbuf.attr) { /* Optional argument */
1327 CP_FM_USR(&attr, args->args_node_freemsgbuf.pattr, status, 1); 1327 CP_FM_USR(&attr, args->args_node_freemsgbuf.attr, status, 1);
1328 if (!status) 1328 if (!status)
1329 pattr = &attr; 1329 pattr = &attr;
1330 1330
1331 } 1331 }
1332 1332
1333 if (!args->args_node_freemsgbuf.pbuffer) 1333 if (!args->args_node_freemsgbuf.buffer)
1334 return -EFAULT; 1334 return -EFAULT;
1335 1335
1336 if (!status) { 1336 if (!status) {
1337 status = node_free_msg_buf(node_res->hnode, 1337 status = node_free_msg_buf(node_res->node,
1338 args->args_node_freemsgbuf.pbuffer, 1338 args->args_node_freemsgbuf.buffer,
1339 pattr); 1339 pattr);
1340 } 1340 }
1341 1341
@@ -1351,14 +1351,14 @@ u32 nodewrap_get_attr(union trapped_args *args, void *pr_ctxt)
1351 struct dsp_nodeattr attr; 1351 struct dsp_nodeattr attr;
1352 struct node_res_object *node_res; 1352 struct node_res_object *node_res;
1353 1353
1354 find_node_handle(&node_res, pr_ctxt, args->args_node_getattr.hnode); 1354 find_node_handle(&node_res, pr_ctxt, args->args_node_getattr.node);
1355 1355
1356 if (!node_res) 1356 if (!node_res)
1357 return -EFAULT; 1357 return -EFAULT;
1358 1358
1359 status = node_get_attr(node_res->hnode, &attr, 1359 status = node_get_attr(node_res->node, &attr,
1360 args->args_node_getattr.attr_size); 1360 args->args_node_getattr.attr_size);
1361 CP_TO_USR(args->args_node_getattr.pattr, &attr, status, 1); 1361 CP_TO_USR(args->args_node_getattr.attr, &attr, status, 1);
1362 1362
1363 return status; 1363 return status;
1364} 1364}
@@ -1372,13 +1372,13 @@ u32 nodewrap_get_message(union trapped_args *args, void *pr_ctxt)
1372 struct dsp_msg msg; 1372 struct dsp_msg msg;
1373 struct node_res_object *node_res; 1373 struct node_res_object *node_res;
1374 1374
1375 find_node_handle(&node_res, pr_ctxt, args->args_node_getmessage.hnode); 1375 find_node_handle(&node_res, pr_ctxt, args->args_node_getmessage.node);
1376 1376
1377 if (!node_res) 1377 if (!node_res)
1378 return -EFAULT; 1378 return -EFAULT;
1379 1379
1380 status = node_get_message(node_res->hnode, &msg, 1380 status = node_get_message(node_res->node, &msg,
1381 args->args_node_getmessage.utimeout); 1381 args->args_node_getmessage.timeout);
1382 1382
1383 CP_TO_USR(args->args_node_getmessage.message, &msg, status, 1); 1383 CP_TO_USR(args->args_node_getmessage.message, &msg, status, 1);
1384 1384
@@ -1393,12 +1393,12 @@ u32 nodewrap_pause(union trapped_args *args, void *pr_ctxt)
1393 u32 ret; 1393 u32 ret;
1394 struct node_res_object *node_res; 1394 struct node_res_object *node_res;
1395 1395
1396 find_node_handle(&node_res, pr_ctxt, args->args_node_pause.hnode); 1396 find_node_handle(&node_res, pr_ctxt, args->args_node_pause.node);
1397 1397
1398 if (!node_res) 1398 if (!node_res)
1399 return -EFAULT; 1399 return -EFAULT;
1400 1400
1401 ret = node_pause(node_res->hnode); 1401 ret = node_pause(node_res->node);
1402 1402
1403 return ret; 1403 return ret;
1404} 1404}
@@ -1412,7 +1412,7 @@ u32 nodewrap_put_message(union trapped_args *args, void *pr_ctxt)
1412 struct dsp_msg msg; 1412 struct dsp_msg msg;
1413 struct node_res_object *node_res; 1413 struct node_res_object *node_res;
1414 1414
1415 find_node_handle(&node_res, pr_ctxt, args->args_node_putmessage.hnode); 1415 find_node_handle(&node_res, pr_ctxt, args->args_node_putmessage.node);
1416 1416
1417 if (!node_res) 1417 if (!node_res)
1418 return -EFAULT; 1418 return -EFAULT;
@@ -1421,8 +1421,8 @@ u32 nodewrap_put_message(union trapped_args *args, void *pr_ctxt)
1421 1421
1422 if (!status) { 1422 if (!status) {
1423 status = 1423 status =
1424 node_put_message(node_res->hnode, &msg, 1424 node_put_message(node_res->node, &msg,
1425 args->args_node_putmessage.utimeout); 1425 args->args_node_putmessage.timeout);
1426 } 1426 }
1427 1427
1428 return status; 1428 return status;
@@ -1438,25 +1438,25 @@ u32 nodewrap_register_notify(union trapped_args *args, void *pr_ctxt)
1438 struct node_res_object *node_res; 1438 struct node_res_object *node_res;
1439 1439
1440 find_node_handle(&node_res, pr_ctxt, 1440 find_node_handle(&node_res, pr_ctxt,
1441 args->args_node_registernotify.hnode); 1441 args->args_node_registernotify.node);
1442 1442
1443 if (!node_res) 1443 if (!node_res)
1444 return -EFAULT; 1444 return -EFAULT;
1445 1445
1446 /* Initialize the notification data structure */ 1446 /* Initialize the notification data structure */
1447 notification.ps_name = NULL; 1447 notification.name = NULL;
1448 notification.handle = NULL; 1448 notification.handle = NULL;
1449 1449
1450 if (!args->args_proc_register_notify.event_mask) 1450 if (!args->args_proc_register_notify.event_mask)
1451 CP_FM_USR(&notification, 1451 CP_FM_USR(&notification,
1452 args->args_proc_register_notify.hnotification, 1452 args->args_proc_register_notify.notification,
1453 status, 1); 1453 status, 1);
1454 1454
1455 status = node_register_notify(node_res->hnode, 1455 status = node_register_notify(node_res->node,
1456 args->args_node_registernotify.event_mask, 1456 args->args_node_registernotify.event_mask,
1457 args->args_node_registernotify. 1457 args->args_node_registernotify.
1458 notify_type, &notification); 1458 notify_type, &notification);
1459 CP_TO_USR(args->args_node_registernotify.hnotification, &notification, 1459 CP_TO_USR(args->args_node_registernotify.notification, &notification,
1460 status, 1); 1460 status, 1);
1461 return status; 1461 return status;
1462} 1462}
@@ -1469,12 +1469,12 @@ u32 nodewrap_run(union trapped_args *args, void *pr_ctxt)
1469 u32 ret; 1469 u32 ret;
1470 struct node_res_object *node_res; 1470 struct node_res_object *node_res;
1471 1471
1472 find_node_handle(&node_res, pr_ctxt, args->args_node_run.hnode); 1472 find_node_handle(&node_res, pr_ctxt, args->args_node_run.node);
1473 1473
1474 if (!node_res) 1474 if (!node_res)
1475 return -EFAULT; 1475 return -EFAULT;
1476 1476
1477 ret = node_run(node_res->hnode); 1477 ret = node_run(node_res->node);
1478 1478
1479 return ret; 1479 return ret;
1480} 1480}
@@ -1488,14 +1488,14 @@ u32 nodewrap_terminate(union trapped_args *args, void *pr_ctxt)
1488 int tempstatus; 1488 int tempstatus;
1489 struct node_res_object *node_res; 1489 struct node_res_object *node_res;
1490 1490
1491 find_node_handle(&node_res, pr_ctxt, args->args_node_terminate.hnode); 1491 find_node_handle(&node_res, pr_ctxt, args->args_node_terminate.node);
1492 1492
1493 if (!node_res) 1493 if (!node_res)
1494 return -EFAULT; 1494 return -EFAULT;
1495 1495
1496 status = node_terminate(node_res->hnode, &tempstatus); 1496 status = node_terminate(node_res->node, &tempstatus);
1497 1497
1498 CP_TO_USR(args->args_node_terminate.pstatus, &tempstatus, status, 1); 1498 CP_TO_USR(args->args_node_terminate.status, &tempstatus, status, 1);
1499 1499
1500 return status; 1500 return status;
1501} 1501}
@@ -1508,7 +1508,7 @@ u32 nodewrap_get_uuid_props(union trapped_args *args, void *pr_ctxt)
1508 int status = 0; 1508 int status = 0;
1509 struct dsp_uuid node_uuid; 1509 struct dsp_uuid node_uuid;
1510 struct dsp_ndbprops *pnode_props = NULL; 1510 struct dsp_ndbprops *pnode_props = NULL;
1511 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 1511 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
1512 1512
1513 CP_FM_USR(&node_uuid, args->args_node_getuuidprops.node_id_ptr, status, 1513 CP_FM_USR(&node_uuid, args->args_node_getuuidprops.node_id_ptr, status,
1514 1); 1514 1);
@@ -1551,7 +1551,7 @@ u32 strmwrap_allocate_buffer(union trapped_args *args, void *pr_ctxt)
1551 struct strm_res_object *strm_res; 1551 struct strm_res_object *strm_res;
1552 1552
1553 find_strm_handle(&strm_res, pr_ctxt, 1553 find_strm_handle(&strm_res, pr_ctxt,
1554 args->args_strm_allocatebuffer.hstream); 1554 args->args_strm_allocatebuffer.stream);
1555 1555
1556 if (!strm_res) 1556 if (!strm_res)
1557 return -EFAULT; 1557 return -EFAULT;
@@ -1564,7 +1564,7 @@ u32 strmwrap_allocate_buffer(union trapped_args *args, void *pr_ctxt)
1564 return -ENOMEM; 1564 return -ENOMEM;
1565 1565
1566 status = strm_allocate_buffer(strm_res, 1566 status = strm_allocate_buffer(strm_res,
1567 args->args_strm_allocatebuffer.usize, 1567 args->args_strm_allocatebuffer.size,
1568 ap_buffer, num_bufs, pr_ctxt); 1568 ap_buffer, num_bufs, pr_ctxt);
1569 if (!status) { 1569 if (!status) {
1570 CP_TO_USR(args->args_strm_allocatebuffer.ap_buffer, ap_buffer, 1570 CP_TO_USR(args->args_strm_allocatebuffer.ap_buffer, ap_buffer,
@@ -1587,7 +1587,7 @@ u32 strmwrap_close(union trapped_args *args, void *pr_ctxt)
1587{ 1587{
1588 struct strm_res_object *strm_res; 1588 struct strm_res_object *strm_res;
1589 1589
1590 find_strm_handle(&strm_res, pr_ctxt, args->args_strm_close.hstream); 1590 find_strm_handle(&strm_res, pr_ctxt, args->args_strm_close.stream);
1591 1591
1592 if (!strm_res) 1592 if (!strm_res)
1593 return -EFAULT; 1593 return -EFAULT;
@@ -1606,7 +1606,7 @@ u32 strmwrap_free_buffer(union trapped_args *args, void *pr_ctxt)
1606 struct strm_res_object *strm_res; 1606 struct strm_res_object *strm_res;
1607 1607
1608 find_strm_handle(&strm_res, pr_ctxt, 1608 find_strm_handle(&strm_res, pr_ctxt,
1609 args->args_strm_freebuffer.hstream); 1609 args->args_strm_freebuffer.stream);
1610 1610
1611 if (!strm_res) 1611 if (!strm_res)
1612 return -EFAULT; 1612 return -EFAULT;
@@ -1654,7 +1654,7 @@ u32 strmwrap_get_info(union trapped_args *args, void *pr_ctxt)
1654 struct strm_res_object *strm_res; 1654 struct strm_res_object *strm_res;
1655 1655
1656 find_strm_handle(&strm_res, pr_ctxt, 1656 find_strm_handle(&strm_res, pr_ctxt,
1657 args->args_strm_getinfo.hstream); 1657 args->args_strm_getinfo.stream);
1658 1658
1659 if (!strm_res) 1659 if (!strm_res)
1660 return -EFAULT; 1660 return -EFAULT;
@@ -1665,7 +1665,7 @@ u32 strmwrap_get_info(union trapped_args *args, void *pr_ctxt)
1665 strm_info.user_strm = &user; 1665 strm_info.user_strm = &user;
1666 1666
1667 if (!status) { 1667 if (!status) {
1668 status = strm_get_info(strm_res->hstream, 1668 status = strm_get_info(strm_res->stream,
1669 &strm_info, 1669 &strm_info,
1670 args->args_strm_getinfo. 1670 args->args_strm_getinfo.
1671 stream_info_size); 1671 stream_info_size);
@@ -1684,12 +1684,12 @@ u32 strmwrap_idle(union trapped_args *args, void *pr_ctxt)
1684 u32 ret; 1684 u32 ret;
1685 struct strm_res_object *strm_res; 1685 struct strm_res_object *strm_res;
1686 1686
1687 find_strm_handle(&strm_res, pr_ctxt, args->args_strm_idle.hstream); 1687 find_strm_handle(&strm_res, pr_ctxt, args->args_strm_idle.stream);
1688 1688
1689 if (!strm_res) 1689 if (!strm_res)
1690 return -EFAULT; 1690 return -EFAULT;
1691 1691
1692 ret = strm_idle(strm_res->hstream, args->args_strm_idle.flush_flag); 1692 ret = strm_idle(strm_res->stream, args->args_strm_idle.flush_flag);
1693 1693
1694 return ret; 1694 return ret;
1695} 1695}
@@ -1702,22 +1702,22 @@ u32 strmwrap_issue(union trapped_args *args, void *pr_ctxt)
1702 int status = 0; 1702 int status = 0;
1703 struct strm_res_object *strm_res; 1703 struct strm_res_object *strm_res;
1704 1704
1705 find_strm_handle(&strm_res, pr_ctxt, args->args_strm_issue.hstream); 1705 find_strm_handle(&strm_res, pr_ctxt, args->args_strm_issue.stream);
1706 1706
1707 if (!strm_res) 1707 if (!strm_res)
1708 return -EFAULT; 1708 return -EFAULT;
1709 1709
1710 if (!args->args_strm_issue.pbuffer) 1710 if (!args->args_strm_issue.buffer)
1711 return -EFAULT; 1711 return -EFAULT;
1712 1712
1713 /* No need of doing CP_FM_USR for the user buffer (pbuffer) 1713 /* No need of doing CP_FM_USR for the user buffer (pbuffer)
1714 as this is done in Bridge internal function bridge_chnl_add_io_req 1714 as this is done in Bridge internal function bridge_chnl_add_io_req
1715 in chnl_sm.c */ 1715 in chnl_sm.c */
1716 status = strm_issue(strm_res->hstream, 1716 status = strm_issue(strm_res->stream,
1717 args->args_strm_issue.pbuffer, 1717 args->args_strm_issue.buffer,
1718 args->args_strm_issue.dw_bytes, 1718 args->args_strm_issue.bytes,
1719 args->args_strm_issue.dw_buf_size, 1719 args->args_strm_issue.buf_size,
1720 args->args_strm_issue.dw_arg); 1720 args->args_strm_issue.arg);
1721 1721
1722 return status; 1722 return status;
1723} 1723}
@@ -1734,7 +1734,7 @@ u32 strmwrap_open(union trapped_args *args, void *pr_ctxt)
1734 struct node_res_object *node_res; 1734 struct node_res_object *node_res;
1735 int strmid; 1735 int strmid;
1736 1736
1737 find_node_handle(&node_res, pr_ctxt, args->args_strm_open.hnode); 1737 find_node_handle(&node_res, pr_ctxt, args->args_strm_open.node);
1738 1738
1739 if (!node_res) 1739 if (!node_res)
1740 return -EFAULT; 1740 return -EFAULT;
@@ -1750,13 +1750,13 @@ u32 strmwrap_open(union trapped_args *args, void *pr_ctxt)
1750 } 1750 }
1751 1751
1752 } 1752 }
1753 status = strm_open(node_res->hnode, 1753 status = strm_open(node_res->node,
1754 args->args_strm_open.direction, 1754 args->args_strm_open.direction,
1755 args->args_strm_open.index, &attr, &strm_res_obj, 1755 args->args_strm_open.index, &attr, &strm_res_obj,
1756 pr_ctxt); 1756 pr_ctxt);
1757 if (!status) { 1757 if (!status) {
1758 strmid = strm_res_obj->id + 1; 1758 strmid = strm_res_obj->id + 1;
1759 CP_TO_USR(args->args_strm_open.ph_stream, &strmid, status, 1); 1759 CP_TO_USR(args->args_strm_open.stream, &strmid, status, 1);
1760 } 1760 }
1761 return status; 1761 return status;
1762} 1762}
@@ -1773,16 +1773,16 @@ u32 strmwrap_reclaim(union trapped_args *args, void *pr_ctxt)
1773 u32 ul_buf_size; 1773 u32 ul_buf_size;
1774 struct strm_res_object *strm_res; 1774 struct strm_res_object *strm_res;
1775 1775
1776 find_strm_handle(&strm_res, pr_ctxt, args->args_strm_reclaim.hstream); 1776 find_strm_handle(&strm_res, pr_ctxt, args->args_strm_reclaim.stream);
1777 1777
1778 if (!strm_res) 1778 if (!strm_res)
1779 return -EFAULT; 1779 return -EFAULT;
1780 1780
1781 status = strm_reclaim(strm_res->hstream, &buf_ptr, 1781 status = strm_reclaim(strm_res->stream, &buf_ptr,
1782 &ul_bytes, &ul_buf_size, &dw_arg); 1782 &ul_bytes, &ul_buf_size, &dw_arg);
1783 CP_TO_USR(args->args_strm_reclaim.buf_ptr, &buf_ptr, status, 1); 1783 CP_TO_USR(args->args_strm_reclaim.buf_ptr, &buf_ptr, status, 1);
1784 CP_TO_USR(args->args_strm_reclaim.bytes, &ul_bytes, status, 1); 1784 CP_TO_USR(args->args_strm_reclaim.bytes, &ul_bytes, status, 1);
1785 CP_TO_USR(args->args_strm_reclaim.pdw_arg, &dw_arg, status, 1); 1785 CP_TO_USR(args->args_strm_reclaim.arg, &dw_arg, status, 1);
1786 1786
1787 if (args->args_strm_reclaim.buf_size_ptr != NULL) { 1787 if (args->args_strm_reclaim.buf_size_ptr != NULL) {
1788 CP_TO_USR(args->args_strm_reclaim.buf_size_ptr, &ul_buf_size, 1788 CP_TO_USR(args->args_strm_reclaim.buf_size_ptr, &ul_buf_size,
@@ -1802,20 +1802,20 @@ u32 strmwrap_register_notify(union trapped_args *args, void *pr_ctxt)
1802 struct strm_res_object *strm_res; 1802 struct strm_res_object *strm_res;
1803 1803
1804 find_strm_handle(&strm_res, pr_ctxt, 1804 find_strm_handle(&strm_res, pr_ctxt,
1805 args->args_strm_registernotify.hstream); 1805 args->args_strm_registernotify.stream);
1806 1806
1807 if (!strm_res) 1807 if (!strm_res)
1808 return -EFAULT; 1808 return -EFAULT;
1809 1809
1810 /* Initialize the notification data structure */ 1810 /* Initialize the notification data structure */
1811 notification.ps_name = NULL; 1811 notification.name = NULL;
1812 notification.handle = NULL; 1812 notification.handle = NULL;
1813 1813
1814 status = strm_register_notify(strm_res->hstream, 1814 status = strm_register_notify(strm_res->stream,
1815 args->args_strm_registernotify.event_mask, 1815 args->args_strm_registernotify.event_mask,
1816 args->args_strm_registernotify. 1816 args->args_strm_registernotify.
1817 notify_type, &notification); 1817 notify_type, &notification);
1818 CP_TO_USR(args->args_strm_registernotify.hnotification, &notification, 1818 CP_TO_USR(args->args_strm_registernotify.notification, &notification,
1819 status, 1); 1819 status, 1);
1820 1820
1821 return status; 1821 return status;
@@ -1848,14 +1848,14 @@ u32 strmwrap_select(union trapped_args *args, void *pr_ctxt)
1848 if (!strm_res) 1848 if (!strm_res)
1849 return -EFAULT; 1849 return -EFAULT;
1850 1850
1851 strm_tab[i] = strm_res->hstream; 1851 strm_tab[i] = strm_res->stream;
1852 } 1852 }
1853 1853
1854 if (!status) { 1854 if (!status) {
1855 status = strm_select(strm_tab, args->args_strm_select.strm_num, 1855 status = strm_select(strm_tab, args->args_strm_select.strm_num,
1856 &mask, args->args_strm_select.utimeout); 1856 &mask, args->args_strm_select.timeout);
1857 } 1857 }
1858 CP_TO_USR(args->args_strm_select.pmask, &mask, status, 1); 1858 CP_TO_USR(args->args_strm_select.mask, &mask, status, 1);
1859 return status; 1859 return status;
1860} 1860}
1861 1861
@@ -1888,11 +1888,11 @@ u32 cmmwrap_get_handle(union trapped_args *args, void *pr_ctxt)
1888{ 1888{
1889 int status = 0; 1889 int status = 0;
1890 struct cmm_object *hcmm_mgr; 1890 struct cmm_object *hcmm_mgr;
1891 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; 1891 void *hprocessor = ((struct process_context *)pr_ctxt)->processor;
1892 1892
1893 status = cmm_get_handle(hprocessor, &hcmm_mgr); 1893 status = cmm_get_handle(hprocessor, &hcmm_mgr);
1894 1894
1895 CP_TO_USR(args->args_cmm_gethandle.ph_cmm_mgr, &hcmm_mgr, status, 1); 1895 CP_TO_USR(args->args_cmm_gethandle.cmm_mgr, &hcmm_mgr, status, 1);
1896 1896
1897 return status; 1897 return status;
1898} 1898}
@@ -1905,7 +1905,7 @@ u32 cmmwrap_get_info(union trapped_args *args, void *pr_ctxt)
1905 int status = 0; 1905 int status = 0;
1906 struct cmm_info cmm_info_obj; 1906 struct cmm_info cmm_info_obj;
1907 1907
1908 status = cmm_get_info(args->args_cmm_getinfo.hcmm_mgr, &cmm_info_obj); 1908 status = cmm_get_info(args->args_cmm_getinfo.cmm_mgr, &cmm_info_obj);
1909 1909
1910 CP_TO_USR(args->args_cmm_getinfo.cmm_info_obj, &cmm_info_obj, status, 1910 CP_TO_USR(args->args_cmm_getinfo.cmm_info_obj, &cmm_info_obj, status,
1911 1); 1911 1);
diff --git a/drivers/staging/tidspbridge/pmgr/io.c b/drivers/staging/tidspbridge/pmgr/io.c
index 20cbb9fe40c..65245f310f8 100644
--- a/drivers/staging/tidspbridge/pmgr/io.c
+++ b/drivers/staging/tidspbridge/pmgr/io.c
@@ -31,7 +31,6 @@
31 31
32/* ----------------------------------- This */ 32/* ----------------------------------- This */
33#include <ioobj.h> 33#include <ioobj.h>
34#include <dspbridge/iodefs.h>
35#include <dspbridge/io.h> 34#include <dspbridge/io.h>
36 35
37/* ----------------------------------- Globals */ 36/* ----------------------------------- Globals */
@@ -58,7 +57,7 @@ int io_create(struct io_mgr **io_man, struct dev_object *hdev_obj,
58 *io_man = NULL; 57 *io_man = NULL;
59 58
60 /* A memory base of 0 implies no memory base: */ 59 /* A memory base of 0 implies no memory base: */
61 if ((mgr_attrts->shm_base != 0) && (mgr_attrts->usm_length == 0)) 60 if ((mgr_attrts->shm_base != 0) && (mgr_attrts->sm_length == 0))
62 status = -EINVAL; 61 status = -EINVAL;
63 62
64 if (mgr_attrts->word_size == 0) 63 if (mgr_attrts->word_size == 0)
@@ -68,13 +67,13 @@ int io_create(struct io_mgr **io_man, struct dev_object *hdev_obj,
68 dev_get_intf_fxns(hdev_obj, &intf_fxns); 67 dev_get_intf_fxns(hdev_obj, &intf_fxns);
69 68
70 /* Let Bridge channel module finish the create: */ 69 /* Let Bridge channel module finish the create: */
71 status = (*intf_fxns->pfn_io_create) (&hio_mgr, hdev_obj, 70 status = (*intf_fxns->io_create) (&hio_mgr, hdev_obj,
72 mgr_attrts); 71 mgr_attrts);
73 72
74 if (!status) { 73 if (!status) {
75 pio_mgr = (struct io_mgr_ *)hio_mgr; 74 pio_mgr = (struct io_mgr_ *)hio_mgr;
76 pio_mgr->intf_fxns = intf_fxns; 75 pio_mgr->intf_fxns = intf_fxns;
77 pio_mgr->hdev_obj = hdev_obj; 76 pio_mgr->dev_obj = hdev_obj;
78 77
79 /* Return the new channel manager handle: */ 78 /* Return the new channel manager handle: */
80 *io_man = hio_mgr; 79 *io_man = hio_mgr;
@@ -100,7 +99,7 @@ int io_destroy(struct io_mgr *hio_mgr)
100 intf_fxns = pio_mgr->intf_fxns; 99 intf_fxns = pio_mgr->intf_fxns;
101 100
102 /* Let Bridge channel module destroy the io_mgr: */ 101 /* Let Bridge channel module destroy the io_mgr: */
103 status = (*intf_fxns->pfn_io_destroy) (hio_mgr); 102 status = (*intf_fxns->io_destroy) (hio_mgr);
104 103
105 return status; 104 return status;
106} 105}
diff --git a/drivers/staging/tidspbridge/pmgr/ioobj.h b/drivers/staging/tidspbridge/pmgr/ioobj.h
index f46355fa7b2..7defd948145 100644
--- a/drivers/staging/tidspbridge/pmgr/ioobj.h
+++ b/drivers/staging/tidspbridge/pmgr/ioobj.h
@@ -29,10 +29,10 @@
29 */ 29 */
30struct io_mgr_ { 30struct io_mgr_ {
31 /* These must be the first fields in a io_mgr struct: */ 31 /* These must be the first fields in a io_mgr struct: */
32 struct bridge_dev_context *hbridge_context; /* Bridge context. */ 32 struct bridge_dev_context *bridge_context; /* Bridge context. */
33 /* Function interface to Bridge driver. */ 33 /* Function interface to Bridge driver. */
34 struct bridge_drv_interface *intf_fxns; 34 struct bridge_drv_interface *intf_fxns;
35 struct dev_object *hdev_obj; /* Device this board represents. */ 35 struct dev_object *dev_obj; /* Device this board represents. */
36}; 36};
37 37
38#endif /* IOOBJ_ */ 38#endif /* IOOBJ_ */
diff --git a/drivers/staging/tidspbridge/pmgr/msg.c b/drivers/staging/tidspbridge/pmgr/msg.c
index abd43659062..a6916039eed 100644
--- a/drivers/staging/tidspbridge/pmgr/msg.c
+++ b/drivers/staging/tidspbridge/pmgr/msg.c
@@ -64,7 +64,7 @@ int msg_create(struct msg_mgr **msg_man,
64 64
65 /* Let Bridge message module finish the create: */ 65 /* Let Bridge message module finish the create: */
66 status = 66 status =
67 (*intf_fxns->pfn_msg_create) (&hmsg_mgr, hdev_obj, msg_callback); 67 (*intf_fxns->msg_create) (&hmsg_mgr, hdev_obj, msg_callback);
68 68
69 if (!status) { 69 if (!status) {
70 /* Fill in DSP API message module's fields of the msg_mgr 70 /* Fill in DSP API message module's fields of the msg_mgr
@@ -96,7 +96,7 @@ void msg_delete(struct msg_mgr *hmsg_mgr)
96 intf_fxns = msg_mgr_obj->intf_fxns; 96 intf_fxns = msg_mgr_obj->intf_fxns;
97 97
98 /* Let Bridge message module destroy the msg_mgr: */ 98 /* Let Bridge message module destroy the msg_mgr: */
99 (*intf_fxns->pfn_msg_delete) (hmsg_mgr); 99 (*intf_fxns->msg_delete) (hmsg_mgr);
100 } else { 100 } else {
101 dev_dbg(bridge, "%s: Error hmsg_mgr handle: %p\n", 101 dev_dbg(bridge, "%s: Error hmsg_mgr handle: %p\n",
102 __func__, hmsg_mgr); 102 __func__, hmsg_mgr);
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index 3581a55ed4d..a7e407e2518 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -134,7 +134,7 @@ int dcd_create_manager(char *sz_zl_dll_name,
134 DBC_REQUIRE(refs >= 0); 134 DBC_REQUIRE(refs >= 0);
135 DBC_REQUIRE(dcd_mgr); 135 DBC_REQUIRE(dcd_mgr);
136 136
137 status = cod_create(&cod_mgr, sz_zl_dll_name, NULL); 137 status = cod_create(&cod_mgr, sz_zl_dll_name);
138 if (status) 138 if (status)
139 goto func_end; 139 goto func_end;
140 140
@@ -1020,8 +1020,6 @@ static s32 atoi(char *psz_buf)
1020{ 1020{
1021 char *pch = psz_buf; 1021 char *pch = psz_buf;
1022 s32 base = 0; 1022 s32 base = 0;
1023 unsigned long res;
1024 int ret_val;
1025 1023
1026 while (isspace(*pch)) 1024 while (isspace(*pch))
1027 pch++; 1025 pch++;
@@ -1033,9 +1031,7 @@ static s32 atoi(char *psz_buf)
1033 base = 16; 1031 base = 16;
1034 } 1032 }
1035 1033
1036 ret_val = strict_strtoul(pch, base, &res); 1034 return simple_strtoul(pch, NULL, base);
1037
1038 return ret_val ? : res;
1039} 1035}
1040 1036
1041/* 1037/*
@@ -1116,14 +1112,14 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
1116 dsp_resource_reqmts.program_mem_size = atoi(token); 1112 dsp_resource_reqmts.program_mem_size = atoi(token);
1117 token = strsep(&psz_cur, seps); 1113 token = strsep(&psz_cur, seps);
1118 gen_obj->obj_data.node_obj.ndb_props. 1114 gen_obj->obj_data.node_obj.ndb_props.
1119 dsp_resource_reqmts.uwc_execution_time = atoi(token); 1115 dsp_resource_reqmts.wc_execution_time = atoi(token);
1120 token = strsep(&psz_cur, seps); 1116 token = strsep(&psz_cur, seps);
1121 gen_obj->obj_data.node_obj.ndb_props. 1117 gen_obj->obj_data.node_obj.ndb_props.
1122 dsp_resource_reqmts.uwc_period = atoi(token); 1118 dsp_resource_reqmts.wc_period = atoi(token);
1123 token = strsep(&psz_cur, seps); 1119 token = strsep(&psz_cur, seps);
1124 1120
1125 gen_obj->obj_data.node_obj.ndb_props. 1121 gen_obj->obj_data.node_obj.ndb_props.
1126 dsp_resource_reqmts.uwc_deadline = atoi(token); 1122 dsp_resource_reqmts.wc_deadline = atoi(token);
1127 token = strsep(&psz_cur, seps); 1123 token = strsep(&psz_cur, seps);
1128 1124
1129 gen_obj->obj_data.node_obj.ndb_props. 1125 gen_obj->obj_data.node_obj.ndb_props.
@@ -1166,40 +1162,40 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
1166 atoi(token); 1162 atoi(token);
1167 token = strsep(&psz_cur, seps); 1163 token = strsep(&psz_cur, seps);
1168 1164
1169 /* u32 utimeout */ 1165 /* u32 timeout */
1170 gen_obj->obj_data.node_obj.ndb_props.utimeout = atoi(token); 1166 gen_obj->obj_data.node_obj.ndb_props.timeout = atoi(token);
1171 token = strsep(&psz_cur, seps); 1167 token = strsep(&psz_cur, seps);
1172 1168
1173 /* char *pstr_create_phase_fxn */ 1169 /* char *str_create_phase_fxn */
1174 DBC_REQUIRE(token); 1170 DBC_REQUIRE(token);
1175 token_len = strlen(token); 1171 token_len = strlen(token);
1176 gen_obj->obj_data.node_obj.pstr_create_phase_fxn = 1172 gen_obj->obj_data.node_obj.str_create_phase_fxn =
1177 kzalloc(token_len + 1, GFP_KERNEL); 1173 kzalloc(token_len + 1, GFP_KERNEL);
1178 strncpy(gen_obj->obj_data.node_obj.pstr_create_phase_fxn, 1174 strncpy(gen_obj->obj_data.node_obj.str_create_phase_fxn,
1179 token, token_len); 1175 token, token_len);
1180 gen_obj->obj_data.node_obj.pstr_create_phase_fxn[token_len] = 1176 gen_obj->obj_data.node_obj.str_create_phase_fxn[token_len] =
1181 '\0'; 1177 '\0';
1182 token = strsep(&psz_cur, seps); 1178 token = strsep(&psz_cur, seps);
1183 1179
1184 /* char *pstr_execute_phase_fxn */ 1180 /* char *str_execute_phase_fxn */
1185 DBC_REQUIRE(token); 1181 DBC_REQUIRE(token);
1186 token_len = strlen(token); 1182 token_len = strlen(token);
1187 gen_obj->obj_data.node_obj.pstr_execute_phase_fxn = 1183 gen_obj->obj_data.node_obj.str_execute_phase_fxn =
1188 kzalloc(token_len + 1, GFP_KERNEL); 1184 kzalloc(token_len + 1, GFP_KERNEL);
1189 strncpy(gen_obj->obj_data.node_obj.pstr_execute_phase_fxn, 1185 strncpy(gen_obj->obj_data.node_obj.str_execute_phase_fxn,
1190 token, token_len); 1186 token, token_len);
1191 gen_obj->obj_data.node_obj.pstr_execute_phase_fxn[token_len] = 1187 gen_obj->obj_data.node_obj.str_execute_phase_fxn[token_len] =
1192 '\0'; 1188 '\0';
1193 token = strsep(&psz_cur, seps); 1189 token = strsep(&psz_cur, seps);
1194 1190
1195 /* char *pstr_delete_phase_fxn */ 1191 /* char *str_delete_phase_fxn */
1196 DBC_REQUIRE(token); 1192 DBC_REQUIRE(token);
1197 token_len = strlen(token); 1193 token_len = strlen(token);
1198 gen_obj->obj_data.node_obj.pstr_delete_phase_fxn = 1194 gen_obj->obj_data.node_obj.str_delete_phase_fxn =
1199 kzalloc(token_len + 1, GFP_KERNEL); 1195 kzalloc(token_len + 1, GFP_KERNEL);
1200 strncpy(gen_obj->obj_data.node_obj.pstr_delete_phase_fxn, 1196 strncpy(gen_obj->obj_data.node_obj.str_delete_phase_fxn,
1201 token, token_len); 1197 token, token_len);
1202 gen_obj->obj_data.node_obj.pstr_delete_phase_fxn[token_len] = 1198 gen_obj->obj_data.node_obj.str_delete_phase_fxn[token_len] =
1203 '\0'; 1199 '\0';
1204 token = strsep(&psz_cur, seps); 1200 token = strsep(&psz_cur, seps);
1205 1201
@@ -1211,34 +1207,34 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
1211 gen_obj->obj_data.node_obj.msg_notify_type = atoi(token); 1207 gen_obj->obj_data.node_obj.msg_notify_type = atoi(token);
1212 token = strsep(&psz_cur, seps); 1208 token = strsep(&psz_cur, seps);
1213 1209
1214 /* char *pstr_i_alg_name */ 1210 /* char *str_i_alg_name */
1215 if (token) { 1211 if (token) {
1216 token_len = strlen(token); 1212 token_len = strlen(token);
1217 gen_obj->obj_data.node_obj.pstr_i_alg_name = 1213 gen_obj->obj_data.node_obj.str_i_alg_name =
1218 kzalloc(token_len + 1, GFP_KERNEL); 1214 kzalloc(token_len + 1, GFP_KERNEL);
1219 strncpy(gen_obj->obj_data.node_obj.pstr_i_alg_name, 1215 strncpy(gen_obj->obj_data.node_obj.str_i_alg_name,
1220 token, token_len); 1216 token, token_len);
1221 gen_obj->obj_data.node_obj.pstr_i_alg_name[token_len] = 1217 gen_obj->obj_data.node_obj.str_i_alg_name[token_len] =
1222 '\0'; 1218 '\0';
1223 token = strsep(&psz_cur, seps); 1219 token = strsep(&psz_cur, seps);
1224 } 1220 }
1225 1221
1226 /* Load type (static, dynamic, or overlay) */ 1222 /* Load type (static, dynamic, or overlay) */
1227 if (token) { 1223 if (token) {
1228 gen_obj->obj_data.node_obj.us_load_type = atoi(token); 1224 gen_obj->obj_data.node_obj.load_type = atoi(token);
1229 token = strsep(&psz_cur, seps); 1225 token = strsep(&psz_cur, seps);
1230 } 1226 }
1231 1227
1232 /* Dynamic load data requirements */ 1228 /* Dynamic load data requirements */
1233 if (token) { 1229 if (token) {
1234 gen_obj->obj_data.node_obj.ul_data_mem_seg_mask = 1230 gen_obj->obj_data.node_obj.data_mem_seg_mask =
1235 atoi(token); 1231 atoi(token);
1236 token = strsep(&psz_cur, seps); 1232 token = strsep(&psz_cur, seps);
1237 } 1233 }
1238 1234
1239 /* Dynamic load code requirements */ 1235 /* Dynamic load code requirements */
1240 if (token) { 1236 if (token) {
1241 gen_obj->obj_data.node_obj.ul_code_mem_seg_mask = 1237 gen_obj->obj_data.node_obj.code_mem_seg_mask =
1242 atoi(token); 1238 atoi(token);
1243 token = strsep(&psz_cur, seps); 1239 token = strsep(&psz_cur, seps);
1244 } 1240 }
@@ -1257,7 +1253,7 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
1257 /* Heap Size for the node */ 1253 /* Heap Size for the node */
1258 gen_obj->obj_data.node_obj. 1254 gen_obj->obj_data.node_obj.
1259 ndb_props.node_profiles[i]. 1255 ndb_props.node_profiles[i].
1260 ul_heap_size = atoi(token); 1256 heap_size = atoi(token);
1261 } 1257 }
1262 } 1258 }
1263 } 1259 }
@@ -1289,10 +1285,10 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
1289 gen_obj->obj_data.proc_info.clock_rate = atoi(token); 1285 gen_obj->obj_data.proc_info.clock_rate = atoi(token);
1290 token = strsep(&psz_cur, seps); 1286 token = strsep(&psz_cur, seps);
1291 1287
1292 gen_obj->obj_data.proc_info.ul_internal_mem_size = atoi(token); 1288 gen_obj->obj_data.proc_info.internal_mem_size = atoi(token);
1293 token = strsep(&psz_cur, seps); 1289 token = strsep(&psz_cur, seps);
1294 1290
1295 gen_obj->obj_data.proc_info.ul_external_mem_size = atoi(token); 1291 gen_obj->obj_data.proc_info.external_mem_size = atoi(token);
1296 token = strsep(&psz_cur, seps); 1292 token = strsep(&psz_cur, seps);
1297 1293
1298 gen_obj->obj_data.proc_info.processor_id = atoi(token); 1294 gen_obj->obj_data.proc_info.processor_id = atoi(token);
@@ -1312,11 +1308,11 @@ static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size,
1312 for (entry_id = 0; entry_id < 7; entry_id++) { 1308 for (entry_id = 0; entry_id < 7; entry_id++) {
1313 token = strsep(&psz_cur, seps); 1309 token = strsep(&psz_cur, seps);
1314 gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id]. 1310 gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id].
1315 ul_gpp_phys = atoi(token); 1311 gpp_phys = atoi(token);
1316 1312
1317 token = strsep(&psz_cur, seps); 1313 token = strsep(&psz_cur, seps);
1318 gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id]. 1314 gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id].
1319 ul_dsp_virt = atoi(token); 1315 dsp_virt = atoi(token);
1320 } 1316 }
1321#endif 1317#endif
1322 1318
diff --git a/drivers/staging/tidspbridge/rmgr/disp.c b/drivers/staging/tidspbridge/rmgr/disp.c
index b7ce4353e06..a9aa22f3b4f 100644
--- a/drivers/staging/tidspbridge/rmgr/disp.c
+++ b/drivers/staging/tidspbridge/rmgr/disp.c
@@ -58,15 +58,15 @@
58 * ======== disp_object ======== 58 * ======== disp_object ========
59 */ 59 */
60struct disp_object { 60struct disp_object {
61 struct dev_object *hdev_obj; /* Device for this processor */ 61 struct dev_object *dev_obj; /* Device for this processor */
62 /* Function interface to Bridge driver */ 62 /* Function interface to Bridge driver */
63 struct bridge_drv_interface *intf_fxns; 63 struct bridge_drv_interface *intf_fxns;
64 struct chnl_mgr *hchnl_mgr; /* Channel manager */ 64 struct chnl_mgr *chnl_mgr; /* Channel manager */
65 struct chnl_object *chnl_to_dsp; /* Chnl for commands to RMS */ 65 struct chnl_object *chnl_to_dsp; /* Chnl for commands to RMS */
66 struct chnl_object *chnl_from_dsp; /* Chnl for replies from RMS */ 66 struct chnl_object *chnl_from_dsp; /* Chnl for replies from RMS */
67 u8 *pbuf; /* Buffer for commands, replies */ 67 u8 *buf; /* Buffer for commands, replies */
68 u32 ul_bufsize; /* pbuf size in bytes */ 68 u32 bufsize; /* buf size in bytes */
69 u32 ul_bufsize_rms; /* pbuf size in RMS words */ 69 u32 bufsize_rms; /* buf size in RMS words */
70 u32 char_size; /* Size of DSP character */ 70 u32 char_size; /* Size of DSP character */
71 u32 word_size; /* Size of DSP word */ 71 u32 word_size; /* Size of DSP word */
72 u32 data_mau_size; /* Size of DSP Data MAU */ 72 u32 data_mau_size; /* Size of DSP Data MAU */
@@ -108,11 +108,11 @@ int disp_create(struct disp_object **dispatch_obj,
108 if (disp_obj == NULL) 108 if (disp_obj == NULL)
109 status = -ENOMEM; 109 status = -ENOMEM;
110 else 110 else
111 disp_obj->hdev_obj = hdev_obj; 111 disp_obj->dev_obj = hdev_obj;
112 112
113 /* Get Channel manager and Bridge function interface */ 113 /* Get Channel manager and Bridge function interface */
114 if (!status) { 114 if (!status) {
115 status = dev_get_chnl_mgr(hdev_obj, &(disp_obj->hchnl_mgr)); 115 status = dev_get_chnl_mgr(hdev_obj, &(disp_obj->chnl_mgr));
116 if (!status) { 116 if (!status) {
117 (void)dev_get_intf_fxns(hdev_obj, &intf_fxns); 117 (void)dev_get_intf_fxns(hdev_obj, &intf_fxns);
118 disp_obj->intf_fxns = intf_fxns; 118 disp_obj->intf_fxns = intf_fxns;
@@ -140,26 +140,26 @@ int disp_create(struct disp_object **dispatch_obj,
140 /* Open channels for communicating with the RMS */ 140 /* Open channels for communicating with the RMS */
141 chnl_attr_obj.uio_reqs = CHNLIOREQS; 141 chnl_attr_obj.uio_reqs = CHNLIOREQS;
142 chnl_attr_obj.event_obj = NULL; 142 chnl_attr_obj.event_obj = NULL;
143 ul_chnl_id = disp_attrs->ul_chnl_offset + CHNLTORMSOFFSET; 143 ul_chnl_id = disp_attrs->chnl_offset + CHNLTORMSOFFSET;
144 status = (*intf_fxns->pfn_chnl_open) (&(disp_obj->chnl_to_dsp), 144 status = (*intf_fxns->chnl_open) (&(disp_obj->chnl_to_dsp),
145 disp_obj->hchnl_mgr, 145 disp_obj->chnl_mgr,
146 CHNL_MODETODSP, ul_chnl_id, 146 CHNL_MODETODSP, ul_chnl_id,
147 &chnl_attr_obj); 147 &chnl_attr_obj);
148 148
149 if (!status) { 149 if (!status) {
150 ul_chnl_id = disp_attrs->ul_chnl_offset + CHNLFROMRMSOFFSET; 150 ul_chnl_id = disp_attrs->chnl_offset + CHNLFROMRMSOFFSET;
151 status = 151 status =
152 (*intf_fxns->pfn_chnl_open) (&(disp_obj->chnl_from_dsp), 152 (*intf_fxns->chnl_open) (&(disp_obj->chnl_from_dsp),
153 disp_obj->hchnl_mgr, 153 disp_obj->chnl_mgr,
154 CHNL_MODEFROMDSP, ul_chnl_id, 154 CHNL_MODEFROMDSP, ul_chnl_id,
155 &chnl_attr_obj); 155 &chnl_attr_obj);
156 } 156 }
157 if (!status) { 157 if (!status) {
158 /* Allocate buffer for commands, replies */ 158 /* Allocate buffer for commands, replies */
159 disp_obj->ul_bufsize = disp_attrs->ul_chnl_buf_size; 159 disp_obj->bufsize = disp_attrs->chnl_buf_size;
160 disp_obj->ul_bufsize_rms = RMS_COMMANDBUFSIZE; 160 disp_obj->bufsize_rms = RMS_COMMANDBUFSIZE;
161 disp_obj->pbuf = kzalloc(disp_obj->ul_bufsize, GFP_KERNEL); 161 disp_obj->buf = kzalloc(disp_obj->bufsize, GFP_KERNEL);
162 if (disp_obj->pbuf == NULL) 162 if (disp_obj->buf == NULL)
163 status = -ENOMEM; 163 status = -ENOMEM;
164 } 164 }
165func_cont: 165func_cont:
@@ -232,7 +232,7 @@ int disp_node_change_priority(struct disp_object *disp_obj,
232 DBC_REQUIRE(hnode != NULL); 232 DBC_REQUIRE(hnode != NULL);
233 233
234 /* Send message to RMS to change priority */ 234 /* Send message to RMS to change priority */
235 rms_cmd = (struct rms_command *)(disp_obj->pbuf); 235 rms_cmd = (struct rms_command *)(disp_obj->buf);
236 rms_cmd->fxn = (rms_word) (rms_fxn); 236 rms_cmd->fxn = (rms_word) (rms_fxn);
237 rms_cmd->arg1 = (rms_word) node_env; 237 rms_cmd->arg1 = (rms_word) node_env;
238 rms_cmd->arg2 = prio; 238 rms_cmd->arg2 = prio;
@@ -282,7 +282,7 @@ int disp_node_create(struct disp_object *disp_obj,
282 DBC_REQUIRE(node_get_type(hnode) != NODE_DEVICE); 282 DBC_REQUIRE(node_get_type(hnode) != NODE_DEVICE);
283 DBC_REQUIRE(node_env != NULL); 283 DBC_REQUIRE(node_env != NULL);
284 284
285 status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type); 285 status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
286 286
287 if (status) 287 if (status)
288 goto func_end; 288 goto func_end;
@@ -295,7 +295,7 @@ int disp_node_create(struct disp_object *disp_obj,
295 DBC_REQUIRE(pargs != NULL); 295 DBC_REQUIRE(pargs != NULL);
296 node_type = node_get_type(hnode); 296 node_type = node_get_type(hnode);
297 node_msg_args = pargs->asa.node_msg_args; 297 node_msg_args = pargs->asa.node_msg_args;
298 max = disp_obj->ul_bufsize_rms; /*Max # of RMS words that can be sent */ 298 max = disp_obj->bufsize_rms; /*Max # of RMS words that can be sent */
299 DBC_ASSERT(max == RMS_COMMANDBUFSIZE); 299 DBC_ASSERT(max == RMS_COMMANDBUFSIZE);
300 chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size; 300 chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size;
301 /* Number of RMS words needed to hold arg data */ 301 /* Number of RMS words needed to hold arg data */
@@ -347,7 +347,7 @@ int disp_node_create(struct disp_object *disp_obj,
347 */ 347 */
348 if (!status) { 348 if (!status) {
349 total = 0; /* Total number of words in buffer so far */ 349 total = 0; /* Total number of words in buffer so far */
350 pdw_buf = (rms_word *) disp_obj->pbuf; 350 pdw_buf = (rms_word *) disp_obj->buf;
351 rms_cmd = (struct rms_command *)pdw_buf; 351 rms_cmd = (struct rms_command *)pdw_buf;
352 rms_cmd->fxn = (rms_word) (rms_fxn); 352 rms_cmd->fxn = (rms_word) (rms_fxn);
353 rms_cmd->arg1 = (rms_word) (ul_create_fxn); 353 rms_cmd->arg1 = (rms_word) (ul_create_fxn);
@@ -402,16 +402,16 @@ int disp_node_create(struct disp_object *disp_obj,
402 more_task_args->sysstack_size = 402 more_task_args->sysstack_size =
403 task_arg_obj.sys_stack_size; 403 task_arg_obj.sys_stack_size;
404 more_task_args->stack_seg = task_arg_obj.stack_seg; 404 more_task_args->stack_seg = task_arg_obj.stack_seg;
405 more_task_args->heap_addr = task_arg_obj.udsp_heap_addr; 405 more_task_args->heap_addr = task_arg_obj.dsp_heap_addr;
406 more_task_args->heap_size = task_arg_obj.heap_size; 406 more_task_args->heap_size = task_arg_obj.heap_size;
407 more_task_args->misc = task_arg_obj.ul_dais_arg; 407 more_task_args->misc = task_arg_obj.dais_arg;
408 more_task_args->num_input_streams = 408 more_task_args->num_input_streams =
409 task_arg_obj.num_inputs; 409 task_arg_obj.num_inputs;
410 total += 410 total +=
411 sizeof(struct rms_more_task_args) / 411 sizeof(struct rms_more_task_args) /
412 sizeof(rms_word); 412 sizeof(rms_word);
413 dev_dbg(bridge, "%s: udsp_heap_addr %x, heap_size %x\n", 413 dev_dbg(bridge, "%s: dsp_heap_addr %x, heap_size %x\n",
414 __func__, task_arg_obj.udsp_heap_addr, 414 __func__, task_arg_obj.dsp_heap_addr,
415 task_arg_obj.heap_size); 415 task_arg_obj.heap_size);
416 /* Keep track of pSIOInDef[] and pSIOOutDef[] 416 /* Keep track of pSIOInDef[] and pSIOOutDef[]
417 * positions in the buffer, since this needs to be 417 * positions in the buffer, since this needs to be
@@ -460,17 +460,6 @@ int disp_node_create(struct disp_object *disp_obj,
460 DBC_ASSERT(ul_bytes < (RMS_COMMANDBUFSIZE * sizeof(rms_word))); 460 DBC_ASSERT(ul_bytes < (RMS_COMMANDBUFSIZE * sizeof(rms_word)));
461 status = send_message(disp_obj, node_get_timeout(hnode), 461 status = send_message(disp_obj, node_get_timeout(hnode),
462 ul_bytes, node_env); 462 ul_bytes, node_env);
463 if (status >= 0) {
464 /*
465 * Message successfully received from RMS.
466 * Return the status of the Node's create function
467 * on the DSP-side
468 */
469 status = (((rms_word *) (disp_obj->pbuf))[0]);
470 if (status < 0)
471 dev_dbg(bridge, "%s: DSP-side failed: 0x%x\n",
472 __func__, status);
473 }
474 } 463 }
475func_end: 464func_end:
476 return status; 465 return status;
@@ -495,7 +484,7 @@ int disp_node_delete(struct disp_object *disp_obj,
495 DBC_REQUIRE(disp_obj); 484 DBC_REQUIRE(disp_obj);
496 DBC_REQUIRE(hnode != NULL); 485 DBC_REQUIRE(hnode != NULL);
497 486
498 status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type); 487 status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
499 488
500 if (!status) { 489 if (!status) {
501 490
@@ -504,7 +493,7 @@ int disp_node_delete(struct disp_object *disp_obj,
504 /* 493 /*
505 * Fill in buffer to send to RMS 494 * Fill in buffer to send to RMS
506 */ 495 */
507 rms_cmd = (struct rms_command *)disp_obj->pbuf; 496 rms_cmd = (struct rms_command *)disp_obj->buf;
508 rms_cmd->fxn = (rms_word) (rms_fxn); 497 rms_cmd->fxn = (rms_word) (rms_fxn);
509 rms_cmd->arg1 = (rms_word) node_env; 498 rms_cmd->arg1 = (rms_word) node_env;
510 rms_cmd->arg2 = (rms_word) (ul_delete_fxn); 499 rms_cmd->arg2 = (rms_word) (ul_delete_fxn);
@@ -513,18 +502,6 @@ int disp_node_delete(struct disp_object *disp_obj,
513 status = send_message(disp_obj, node_get_timeout(hnode), 502 status = send_message(disp_obj, node_get_timeout(hnode),
514 sizeof(struct rms_command), 503 sizeof(struct rms_command),
515 &dw_arg); 504 &dw_arg);
516 if (status >= 0) {
517 /*
518 * Message successfully received from RMS.
519 * Return the status of the Node's delete
520 * function on the DSP-side
521 */
522 status = (((rms_word *) (disp_obj->pbuf))[0]);
523 if (status < 0)
524 dev_dbg(bridge, "%s: DSP-side failed: "
525 "0x%x\n", __func__, status);
526 }
527
528 } 505 }
529 } 506 }
530 return status; 507 return status;
@@ -548,7 +525,7 @@ int disp_node_run(struct disp_object *disp_obj,
548 DBC_REQUIRE(disp_obj); 525 DBC_REQUIRE(disp_obj);
549 DBC_REQUIRE(hnode != NULL); 526 DBC_REQUIRE(hnode != NULL);
550 527
551 status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type); 528 status = dev_get_dev_type(disp_obj->dev_obj, &dev_type);
552 529
553 if (!status) { 530 if (!status) {
554 531
@@ -557,7 +534,7 @@ int disp_node_run(struct disp_object *disp_obj,
557 /* 534 /*
558 * Fill in buffer to send to RMS. 535 * Fill in buffer to send to RMS.
559 */ 536 */
560 rms_cmd = (struct rms_command *)disp_obj->pbuf; 537 rms_cmd = (struct rms_command *)disp_obj->buf;
561 rms_cmd->fxn = (rms_word) (rms_fxn); 538 rms_cmd->fxn = (rms_word) (rms_fxn);
562 rms_cmd->arg1 = (rms_word) node_env; 539 rms_cmd->arg1 = (rms_word) node_env;
563 rms_cmd->arg2 = (rms_word) (ul_execute_fxn); 540 rms_cmd->arg2 = (rms_word) (ul_execute_fxn);
@@ -566,18 +543,6 @@ int disp_node_run(struct disp_object *disp_obj,
566 status = send_message(disp_obj, node_get_timeout(hnode), 543 status = send_message(disp_obj, node_get_timeout(hnode),
567 sizeof(struct rms_command), 544 sizeof(struct rms_command),
568 &dw_arg); 545 &dw_arg);
569 if (status >= 0) {
570 /*
571 * Message successfully received from RMS.
572 * Return the status of the Node's execute
573 * function on the DSP-side
574 */
575 status = (((rms_word *) (disp_obj->pbuf))[0]);
576 if (status < 0)
577 dev_dbg(bridge, "%s: DSP-side failed: "
578 "0x%x\n", __func__, status);
579 }
580
581 } 546 }
582 } 547 }
583 548
@@ -601,7 +566,7 @@ static void delete_disp(struct disp_object *disp_obj)
601 if (disp_obj->chnl_from_dsp) { 566 if (disp_obj->chnl_from_dsp) {
602 /* Channel close can fail only if the channel handle 567 /* Channel close can fail only if the channel handle
603 * is invalid. */ 568 * is invalid. */
604 status = (*intf_fxns->pfn_chnl_close) 569 status = (*intf_fxns->chnl_close)
605 (disp_obj->chnl_from_dsp); 570 (disp_obj->chnl_from_dsp);
606 if (status) { 571 if (status) {
607 dev_dbg(bridge, "%s: Failed to close channel " 572 dev_dbg(bridge, "%s: Failed to close channel "
@@ -610,14 +575,14 @@ static void delete_disp(struct disp_object *disp_obj)
610 } 575 }
611 if (disp_obj->chnl_to_dsp) { 576 if (disp_obj->chnl_to_dsp) {
612 status = 577 status =
613 (*intf_fxns->pfn_chnl_close) (disp_obj-> 578 (*intf_fxns->chnl_close) (disp_obj->
614 chnl_to_dsp); 579 chnl_to_dsp);
615 if (status) { 580 if (status) {
616 dev_dbg(bridge, "%s: Failed to close channel to" 581 dev_dbg(bridge, "%s: Failed to close channel to"
617 " RMS: 0x%x\n", __func__, status); 582 " RMS: 0x%x\n", __func__, status);
618 } 583 }
619 } 584 }
620 kfree(disp_obj->pbuf); 585 kfree(disp_obj->buf);
621 586
622 kfree(disp_obj); 587 kfree(disp_obj);
623 } 588 }
@@ -646,7 +611,7 @@ static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset,
646 strm_def_obj->nbufs = strm_def.num_bufs; 611 strm_def_obj->nbufs = strm_def.num_bufs;
647 strm_def_obj->segid = strm_def.seg_id; 612 strm_def_obj->segid = strm_def.seg_id;
648 strm_def_obj->align = strm_def.buf_alignment; 613 strm_def_obj->align = strm_def.buf_alignment;
649 strm_def_obj->timeout = strm_def.utimeout; 614 strm_def_obj->timeout = strm_def.timeout;
650 } 615 }
651 616
652 if (!status) { 617 if (!status) {
@@ -699,16 +664,16 @@ static int send_message(struct disp_object *disp_obj, u32 timeout,
699 *pdw_arg = (u32) NULL; 664 *pdw_arg = (u32) NULL;
700 intf_fxns = disp_obj->intf_fxns; 665 intf_fxns = disp_obj->intf_fxns;
701 chnl_obj = disp_obj->chnl_to_dsp; 666 chnl_obj = disp_obj->chnl_to_dsp;
702 pbuf = disp_obj->pbuf; 667 pbuf = disp_obj->buf;
703 668
704 /* Send the command */ 669 /* Send the command */
705 status = (*intf_fxns->pfn_chnl_add_io_req) (chnl_obj, pbuf, ul_bytes, 0, 670 status = (*intf_fxns->chnl_add_io_req) (chnl_obj, pbuf, ul_bytes, 0,
706 0L, dw_arg); 671 0L, dw_arg);
707 if (status) 672 if (status)
708 goto func_end; 673 goto func_end;
709 674
710 status = 675 status =
711 (*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj); 676 (*intf_fxns->chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj);
712 if (!status) { 677 if (!status) {
713 if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) { 678 if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
714 if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) 679 if (CHNL_IS_TIMED_OUT(chnl_ioc_obj))
@@ -723,13 +688,13 @@ static int send_message(struct disp_object *disp_obj, u32 timeout,
723 688
724 chnl_obj = disp_obj->chnl_from_dsp; 689 chnl_obj = disp_obj->chnl_from_dsp;
725 ul_bytes = REPLYSIZE; 690 ul_bytes = REPLYSIZE;
726 status = (*intf_fxns->pfn_chnl_add_io_req) (chnl_obj, pbuf, ul_bytes, 691 status = (*intf_fxns->chnl_add_io_req) (chnl_obj, pbuf, ul_bytes,
727 0, 0L, dw_arg); 692 0, 0L, dw_arg);
728 if (status) 693 if (status)
729 goto func_end; 694 goto func_end;
730 695
731 status = 696 status =
732 (*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj); 697 (*intf_fxns->chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj);
733 if (!status) { 698 if (!status) {
734 if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) { 699 if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) {
735 status = -ETIME; 700 status = -ETIME;
@@ -738,10 +703,17 @@ static int send_message(struct disp_object *disp_obj, u32 timeout,
738 status = -EPERM; 703 status = -EPERM;
739 } else { 704 } else {
740 if (CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) { 705 if (CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
741 DBC_ASSERT(chnl_ioc_obj.pbuf == pbuf); 706 DBC_ASSERT(chnl_ioc_obj.buf == pbuf);
742 status = (*((rms_word *) chnl_ioc_obj.pbuf)); 707 if (*((int *)chnl_ioc_obj.buf) < 0) {
708 /* Translate DSP's to kernel error */
709 status = -EREMOTEIO;
710 dev_dbg(bridge, "%s: DSP-side failed:"
711 " DSP errcode = 0x%x, Kernel "
712 "errcode = %d\n", __func__,
713 *(int *)pbuf, status);
714 }
743 *pdw_arg = 715 *pdw_arg =
744 (((rms_word *) (chnl_ioc_obj.pbuf))[1]); 716 (((rms_word *) (chnl_ioc_obj.buf))[1]);
745 } else { 717 } else {
746 status = -EPERM; 718 status = -EPERM;
747 } 719 }
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index 81b1b901355..8c88583364e 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -16,6 +16,7 @@
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */ 17 */
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/list.h>
19 20
20/* ----------------------------------- Host OS */ 21/* ----------------------------------- Host OS */
21#include <dspbridge/host_os.h> 22#include <dspbridge/host_os.h>
@@ -26,9 +27,6 @@
26/* ----------------------------------- Trace & Debug */ 27/* ----------------------------------- Trace & Debug */
27#include <dspbridge/dbc.h> 28#include <dspbridge/dbc.h>
28 29
29/* ----------------------------------- OS Adaptation Layer */
30#include <dspbridge/list.h>
31
32/* ----------------------------------- This */ 30/* ----------------------------------- This */
33#include <dspbridge/drv.h> 31#include <dspbridge/drv.h>
34#include <dspbridge/dev.h> 32#include <dspbridge/dev.h>
@@ -42,8 +40,8 @@
42 40
43/* ----------------------------------- Defines, Data Structures, Typedefs */ 41/* ----------------------------------- Defines, Data Structures, Typedefs */
44struct drv_object { 42struct drv_object {
45 struct lst_list *dev_list; 43 struct list_head dev_list;
46 struct lst_list *dev_node_string; 44 struct list_head dev_node_string;
47}; 45};
48 46
49/* 47/*
@@ -91,7 +89,7 @@ int drv_insert_node_res_element(void *hnode, void *node_resource,
91 goto func_end; 89 goto func_end;
92 } 90 }
93 91
94 (*node_res_obj)->hnode = hnode; 92 (*node_res_obj)->node = hnode;
95 retval = idr_get_new(ctxt->node_id, *node_res_obj, 93 retval = idr_get_new(ctxt->node_id, *node_res_obj,
96 &(*node_res_obj)->id); 94 &(*node_res_obj)->id);
97 if (retval == -EAGAIN) { 95 if (retval == -EAGAIN) {
@@ -125,13 +123,13 @@ static int drv_proc_free_node_res(int id, void *p, void *data)
125 u32 node_state; 123 u32 node_state;
126 124
127 if (node_res_obj->node_allocated) { 125 if (node_res_obj->node_allocated) {
128 node_state = node_get_state(node_res_obj->hnode); 126 node_state = node_get_state(node_res_obj->node);
129 if (node_state <= NODE_DELETING) { 127 if (node_state <= NODE_DELETING) {
130 if ((node_state == NODE_RUNNING) || 128 if ((node_state == NODE_RUNNING) ||
131 (node_state == NODE_PAUSED) || 129 (node_state == NODE_PAUSED) ||
132 (node_state == NODE_TERMINATING)) 130 (node_state == NODE_TERMINATING))
133 node_terminate 131 node_terminate
134 (node_res_obj->hnode, &status); 132 (node_res_obj->node, &status);
135 133
136 node_delete(node_res_obj, ctxt); 134 node_delete(node_res_obj, ctxt);
137 } 135 }
@@ -150,7 +148,7 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
150 148
151 /* Free DMM mapped memory resources */ 149 /* Free DMM mapped memory resources */
152 list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { 150 list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
153 status = proc_un_map(ctxt->hprocessor, 151 status = proc_un_map(ctxt->processor,
154 (void *)map_obj->dsp_addr, ctxt); 152 (void *)map_obj->dsp_addr, ctxt);
155 if (status) 153 if (status)
156 pr_err("%s: proc_un_map failed!" 154 pr_err("%s: proc_un_map failed!"
@@ -159,7 +157,7 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
159 157
160 /* Free DMM reserved memory resources */ 158 /* Free DMM reserved memory resources */
161 list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) { 159 list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
162 status = proc_un_reserve_memory(ctxt->hprocessor, (void *) 160 status = proc_un_reserve_memory(ctxt->processor, (void *)
163 rsv_obj->dsp_reserved_addr, 161 rsv_obj->dsp_reserved_addr,
164 ctxt); 162 ctxt);
165 if (status) 163 if (status)
@@ -218,7 +216,7 @@ int drv_proc_insert_strm_res_element(void *stream_obj,
218 goto func_end; 216 goto func_end;
219 } 217 }
220 218
221 (*pstrm_res)->hstream = stream_obj; 219 (*pstrm_res)->stream = stream_obj;
222 retval = idr_get_new(ctxt->stream_id, *pstrm_res, 220 retval = idr_get_new(ctxt->stream_id, *pstrm_res,
223 &(*pstrm_res)->id); 221 &(*pstrm_res)->id);
224 if (retval == -EAGAIN) { 222 if (retval == -EAGAIN) {
@@ -265,9 +263,9 @@ static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
265 } 263 }
266 strm_info.user_strm = &user; 264 strm_info.user_strm = &user;
267 user.number_bufs_in_stream = 0; 265 user.number_bufs_in_stream = 0;
268 strm_get_info(strm_res->hstream, &strm_info, sizeof(strm_info)); 266 strm_get_info(strm_res->stream, &strm_info, sizeof(strm_info));
269 while (user.number_bufs_in_stream--) 267 while (user.number_bufs_in_stream--)
270 strm_reclaim(strm_res->hstream, &buf_ptr, &ul_bytes, 268 strm_reclaim(strm_res->stream, &buf_ptr, &ul_bytes,
271 (u32 *) &ul_buf_size, &dw_arg); 269 (u32 *) &ul_buf_size, &dw_arg);
272 strm_close(strm_res, ctxt); 270 strm_close(strm_res, ctxt);
273 return 0; 271 return 0;
@@ -316,22 +314,8 @@ int drv_create(struct drv_object **drv_obj)
316 pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL); 314 pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
317 if (pdrv_object) { 315 if (pdrv_object) {
318 /* Create and Initialize List of device objects */ 316 /* Create and Initialize List of device objects */
319 pdrv_object->dev_list = kzalloc(sizeof(struct lst_list), 317 INIT_LIST_HEAD(&pdrv_object->dev_list);
320 GFP_KERNEL); 318 INIT_LIST_HEAD(&pdrv_object->dev_node_string);
321 if (pdrv_object->dev_list) {
322 /* Create and Initialize List of device Extension */
323 pdrv_object->dev_node_string =
324 kzalloc(sizeof(struct lst_list), GFP_KERNEL);
325 if (!(pdrv_object->dev_node_string)) {
326 status = -EPERM;
327 } else {
328 INIT_LIST_HEAD(&pdrv_object->
329 dev_node_string->head);
330 INIT_LIST_HEAD(&pdrv_object->dev_list->head);
331 }
332 } else {
333 status = -ENOMEM;
334 }
335 } else { 319 } else {
336 status = -ENOMEM; 320 status = -ENOMEM;
337 } 321 }
@@ -348,8 +332,6 @@ int drv_create(struct drv_object **drv_obj)
348 if (!status) { 332 if (!status) {
349 *drv_obj = pdrv_object; 333 *drv_obj = pdrv_object;
350 } else { 334 } else {
351 kfree(pdrv_object->dev_list);
352 kfree(pdrv_object->dev_node_string);
353 /* Free the DRV Object */ 335 /* Free the DRV Object */
354 kfree(pdrv_object); 336 kfree(pdrv_object);
355 } 337 }
@@ -386,13 +368,6 @@ int drv_destroy(struct drv_object *driver_obj)
386 DBC_REQUIRE(refs > 0); 368 DBC_REQUIRE(refs > 0);
387 DBC_REQUIRE(pdrv_object); 369 DBC_REQUIRE(pdrv_object);
388 370
389 /*
390 * Delete the List if it exists.Should not come here
391 * as the drv_remove_dev_object and the Last drv_request_resources
392 * removes the list if the lists are empty.
393 */
394 kfree(pdrv_object->dev_list);
395 kfree(pdrv_object->dev_node_string);
396 kfree(pdrv_object); 371 kfree(pdrv_object);
397 /* Update the DRV Object in the driver data */ 372 /* Update the DRV Object in the driver data */
398 if (drv_datap) { 373 if (drv_datap) {
@@ -424,7 +399,7 @@ int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
424 DBC_REQUIRE(device_obj != NULL); 399 DBC_REQUIRE(device_obj != NULL);
425 DBC_REQUIRE(index >= 0); 400 DBC_REQUIRE(index >= 0);
426 DBC_REQUIRE(refs > 0); 401 DBC_REQUIRE(refs > 0);
427 DBC_ASSERT(!(LST_IS_EMPTY(pdrv_obj->dev_list))); 402 DBC_ASSERT(!(list_empty(&pdrv_obj->dev_list)));
428 403
429 dev_obj = (struct dev_object *)drv_get_first_dev_object(); 404 dev_obj = (struct dev_object *)drv_get_first_dev_object();
430 for (i = 0; i < index; i++) { 405 for (i = 0; i < index; i++) {
@@ -455,9 +430,8 @@ u32 drv_get_first_dev_object(void)
455 430
456 if (drv_datap && drv_datap->drv_object) { 431 if (drv_datap && drv_datap->drv_object) {
457 pdrv_obj = drv_datap->drv_object; 432 pdrv_obj = drv_datap->drv_object;
458 if ((pdrv_obj->dev_list != NULL) && 433 if (!list_empty(&pdrv_obj->dev_list))
459 !LST_IS_EMPTY(pdrv_obj->dev_list)) 434 dw_dev_object = (u32) pdrv_obj->dev_list.next;
460 dw_dev_object = (u32) lst_first(pdrv_obj->dev_list);
461 } else { 435 } else {
462 pr_err("%s: Failed to retrieve the object handle\n", __func__); 436 pr_err("%s: Failed to retrieve the object handle\n", __func__);
463 } 437 }
@@ -479,10 +453,9 @@ u32 drv_get_first_dev_extension(void)
479 453
480 if (drv_datap && drv_datap->drv_object) { 454 if (drv_datap && drv_datap->drv_object) {
481 pdrv_obj = drv_datap->drv_object; 455 pdrv_obj = drv_datap->drv_object;
482 if ((pdrv_obj->dev_node_string != NULL) && 456 if (!list_empty(&pdrv_obj->dev_node_string)) {
483 !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
484 dw_dev_extension = 457 dw_dev_extension =
485 (u32) lst_first(pdrv_obj->dev_node_string); 458 (u32) pdrv_obj->dev_node_string.next;
486 } 459 }
487 } else { 460 } else {
488 pr_err("%s: Failed to retrieve the object handle\n", __func__); 461 pr_err("%s: Failed to retrieve the object handle\n", __func__);
@@ -503,16 +476,15 @@ u32 drv_get_next_dev_object(u32 hdev_obj)
503 u32 dw_next_dev_object = 0; 476 u32 dw_next_dev_object = 0;
504 struct drv_object *pdrv_obj; 477 struct drv_object *pdrv_obj;
505 struct drv_data *drv_datap = dev_get_drvdata(bridge); 478 struct drv_data *drv_datap = dev_get_drvdata(bridge);
506 479 struct list_head *curr;
507 DBC_REQUIRE(hdev_obj != 0);
508 480
509 if (drv_datap && drv_datap->drv_object) { 481 if (drv_datap && drv_datap->drv_object) {
510 pdrv_obj = drv_datap->drv_object; 482 pdrv_obj = drv_datap->drv_object;
511 if ((pdrv_obj->dev_list != NULL) && 483 if (!list_empty(&pdrv_obj->dev_list)) {
512 !LST_IS_EMPTY(pdrv_obj->dev_list)) { 484 curr = (struct list_head *)hdev_obj;
513 dw_next_dev_object = (u32) lst_next(pdrv_obj->dev_list, 485 if (list_is_last(curr, &pdrv_obj->dev_list))
514 (struct list_head *) 486 return 0;
515 hdev_obj); 487 dw_next_dev_object = (u32) curr->next;
516 } 488 }
517 } else { 489 } else {
518 pr_err("%s: Failed to retrieve the object handle\n", __func__); 490 pr_err("%s: Failed to retrieve the object handle\n", __func__);
@@ -534,16 +506,15 @@ u32 drv_get_next_dev_extension(u32 dev_extension)
534 u32 dw_dev_extension = 0; 506 u32 dw_dev_extension = 0;
535 struct drv_object *pdrv_obj; 507 struct drv_object *pdrv_obj;
536 struct drv_data *drv_datap = dev_get_drvdata(bridge); 508 struct drv_data *drv_datap = dev_get_drvdata(bridge);
537 509 struct list_head *curr;
538 DBC_REQUIRE(dev_extension != 0);
539 510
540 if (drv_datap && drv_datap->drv_object) { 511 if (drv_datap && drv_datap->drv_object) {
541 pdrv_obj = drv_datap->drv_object; 512 pdrv_obj = drv_datap->drv_object;
542 if ((pdrv_obj->dev_node_string != NULL) && 513 if (!list_empty(&pdrv_obj->dev_node_string)) {
543 !LST_IS_EMPTY(pdrv_obj->dev_node_string)) { 514 curr = (struct list_head *)dev_extension;
544 dw_dev_extension = 515 if (list_is_last(curr, &pdrv_obj->dev_node_string))
545 (u32) lst_next(pdrv_obj->dev_node_string, 516 return 0;
546 (struct list_head *)dev_extension); 517 dw_dev_extension = (u32) curr->next;
547 } 518 }
548 } else { 519 } else {
549 pr_err("%s: Failed to retrieve the object handle\n", __func__); 520 pr_err("%s: Failed to retrieve the object handle\n", __func__);
@@ -584,11 +555,8 @@ int drv_insert_dev_object(struct drv_object *driver_obj,
584 DBC_REQUIRE(refs > 0); 555 DBC_REQUIRE(refs > 0);
585 DBC_REQUIRE(hdev_obj != NULL); 556 DBC_REQUIRE(hdev_obj != NULL);
586 DBC_REQUIRE(pdrv_object); 557 DBC_REQUIRE(pdrv_object);
587 DBC_ASSERT(pdrv_object->dev_list);
588
589 lst_put_tail(pdrv_object->dev_list, (struct list_head *)hdev_obj);
590 558
591 DBC_ENSURE(!LST_IS_EMPTY(pdrv_object->dev_list)); 559 list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);
592 560
593 return 0; 561 return 0;
594} 562}
@@ -610,26 +578,17 @@ int drv_remove_dev_object(struct drv_object *driver_obj,
610 DBC_REQUIRE(pdrv_object); 578 DBC_REQUIRE(pdrv_object);
611 DBC_REQUIRE(hdev_obj != NULL); 579 DBC_REQUIRE(hdev_obj != NULL);
612 580
613 DBC_REQUIRE(pdrv_object->dev_list != NULL); 581 DBC_REQUIRE(!list_empty(&pdrv_object->dev_list));
614 DBC_REQUIRE(!LST_IS_EMPTY(pdrv_object->dev_list));
615 582
616 /* Search list for p_proc_object: */ 583 /* Search list for p_proc_object: */
617 for (cur_elem = lst_first(pdrv_object->dev_list); cur_elem != NULL; 584 list_for_each(cur_elem, &pdrv_object->dev_list) {
618 cur_elem = lst_next(pdrv_object->dev_list, cur_elem)) {
619 /* If found, remove it. */ 585 /* If found, remove it. */
620 if ((struct dev_object *)cur_elem == hdev_obj) { 586 if ((struct dev_object *)cur_elem == hdev_obj) {
621 lst_remove_elem(pdrv_object->dev_list, cur_elem); 587 list_del(cur_elem);
622 status = 0; 588 status = 0;
623 break; 589 break;
624 } 590 }
625 } 591 }
626 /* Remove list if empty. */
627 if (LST_IS_EMPTY(pdrv_object->dev_list)) {
628 kfree(pdrv_object->dev_list);
629 pdrv_object->dev_list = NULL;
630 }
631 DBC_ENSURE((pdrv_object->dev_list == NULL) ||
632 !LST_IS_EMPTY(pdrv_object->dev_list));
633 592
634 return status; 593 return status;
635} 594}
@@ -663,14 +622,13 @@ int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
663 if (!status) { 622 if (!status) {
664 pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL); 623 pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
665 if (pszdev_node) { 624 if (pszdev_node) {
666 lst_init_elem(&pszdev_node->link);
667 strncpy(pszdev_node->sz_string, 625 strncpy(pszdev_node->sz_string,
668 (char *)dw_context, MAXREGPATHLENGTH - 1); 626 (char *)dw_context, MAXREGPATHLENGTH - 1);
669 pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0'; 627 pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
670 /* Update the Driver Object List */ 628 /* Update the Driver Object List */
671 *dev_node_strg = (u32) pszdev_node->sz_string; 629 *dev_node_strg = (u32) pszdev_node->sz_string;
672 lst_put_tail(pdrv_object->dev_node_string, 630 list_add_tail(&pszdev_node->link,
673 (struct list_head *)pszdev_node); 631 &pdrv_object->dev_node_string);
674 } else { 632 } else {
675 status = -ENOMEM; 633 status = -ENOMEM;
676 *dev_node_strg = 0; 634 *dev_node_strg = 0;
@@ -682,7 +640,7 @@ int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
682 } 640 }
683 641
684 DBC_ENSURE((!status && dev_node_strg != NULL && 642 DBC_ENSURE((!status && dev_node_strg != NULL &&
685 !LST_IS_EMPTY(pdrv_object->dev_node_string)) || 643 !list_empty(&pdrv_object->dev_node_string)) ||
686 (status && *dev_node_strg == 0)); 644 (status && *dev_node_strg == 0));
687 645
688 return status; 646 return status;
@@ -696,7 +654,6 @@ int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
696int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj) 654int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
697{ 655{
698 int status = 0; 656 int status = 0;
699 struct drv_object *pdrv_object = (struct drv_object *)hdrv_obj;
700 struct drv_ext *pszdev_node; 657 struct drv_ext *pszdev_node;
701 658
702 /* 659 /*
@@ -706,23 +663,13 @@ int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
706 for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension(); 663 for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
707 pszdev_node != NULL; pszdev_node = (struct drv_ext *) 664 pszdev_node != NULL; pszdev_node = (struct drv_ext *)
708 drv_get_next_dev_extension((u32) pszdev_node)) { 665 drv_get_next_dev_extension((u32) pszdev_node)) {
709 if (!pdrv_object->dev_node_string) {
710 /* When this could happen? */
711 continue;
712 }
713 if ((u32) pszdev_node == dw_context) { 666 if ((u32) pszdev_node == dw_context) {
714 /* Found it */ 667 /* Found it */
715 /* Delete from the Driver object list */ 668 /* Delete from the Driver object list */
716 lst_remove_elem(pdrv_object->dev_node_string, 669 list_del(&pszdev_node->link);
717 (struct list_head *)pszdev_node); 670 kfree(pszdev_node);
718 kfree((void *)pszdev_node);
719 break; 671 break;
720 } 672 }
721 /* Delete the List if it is empty */
722 if (LST_IS_EMPTY(pdrv_object->dev_node_string)) {
723 kfree(pdrv_object->dev_node_string);
724 pdrv_object->dev_node_string = NULL;
725 }
726 } 673 }
727 return status; 674 return status;
728} 675}
@@ -740,10 +687,9 @@ static int request_bridge_resources(struct cfg_hostres *res)
740 host_res->num_mem_windows = 2; 687 host_res->num_mem_windows = 2;
741 688
742 /* First window is for DSP internal memory */ 689 /* First window is for DSP internal memory */
743 host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE); 690 dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
744 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); 691 dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
745 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]); 692 dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);
746 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
747 693
748 /* for 24xx base port is not mapping the mamory for DSP 694 /* for 24xx base port is not mapping the mamory for DSP
749 * internal memory TODO Do a ioremap here */ 695 * internal memory TODO Do a ioremap here */
@@ -752,11 +698,11 @@ static int request_bridge_resources(struct cfg_hostres *res)
752 /* These are hard-coded values */ 698 /* These are hard-coded values */
753 host_res->birq_registers = 0; 699 host_res->birq_registers = 0;
754 host_res->birq_attrib = 0; 700 host_res->birq_attrib = 0;
755 host_res->dw_offset_for_monitor = 0; 701 host_res->offset_for_monitor = 0;
756 host_res->dw_chnl_offset = 0; 702 host_res->chnl_offset = 0;
757 /* CHNL_MAXCHANNELS */ 703 /* CHNL_MAXCHANNELS */
758 host_res->dw_num_chnls = CHNL_MAXCHANNELS; 704 host_res->num_chnls = CHNL_MAXCHANNELS;
759 host_res->dw_chnl_buf_size = 0x400; 705 host_res->chnl_buf_size = 0x400;
760 706
761 return 0; 707 return 0;
762} 708}
@@ -784,51 +730,51 @@ int drv_request_bridge_res_dsp(void **phost_resources)
784 /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */ 730 /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
785 host_res->num_mem_windows = 4; 731 host_res->num_mem_windows = 4;
786 732
787 host_res->dw_mem_base[0] = 0; 733 host_res->mem_base[0] = 0;
788 host_res->dw_mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE, 734 host_res->mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
789 OMAP_DSP_MEM1_SIZE); 735 OMAP_DSP_MEM1_SIZE);
790 host_res->dw_mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE, 736 host_res->mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
791 OMAP_DSP_MEM2_SIZE); 737 OMAP_DSP_MEM2_SIZE);
792 host_res->dw_mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE, 738 host_res->mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
793 OMAP_DSP_MEM3_SIZE); 739 OMAP_DSP_MEM3_SIZE);
794 host_res->dw_per_base = ioremap(OMAP_PER_CM_BASE, 740 host_res->per_base = ioremap(OMAP_PER_CM_BASE,
795 OMAP_PER_CM_SIZE); 741 OMAP_PER_CM_SIZE);
796 host_res->dw_per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE, 742 host_res->per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
797 OMAP_PER_PRM_SIZE); 743 OMAP_PER_PRM_SIZE);
798 host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, 744 host_res->core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
799 OMAP_CORE_PRM_SIZE); 745 OMAP_CORE_PRM_SIZE);
800 host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE, 746 host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
801 OMAP_DMMU_SIZE); 747 OMAP_DMMU_SIZE);
802 748
803 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", 749 dev_dbg(bridge, "mem_base[0] 0x%x\n",
804 host_res->dw_mem_base[0]); 750 host_res->mem_base[0]);
805 dev_dbg(bridge, "dw_mem_base[1] 0x%x\n", 751 dev_dbg(bridge, "mem_base[1] 0x%x\n",
806 host_res->dw_mem_base[1]); 752 host_res->mem_base[1]);
807 dev_dbg(bridge, "dw_mem_base[2] 0x%x\n", 753 dev_dbg(bridge, "mem_base[2] 0x%x\n",
808 host_res->dw_mem_base[2]); 754 host_res->mem_base[2]);
809 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", 755 dev_dbg(bridge, "mem_base[3] 0x%x\n",
810 host_res->dw_mem_base[3]); 756 host_res->mem_base[3]);
811 dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", 757 dev_dbg(bridge, "mem_base[4] 0x%x\n",
812 host_res->dw_mem_base[4]); 758 host_res->mem_base[4]);
813 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base); 759 dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);
814 760
815 shm_size = drv_datap->shm_size; 761 shm_size = drv_datap->shm_size;
816 if (shm_size >= 0x10000) { 762 if (shm_size >= 0x10000) {
817 /* Allocate Physically contiguous, 763 /* Allocate Physically contiguous,
818 * non-cacheable memory */ 764 * non-cacheable memory */
819 host_res->dw_mem_base[1] = 765 host_res->mem_base[1] =
820 (u32) mem_alloc_phys_mem(shm_size, 0x100000, 766 (u32) mem_alloc_phys_mem(shm_size, 0x100000,
821 &dma_addr); 767 &dma_addr);
822 if (host_res->dw_mem_base[1] == 0) { 768 if (host_res->mem_base[1] == 0) {
823 status = -ENOMEM; 769 status = -ENOMEM;
824 pr_err("shm reservation Failed\n"); 770 pr_err("shm reservation Failed\n");
825 } else { 771 } else {
826 host_res->dw_mem_length[1] = shm_size; 772 host_res->mem_length[1] = shm_size;
827 host_res->dw_mem_phys[1] = dma_addr; 773 host_res->mem_phys[1] = dma_addr;
828 774
829 dev_dbg(bridge, "%s: Bridge shm address 0x%x " 775 dev_dbg(bridge, "%s: Bridge shm address 0x%x "
830 "dma_addr %x size %x\n", __func__, 776 "dma_addr %x size %x\n", __func__,
831 host_res->dw_mem_base[1], 777 host_res->mem_base[1],
832 dma_addr, shm_size); 778 dma_addr, shm_size);
833 } 779 }
834 } 780 }
@@ -836,11 +782,11 @@ int drv_request_bridge_res_dsp(void **phost_resources)
836 /* These are hard-coded values */ 782 /* These are hard-coded values */
837 host_res->birq_registers = 0; 783 host_res->birq_registers = 0;
838 host_res->birq_attrib = 0; 784 host_res->birq_attrib = 0;
839 host_res->dw_offset_for_monitor = 0; 785 host_res->offset_for_monitor = 0;
840 host_res->dw_chnl_offset = 0; 786 host_res->chnl_offset = 0;
841 /* CHNL_MAXCHANNELS */ 787 /* CHNL_MAXCHANNELS */
842 host_res->dw_num_chnls = CHNL_MAXCHANNELS; 788 host_res->num_chnls = CHNL_MAXCHANNELS;
843 host_res->dw_chnl_buf_size = 0x400; 789 host_res->chnl_buf_size = 0x400;
844 dw_buff_size = sizeof(struct cfg_hostres); 790 dw_buff_size = sizeof(struct cfg_hostres);
845 } 791 }
846 *phost_resources = host_res; 792 *phost_resources = host_res;
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 324fcdffb3b..c43c7e3421c 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -59,7 +59,6 @@
59#include <dspbridge/chnl.h> 59#include <dspbridge/chnl.h>
60#include <dspbridge/proc.h> 60#include <dspbridge/proc.h>
61#include <dspbridge/dev.h> 61#include <dspbridge/dev.h>
62#include <dspbridge/drvdefs.h>
63#include <dspbridge/drv.h> 62#include <dspbridge/drv.h>
64 63
65#ifdef CONFIG_TIDSPBRIDGE_DVFS 64#ifdef CONFIG_TIDSPBRIDGE_DVFS
diff --git a/drivers/staging/tidspbridge/rmgr/mgr.c b/drivers/staging/tidspbridge/rmgr/mgr.c
index 0ea89a1bb77..d635c01c015 100644
--- a/drivers/staging/tidspbridge/rmgr/mgr.c
+++ b/drivers/staging/tidspbridge/rmgr/mgr.c
@@ -44,7 +44,7 @@
44#define ZLDLLNAME "" 44#define ZLDLLNAME ""
45 45
46struct mgr_object { 46struct mgr_object {
47 struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */ 47 struct dcd_manager *dcd_mgr; /* Proc/Node data manager */
48}; 48};
49 49
50/* ----------------------------------- Globals */ 50/* ----------------------------------- Globals */
@@ -67,7 +67,7 @@ int mgr_create(struct mgr_object **mgr_obj,
67 67
68 pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL); 68 pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL);
69 if (pmgr_obj) { 69 if (pmgr_obj) {
70 status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->hdcd_mgr); 70 status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->dcd_mgr);
71 if (!status) { 71 if (!status) {
72 /* If succeeded store the handle in the MGR Object */ 72 /* If succeeded store the handle in the MGR Object */
73 if (drv_datap) { 73 if (drv_datap) {
@@ -81,7 +81,7 @@ int mgr_create(struct mgr_object **mgr_obj,
81 if (!status) { 81 if (!status) {
82 *mgr_obj = pmgr_obj; 82 *mgr_obj = pmgr_obj;
83 } else { 83 } else {
84 dcd_destroy_manager(pmgr_obj->hdcd_mgr); 84 dcd_destroy_manager(pmgr_obj->dcd_mgr);
85 kfree(pmgr_obj); 85 kfree(pmgr_obj);
86 } 86 }
87 } else { 87 } else {
@@ -110,8 +110,8 @@ int mgr_destroy(struct mgr_object *hmgr_obj)
110 DBC_REQUIRE(hmgr_obj); 110 DBC_REQUIRE(hmgr_obj);
111 111
112 /* Free resources */ 112 /* Free resources */
113 if (hmgr_obj->hdcd_mgr) 113 if (hmgr_obj->dcd_mgr)
114 dcd_destroy_manager(hmgr_obj->hdcd_mgr); 114 dcd_destroy_manager(hmgr_obj->dcd_mgr);
115 115
116 kfree(pmgr_obj); 116 kfree(pmgr_obj);
117 /* Update the driver data with NULL for MGR Object */ 117 /* Update the driver data with NULL for MGR Object */
@@ -134,8 +134,7 @@ int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props,
134 u32 undb_props_size, u32 *pu_num_nodes) 134 u32 undb_props_size, u32 *pu_num_nodes)
135{ 135{
136 int status = 0; 136 int status = 0;
137 struct dsp_uuid node_uuid, temp_uuid; 137 struct dsp_uuid node_uuid;
138 u32 temp_index = 0;
139 u32 node_index = 0; 138 u32 node_index = 0;
140 struct dcd_genericobj gen_obj; 139 struct dcd_genericobj gen_obj;
141 struct mgr_object *pmgr_obj = NULL; 140 struct mgr_object *pmgr_obj = NULL;
@@ -149,46 +148,33 @@ int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props,
149 *pu_num_nodes = 0; 148 *pu_num_nodes = 0;
150 /* Get the Manager Object from the driver data */ 149 /* Get the Manager Object from the driver data */
151 if (!drv_datap || !drv_datap->mgr_object) { 150 if (!drv_datap || !drv_datap->mgr_object) {
152 status = -ENODATA;
153 pr_err("%s: Failed to retrieve the object handle\n", __func__); 151 pr_err("%s: Failed to retrieve the object handle\n", __func__);
154 goto func_cont; 152 return -ENODATA;
155 } else {
156 pmgr_obj = drv_datap->mgr_object;
157 } 153 }
154 pmgr_obj = drv_datap->mgr_object;
158 155
159 DBC_ASSERT(pmgr_obj); 156 DBC_ASSERT(pmgr_obj);
160 /* Forever loop till we hit failed or no more items in the 157 /* Forever loop till we hit failed or no more items in the
161 * Enumeration. We will exit the loop other than 0; */ 158 * Enumeration. We will exit the loop other than 0; */
162 while (status == 0) { 159 while (!status) {
163 status = dcd_enumerate_object(temp_index++, DSP_DCDNODETYPE, 160 status = dcd_enumerate_object(node_index++, DSP_DCDNODETYPE,
164 &temp_uuid); 161 &node_uuid);
165 if (status == 0) { 162 if (status)
166 node_index++; 163 break;
167 if (node_id == (node_index - 1)) 164 *pu_num_nodes = node_index;
168 node_uuid = temp_uuid; 165 if (node_id == (node_index - 1)) {
169 166 status = dcd_get_object_def(pmgr_obj->dcd_mgr,
170 } 167 &node_uuid, DSP_DCDNODETYPE, &gen_obj);
171 } 168 if (status)
172 if (!status) { 169 break;
173 if (node_id > (node_index - 1)) { 170 /* Get the Obj def */
174 status = -EINVAL; 171 *pndb_props = gen_obj.obj_data.node_obj.ndb_props;
175 } else {
176 status = dcd_get_object_def(pmgr_obj->hdcd_mgr,
177 (struct dsp_uuid *)
178 &node_uuid, DSP_DCDNODETYPE,
179 &gen_obj);
180 if (!status) {
181 /* Get the Obj def */
182 *pndb_props =
183 gen_obj.obj_data.node_obj.ndb_props;
184 *pu_num_nodes = node_index;
185 }
186 } 172 }
187 } 173 }
188 174
189func_cont: 175 /* the last status is not 0, but neither an error */
190 DBC_ENSURE((!status && *pu_num_nodes > 0) || 176 if (status > 0)
191 (status && *pu_num_nodes == 0)); 177 status = 0;
192 178
193 return status; 179 return status;
194} 180}
@@ -272,7 +258,7 @@ int mgr_enum_processor_info(u32 processor_id,
272 if (proc_detect != false) 258 if (proc_detect != false)
273 continue; 259 continue;
274 260
275 status2 = dcd_get_object_def(pmgr_obj->hdcd_mgr, 261 status2 = dcd_get_object_def(pmgr_obj->dcd_mgr,
276 (struct dsp_uuid *)&temp_uuid, 262 (struct dsp_uuid *)&temp_uuid,
277 DSP_DCDPROCESSORTYPE, &gen_obj); 263 DSP_DCDPROCESSORTYPE, &gen_obj);
278 if (!status2) { 264 if (!status2) {
@@ -347,7 +333,7 @@ int mgr_get_dcd_handle(struct mgr_object *mgr_handle,
347 333
348 *dcd_handle = (u32) NULL; 334 *dcd_handle = (u32) NULL;
349 if (pmgr_obj) { 335 if (pmgr_obj) {
350 *dcd_handle = (u32) pmgr_obj->hdcd_mgr; 336 *dcd_handle = (u32) pmgr_obj->dcd_mgr;
351 status = 0; 337 status = 0;
352 } 338 }
353 DBC_ENSURE((!status && *dcd_handle != (u32) NULL) || 339 DBC_ENSURE((!status && *dcd_handle != (u32) NULL) ||
diff --git a/drivers/staging/tidspbridge/rmgr/nldr.c b/drivers/staging/tidspbridge/rmgr/nldr.c
index 28354bbf1ae..fb5c2ba01d4 100644
--- a/drivers/staging/tidspbridge/rmgr/nldr.c
+++ b/drivers/staging/tidspbridge/rmgr/nldr.c
@@ -190,8 +190,8 @@ struct ovly_node {
190 * Overlay loader object. 190 * Overlay loader object.
191 */ 191 */
192struct nldr_object { 192struct nldr_object {
193 struct dev_object *hdev_obj; /* Device object */ 193 struct dev_object *dev_obj; /* Device object */
194 struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */ 194 struct dcd_manager *dcd_mgr; /* Proc/Node data manager */
195 struct dbll_tar_obj *dbll; /* The DBL loader */ 195 struct dbll_tar_obj *dbll; /* The DBL loader */
196 struct dbll_library_obj *base_lib; /* Base image library */ 196 struct dbll_library_obj *base_lib; /* Base image library */
197 struct rmm_target_obj *rmm; /* Remote memory manager for DSP */ 197 struct rmm_target_obj *rmm; /* Remote memory manager for DSP */
@@ -206,8 +206,8 @@ struct nldr_object {
206 u32 *seg_table; /* memtypes of dynamic memory segs 206 u32 *seg_table; /* memtypes of dynamic memory segs
207 * indexed by segid 207 * indexed by segid
208 */ 208 */
209 u16 us_dsp_mau_size; /* Size of DSP MAU */ 209 u16 dsp_mau_size; /* Size of DSP MAU */
210 u16 us_dsp_word_size; /* Size of DSP word */ 210 u16 dsp_word_size; /* Size of DSP word */
211}; 211};
212 212
213/* 213/*
@@ -220,7 +220,7 @@ struct nldr_nodeobject {
220 struct dsp_uuid uuid; /* Node's UUID */ 220 struct dsp_uuid uuid; /* Node's UUID */
221 bool dynamic; /* Dynamically loaded node? */ 221 bool dynamic; /* Dynamically loaded node? */
222 bool overlay; /* Overlay node? */ 222 bool overlay; /* Overlay node? */
223 bool *pf_phase_split; /* Multiple phase libraries? */ 223 bool *phase_split; /* Multiple phase libraries? */
224 struct lib_node root; /* Library containing node phase */ 224 struct lib_node root; /* Library containing node phase */
225 struct lib_node create_lib; /* Library with create phase lib */ 225 struct lib_node create_lib; /* Library with create phase lib */
226 struct lib_node execute_lib; /* Library with execute phase lib */ 226 struct lib_node execute_lib; /* Library with execute phase lib */
@@ -260,12 +260,9 @@ static struct dbll_fxns ldr_fxns = {
260 (dbll_get_sect_fxn) dbll_get_sect, 260 (dbll_get_sect_fxn) dbll_get_sect,
261 (dbll_init_fxn) dbll_init, 261 (dbll_init_fxn) dbll_init,
262 (dbll_load_fxn) dbll_load, 262 (dbll_load_fxn) dbll_load,
263 (dbll_load_sect_fxn) dbll_load_sect,
264 (dbll_open_fxn) dbll_open, 263 (dbll_open_fxn) dbll_open,
265 (dbll_read_sect_fxn) dbll_read_sect, 264 (dbll_read_sect_fxn) dbll_read_sect,
266 (dbll_set_attrs_fxn) dbll_set_attrs,
267 (dbll_unload_fxn) dbll_unload, 265 (dbll_unload_fxn) dbll_unload,
268 (dbll_unload_sect_fxn) dbll_unload_sect,
269}; 266};
270 267
271static u32 refs; /* module reference count */ 268static u32 refs; /* module reference count */
@@ -329,7 +326,7 @@ int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
329 if (nldr_node_obj == NULL) { 326 if (nldr_node_obj == NULL) {
330 status = -ENOMEM; 327 status = -ENOMEM;
331 } else { 328 } else {
332 nldr_node_obj->pf_phase_split = pf_phase_split; 329 nldr_node_obj->phase_split = pf_phase_split;
333 nldr_node_obj->pers_libs = 0; 330 nldr_node_obj->pers_libs = 0;
334 nldr_node_obj->nldr_obj = nldr_obj; 331 nldr_node_obj->nldr_obj = nldr_obj;
335 nldr_node_obj->priv_ref = priv_ref; 332 nldr_node_obj->priv_ref = priv_ref;
@@ -339,7 +336,7 @@ int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
339 * Determine if node is a dynamically loaded node from 336 * Determine if node is a dynamically loaded node from
340 * ndb_props. 337 * ndb_props.
341 */ 338 */
342 if (node_props->us_load_type == NLDR_DYNAMICLOAD) { 339 if (node_props->load_type == NLDR_DYNAMICLOAD) {
343 /* Dynamic node */ 340 /* Dynamic node */
344 nldr_node_obj->dynamic = true; 341 nldr_node_obj->dynamic = true;
345 /* 342 /*
@@ -347,51 +344,51 @@ int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
347 */ 344 */
348 /* Create phase */ 345 /* Create phase */
349 nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16) 346 nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16)
350 (node_props->ul_data_mem_seg_mask >> CREATEBIT) & 347 (node_props->data_mem_seg_mask >> CREATEBIT) &
351 SEGMASK; 348 SEGMASK;
352 nldr_node_obj->code_data_flag_mask |= 349 nldr_node_obj->code_data_flag_mask |=
353 ((node_props->ul_data_mem_seg_mask >> 350 ((node_props->data_mem_seg_mask >>
354 (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT; 351 (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT;
355 nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16) 352 nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16)
356 (node_props->ul_code_mem_seg_mask >> 353 (node_props->code_mem_seg_mask >>
357 CREATEBIT) & SEGMASK; 354 CREATEBIT) & SEGMASK;
358 nldr_node_obj->code_data_flag_mask |= 355 nldr_node_obj->code_data_flag_mask |=
359 ((node_props->ul_code_mem_seg_mask >> 356 ((node_props->code_mem_seg_mask >>
360 (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT; 357 (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT;
361 /* Execute phase */ 358 /* Execute phase */
362 nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16) 359 nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16)
363 (node_props->ul_data_mem_seg_mask >> 360 (node_props->data_mem_seg_mask >>
364 EXECUTEBIT) & SEGMASK; 361 EXECUTEBIT) & SEGMASK;
365 nldr_node_obj->code_data_flag_mask |= 362 nldr_node_obj->code_data_flag_mask |=
366 ((node_props->ul_data_mem_seg_mask >> 363 ((node_props->data_mem_seg_mask >>
367 (EXECUTEBIT + FLAGBIT)) & 1) << 364 (EXECUTEBIT + FLAGBIT)) & 1) <<
368 EXECUTEDATAFLAGBIT; 365 EXECUTEDATAFLAGBIT;
369 nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16) 366 nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16)
370 (node_props->ul_code_mem_seg_mask >> 367 (node_props->code_mem_seg_mask >>
371 EXECUTEBIT) & SEGMASK; 368 EXECUTEBIT) & SEGMASK;
372 nldr_node_obj->code_data_flag_mask |= 369 nldr_node_obj->code_data_flag_mask |=
373 ((node_props->ul_code_mem_seg_mask >> 370 ((node_props->code_mem_seg_mask >>
374 (EXECUTEBIT + FLAGBIT)) & 1) << 371 (EXECUTEBIT + FLAGBIT)) & 1) <<
375 EXECUTECODEFLAGBIT; 372 EXECUTECODEFLAGBIT;
376 /* Delete phase */ 373 /* Delete phase */
377 nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16) 374 nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16)
378 (node_props->ul_data_mem_seg_mask >> DELETEBIT) & 375 (node_props->data_mem_seg_mask >> DELETEBIT) &
379 SEGMASK; 376 SEGMASK;
380 nldr_node_obj->code_data_flag_mask |= 377 nldr_node_obj->code_data_flag_mask |=
381 ((node_props->ul_data_mem_seg_mask >> 378 ((node_props->data_mem_seg_mask >>
382 (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT; 379 (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT;
383 nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16) 380 nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16)
384 (node_props->ul_code_mem_seg_mask >> 381 (node_props->code_mem_seg_mask >>
385 DELETEBIT) & SEGMASK; 382 DELETEBIT) & SEGMASK;
386 nldr_node_obj->code_data_flag_mask |= 383 nldr_node_obj->code_data_flag_mask |=
387 ((node_props->ul_code_mem_seg_mask >> 384 ((node_props->code_mem_seg_mask >>
388 (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT; 385 (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT;
389 } else { 386 } else {
390 /* Non-dynamically loaded nodes are part of the 387 /* Non-dynamically loaded nodes are part of the
391 * base image */ 388 * base image */
392 nldr_node_obj->root.lib = nldr_obj->base_lib; 389 nldr_node_obj->root.lib = nldr_obj->base_lib;
393 /* Check for overlay node */ 390 /* Check for overlay node */
394 if (node_props->us_load_type == NLDR_OVLYLOAD) 391 if (node_props->load_type == NLDR_OVLYLOAD)
395 nldr_node_obj->overlay = true; 392 nldr_node_obj->overlay = true;
396 393
397 } 394 }
@@ -432,13 +429,13 @@ int nldr_create(struct nldr_object **nldr,
432 DBC_REQUIRE(nldr != NULL); 429 DBC_REQUIRE(nldr != NULL);
433 DBC_REQUIRE(hdev_obj != NULL); 430 DBC_REQUIRE(hdev_obj != NULL);
434 DBC_REQUIRE(pattrs != NULL); 431 DBC_REQUIRE(pattrs != NULL);
435 DBC_REQUIRE(pattrs->pfn_ovly != NULL); 432 DBC_REQUIRE(pattrs->ovly != NULL);
436 DBC_REQUIRE(pattrs->pfn_write != NULL); 433 DBC_REQUIRE(pattrs->write != NULL);
437 434
438 /* Allocate dynamic loader object */ 435 /* Allocate dynamic loader object */
439 nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL); 436 nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
440 if (nldr_obj) { 437 if (nldr_obj) {
441 nldr_obj->hdev_obj = hdev_obj; 438 nldr_obj->dev_obj = hdev_obj;
442 /* warning, lazy status checking alert! */ 439 /* warning, lazy status checking alert! */
443 dev_get_cod_mgr(hdev_obj, &cod_mgr); 440 dev_get_cod_mgr(hdev_obj, &cod_mgr);
444 if (cod_mgr) { 441 if (cod_mgr) {
@@ -453,8 +450,8 @@ int nldr_create(struct nldr_object **nldr,
453 } 450 }
454 status = 0; 451 status = 0;
455 /* end lazy status checking */ 452 /* end lazy status checking */
456 nldr_obj->us_dsp_mau_size = pattrs->us_dsp_mau_size; 453 nldr_obj->dsp_mau_size = pattrs->dsp_mau_size;
457 nldr_obj->us_dsp_word_size = pattrs->us_dsp_word_size; 454 nldr_obj->dsp_word_size = pattrs->dsp_word_size;
458 nldr_obj->ldr_fxns = ldr_fxns; 455 nldr_obj->ldr_fxns = ldr_fxns;
459 if (!(nldr_obj->ldr_fxns.init_fxn())) 456 if (!(nldr_obj->ldr_fxns.init_fxn()))
460 status = -ENOMEM; 457 status = -ENOMEM;
@@ -464,7 +461,7 @@ int nldr_create(struct nldr_object **nldr,
464 } 461 }
465 /* Create the DCD Manager */ 462 /* Create the DCD Manager */
466 if (!status) 463 if (!status)
467 status = dcd_create_manager(NULL, &nldr_obj->hdcd_mgr); 464 status = dcd_create_manager(NULL, &nldr_obj->dcd_mgr);
468 465
469 /* Get dynamic loading memory sections from base lib */ 466 /* Get dynamic loading memory sections from base lib */
470 if (!status) { 467 if (!status) {
@@ -474,7 +471,7 @@ int nldr_create(struct nldr_object **nldr,
474 &ul_len); 471 &ul_len);
475 if (!status) { 472 if (!status) {
476 psz_coff_buf = 473 psz_coff_buf =
477 kzalloc(ul_len * nldr_obj->us_dsp_mau_size, 474 kzalloc(ul_len * nldr_obj->dsp_mau_size,
478 GFP_KERNEL); 475 GFP_KERNEL);
479 if (!psz_coff_buf) 476 if (!psz_coff_buf)
480 status = -ENOMEM; 477 status = -ENOMEM;
@@ -536,9 +533,9 @@ int nldr_create(struct nldr_object **nldr,
536 new_attrs.free = (dbll_free_fxn) remote_free; 533 new_attrs.free = (dbll_free_fxn) remote_free;
537 new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value; 534 new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value;
538 new_attrs.sym_handle = nldr_obj; 535 new_attrs.sym_handle = nldr_obj;
539 new_attrs.write = (dbll_write_fxn) pattrs->pfn_write; 536 new_attrs.write = (dbll_write_fxn) pattrs->write;
540 nldr_obj->ovly_fxn = pattrs->pfn_ovly; 537 nldr_obj->ovly_fxn = pattrs->ovly;
541 nldr_obj->write_fxn = pattrs->pfn_write; 538 nldr_obj->write_fxn = pattrs->write;
542 nldr_obj->ldr_attrs = new_attrs; 539 nldr_obj->ldr_attrs = new_attrs;
543 } 540 }
544 kfree(rmm_segs); 541 kfree(rmm_segs);
@@ -553,7 +550,7 @@ int nldr_create(struct nldr_object **nldr,
553 DBC_ASSERT(!status); 550 DBC_ASSERT(!status);
554 /* First count number of overlay nodes */ 551 /* First count number of overlay nodes */
555 status = 552 status =
556 dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file, 553 dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file,
557 add_ovly_node, (void *)nldr_obj); 554 add_ovly_node, (void *)nldr_obj);
558 /* Now build table of overlay nodes */ 555 /* Now build table of overlay nodes */
559 if (!status && nldr_obj->ovly_nodes > 0) { 556 if (!status && nldr_obj->ovly_nodes > 0) {
@@ -563,7 +560,7 @@ int nldr_create(struct nldr_object **nldr,
563 nldr_obj->ovly_nodes, GFP_KERNEL); 560 nldr_obj->ovly_nodes, GFP_KERNEL);
564 /* Put overlay nodes in the table */ 561 /* Put overlay nodes in the table */
565 nldr_obj->ovly_nid = 0; 562 nldr_obj->ovly_nid = 0;
566 status = dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file, 563 status = dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file,
567 add_ovly_node, 564 add_ovly_node,
568 (void *)nldr_obj); 565 (void *)nldr_obj);
569 } 566 }
@@ -607,8 +604,8 @@ void nldr_delete(struct nldr_object *nldr_obj)
607 604
608 kfree(nldr_obj->seg_table); 605 kfree(nldr_obj->seg_table);
609 606
610 if (nldr_obj->hdcd_mgr) 607 if (nldr_obj->dcd_mgr)
611 dcd_destroy_manager(nldr_obj->hdcd_mgr); 608 dcd_destroy_manager(nldr_obj->dcd_mgr);
612 609
613 /* Free overlay node information */ 610 /* Free overlay node information */
614 if (nldr_obj->ovly_table) { 611 if (nldr_obj->ovly_table) {
@@ -681,7 +678,7 @@ int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
681 678
682 nldr_obj = nldr_node_obj->nldr_obj; 679 nldr_obj = nldr_node_obj->nldr_obj;
683 /* Called from node_create(), node_delete(), or node_run(). */ 680 /* Called from node_create(), node_delete(), or node_run(). */
684 if (nldr_node_obj->dynamic && *nldr_node_obj->pf_phase_split) { 681 if (nldr_node_obj->dynamic && *nldr_node_obj->phase_split) {
685 switch (nldr_node_obj->phase) { 682 switch (nldr_node_obj->phase) {
686 case NLDR_CREATE: 683 case NLDR_CREATE:
687 root = nldr_node_obj->create_lib; 684 root = nldr_node_obj->create_lib;
@@ -824,7 +821,7 @@ int nldr_load(struct nldr_nodeobject *nldr_node_obj,
824 false, nldr_node_obj->lib_path, phase, 0); 821 false, nldr_node_obj->lib_path, phase, 0);
825 822
826 if (!status) { 823 if (!status) {
827 if (*nldr_node_obj->pf_phase_split) { 824 if (*nldr_node_obj->phase_split) {
828 switch (phase) { 825 switch (phase) {
829 case NLDR_CREATE: 826 case NLDR_CREATE:
830 nldr_node_obj->create_lib = 827 nldr_node_obj->create_lib =
@@ -871,7 +868,7 @@ int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
871 868
872 if (nldr_node_obj != NULL) { 869 if (nldr_node_obj != NULL) {
873 if (nldr_node_obj->dynamic) { 870 if (nldr_node_obj->dynamic) {
874 if (*nldr_node_obj->pf_phase_split) { 871 if (*nldr_node_obj->phase_split) {
875 switch (phase) { 872 switch (phase) {
876 case NLDR_CREATE: 873 case NLDR_CREATE:
877 root_lib = &nldr_node_obj->create_lib; 874 root_lib = &nldr_node_obj->create_lib;
@@ -1008,13 +1005,13 @@ static int add_ovly_node(struct dsp_uuid *uuid_obj,
1008 goto func_end; 1005 goto func_end;
1009 1006
1010 status = 1007 status =
1011 dcd_get_object_def(nldr_obj->hdcd_mgr, uuid_obj, obj_type, 1008 dcd_get_object_def(nldr_obj->dcd_mgr, uuid_obj, obj_type,
1012 &obj_def); 1009 &obj_def);
1013 if (status) 1010 if (status)
1014 goto func_end; 1011 goto func_end;
1015 1012
1016 /* If overlay node, add to the list */ 1013 /* If overlay node, add to the list */
1017 if (obj_def.obj_data.node_obj.us_load_type == NLDR_OVLYLOAD) { 1014 if (obj_def.obj_data.node_obj.load_type == NLDR_OVLYLOAD) {
1018 if (nldr_obj->ovly_table == NULL) { 1015 if (nldr_obj->ovly_table == NULL) {
1019 nldr_obj->ovly_nodes++; 1016 nldr_obj->ovly_nodes++;
1020 } else { 1017 } else {
@@ -1038,13 +1035,13 @@ static int add_ovly_node(struct dsp_uuid *uuid_obj,
1038 } 1035 }
1039 } 1036 }
1040 /* These were allocated in dcd_get_object_def */ 1037 /* These were allocated in dcd_get_object_def */
1041 kfree(obj_def.obj_data.node_obj.pstr_create_phase_fxn); 1038 kfree(obj_def.obj_data.node_obj.str_create_phase_fxn);
1042 1039
1043 kfree(obj_def.obj_data.node_obj.pstr_execute_phase_fxn); 1040 kfree(obj_def.obj_data.node_obj.str_execute_phase_fxn);
1044 1041
1045 kfree(obj_def.obj_data.node_obj.pstr_delete_phase_fxn); 1042 kfree(obj_def.obj_data.node_obj.str_delete_phase_fxn);
1046 1043
1047 kfree(obj_def.obj_data.node_obj.pstr_i_alg_name); 1044 kfree(obj_def.obj_data.node_obj.str_i_alg_name);
1048 1045
1049func_end: 1046func_end:
1050 return status; 1047 return status;
@@ -1265,14 +1262,14 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
1265 if (depth == 0) { 1262 if (depth == 0) {
1266 status = 1263 status =
1267 dcd_get_library_name(nldr_node_obj->nldr_obj-> 1264 dcd_get_library_name(nldr_node_obj->nldr_obj->
1268 hdcd_mgr, &uuid, psz_file_name, 1265 dcd_mgr, &uuid, psz_file_name,
1269 &dw_buf_size, phase, 1266 &dw_buf_size, phase,
1270 nldr_node_obj->pf_phase_split); 1267 nldr_node_obj->phase_split);
1271 } else { 1268 } else {
1272 /* Dependent libraries are registered with a phase */ 1269 /* Dependent libraries are registered with a phase */
1273 status = 1270 status =
1274 dcd_get_library_name(nldr_node_obj->nldr_obj-> 1271 dcd_get_library_name(nldr_node_obj->nldr_obj->
1275 hdcd_mgr, &uuid, psz_file_name, 1272 dcd_mgr, &uuid, psz_file_name,
1276 &dw_buf_size, NLDR_NOPHASE, 1273 &dw_buf_size, NLDR_NOPHASE,
1277 NULL); 1274 NULL);
1278 } 1275 }
@@ -1312,12 +1309,12 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
1312 depth++; 1309 depth++;
1313 /* Get number of dependent libraries */ 1310 /* Get number of dependent libraries */
1314 status = 1311 status =
1315 dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->hdcd_mgr, 1312 dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->dcd_mgr,
1316 &uuid, &nd_libs, &np_libs, phase); 1313 &uuid, &nd_libs, &np_libs, phase);
1317 } 1314 }
1318 DBC_ASSERT(nd_libs >= np_libs); 1315 DBC_ASSERT(nd_libs >= np_libs);
1319 if (!status) { 1316 if (!status) {
1320 if (!(*nldr_node_obj->pf_phase_split)) 1317 if (!(*nldr_node_obj->phase_split))
1321 np_libs = 0; 1318 np_libs = 0;
1322 1319
1323 /* nd_libs = #of dependent libraries */ 1320 /* nd_libs = #of dependent libraries */
@@ -1345,7 +1342,7 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
1345 /* Get the dependent library UUIDs */ 1342 /* Get the dependent library UUIDs */
1346 status = 1343 status =
1347 dcd_get_dep_libs(nldr_node_obj-> 1344 dcd_get_dep_libs(nldr_node_obj->
1348 nldr_obj->hdcd_mgr, &uuid, 1345 nldr_obj->dcd_mgr, &uuid,
1349 nd_libs, dep_lib_uui_ds, 1346 nd_libs, dep_lib_uui_ds,
1350 persistent_dep_libs, 1347 persistent_dep_libs,
1351 phase); 1348 phase);
@@ -1362,7 +1359,7 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
1362 * is, then record it. If root library IS persistent, 1359 * is, then record it. If root library IS persistent,
1363 * the deplib is already included */ 1360 * the deplib is already included */
1364 if (!root_prstnt && persistent_dep_libs[i] && 1361 if (!root_prstnt && persistent_dep_libs[i] &&
1365 *nldr_node_obj->pf_phase_split) { 1362 *nldr_node_obj->phase_split) {
1366 if ((nldr_node_obj->pers_libs) >= MAXLIBS) { 1363 if ((nldr_node_obj->pers_libs) >= MAXLIBS) {
1367 status = -EILSEQ; 1364 status = -EILSEQ;
1368 break; 1365 break;
@@ -1388,11 +1385,11 @@ static int load_lib(struct nldr_nodeobject *nldr_node_obj,
1388 if (!status) { 1385 if (!status) {
1389 if ((status != 0) && 1386 if ((status != 0) &&
1390 !root_prstnt && persistent_dep_libs[i] && 1387 !root_prstnt && persistent_dep_libs[i] &&
1391 *nldr_node_obj->pf_phase_split) { 1388 *nldr_node_obj->phase_split) {
1392 (nldr_node_obj->pers_libs)++; 1389 (nldr_node_obj->pers_libs)++;
1393 } else { 1390 } else {
1394 if (!persistent_dep_libs[i] || 1391 if (!persistent_dep_libs[i] ||
1395 !(*nldr_node_obj->pf_phase_split)) { 1392 !(*nldr_node_obj->phase_split)) {
1396 nd_libs_loaded++; 1393 nd_libs_loaded++;
1397 } 1394 }
1398 } 1395 }
@@ -1633,8 +1630,8 @@ static int remote_alloc(void **ref, u16 mem_sect, u32 size,
1633 rmm = nldr_obj->rmm; 1630 rmm = nldr_obj->rmm;
1634 /* Convert size to DSP words */ 1631 /* Convert size to DSP words */
1635 word_size = 1632 word_size =
1636 (size + nldr_obj->us_dsp_word_size - 1633 (size + nldr_obj->dsp_word_size -
1637 1) / nldr_obj->us_dsp_word_size; 1634 1) / nldr_obj->dsp_word_size;
1638 /* Modify memory 'align' to account for DSP cache line size */ 1635 /* Modify memory 'align' to account for DSP cache line size */
1639 align = lcm(GEM_CACHE_LINE_SIZE, align); 1636 align = lcm(GEM_CACHE_LINE_SIZE, align);
1640 dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align); 1637 dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align);
@@ -1745,8 +1742,8 @@ static int remote_free(void **ref, u16 space, u32 dsp_address,
1745 1742
1746 /* Convert size to DSP words */ 1743 /* Convert size to DSP words */
1747 word_size = 1744 word_size =
1748 (size + nldr_obj->us_dsp_word_size - 1745 (size + nldr_obj->dsp_word_size -
1749 1) / nldr_obj->us_dsp_word_size; 1746 1) / nldr_obj->dsp_word_size;
1750 1747
1751 if (rmm_free(rmm, space, dsp_address, word_size, reserve)) 1748 if (rmm_free(rmm, space, dsp_address, word_size, reserve))
1752 status = 0; 1749 status = 0;
@@ -1906,7 +1903,7 @@ int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
1906 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node, 1903 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, (u32) nldr_node,
1907 sym_addr, offset_range, (u32) offset_output, sym_name); 1904 sym_addr, offset_range, (u32) offset_output, sym_name);
1908 1905
1909 if (nldr_node->dynamic && *nldr_node->pf_phase_split) { 1906 if (nldr_node->dynamic && *nldr_node->phase_split) {
1910 switch (nldr_node->phase) { 1907 switch (nldr_node->phase) {
1911 case NLDR_CREATE: 1908 case NLDR_CREATE:
1912 root = nldr_node->create_lib; 1909 root = nldr_node->create_lib;
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index 1562f3c1281..5dadaa445ad 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -17,6 +17,9 @@
17 */ 17 */
18 18
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/bitmap.h>
21#include <linux/list.h>
22
20/* ----------------------------------- Host OS */ 23/* ----------------------------------- Host OS */
21#include <dspbridge/host_os.h> 24#include <dspbridge/host_os.h>
22 25
@@ -27,7 +30,6 @@
27#include <dspbridge/dbc.h> 30#include <dspbridge/dbc.h>
28 31
29/* ----------------------------------- OS Adaptation Layer */ 32/* ----------------------------------- OS Adaptation Layer */
30#include <dspbridge/list.h>
31#include <dspbridge/memdefs.h> 33#include <dspbridge/memdefs.h>
32#include <dspbridge/proc.h> 34#include <dspbridge/proc.h>
33#include <dspbridge/strm.h> 35#include <dspbridge/strm.h>
@@ -50,7 +52,6 @@
50#include <dspbridge/dspioctl.h> 52#include <dspbridge/dspioctl.h>
51 53
52/* ----------------------------------- Others */ 54/* ----------------------------------- Others */
53#include <dspbridge/gb.h>
54#include <dspbridge/uuidutil.h> 55#include <dspbridge/uuidutil.h>
55 56
56/* ----------------------------------- This */ 57/* ----------------------------------- This */
@@ -63,7 +64,6 @@
63#include <dspbridge/nldr.h> 64#include <dspbridge/nldr.h>
64 65
65#include <dspbridge/drv.h> 66#include <dspbridge/drv.h>
66#include <dspbridge/drvdefs.h>
67#include <dspbridge/resourcecleanup.h> 67#include <dspbridge/resourcecleanup.h>
68#include <_tiomap.h> 68#include <_tiomap.h>
69 69
@@ -124,33 +124,36 @@
124 * ======== node_mgr ======== 124 * ======== node_mgr ========
125 */ 125 */
126struct node_mgr { 126struct node_mgr {
127 struct dev_object *hdev_obj; /* Device object */ 127 struct dev_object *dev_obj; /* Device object */
128 /* Function interface to Bridge driver */ 128 /* Function interface to Bridge driver */
129 struct bridge_drv_interface *intf_fxns; 129 struct bridge_drv_interface *intf_fxns;
130 struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */ 130 struct dcd_manager *dcd_mgr; /* Proc/Node data manager */
131 struct disp_object *disp_obj; /* Node dispatcher */ 131 struct disp_object *disp_obj; /* Node dispatcher */
132 struct lst_list *node_list; /* List of all allocated nodes */ 132 struct list_head node_list; /* List of all allocated nodes */
133 u32 num_nodes; /* Number of nodes in node_list */ 133 u32 num_nodes; /* Number of nodes in node_list */
134 u32 num_created; /* Number of nodes *created* on DSP */ 134 u32 num_created; /* Number of nodes *created* on DSP */
135 struct gb_t_map *pipe_map; /* Pipe connection bit map */ 135 DECLARE_BITMAP(pipe_map, MAXPIPES); /* Pipe connection bitmap */
136 struct gb_t_map *pipe_done_map; /* Pipes that are half free */ 136 DECLARE_BITMAP(pipe_done_map, MAXPIPES); /* Pipes that are half free */
137 struct gb_t_map *chnl_map; /* Channel allocation bit map */ 137 /* Channel allocation bitmap */
138 struct gb_t_map *dma_chnl_map; /* DMA Channel allocation bit map */ 138 DECLARE_BITMAP(chnl_map, CHNL_MAXCHANNELS);
139 struct gb_t_map *zc_chnl_map; /* Zero-Copy Channel alloc bit map */ 139 /* DMA Channel allocation bitmap */
140 DECLARE_BITMAP(dma_chnl_map, CHNL_MAXCHANNELS);
141 /* Zero-Copy Channel alloc bitmap */
142 DECLARE_BITMAP(zc_chnl_map, CHNL_MAXCHANNELS);
140 struct ntfy_object *ntfy_obj; /* Manages registered notifications */ 143 struct ntfy_object *ntfy_obj; /* Manages registered notifications */
141 struct mutex node_mgr_lock; /* For critical sections */ 144 struct mutex node_mgr_lock; /* For critical sections */
142 u32 ul_fxn_addrs[NUMRMSFXNS]; /* RMS function addresses */ 145 u32 fxn_addrs[NUMRMSFXNS]; /* RMS function addresses */
143 struct msg_mgr *msg_mgr_obj; 146 struct msg_mgr *msg_mgr_obj;
144 147
145 /* Processor properties needed by Node Dispatcher */ 148 /* Processor properties needed by Node Dispatcher */
146 u32 ul_num_chnls; /* Total number of channels */ 149 u32 num_chnls; /* Total number of channels */
147 u32 ul_chnl_offset; /* Offset of chnl ids rsvd for RMS */ 150 u32 chnl_offset; /* Offset of chnl ids rsvd for RMS */
148 u32 ul_chnl_buf_size; /* Buffer size for data to RMS */ 151 u32 chnl_buf_size; /* Buffer size for data to RMS */
149 int proc_family; /* eg, 5000 */ 152 int proc_family; /* eg, 5000 */
150 int proc_type; /* eg, 5510 */ 153 int proc_type; /* eg, 5510 */
151 u32 udsp_word_size; /* Size of DSP word on host bytes */ 154 u32 dsp_word_size; /* Size of DSP word on host bytes */
152 u32 udsp_data_mau_size; /* Size of DSP data MAU */ 155 u32 dsp_data_mau_size; /* Size of DSP data MAU */
153 u32 udsp_mau_size; /* Size of MAU */ 156 u32 dsp_mau_size; /* Size of MAU */
154 s32 min_pri; /* Minimum runtime priority for node */ 157 s32 min_pri; /* Minimum runtime priority for node */
155 s32 max_pri; /* Maximum runtime priority for node */ 158 s32 max_pri; /* Maximum runtime priority for node */
156 159
@@ -185,14 +188,14 @@ struct stream_chnl {
185 */ 188 */
186struct node_object { 189struct node_object {
187 struct list_head list_elem; 190 struct list_head list_elem;
188 struct node_mgr *hnode_mgr; /* The manager of this node */ 191 struct node_mgr *node_mgr; /* The manager of this node */
189 struct proc_object *hprocessor; /* Back pointer to processor */ 192 struct proc_object *processor; /* Back pointer to processor */
190 struct dsp_uuid node_uuid; /* Node's ID */ 193 struct dsp_uuid node_uuid; /* Node's ID */
191 s32 prio; /* Node's current priority */ 194 s32 prio; /* Node's current priority */
192 u32 utimeout; /* Timeout for blocking NODE calls */ 195 u32 timeout; /* Timeout for blocking NODE calls */
193 u32 heap_size; /* Heap Size */ 196 u32 heap_size; /* Heap Size */
194 u32 udsp_heap_virt_addr; /* Heap Size */ 197 u32 dsp_heap_virt_addr; /* Heap Size */
195 u32 ugpp_heap_virt_addr; /* Heap Size */ 198 u32 gpp_heap_virt_addr; /* Heap Size */
196 enum node_type ntype; /* Type of node: message, task, etc */ 199 enum node_type ntype; /* Type of node: message, task, etc */
197 enum node_state node_state; /* NODE_ALLOCATED, NODE_CREATED, ... */ 200 enum node_state node_state; /* NODE_ALLOCATED, NODE_CREATED, ... */
198 u32 num_inputs; /* Current number of inputs */ 201 u32 num_inputs; /* Current number of inputs */
@@ -204,9 +207,9 @@ struct node_object {
204 struct node_createargs create_args; /* Args for node create func */ 207 struct node_createargs create_args; /* Args for node create func */
205 nodeenv node_env; /* Environment returned by RMS */ 208 nodeenv node_env; /* Environment returned by RMS */
206 struct dcd_genericobj dcd_props; /* Node properties from DCD */ 209 struct dcd_genericobj dcd_props; /* Node properties from DCD */
207 struct dsp_cbdata *pargs; /* Optional args to pass to node */ 210 struct dsp_cbdata *args; /* Optional args to pass to node */
208 struct ntfy_object *ntfy_obj; /* Manages registered notifications */ 211 struct ntfy_object *ntfy_obj; /* Manages registered notifications */
209 char *pstr_dev_name; /* device name, if device node */ 212 char *str_dev_name; /* device name, if device node */
210 struct sync_object *sync_done; /* Synchronize node_terminate */ 213 struct sync_object *sync_done; /* Synchronize node_terminate */
211 s32 exit_status; /* execute function return status */ 214 s32 exit_status; /* execute function return status */
212 215
@@ -232,9 +235,9 @@ struct node_object {
232 235
233/* Default buffer attributes */ 236/* Default buffer attributes */
234static struct dsp_bufferattr node_dfltbufattrs = { 237static struct dsp_bufferattr node_dfltbufattrs = {
235 0, /* cb_struct */ 238 .cb_struct = 0,
236 1, /* segment_id */ 239 .segment_id = 1,
237 0, /* buf_alignment */ 240 .buf_alignment = 0,
238}; 241};
239 242
240static void delete_node(struct node_object *hnode, 243static void delete_node(struct node_object *hnode,
@@ -280,8 +283,7 @@ enum node_state node_get_state(void *hnode)
280 struct node_object *pnode = (struct node_object *)hnode; 283 struct node_object *pnode = (struct node_object *)hnode;
281 if (!pnode) 284 if (!pnode)
282 return -1; 285 return -1;
283 else 286 return pnode->node_state;
284 return pnode->node_state;
285} 287}
286 288
287/* 289/*
@@ -365,7 +367,7 @@ int node_allocate(struct proc_object *hprocessor,
365 } 367 }
366 368
367 /* Assuming that 0 is not a valid function address */ 369 /* Assuming that 0 is not a valid function address */
368 if (hnode_mgr->ul_fxn_addrs[0] == 0) { 370 if (hnode_mgr->fxn_addrs[0] == 0) {
369 /* No RMS on target - we currently can't handle this */ 371 /* No RMS on target - we currently can't handle this */
370 pr_err("%s: Failed, no RMS in base image\n", __func__); 372 pr_err("%s: Failed, no RMS in base image\n", __func__);
371 status = -EPERM; 373 status = -EPERM;
@@ -387,28 +389,28 @@ int node_allocate(struct proc_object *hprocessor,
387 status = -ENOMEM; 389 status = -ENOMEM;
388 goto func_end; 390 goto func_end;
389 } 391 }
390 pnode->hnode_mgr = hnode_mgr; 392 pnode->node_mgr = hnode_mgr;
391 /* This critical section protects get_node_props */ 393 /* This critical section protects get_node_props */
392 mutex_lock(&hnode_mgr->node_mgr_lock); 394 mutex_lock(&hnode_mgr->node_mgr_lock);
393 395
394 /* Get dsp_ndbprops from node database */ 396 /* Get dsp_ndbprops from node database */
395 status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid, 397 status = get_node_props(hnode_mgr->dcd_mgr, pnode, node_uuid,
396 &(pnode->dcd_props)); 398 &(pnode->dcd_props));
397 if (status) 399 if (status)
398 goto func_cont; 400 goto func_cont;
399 401
400 pnode->node_uuid = *node_uuid; 402 pnode->node_uuid = *node_uuid;
401 pnode->hprocessor = hprocessor; 403 pnode->processor = hprocessor;
402 pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype; 404 pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
403 pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout; 405 pnode->timeout = pnode->dcd_props.obj_data.node_obj.ndb_props.timeout;
404 pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio; 406 pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;
405 407
406 /* Currently only C64 DSP builds support Node Dynamic * heaps */ 408 /* Currently only C64 DSP builds support Node Dynamic * heaps */
407 /* Allocate memory for node heap */ 409 /* Allocate memory for node heap */
408 pnode->create_args.asa.task_arg_obj.heap_size = 0; 410 pnode->create_args.asa.task_arg_obj.heap_size = 0;
409 pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0; 411 pnode->create_args.asa.task_arg_obj.dsp_heap_addr = 0;
410 pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0; 412 pnode->create_args.asa.task_arg_obj.dsp_heap_res_addr = 0;
411 pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0; 413 pnode->create_args.asa.task_arg_obj.gpp_heap_addr = 0;
412 if (!attr_in) 414 if (!attr_in)
413 goto func_cont; 415 goto func_cont;
414 416
@@ -424,7 +426,7 @@ int node_allocate(struct proc_object *hprocessor,
424 } else { 426 } else {
425 pnode->create_args.asa.task_arg_obj.heap_size = 427 pnode->create_args.asa.task_arg_obj.heap_size =
426 attr_in->heap_size; 428 attr_in->heap_size;
427 pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 429 pnode->create_args.asa.task_arg_obj.gpp_heap_addr =
428 (u32) attr_in->pgpp_virt_addr; 430 (u32) attr_in->pgpp_virt_addr;
429 } 431 }
430 if (status) 432 if (status)
@@ -434,7 +436,7 @@ int node_allocate(struct proc_object *hprocessor,
434 pnode->create_args.asa.task_arg_obj. 436 pnode->create_args.asa.task_arg_obj.
435 heap_size + PAGE_SIZE, 437 heap_size + PAGE_SIZE,
436 (void **)&(pnode->create_args.asa. 438 (void **)&(pnode->create_args.asa.
437 task_arg_obj.udsp_heap_res_addr), 439 task_arg_obj.dsp_heap_res_addr),
438 pr_ctxt); 440 pr_ctxt);
439 if (status) { 441 if (status) {
440 pr_err("%s: Failed to reserve memory for heap: 0x%x\n", 442 pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
@@ -457,20 +459,20 @@ int node_allocate(struct proc_object *hprocessor,
457 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr, 459 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
458 pnode->create_args.asa.task_arg_obj.heap_size, 460 pnode->create_args.asa.task_arg_obj.heap_size,
459 (void *)pnode->create_args.asa.task_arg_obj. 461 (void *)pnode->create_args.asa.task_arg_obj.
460 udsp_heap_res_addr, (void **)&mapped_addr, map_attrs, 462 dsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
461 pr_ctxt); 463 pr_ctxt);
462 if (status) 464 if (status)
463 pr_err("%s: Failed to map memory for Heap: 0x%x\n", 465 pr_err("%s: Failed to map memory for Heap: 0x%x\n",
464 __func__, status); 466 __func__, status);
465 else 467 else
466 pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 468 pnode->create_args.asa.task_arg_obj.dsp_heap_addr =
467 (u32) mapped_addr; 469 (u32) mapped_addr;
468 470
469func_cont: 471func_cont:
470 mutex_unlock(&hnode_mgr->node_mgr_lock); 472 mutex_unlock(&hnode_mgr->node_mgr_lock);
471 if (attr_in != NULL) { 473 if (attr_in != NULL) {
472 /* Overrides of NBD properties */ 474 /* Overrides of NBD properties */
473 pnode->utimeout = attr_in->utimeout; 475 pnode->timeout = attr_in->timeout;
474 pnode->prio = attr_in->prio; 476 pnode->prio = attr_in->prio;
475 } 477 }
476 /* Create object to manage notifications */ 478 /* Create object to manage notifications */
@@ -562,7 +564,7 @@ func_cont:
562 /* Create a message queue for this node */ 564 /* Create a message queue for this node */
563 intf_fxns = hnode_mgr->intf_fxns; 565 intf_fxns = hnode_mgr->intf_fxns;
564 status = 566 status =
565 (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj, 567 (*intf_fxns->msg_create_queue) (hnode_mgr->msg_mgr_obj,
566 &pnode->msg_queue_obj, 568 &pnode->msg_queue_obj,
567 0, 569 0,
568 pnode->create_args.asa. 570 pnode->create_args.asa.
@@ -573,7 +575,7 @@ func_cont:
573 if (!status) { 575 if (!status) {
574 /* Create object for dynamic loading */ 576 /* Create object for dynamic loading */
575 577
576 status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj, 578 status = hnode_mgr->nldr_fxns.allocate(hnode_mgr->nldr_obj,
577 (void *)pnode, 579 (void *)pnode,
578 &pnode->dcd_props. 580 &pnode->dcd_props.
579 obj_data.node_obj, 581 obj_data.node_obj,
@@ -594,7 +596,7 @@ func_cont:
594 stack_seg_name, STACKSEGLABEL) == 0) { 596 stack_seg_name, STACKSEGLABEL) == 0) {
595 status = 597 status =
596 hnode_mgr->nldr_fxns. 598 hnode_mgr->nldr_fxns.
597 pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG", 599 get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
598 &dynext_base); 600 &dynext_base);
599 if (status) 601 if (status)
600 pr_err("%s: Failed to get addr for DYNEXT_BEG" 602 pr_err("%s: Failed to get addr for DYNEXT_BEG"
@@ -602,7 +604,7 @@ func_cont:
602 604
603 status = 605 status =
604 hnode_mgr->nldr_fxns. 606 hnode_mgr->nldr_fxns.
605 pfn_get_fxn_addr(pnode->nldr_node_obj, 607 get_fxn_addr(pnode->nldr_node_obj,
606 "L1DSRAM_HEAP", &pul_value); 608 "L1DSRAM_HEAP", &pul_value);
607 609
608 if (status) 610 if (status)
@@ -619,7 +621,7 @@ func_cont:
619 goto func_end; 621 goto func_end;
620 } 622 }
621 623
622 ul_gpp_mem_base = (u32) host_res->dw_mem_base[1]; 624 ul_gpp_mem_base = (u32) host_res->mem_base[1];
623 off_set = pul_value - dynext_base; 625 off_set = pul_value - dynext_base;
624 ul_stack_seg_addr = ul_gpp_mem_base + off_set; 626 ul_stack_seg_addr = ul_gpp_mem_base + off_set;
625 ul_stack_seg_val = readl(ul_stack_seg_addr); 627 ul_stack_seg_val = readl(ul_stack_seg_addr);
@@ -637,13 +639,12 @@ func_cont:
637 if (!status) { 639 if (!status) {
638 /* Add the node to the node manager's list of allocated 640 /* Add the node to the node manager's list of allocated
639 * nodes. */ 641 * nodes. */
640 lst_init_elem((struct list_head *)pnode);
641 NODE_SET_STATE(pnode, NODE_ALLOCATED); 642 NODE_SET_STATE(pnode, NODE_ALLOCATED);
642 643
643 mutex_lock(&hnode_mgr->node_mgr_lock); 644 mutex_lock(&hnode_mgr->node_mgr_lock);
644 645
645 lst_put_tail(hnode_mgr->node_list, (struct list_head *) pnode); 646 list_add_tail(&pnode->list_elem, &hnode_mgr->node_list);
646 ++(hnode_mgr->num_nodes); 647 ++(hnode_mgr->num_nodes);
647 648
648 /* Exit critical section */ 649 /* Exit critical section */
649 mutex_unlock(&hnode_mgr->node_mgr_lock); 650 mutex_unlock(&hnode_mgr->node_mgr_lock);
@@ -711,7 +712,7 @@ DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
711 if (pattr == NULL) 712 if (pattr == NULL)
712 pattr = &node_dfltbufattrs; /* set defaults */ 713 pattr = &node_dfltbufattrs; /* set defaults */
713 714
714 status = proc_get_processor_id(pnode->hprocessor, &proc_id); 715 status = proc_get_processor_id(pnode->processor, &proc_id);
715 if (proc_id != DSP_UNIT) { 716 if (proc_id != DSP_UNIT) {
716 DBC_ASSERT(NULL); 717 DBC_ASSERT(NULL);
717 goto func_end; 718 goto func_end;
@@ -783,10 +784,10 @@ int node_change_priority(struct node_object *hnode, s32 prio)
783 784
784 DBC_REQUIRE(refs > 0); 785 DBC_REQUIRE(refs > 0);
785 786
786 if (!hnode || !hnode->hnode_mgr) { 787 if (!hnode || !hnode->node_mgr) {
787 status = -EFAULT; 788 status = -EFAULT;
788 } else { 789 } else {
789 hnode_mgr = hnode->hnode_mgr; 790 hnode_mgr = hnode->node_mgr;
790 node_type = node_get_type(hnode); 791 node_type = node_get_type(hnode);
791 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) 792 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
792 status = -EPERM; 793 status = -EPERM;
@@ -807,12 +808,12 @@ int node_change_priority(struct node_object *hnode, s32 prio)
807 status = -EBADR; 808 status = -EBADR;
808 goto func_cont; 809 goto func_cont;
809 } 810 }
810 status = proc_get_processor_id(pnode->hprocessor, &proc_id); 811 status = proc_get_processor_id(pnode->processor, &proc_id);
811 if (proc_id == DSP_UNIT) { 812 if (proc_id == DSP_UNIT) {
812 status = 813 status =
813 disp_node_change_priority(hnode_mgr->disp_obj, 814 disp_node_change_priority(hnode_mgr->disp_obj,
814 hnode, 815 hnode,
815 hnode_mgr->ul_fxn_addrs 816 hnode_mgr->fxn_addrs
816 [RMSCHANGENODEPRIORITY], 817 [RMSCHANGENODEPRIORITY],
817 hnode->node_env, prio); 818 hnode->node_env, prio);
818 } 819 }
@@ -841,229 +842,209 @@ int node_connect(struct node_object *node1, u32 stream1,
841 char *pstr_dev_name = NULL; 842 char *pstr_dev_name = NULL;
842 enum node_type node1_type = NODE_TASK; 843 enum node_type node1_type = NODE_TASK;
843 enum node_type node2_type = NODE_TASK; 844 enum node_type node2_type = NODE_TASK;
845 enum dsp_strmmode strm_mode;
844 struct node_strmdef *pstrm_def; 846 struct node_strmdef *pstrm_def;
845 struct node_strmdef *input = NULL; 847 struct node_strmdef *input = NULL;
846 struct node_strmdef *output = NULL; 848 struct node_strmdef *output = NULL;
847 struct node_object *dev_node_obj; 849 struct node_object *dev_node_obj;
848 struct node_object *hnode; 850 struct node_object *hnode;
849 struct stream_chnl *pstream; 851 struct stream_chnl *pstream;
850 u32 pipe_id = GB_NOBITS; 852 u32 pipe_id;
851 u32 chnl_id = GB_NOBITS; 853 u32 chnl_id;
852 s8 chnl_mode; 854 s8 chnl_mode;
853 u32 dw_length; 855 u32 dw_length;
854 int status = 0; 856 int status = 0;
855 DBC_REQUIRE(refs > 0); 857 DBC_REQUIRE(refs > 0);
856 858
857 if ((node1 != (struct node_object *)DSP_HGPPNODE && !node1) || 859 if (!node1 || !node2)
858 (node2 != (struct node_object *)DSP_HGPPNODE && !node2)) 860 return -EFAULT;
859 status = -EFAULT;
860 861
861 if (!status) { 862 /* The two nodes must be on the same processor */
862 /* The two nodes must be on the same processor */ 863 if (node1 != (struct node_object *)DSP_HGPPNODE &&
863 if (node1 != (struct node_object *)DSP_HGPPNODE && 864 node2 != (struct node_object *)DSP_HGPPNODE &&
864 node2 != (struct node_object *)DSP_HGPPNODE && 865 node1->node_mgr != node2->node_mgr)
865 node1->hnode_mgr != node2->hnode_mgr) 866 return -EPERM;
866 status = -EPERM; 867
867 /* Cannot connect a node to itself */ 868 /* Cannot connect a node to itself */
868 if (node1 == node2) 869 if (node1 == node2)
869 status = -EPERM; 870 return -EPERM;
871
872 /* node_get_type() will return NODE_GPP if hnode = DSP_HGPPNODE. */
873 node1_type = node_get_type(node1);
874 node2_type = node_get_type(node2);
875 /* Check stream indices ranges */
876 if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
877 stream1 >= MAX_OUTPUTS(node1)) ||
878 (node2_type != NODE_GPP && node2_type != NODE_DEVICE &&
879 stream2 >= MAX_INPUTS(node2)))
880 return -EINVAL;
870 881
871 } 882 /*
872 if (!status) { 883 * Only the following types of connections are allowed:
873 /* node_get_type() will return NODE_GPP if hnode = 884 * task/dais socket < == > task/dais socket
874 * DSP_HGPPNODE. */ 885 * task/dais socket < == > device
875 node1_type = node_get_type(node1); 886 * task/dais socket < == > GPP
876 node2_type = node_get_type(node2); 887 *
877 /* Check stream indices ranges */ 888 * ie, no message nodes, and at least one task or dais
878 if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE && 889 * socket node.
879 stream1 >= MAX_OUTPUTS(node1)) || (node2_type != NODE_GPP 890 */
880 && node2_type != 891 if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
881 NODE_DEVICE 892 (node1_type != NODE_TASK &&
882 && stream2 >= 893 node1_type != NODE_DAISSOCKET &&
883 MAX_INPUTS(node2))) 894 node2_type != NODE_TASK &&
884 status = -EINVAL; 895 node2_type != NODE_DAISSOCKET))
885 } 896 return -EPERM;
886 if (!status) {
887 /*
888 * Only the following types of connections are allowed:
889 * task/dais socket < == > task/dais socket
890 * task/dais socket < == > device
891 * task/dais socket < == > GPP
892 *
893 * ie, no message nodes, and at least one task or dais
894 * socket node.
895 */
896 if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
897 (node1_type != NODE_TASK && node1_type != NODE_DAISSOCKET &&
898 node2_type != NODE_TASK && node2_type != NODE_DAISSOCKET))
899 status = -EPERM;
900 }
901 /* 897 /*
902 * Check stream mode. Default is STRMMODE_PROCCOPY. 898 * Check stream mode. Default is STRMMODE_PROCCOPY.
903 */ 899 */
904 if (!status && pattrs) { 900 if (pattrs && pattrs->strm_mode != STRMMODE_PROCCOPY)
905 if (pattrs->strm_mode != STRMMODE_PROCCOPY) 901 return -EPERM; /* illegal stream mode */
906 status = -EPERM; /* illegal stream mode */
907
908 }
909 if (status)
910 goto func_end;
911 902
912 if (node1_type != NODE_GPP) { 903 if (node1_type != NODE_GPP) {
913 hnode_mgr = node1->hnode_mgr; 904 hnode_mgr = node1->node_mgr;
914 } else { 905 } else {
915 DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE); 906 DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
916 hnode_mgr = node2->hnode_mgr; 907 hnode_mgr = node2->node_mgr;
917 } 908 }
909
918 /* Enter critical section */ 910 /* Enter critical section */
919 mutex_lock(&hnode_mgr->node_mgr_lock); 911 mutex_lock(&hnode_mgr->node_mgr_lock);
920 912
921 /* Nodes must be in the allocated state */ 913 /* Nodes must be in the allocated state */
922 if (node1_type != NODE_GPP && node_get_state(node1) != NODE_ALLOCATED) 914 if (node1_type != NODE_GPP &&
915 node_get_state(node1) != NODE_ALLOCATED) {
923 status = -EBADR; 916 status = -EBADR;
917 goto out_unlock;
918 }
924 919
925 if (node2_type != NODE_GPP && node_get_state(node2) != NODE_ALLOCATED) 920 if (node2_type != NODE_GPP &&
921 node_get_state(node2) != NODE_ALLOCATED) {
926 status = -EBADR; 922 status = -EBADR;
923 goto out_unlock;
924 }
927 925
928 if (!status) { 926 /*
929 /* Check that stream indices for task and dais socket nodes 927 * Check that stream indices for task and dais socket nodes
930 * are not already be used. (Device nodes checked later) */ 928 * are not already be used. (Device nodes checked later)
931 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) { 929 */
932 output = 930 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
933 &(node1->create_args.asa. 931 output = &(node1->create_args.asa.
934 task_arg_obj.strm_out_def[stream1]); 932 task_arg_obj.strm_out_def[stream1]);
935 if (output->sz_device != NULL) 933 if (output->sz_device) {
936 status = -EISCONN; 934 status = -EISCONN;
937 935 goto out_unlock;
938 } 936 }
939 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
940 input =
941 &(node2->create_args.asa.
942 task_arg_obj.strm_in_def[stream2]);
943 if (input->sz_device != NULL)
944 status = -EISCONN;
945 937
938 }
939 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
940 input = &(node2->create_args.asa.
941 task_arg_obj.strm_in_def[stream2]);
942 if (input->sz_device) {
943 status = -EISCONN;
944 goto out_unlock;
946 } 945 }
946
947 } 947 }
948 /* Connecting two task nodes? */ 948 /* Connecting two task nodes? */
949 if (!status && ((node1_type == NODE_TASK || 949 if ((node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) &&
950 node1_type == NODE_DAISSOCKET) 950 (node2_type == NODE_TASK ||
951 && (node2_type == NODE_TASK 951 node2_type == NODE_DAISSOCKET)) {
952 || node2_type == NODE_DAISSOCKET))) {
953 /* Find available pipe */ 952 /* Find available pipe */
954 pipe_id = gb_findandset(hnode_mgr->pipe_map); 953 pipe_id = find_first_zero_bit(hnode_mgr->pipe_map, MAXPIPES);
955 if (pipe_id == GB_NOBITS) { 954 if (pipe_id == MAXPIPES) {
956 status = -ECONNREFUSED; 955 status = -ECONNREFUSED;
957 } else { 956 goto out_unlock;
958 node1->outputs[stream1].type = NODECONNECT; 957 }
959 node2->inputs[stream2].type = NODECONNECT; 958 set_bit(pipe_id, hnode_mgr->pipe_map);
960 node1->outputs[stream1].dev_id = pipe_id; 959 node1->outputs[stream1].type = NODECONNECT;
961 node2->inputs[stream2].dev_id = pipe_id; 960 node2->inputs[stream2].type = NODECONNECT;
962 output->sz_device = kzalloc(PIPENAMELEN + 1, 961 node1->outputs[stream1].dev_id = pipe_id;
963 GFP_KERNEL); 962 node2->inputs[stream2].dev_id = pipe_id;
964 input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL); 963 output->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
965 if (output->sz_device == NULL || 964 input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
966 input->sz_device == NULL) { 965 if (!output->sz_device || !input->sz_device) {
967 /* Undo the connection */ 966 /* Undo the connection */
968 kfree(output->sz_device); 967 kfree(output->sz_device);
969 968 kfree(input->sz_device);
970 kfree(input->sz_device); 969 clear_bit(pipe_id, hnode_mgr->pipe_map);
971 970 status = -ENOMEM;
972 output->sz_device = NULL; 971 goto out_unlock;
973 input->sz_device = NULL;
974 gb_clear(hnode_mgr->pipe_map, pipe_id);
975 status = -ENOMEM;
976 } else {
977 /* Copy "/dbpipe<pipId>" name to device names */
978 sprintf(output->sz_device, "%s%d",
979 PIPEPREFIX, pipe_id);
980 strcpy(input->sz_device, output->sz_device);
981 }
982 } 972 }
973 /* Copy "/dbpipe<pipId>" name to device names */
974 sprintf(output->sz_device, "%s%d", PIPEPREFIX, pipe_id);
975 strcpy(input->sz_device, output->sz_device);
983 } 976 }
984 /* Connecting task node to host? */ 977 /* Connecting task node to host? */
985 if (!status && (node1_type == NODE_GPP || 978 if (node1_type == NODE_GPP || node2_type == NODE_GPP) {
986 node2_type == NODE_GPP)) { 979 pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
987 if (node1_type == NODE_GPP) { 980 if (!pstr_dev_name) {
988 chnl_mode = CHNL_MODETODSP; 981 status = -ENOMEM;
989 } else { 982 goto out_unlock;
990 DBC_ASSERT(node2_type == NODE_GPP);
991 chnl_mode = CHNL_MODEFROMDSP;
992 } 983 }
993 /* Reserve a channel id. We need to put the name "/host<id>" 984
985 DBC_ASSERT((node1_type == NODE_GPP) ||
986 (node2_type == NODE_GPP));
987
988 chnl_mode = (node1_type == NODE_GPP) ?
989 CHNL_MODETODSP : CHNL_MODEFROMDSP;
990
991 /*
992 * Reserve a channel id. We need to put the name "/host<id>"
994 * in the node's create_args, but the host 993 * in the node's create_args, but the host
995 * side channel will not be opened until DSPStream_Open is 994 * side channel will not be opened until DSPStream_Open is
996 * called for this node. */ 995 * called for this node.
997 if (pattrs) { 996 */
998 if (pattrs->strm_mode == STRMMODE_RDMA) { 997 strm_mode = pattrs ? pattrs->strm_mode : STRMMODE_PROCCOPY;
999 chnl_id = 998 switch (strm_mode) {
1000 gb_findandset(hnode_mgr->dma_chnl_map); 999 case STRMMODE_RDMA:
1000 chnl_id = find_first_zero_bit(hnode_mgr->dma_chnl_map,
1001 CHNL_MAXCHANNELS);
1002 if (chnl_id < CHNL_MAXCHANNELS) {
1003 set_bit(chnl_id, hnode_mgr->dma_chnl_map);
1001 /* dma chans are 2nd transport chnl set 1004 /* dma chans are 2nd transport chnl set
1002 * ids(e.g. 16-31) */ 1005 * ids(e.g. 16-31) */
1003 (chnl_id != GB_NOBITS) ? 1006 chnl_id = chnl_id + hnode_mgr->num_chnls;
1004 (chnl_id = 1007 }
1005 chnl_id + 1008 break;
1006 hnode_mgr->ul_num_chnls) : chnl_id; 1009 case STRMMODE_ZEROCOPY:
1007 } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) { 1010 chnl_id = find_first_zero_bit(hnode_mgr->zc_chnl_map,
1008 chnl_id = gb_findandset(hnode_mgr->zc_chnl_map); 1011 CHNL_MAXCHANNELS);
1012 if (chnl_id < CHNL_MAXCHANNELS) {
1013 set_bit(chnl_id, hnode_mgr->zc_chnl_map);
1009 /* zero-copy chans are 3nd transport set 1014 /* zero-copy chans are 3nd transport set
1010 * (e.g. 32-47) */ 1015 * (e.g. 32-47) */
1011 (chnl_id != GB_NOBITS) ? (chnl_id = chnl_id + 1016 chnl_id = chnl_id +
1012 (2 * 1017 (2 * hnode_mgr->num_chnls);
1013 hnode_mgr->
1014 ul_num_chnls))
1015 : chnl_id;
1016 } else { /* must be PROCCOPY */
1017 DBC_ASSERT(pattrs->strm_mode ==
1018 STRMMODE_PROCCOPY);
1019 chnl_id = gb_findandset(hnode_mgr->chnl_map);
1020 /* e.g. 0-15 */
1021 } 1018 }
1022 } else { 1019 break;
1023 /* default to PROCCOPY */ 1020 case STRMMODE_PROCCOPY:
1024 chnl_id = gb_findandset(hnode_mgr->chnl_map); 1021 chnl_id = find_first_zero_bit(hnode_mgr->chnl_map,
1022 CHNL_MAXCHANNELS);
1023 if (chnl_id < CHNL_MAXCHANNELS)
1024 set_bit(chnl_id, hnode_mgr->chnl_map);
1025 break;
1026 default:
1027 status = -EINVAL;
1028 goto out_unlock;
1025 } 1029 }
1026 if (chnl_id == GB_NOBITS) { 1030 if (chnl_id == CHNL_MAXCHANNELS) {
1027 status = -ECONNREFUSED; 1031 status = -ECONNREFUSED;
1028 goto func_cont2; 1032 goto out_unlock;
1029 } 1033 }
1030 pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL); 1034
1031 if (pstr_dev_name != NULL) 1035 if (node1 == (struct node_object *)DSP_HGPPNODE) {
1032 goto func_cont2; 1036 node2->inputs[stream2].type = HOSTCONNECT;
1033 1037 node2->inputs[stream2].dev_id = chnl_id;
1034 if (pattrs) { 1038 input->sz_device = pstr_dev_name;
1035 if (pattrs->strm_mode == STRMMODE_RDMA) {
1036 gb_clear(hnode_mgr->dma_chnl_map, chnl_id -
1037 hnode_mgr->ul_num_chnls);
1038 } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
1039 gb_clear(hnode_mgr->zc_chnl_map, chnl_id -
1040 (2 * hnode_mgr->ul_num_chnls));
1041 } else {
1042 DBC_ASSERT(pattrs->strm_mode ==
1043 STRMMODE_PROCCOPY);
1044 gb_clear(hnode_mgr->chnl_map, chnl_id);
1045 }
1046 } else { 1039 } else {
1047 gb_clear(hnode_mgr->chnl_map, chnl_id); 1040 node1->outputs[stream1].type = HOSTCONNECT;
1048 } 1041 node1->outputs[stream1].dev_id = chnl_id;
1049 status = -ENOMEM; 1042 output->sz_device = pstr_dev_name;
1050func_cont2:
1051 if (!status) {
1052 if (node1 == (struct node_object *)DSP_HGPPNODE) {
1053 node2->inputs[stream2].type = HOSTCONNECT;
1054 node2->inputs[stream2].dev_id = chnl_id;
1055 input->sz_device = pstr_dev_name;
1056 } else {
1057 node1->outputs[stream1].type = HOSTCONNECT;
1058 node1->outputs[stream1].dev_id = chnl_id;
1059 output->sz_device = pstr_dev_name;
1060 }
1061 sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
1062 } 1043 }
1044 sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
1063 } 1045 }
1064 /* Connecting task node to device node? */ 1046 /* Connecting task node to device node? */
1065 if (!status && ((node1_type == NODE_DEVICE) || 1047 if ((node1_type == NODE_DEVICE) || (node2_type == NODE_DEVICE)) {
1066 (node2_type == NODE_DEVICE))) {
1067 if (node2_type == NODE_DEVICE) { 1048 if (node2_type == NODE_DEVICE) {
1068 /* node1 == > device */ 1049 /* node1 == > device */
1069 dev_node_obj = node2; 1050 dev_node_obj = node2;
@@ -1079,61 +1060,59 @@ func_cont2:
1079 } 1060 }
1080 /* Set up create args */ 1061 /* Set up create args */
1081 pstream->type = DEVICECONNECT; 1062 pstream->type = DEVICECONNECT;
1082 dw_length = strlen(dev_node_obj->pstr_dev_name); 1063 dw_length = strlen(dev_node_obj->str_dev_name);
1083 if (conn_param != NULL) { 1064 if (conn_param)
1084 pstrm_def->sz_device = kzalloc(dw_length + 1 + 1065 pstrm_def->sz_device = kzalloc(dw_length + 1 +
1085 conn_param->cb_data, 1066 conn_param->cb_data,
1086 GFP_KERNEL); 1067 GFP_KERNEL);
1087 } else { 1068 else
1088 pstrm_def->sz_device = kzalloc(dw_length + 1, 1069 pstrm_def->sz_device = kzalloc(dw_length + 1,
1089 GFP_KERNEL); 1070 GFP_KERNEL);
1090 } 1071 if (!pstrm_def->sz_device) {
1091 if (pstrm_def->sz_device == NULL) {
1092 status = -ENOMEM; 1072 status = -ENOMEM;
1093 } else { 1073 goto out_unlock;
1094 /* Copy device name */ 1074 }
1095 strncpy(pstrm_def->sz_device, 1075 /* Copy device name */
1096 dev_node_obj->pstr_dev_name, dw_length); 1076 strncpy(pstrm_def->sz_device,
1097 if (conn_param != NULL) { 1077 dev_node_obj->str_dev_name, dw_length);
1098 strncat(pstrm_def->sz_device, 1078 if (conn_param)
1079 strncat(pstrm_def->sz_device,
1099 (char *)conn_param->node_data, 1080 (char *)conn_param->node_data,
1100 (u32) conn_param->cb_data); 1081 (u32) conn_param->cb_data);
1101 } 1082 dev_node_obj->device_owner = hnode;
1102 dev_node_obj->device_owner = hnode;
1103 }
1104 } 1083 }
1105 if (!status) { 1084 /* Fill in create args */
1106 /* Fill in create args */ 1085 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
1107 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) { 1086 node1->create_args.asa.task_arg_obj.num_outputs++;
1108 node1->create_args.asa.task_arg_obj.num_outputs++; 1087 fill_stream_def(node1, output, pattrs);
1109 fill_stream_def(node1, output, pattrs); 1088 }
1110 } 1089 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
1111 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) { 1090 node2->create_args.asa.task_arg_obj.num_inputs++;
1112 node2->create_args.asa.task_arg_obj.num_inputs++; 1091 fill_stream_def(node2, input, pattrs);
1113 fill_stream_def(node2, input, pattrs); 1092 }
1114 } 1093 /* Update node1 and node2 stream_connect */
1115 /* Update node1 and node2 stream_connect */ 1094 if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
1116 if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) { 1095 node1->num_outputs++;
1117 node1->num_outputs++; 1096 if (stream1 > node1->max_output_index)
1118 if (stream1 > node1->max_output_index) 1097 node1->max_output_index = stream1;
1119 node1->max_output_index = stream1;
1120 1098
1121 } 1099 }
1122 if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) { 1100 if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
1123 node2->num_inputs++; 1101 node2->num_inputs++;
1124 if (stream2 > node2->max_input_index) 1102 if (stream2 > node2->max_input_index)
1125 node2->max_input_index = stream2; 1103 node2->max_input_index = stream2;
1126 1104
1127 }
1128 fill_stream_connect(node1, node2, stream1, stream2);
1129 } 1105 }
1106 fill_stream_connect(node1, node2, stream1, stream2);
1130 /* end of sync_enter_cs */ 1107 /* end of sync_enter_cs */
1131 /* Exit critical section */ 1108 /* Exit critical section */
1109out_unlock:
1110 if (status && pstr_dev_name)
1111 kfree(pstr_dev_name);
1132 mutex_unlock(&hnode_mgr->node_mgr_lock); 1112 mutex_unlock(&hnode_mgr->node_mgr_lock);
1133func_end:
1134 dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d" 1113 dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d"
1135 "pattrs: %p status: 0x%x\n", __func__, node1, 1114 "pattrs: %p status: 0x%x\n", __func__, node1,
1136 stream1, node2, stream2, pattrs, status); 1115 stream1, node2, stream2, pattrs, status);
1137 return status; 1116 return status;
1138} 1117}
1139 1118
@@ -1165,7 +1144,7 @@ int node_create(struct node_object *hnode)
1165 status = -EFAULT; 1144 status = -EFAULT;
1166 goto func_end; 1145 goto func_end;
1167 } 1146 }
1168 hprocessor = hnode->hprocessor; 1147 hprocessor = hnode->processor;
1169 status = proc_get_state(hprocessor, &proc_state, 1148 status = proc_get_state(hprocessor, &proc_state,
1170 sizeof(struct dsp_processorstate)); 1149 sizeof(struct dsp_processorstate));
1171 if (status) 1150 if (status)
@@ -1179,7 +1158,7 @@ int node_create(struct node_object *hnode)
1179 /* create struct dsp_cbdata struct for PWR calls */ 1158 /* create struct dsp_cbdata struct for PWR calls */
1180 cb_data.cb_data = PWR_TIMEOUT; 1159 cb_data.cb_data = PWR_TIMEOUT;
1181 node_type = node_get_type(hnode); 1160 node_type = node_get_type(hnode);
1182 hnode_mgr = hnode->hnode_mgr; 1161 hnode_mgr = hnode->node_mgr;
1183 intf_fxns = hnode_mgr->intf_fxns; 1162 intf_fxns = hnode_mgr->intf_fxns;
1184 /* Get access to node dispatcher */ 1163 /* Get access to node dispatcher */
1185 mutex_lock(&hnode_mgr->node_mgr_lock); 1164 mutex_lock(&hnode_mgr->node_mgr_lock);
@@ -1189,7 +1168,7 @@ int node_create(struct node_object *hnode)
1189 status = -EBADR; 1168 status = -EBADR;
1190 1169
1191 if (!status) 1170 if (!status)
1192 status = proc_get_processor_id(pnode->hprocessor, &proc_id); 1171 status = proc_get_processor_id(pnode->processor, &proc_id);
1193 1172
1194 if (status) 1173 if (status)
1195 goto func_cont2; 1174 goto func_cont2;
@@ -1211,7 +1190,7 @@ int node_create(struct node_object *hnode)
1211 if (pdata->cpu_set_freq) 1190 if (pdata->cpu_set_freq)
1212 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]); 1191 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
1213#endif 1192#endif
1214 status = hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj, 1193 status = hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
1215 NLDR_CREATE); 1194 NLDR_CREATE);
1216 /* Get address of node's create function */ 1195 /* Get address of node's create function */
1217 if (!status) { 1196 if (!status) {
@@ -1232,19 +1211,19 @@ int node_create(struct node_object *hnode)
1232 /* Get address of iAlg functions, if socket node */ 1211 /* Get address of iAlg functions, if socket node */
1233 if (!status) { 1212 if (!status) {
1234 if (node_type == NODE_DAISSOCKET) { 1213 if (node_type == NODE_DAISSOCKET) {
1235 status = hnode_mgr->nldr_fxns.pfn_get_fxn_addr 1214 status = hnode_mgr->nldr_fxns.get_fxn_addr
1236 (hnode->nldr_node_obj, 1215 (hnode->nldr_node_obj,
1237 hnode->dcd_props.obj_data.node_obj. 1216 hnode->dcd_props.obj_data.node_obj.
1238 pstr_i_alg_name, 1217 str_i_alg_name,
1239 &hnode->create_args.asa. 1218 &hnode->create_args.asa.
1240 task_arg_obj.ul_dais_arg); 1219 task_arg_obj.dais_arg);
1241 } 1220 }
1242 } 1221 }
1243 } 1222 }
1244 if (!status) { 1223 if (!status) {
1245 if (node_type != NODE_DEVICE) { 1224 if (node_type != NODE_DEVICE) {
1246 status = disp_node_create(hnode_mgr->disp_obj, hnode, 1225 status = disp_node_create(hnode_mgr->disp_obj, hnode,
1247 hnode_mgr->ul_fxn_addrs 1226 hnode_mgr->fxn_addrs
1248 [RMSCREATENODE], 1227 [RMSCREATENODE],
1249 ul_create_fxn, 1228 ul_create_fxn,
1250 &(hnode->create_args), 1229 &(hnode->create_args),
@@ -1253,7 +1232,7 @@ int node_create(struct node_object *hnode)
1253 /* Set the message queue id to the node env 1232 /* Set the message queue id to the node env
1254 * pointer */ 1233 * pointer */
1255 intf_fxns = hnode_mgr->intf_fxns; 1234 intf_fxns = hnode_mgr->intf_fxns;
1256 (*intf_fxns->pfn_msg_set_queue_id) (hnode-> 1235 (*intf_fxns->msg_set_queue_id) (hnode->
1257 msg_queue_obj, 1236 msg_queue_obj,
1258 hnode->node_env); 1237 hnode->node_env);
1259 } 1238 }
@@ -1264,7 +1243,7 @@ int node_create(struct node_object *hnode)
1264 if (hnode->loaded && hnode->phase_split) { 1243 if (hnode->loaded && hnode->phase_split) {
1265 /* If create code was dynamically loaded, we can now unload 1244 /* If create code was dynamically loaded, we can now unload
1266 * it. */ 1245 * it. */
1267 status1 = hnode_mgr->nldr_fxns.pfn_unload(hnode->nldr_node_obj, 1246 status1 = hnode_mgr->nldr_fxns.unload(hnode->nldr_node_obj,
1268 NLDR_CREATE); 1247 NLDR_CREATE);
1269 hnode->loaded = false; 1248 hnode->loaded = false;
1270 } 1249 }
@@ -1287,7 +1266,7 @@ func_cont:
1287 mutex_unlock(&hnode_mgr->node_mgr_lock); 1266 mutex_unlock(&hnode_mgr->node_mgr_lock);
1288func_end: 1267func_end:
1289 if (status >= 0) { 1268 if (status >= 0) {
1290 proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE); 1269 proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
1291 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE); 1270 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
1292 } 1271 }
1293 1272
@@ -1311,6 +1290,7 @@ int node_create_mgr(struct node_mgr **node_man,
1311 struct nldr_attrs nldr_attrs_obj; 1290 struct nldr_attrs nldr_attrs_obj;
1312 int status = 0; 1291 int status = 0;
1313 u8 dev_type; 1292 u8 dev_type;
1293
1314 DBC_REQUIRE(refs > 0); 1294 DBC_REQUIRE(refs > 0);
1315 DBC_REQUIRE(node_man != NULL); 1295 DBC_REQUIRE(node_man != NULL);
1316 DBC_REQUIRE(hdev_obj != NULL); 1296 DBC_REQUIRE(hdev_obj != NULL);
@@ -1318,113 +1298,89 @@ int node_create_mgr(struct node_mgr **node_man,
1318 *node_man = NULL; 1298 *node_man = NULL;
1319 /* Allocate Node manager object */ 1299 /* Allocate Node manager object */
1320 node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL); 1300 node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
1321 if (node_mgr_obj) { 1301 if (!node_mgr_obj)
1322 node_mgr_obj->hdev_obj = hdev_obj; 1302 return -ENOMEM;
1323 node_mgr_obj->node_list = kzalloc(sizeof(struct lst_list), 1303
1324 GFP_KERNEL); 1304 node_mgr_obj->dev_obj = hdev_obj;
1325 node_mgr_obj->pipe_map = gb_create(MAXPIPES); 1305
1326 node_mgr_obj->pipe_done_map = gb_create(MAXPIPES); 1306 node_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
1327 if (node_mgr_obj->node_list == NULL 1307 GFP_KERNEL);
1328 || node_mgr_obj->pipe_map == NULL 1308 if (!node_mgr_obj->ntfy_obj) {
1329 || node_mgr_obj->pipe_done_map == NULL) {
1330 status = -ENOMEM;
1331 } else {
1332 INIT_LIST_HEAD(&node_mgr_obj->node_list->head);
1333 node_mgr_obj->ntfy_obj = kmalloc(
1334 sizeof(struct ntfy_object), GFP_KERNEL);
1335 if (node_mgr_obj->ntfy_obj)
1336 ntfy_init(node_mgr_obj->ntfy_obj);
1337 else
1338 status = -ENOMEM;
1339 }
1340 node_mgr_obj->num_created = 0;
1341 } else {
1342 status = -ENOMEM; 1309 status = -ENOMEM;
1310 goto out_err;
1343 } 1311 }
1344 /* get devNodeType */ 1312 ntfy_init(node_mgr_obj->ntfy_obj);
1345 if (!status)
1346 status = dev_get_dev_type(hdev_obj, &dev_type);
1347 1313
1348 /* Create the DCD Manager */ 1314 INIT_LIST_HEAD(&node_mgr_obj->node_list);
1349 if (!status) { 1315
1350 status = 1316 dev_get_dev_type(hdev_obj, &dev_type);
1351 dcd_create_manager(sz_zl_file, &node_mgr_obj->hdcd_mgr); 1317
1352 if (!status) 1318 status = dcd_create_manager(sz_zl_file, &node_mgr_obj->dcd_mgr);
1353 status = get_proc_props(node_mgr_obj, hdev_obj); 1319 if (status)
1320 goto out_err;
1321
1322 status = get_proc_props(node_mgr_obj, hdev_obj);
1323 if (status)
1324 goto out_err;
1354 1325
1355 }
1356 /* Create NODE Dispatcher */ 1326 /* Create NODE Dispatcher */
1357 if (!status) { 1327 disp_attr_obj.chnl_offset = node_mgr_obj->chnl_offset;
1358 disp_attr_obj.ul_chnl_offset = node_mgr_obj->ul_chnl_offset; 1328 disp_attr_obj.chnl_buf_size = node_mgr_obj->chnl_buf_size;
1359 disp_attr_obj.ul_chnl_buf_size = node_mgr_obj->ul_chnl_buf_size; 1329 disp_attr_obj.proc_family = node_mgr_obj->proc_family;
1360 disp_attr_obj.proc_family = node_mgr_obj->proc_family; 1330 disp_attr_obj.proc_type = node_mgr_obj->proc_type;
1361 disp_attr_obj.proc_type = node_mgr_obj->proc_type; 1331
1362 status = 1332 status = disp_create(&node_mgr_obj->disp_obj, hdev_obj, &disp_attr_obj);
1363 disp_create(&node_mgr_obj->disp_obj, hdev_obj, 1333 if (status)
1364 &disp_attr_obj); 1334 goto out_err;
1365 } 1335
1366 /* Create a STRM Manager */ 1336 /* Create a STRM Manager */
1367 if (!status) 1337 status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
1368 status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj); 1338 if (status)
1339 goto out_err;
1369 1340
1370 if (!status) { 1341 dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
1371 dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns); 1342 /* Get msg_ctrl queue manager */
1372 /* Get msg_ctrl queue manager */ 1343 dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
1373 dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj); 1344 mutex_init(&node_mgr_obj->node_mgr_lock);
1374 mutex_init(&node_mgr_obj->node_mgr_lock); 1345
1375 node_mgr_obj->chnl_map = gb_create(node_mgr_obj->ul_num_chnls); 1346 /* Block out reserved channels */
1376 /* dma chnl map. ul_num_chnls is # per transport */ 1347 for (i = 0; i < node_mgr_obj->chnl_offset; i++)
1377 node_mgr_obj->dma_chnl_map = 1348 set_bit(i, node_mgr_obj->chnl_map);
1378 gb_create(node_mgr_obj->ul_num_chnls); 1349
1379 node_mgr_obj->zc_chnl_map = 1350 /* Block out channels reserved for RMS */
1380 gb_create(node_mgr_obj->ul_num_chnls); 1351 set_bit(node_mgr_obj->chnl_offset, node_mgr_obj->chnl_map);
1381 if ((node_mgr_obj->chnl_map == NULL) 1352 set_bit(node_mgr_obj->chnl_offset + 1, node_mgr_obj->chnl_map);
1382 || (node_mgr_obj->dma_chnl_map == NULL) 1353
1383 || (node_mgr_obj->zc_chnl_map == NULL)) { 1354 /* NO RM Server on the IVA */
1384 status = -ENOMEM; 1355 if (dev_type != IVA_UNIT) {
1385 } else { 1356 /* Get addresses of any RMS functions loaded */
1386 /* Block out reserved channels */ 1357 status = get_rms_fxns(node_mgr_obj);
1387 for (i = 0; i < node_mgr_obj->ul_chnl_offset; i++) 1358 if (status)
1388 gb_set(node_mgr_obj->chnl_map, i); 1359 goto out_err;
1389
1390 /* Block out channels reserved for RMS */
1391 gb_set(node_mgr_obj->chnl_map,
1392 node_mgr_obj->ul_chnl_offset);
1393 gb_set(node_mgr_obj->chnl_map,
1394 node_mgr_obj->ul_chnl_offset + 1);
1395 }
1396 }
1397 if (!status) {
1398 /* NO RM Server on the IVA */
1399 if (dev_type != IVA_UNIT) {
1400 /* Get addresses of any RMS functions loaded */
1401 status = get_rms_fxns(node_mgr_obj);
1402 }
1403 } 1360 }
1404 1361
1405 /* Get loader functions and create loader */ 1362 /* Get loader functions and create loader */
1406 if (!status) 1363 node_mgr_obj->nldr_fxns = nldr_fxns; /* Dyn loader funcs */
1407 node_mgr_obj->nldr_fxns = nldr_fxns; /* Dyn loader funcs */ 1364
1365 nldr_attrs_obj.ovly = ovly;
1366 nldr_attrs_obj.write = mem_write;
1367 nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size;
1368 nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size;
1369 node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.init();
1370 status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
1371 hdev_obj,
1372 &nldr_attrs_obj);
1373 if (status)
1374 goto out_err;
1408 1375
1409 if (!status) { 1376 *node_man = node_mgr_obj;
1410 nldr_attrs_obj.pfn_ovly = ovly;
1411 nldr_attrs_obj.pfn_write = mem_write;
1412 nldr_attrs_obj.us_dsp_word_size = node_mgr_obj->udsp_word_size;
1413 nldr_attrs_obj.us_dsp_mau_size = node_mgr_obj->udsp_mau_size;
1414 node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.pfn_init();
1415 status =
1416 node_mgr_obj->nldr_fxns.pfn_create(&node_mgr_obj->nldr_obj,
1417 hdev_obj,
1418 &nldr_attrs_obj);
1419 }
1420 if (!status)
1421 *node_man = node_mgr_obj;
1422 else
1423 delete_node_mgr(node_mgr_obj);
1424 1377
1425 DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man)); 1378 DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));
1426 1379
1427 return status; 1380 return status;
1381out_err:
1382 delete_node_mgr(node_mgr_obj);
1383 return status;
1428} 1384}
1429 1385
1430/* 1386/*
@@ -1437,7 +1393,7 @@ int node_create_mgr(struct node_mgr **node_man,
1437int node_delete(struct node_res_object *noderes, 1393int node_delete(struct node_res_object *noderes,
1438 struct process_context *pr_ctxt) 1394 struct process_context *pr_ctxt)
1439{ 1395{
1440 struct node_object *pnode = noderes->hnode; 1396 struct node_object *pnode = noderes->node;
1441 struct node_mgr *hnode_mgr; 1397 struct node_mgr *hnode_mgr;
1442 struct proc_object *hprocessor; 1398 struct proc_object *hprocessor;
1443 struct disp_object *disp_obj; 1399 struct disp_object *disp_obj;
@@ -1461,8 +1417,8 @@ int node_delete(struct node_res_object *noderes,
1461 } 1417 }
1462 /* create struct dsp_cbdata struct for PWR call */ 1418 /* create struct dsp_cbdata struct for PWR call */
1463 cb_data.cb_data = PWR_TIMEOUT; 1419 cb_data.cb_data = PWR_TIMEOUT;
1464 hnode_mgr = pnode->hnode_mgr; 1420 hnode_mgr = pnode->node_mgr;
1465 hprocessor = pnode->hprocessor; 1421 hprocessor = pnode->processor;
1466 disp_obj = hnode_mgr->disp_obj; 1422 disp_obj = hnode_mgr->disp_obj;
1467 node_type = node_get_type(pnode); 1423 node_type = node_get_type(pnode);
1468 intf_fxns = hnode_mgr->intf_fxns; 1424 intf_fxns = hnode_mgr->intf_fxns;
@@ -1477,7 +1433,7 @@ int node_delete(struct node_res_object *noderes,
1477 * code must be executed. */ 1433 * code must be executed. */
1478 if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) && 1434 if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
1479 node_type != NODE_DEVICE) { 1435 node_type != NODE_DEVICE) {
1480 status = proc_get_processor_id(pnode->hprocessor, &proc_id); 1436 status = proc_get_processor_id(pnode->processor, &proc_id);
1481 if (status) 1437 if (status)
1482 goto func_cont1; 1438 goto func_cont1;
1483 1439
@@ -1494,7 +1450,7 @@ int node_delete(struct node_res_object *noderes,
1494 * is not * running */ 1450 * is not * running */
1495 status1 = 1451 status1 =
1496 hnode_mgr->nldr_fxns. 1452 hnode_mgr->nldr_fxns.
1497 pfn_unload(pnode->nldr_node_obj, 1453 unload(pnode->nldr_node_obj,
1498 NLDR_EXECUTE); 1454 NLDR_EXECUTE);
1499 pnode->loaded = false; 1455 pnode->loaded = false;
1500 NODE_SET_STATE(pnode, NODE_DONE); 1456 NODE_SET_STATE(pnode, NODE_DONE);
@@ -1505,7 +1461,7 @@ int node_delete(struct node_res_object *noderes,
1505 pnode->phase_split) { 1461 pnode->phase_split) {
1506 status = 1462 status =
1507 hnode_mgr->nldr_fxns. 1463 hnode_mgr->nldr_fxns.
1508 pfn_load(pnode->nldr_node_obj, NLDR_DELETE); 1464 load(pnode->nldr_node_obj, NLDR_DELETE);
1509 if (!status) 1465 if (!status)
1510 pnode->loaded = true; 1466 pnode->loaded = true;
1511 else 1467 else
@@ -1533,7 +1489,7 @@ func_cont1:
1533 status = 1489 status =
1534 disp_node_delete(disp_obj, pnode, 1490 disp_node_delete(disp_obj, pnode,
1535 hnode_mgr-> 1491 hnode_mgr->
1536 ul_fxn_addrs 1492 fxn_addrs
1537 [RMSDELETENODE], 1493 [RMSDELETENODE],
1538 ul_delete_fxn, 1494 ul_delete_fxn,
1539 pnode->node_env); 1495 pnode->node_env);
@@ -1546,7 +1502,7 @@ func_cont1:
1546 pnode->phase_split) { 1502 pnode->phase_split) {
1547 status1 = 1503 status1 =
1548 hnode_mgr->nldr_fxns. 1504 hnode_mgr->nldr_fxns.
1549 pfn_unload(pnode->nldr_node_obj, 1505 unload(pnode->nldr_node_obj,
1550 NLDR_EXECUTE); 1506 NLDR_EXECUTE);
1551 } 1507 }
1552 if (status1) 1508 if (status1)
@@ -1554,7 +1510,7 @@ func_cont1:
1554 " 0x%x\n", __func__, status1); 1510 " 0x%x\n", __func__, status1);
1555 1511
1556 status1 = 1512 status1 =
1557 hnode_mgr->nldr_fxns.pfn_unload(pnode-> 1513 hnode_mgr->nldr_fxns.unload(pnode->
1558 nldr_node_obj, 1514 nldr_node_obj,
1559 NLDR_DELETE); 1515 NLDR_DELETE);
1560 pnode->loaded = false; 1516 pnode->loaded = false;
@@ -1566,7 +1522,7 @@ func_cont1:
1566 } 1522 }
1567 /* Free host side resources even if a failure occurred */ 1523 /* Free host side resources even if a failure occurred */
1568 /* Remove node from hnode_mgr->node_list */ 1524 /* Remove node from hnode_mgr->node_list */
1569 lst_remove_elem(hnode_mgr->node_list, (struct list_head *)pnode); 1525 list_del(&pnode->list_elem);
1570 hnode_mgr->num_nodes--; 1526 hnode_mgr->num_nodes--;
1571 /* Decrement count of nodes created on DSP */ 1527 /* Decrement count of nodes created on DSP */
1572 if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) && 1528 if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
@@ -1598,16 +1554,14 @@ func_end:
1598 */ 1554 */
1599int node_delete_mgr(struct node_mgr *hnode_mgr) 1555int node_delete_mgr(struct node_mgr *hnode_mgr)
1600{ 1556{
1601 int status = 0;
1602
1603 DBC_REQUIRE(refs > 0); 1557 DBC_REQUIRE(refs > 0);
1604 1558
1605 if (hnode_mgr) 1559 if (!hnode_mgr)
1606 delete_node_mgr(hnode_mgr); 1560 return -EFAULT;
1607 else
1608 status = -EFAULT;
1609 1561
1610 return status; 1562 delete_node_mgr(hnode_mgr);
1563
1564 return 0;
1611} 1565}
1612 1566
1613/* 1567/*
@@ -1620,7 +1574,7 @@ int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
1620 u32 *pu_allocated) 1574 u32 *pu_allocated)
1621{ 1575{
1622 struct node_object *hnode; 1576 struct node_object *hnode;
1623 u32 i; 1577 u32 i = 0;
1624 int status = 0; 1578 int status = 0;
1625 DBC_REQUIRE(refs > 0); 1579 DBC_REQUIRE(refs > 0);
1626 DBC_REQUIRE(node_tab != NULL || node_tab_size == 0); 1580 DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
@@ -1639,15 +1593,8 @@ int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
1639 *pu_num_nodes = 0; 1593 *pu_num_nodes = 0;
1640 status = -EINVAL; 1594 status = -EINVAL;
1641 } else { 1595 } else {
1642 hnode = (struct node_object *)lst_first(hnode_mgr-> 1596 list_for_each_entry(hnode, &hnode_mgr->node_list, list_elem)
1643 node_list); 1597 node_tab[i++] = hnode;
1644 for (i = 0; i < hnode_mgr->num_nodes; i++) {
1645 DBC_ASSERT(hnode);
1646 node_tab[i] = hnode;
1647 hnode = (struct node_object *)lst_next
1648 (hnode_mgr->node_list,
1649 (struct list_head *)hnode);
1650 }
1651 *pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes; 1598 *pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
1652 } 1599 }
1653 /* end of sync_enter_cs */ 1600 /* end of sync_enter_cs */
@@ -1691,7 +1638,7 @@ int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
1691 status = -EFAULT; 1638 status = -EFAULT;
1692 goto func_end; 1639 goto func_end;
1693 } 1640 }
1694 status = proc_get_processor_id(pnode->hprocessor, &proc_id); 1641 status = proc_get_processor_id(pnode->processor, &proc_id);
1695 if (proc_id == DSP_UNIT) { 1642 if (proc_id == DSP_UNIT) {
1696 if (!status) { 1643 if (!status) {
1697 if (pattr == NULL) { 1644 if (pattr == NULL) {
@@ -1722,38 +1669,37 @@ int node_get_attr(struct node_object *hnode,
1722 struct dsp_nodeattr *pattr, u32 attr_size) 1669 struct dsp_nodeattr *pattr, u32 attr_size)
1723{ 1670{
1724 struct node_mgr *hnode_mgr; 1671 struct node_mgr *hnode_mgr;
1725 int status = 0;
1726 DBC_REQUIRE(refs > 0); 1672 DBC_REQUIRE(refs > 0);
1727 DBC_REQUIRE(pattr != NULL); 1673 DBC_REQUIRE(pattr != NULL);
1728 DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr)); 1674 DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));
1729 1675
1730 if (!hnode) { 1676 if (!hnode)
1731 status = -EFAULT; 1677 return -EFAULT;
1732 } else { 1678
1733 hnode_mgr = hnode->hnode_mgr; 1679 hnode_mgr = hnode->node_mgr;
1734 /* Enter hnode_mgr critical section (since we're accessing 1680 /* Enter hnode_mgr critical section (since we're accessing
1735 * data that could be changed by node_change_priority() and 1681 * data that could be changed by node_change_priority() and
1736 * node_connect(). */ 1682 * node_connect(). */
1737 mutex_lock(&hnode_mgr->node_mgr_lock); 1683 mutex_lock(&hnode_mgr->node_mgr_lock);
1738 pattr->cb_struct = sizeof(struct dsp_nodeattr); 1684 pattr->cb_struct = sizeof(struct dsp_nodeattr);
1739 /* dsp_nodeattrin */ 1685 /* dsp_nodeattrin */
1740 pattr->in_node_attr_in.cb_struct = 1686 pattr->in_node_attr_in.cb_struct =
1741 sizeof(struct dsp_nodeattrin); 1687 sizeof(struct dsp_nodeattrin);
1742 pattr->in_node_attr_in.prio = hnode->prio; 1688 pattr->in_node_attr_in.prio = hnode->prio;
1743 pattr->in_node_attr_in.utimeout = hnode->utimeout; 1689 pattr->in_node_attr_in.timeout = hnode->timeout;
1744 pattr->in_node_attr_in.heap_size = 1690 pattr->in_node_attr_in.heap_size =
1745 hnode->create_args.asa.task_arg_obj.heap_size; 1691 hnode->create_args.asa.task_arg_obj.heap_size;
1746 pattr->in_node_attr_in.pgpp_virt_addr = (void *) 1692 pattr->in_node_attr_in.pgpp_virt_addr = (void *)
1747 hnode->create_args.asa.task_arg_obj.ugpp_heap_addr; 1693 hnode->create_args.asa.task_arg_obj.gpp_heap_addr;
1748 pattr->node_attr_inputs = hnode->num_gpp_inputs; 1694 pattr->node_attr_inputs = hnode->num_gpp_inputs;
1749 pattr->node_attr_outputs = hnode->num_gpp_outputs; 1695 pattr->node_attr_outputs = hnode->num_gpp_outputs;
1750 /* dsp_nodeinfo */ 1696 /* dsp_nodeinfo */
1751 get_node_info(hnode, &(pattr->node_info)); 1697 get_node_info(hnode, &(pattr->node_info));
1752 /* end of sync_enter_cs */ 1698 /* end of sync_enter_cs */
1753 /* Exit critical section */ 1699 /* Exit critical section */
1754 mutex_unlock(&hnode_mgr->node_mgr_lock); 1700 mutex_unlock(&hnode_mgr->node_mgr_lock);
1755 } 1701
1756 return status; 1702 return 0;
1757} 1703}
1758 1704
1759/* 1705/*
@@ -1822,7 +1768,7 @@ int node_get_message(struct node_object *hnode,
1822 status = -EFAULT; 1768 status = -EFAULT;
1823 goto func_end; 1769 goto func_end;
1824 } 1770 }
1825 hprocessor = hnode->hprocessor; 1771 hprocessor = hnode->processor;
1826 status = proc_get_state(hprocessor, &proc_state, 1772 status = proc_get_state(hprocessor, &proc_state,
1827 sizeof(struct dsp_processorstate)); 1773 sizeof(struct dsp_processorstate));
1828 if (status) 1774 if (status)
@@ -1833,7 +1779,7 @@ int node_get_message(struct node_object *hnode,
1833 status = -EPERM; 1779 status = -EPERM;
1834 goto func_end; 1780 goto func_end;
1835 } 1781 }
1836 hnode_mgr = hnode->hnode_mgr; 1782 hnode_mgr = hnode->node_mgr;
1837 node_type = node_get_type(hnode); 1783 node_type = node_get_type(hnode);
1838 if (node_type != NODE_MESSAGE && node_type != NODE_TASK && 1784 if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
1839 node_type != NODE_DAISSOCKET) { 1785 node_type != NODE_DAISSOCKET) {
@@ -1847,24 +1793,24 @@ int node_get_message(struct node_object *hnode,
1847 * available. */ 1793 * available. */
1848 intf_fxns = hnode_mgr->intf_fxns; 1794 intf_fxns = hnode_mgr->intf_fxns;
1849 status = 1795 status =
1850 (*intf_fxns->pfn_msg_get) (hnode->msg_queue_obj, message, utimeout); 1796 (*intf_fxns->msg_get) (hnode->msg_queue_obj, message, utimeout);
1851 /* Check if message contains SM descriptor */ 1797 /* Check if message contains SM descriptor */
1852 if (status || !(message->dw_cmd & DSP_RMSBUFDESC)) 1798 if (status || !(message->cmd & DSP_RMSBUFDESC))
1853 goto func_end; 1799 goto func_end;
1854 1800
1855 /* Translate DSP byte addr to GPP Va. */ 1801 /* Translate DSP byte addr to GPP Va. */
1856 tmp_buf = cmm_xlator_translate(hnode->xlator, 1802 tmp_buf = cmm_xlator_translate(hnode->xlator,
1857 (void *)(message->dw_arg1 * 1803 (void *)(message->arg1 *
1858 hnode->hnode_mgr-> 1804 hnode->node_mgr->
1859 udsp_word_size), CMM_DSPPA2PA); 1805 dsp_word_size), CMM_DSPPA2PA);
1860 if (tmp_buf != NULL) { 1806 if (tmp_buf != NULL) {
1861 /* now convert this GPP Pa to Va */ 1807 /* now convert this GPP Pa to Va */
1862 tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf, 1808 tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
1863 CMM_PA2VA); 1809 CMM_PA2VA);
1864 if (tmp_buf != NULL) { 1810 if (tmp_buf != NULL) {
1865 /* Adjust SM size in msg */ 1811 /* Adjust SM size in msg */
1866 message->dw_arg1 = (u32) tmp_buf; 1812 message->arg1 = (u32) tmp_buf;
1867 message->dw_arg2 *= hnode->hnode_mgr->udsp_word_size; 1813 message->arg2 *= hnode->node_mgr->dsp_word_size;
1868 } else { 1814 } else {
1869 status = -ESRCH; 1815 status = -ESRCH;
1870 } 1816 }
@@ -1911,7 +1857,7 @@ int node_get_strm_mgr(struct node_object *hnode,
1911 if (!hnode) 1857 if (!hnode)
1912 status = -EFAULT; 1858 status = -EFAULT;
1913 else 1859 else
1914 *strm_man = hnode->hnode_mgr->strm_mgr_obj; 1860 *strm_man = hnode->node_mgr->strm_mgr_obj;
1915 1861
1916 return status; 1862 return status;
1917} 1863}
@@ -1927,7 +1873,7 @@ enum nldr_loadtype node_get_load_type(struct node_object *hnode)
1927 dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode); 1873 dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
1928 return -1; 1874 return -1;
1929 } else { 1875 } else {
1930 return hnode->dcd_props.obj_data.node_obj.us_load_type; 1876 return hnode->dcd_props.obj_data.node_obj.load_type;
1931 } 1877 }
1932} 1878}
1933 1879
@@ -1944,7 +1890,7 @@ u32 node_get_timeout(struct node_object *hnode)
1944 dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode); 1890 dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
1945 return 0; 1891 return 0;
1946 } else { 1892 } else {
1947 return hnode->utimeout; 1893 return hnode->timeout;
1948 } 1894 }
1949} 1895}
1950 1896
@@ -1996,7 +1942,7 @@ void node_on_exit(struct node_object *hnode, s32 node_status)
1996 NODE_SET_STATE(hnode, NODE_DONE); 1942 NODE_SET_STATE(hnode, NODE_DONE);
1997 hnode->exit_status = node_status; 1943 hnode->exit_status = node_status;
1998 if (hnode->loaded && hnode->phase_split) { 1944 if (hnode->loaded && hnode->phase_split) {
1999 (void)hnode->hnode_mgr->nldr_fxns.pfn_unload(hnode-> 1945 (void)hnode->node_mgr->nldr_fxns.unload(hnode->
2000 nldr_node_obj, 1946 nldr_node_obj,
2001 NLDR_EXECUTE); 1947 NLDR_EXECUTE);
2002 hnode->loaded = false; 1948 hnode->loaded = false;
@@ -2004,7 +1950,7 @@ void node_on_exit(struct node_object *hnode, s32 node_status)
2004 /* Unblock call to node_terminate */ 1950 /* Unblock call to node_terminate */
2005 (void)sync_set_event(hnode->sync_done); 1951 (void)sync_set_event(hnode->sync_done);
2006 /* Notify clients */ 1952 /* Notify clients */
2007 proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE); 1953 proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
2008 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE); 1954 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2009} 1955}
2010 1956
@@ -2036,13 +1982,13 @@ int node_pause(struct node_object *hnode)
2036 if (status) 1982 if (status)
2037 goto func_end; 1983 goto func_end;
2038 1984
2039 status = proc_get_processor_id(pnode->hprocessor, &proc_id); 1985 status = proc_get_processor_id(pnode->processor, &proc_id);
2040 1986
2041 if (proc_id == IVA_UNIT) 1987 if (proc_id == IVA_UNIT)
2042 status = -ENOSYS; 1988 status = -ENOSYS;
2043 1989
2044 if (!status) { 1990 if (!status) {
2045 hnode_mgr = hnode->hnode_mgr; 1991 hnode_mgr = hnode->node_mgr;
2046 1992
2047 /* Enter critical section */ 1993 /* Enter critical section */
2048 mutex_lock(&hnode_mgr->node_mgr_lock); 1994 mutex_lock(&hnode_mgr->node_mgr_lock);
@@ -2053,7 +1999,7 @@ int node_pause(struct node_object *hnode)
2053 1999
2054 if (status) 2000 if (status)
2055 goto func_cont; 2001 goto func_cont;
2056 hprocessor = hnode->hprocessor; 2002 hprocessor = hnode->processor;
2057 status = proc_get_state(hprocessor, &proc_state, 2003 status = proc_get_state(hprocessor, &proc_state,
2058 sizeof(struct dsp_processorstate)); 2004 sizeof(struct dsp_processorstate));
2059 if (status) 2005 if (status)
@@ -2066,7 +2012,7 @@ int node_pause(struct node_object *hnode)
2066 } 2012 }
2067 2013
2068 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode, 2014 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
2069 hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY], 2015 hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY],
2070 hnode->node_env, NODE_SUSPENDEDPRI); 2016 hnode->node_env, NODE_SUSPENDEDPRI);
2071 2017
2072 /* Update state */ 2018 /* Update state */
@@ -2078,7 +2024,7 @@ func_cont:
2078 /* Leave critical section */ 2024 /* Leave critical section */
2079 mutex_unlock(&hnode_mgr->node_mgr_lock); 2025 mutex_unlock(&hnode_mgr->node_mgr_lock);
2080 if (status >= 0) { 2026 if (status >= 0) {
2081 proc_notify_clients(hnode->hprocessor, 2027 proc_notify_clients(hnode->processor,
2082 DSP_NODESTATECHANGE); 2028 DSP_NODESTATECHANGE);
2083 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE); 2029 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2084 } 2030 }
@@ -2115,7 +2061,7 @@ int node_put_message(struct node_object *hnode,
2115 status = -EFAULT; 2061 status = -EFAULT;
2116 goto func_end; 2062 goto func_end;
2117 } 2063 }
2118 hprocessor = hnode->hprocessor; 2064 hprocessor = hnode->processor;
2119 status = proc_get_state(hprocessor, &proc_state, 2065 status = proc_get_state(hprocessor, &proc_state,
2120 sizeof(struct dsp_processorstate)); 2066 sizeof(struct dsp_processorstate));
2121 if (status) 2067 if (status)
@@ -2126,7 +2072,7 @@ int node_put_message(struct node_object *hnode,
2126 status = -EPERM; 2072 status = -EPERM;
2127 goto func_end; 2073 goto func_end;
2128 } 2074 }
2129 hnode_mgr = hnode->hnode_mgr; 2075 hnode_mgr = hnode->node_mgr;
2130 node_type = node_get_type(hnode); 2076 node_type = node_get_type(hnode);
2131 if (node_type != NODE_MESSAGE && node_type != NODE_TASK && 2077 if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
2132 node_type != NODE_DAISSOCKET) 2078 node_type != NODE_DAISSOCKET)
@@ -2154,22 +2100,22 @@ int node_put_message(struct node_object *hnode,
2154 /* assign pmsg values to new msg */ 2100 /* assign pmsg values to new msg */
2155 new_msg = *pmsg; 2101 new_msg = *pmsg;
2156 /* Now, check if message contains a SM buffer descriptor */ 2102 /* Now, check if message contains a SM buffer descriptor */
2157 if (pmsg->dw_cmd & DSP_RMSBUFDESC) { 2103 if (pmsg->cmd & DSP_RMSBUFDESC) {
2158 /* Translate GPP Va to DSP physical buf Ptr. */ 2104 /* Translate GPP Va to DSP physical buf Ptr. */
2159 tmp_buf = cmm_xlator_translate(hnode->xlator, 2105 tmp_buf = cmm_xlator_translate(hnode->xlator,
2160 (void *)new_msg.dw_arg1, 2106 (void *)new_msg.arg1,
2161 CMM_VA2DSPPA); 2107 CMM_VA2DSPPA);
2162 if (tmp_buf != NULL) { 2108 if (tmp_buf != NULL) {
2163 /* got translation, convert to MAUs in msg */ 2109 /* got translation, convert to MAUs in msg */
2164 if (hnode->hnode_mgr->udsp_word_size != 0) { 2110 if (hnode->node_mgr->dsp_word_size != 0) {
2165 new_msg.dw_arg1 = 2111 new_msg.arg1 =
2166 (u32) tmp_buf / 2112 (u32) tmp_buf /
2167 hnode->hnode_mgr->udsp_word_size; 2113 hnode->node_mgr->dsp_word_size;
2168 /* MAUs */ 2114 /* MAUs */
2169 new_msg.dw_arg2 /= hnode->hnode_mgr-> 2115 new_msg.arg2 /= hnode->node_mgr->
2170 udsp_word_size; 2116 dsp_word_size;
2171 } else { 2117 } else {
2172 pr_err("%s: udsp_word_size is zero!\n", 2118 pr_err("%s: dsp_word_size is zero!\n",
2173 __func__); 2119 __func__);
2174 status = -EPERM; /* bad DSPWordSize */ 2120 status = -EPERM; /* bad DSPWordSize */
2175 } 2121 }
@@ -2179,7 +2125,7 @@ int node_put_message(struct node_object *hnode,
2179 } 2125 }
2180 if (!status) { 2126 if (!status) {
2181 intf_fxns = hnode_mgr->intf_fxns; 2127 intf_fxns = hnode_mgr->intf_fxns;
2182 status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj, 2128 status = (*intf_fxns->msg_put) (hnode->msg_queue_obj,
2183 &new_msg, utimeout); 2129 &new_msg, utimeout);
2184 } 2130 }
2185func_end: 2131func_end:
@@ -2226,8 +2172,8 @@ int node_register_notify(struct node_object *hnode, u32 event_mask,
2226 notify_type); 2172 notify_type);
2227 } else { 2173 } else {
2228 /* Send Message part of event mask to msg_ctrl */ 2174 /* Send Message part of event mask to msg_ctrl */
2229 intf_fxns = hnode->hnode_mgr->intf_fxns; 2175 intf_fxns = hnode->node_mgr->intf_fxns;
2230 status = (*intf_fxns->pfn_msg_register_notify) 2176 status = (*intf_fxns->msg_register_notify)
2231 (hnode->msg_queue_obj, 2177 (hnode->msg_queue_obj,
2232 event_mask & DSP_NODEMESSAGEREADY, notify_type, 2178 event_mask & DSP_NODEMESSAGEREADY, notify_type,
2233 hnotification); 2179 hnotification);
@@ -2267,7 +2213,7 @@ int node_run(struct node_object *hnode)
2267 status = -EFAULT; 2213 status = -EFAULT;
2268 goto func_end; 2214 goto func_end;
2269 } 2215 }
2270 hprocessor = hnode->hprocessor; 2216 hprocessor = hnode->processor;
2271 status = proc_get_state(hprocessor, &proc_state, 2217 status = proc_get_state(hprocessor, &proc_state,
2272 sizeof(struct dsp_processorstate)); 2218 sizeof(struct dsp_processorstate));
2273 if (status) 2219 if (status)
@@ -2283,7 +2229,7 @@ int node_run(struct node_object *hnode)
2283 if (status) 2229 if (status)
2284 goto func_end; 2230 goto func_end;
2285 2231
2286 hnode_mgr = hnode->hnode_mgr; 2232 hnode_mgr = hnode->node_mgr;
2287 if (!hnode_mgr) { 2233 if (!hnode_mgr) {
2288 status = -EFAULT; 2234 status = -EFAULT;
2289 goto func_end; 2235 goto func_end;
@@ -2297,7 +2243,7 @@ int node_run(struct node_object *hnode)
2297 status = -EBADR; 2243 status = -EBADR;
2298 2244
2299 if (!status) 2245 if (!status)
2300 status = proc_get_processor_id(pnode->hprocessor, &proc_id); 2246 status = proc_get_processor_id(pnode->processor, &proc_id);
2301 2247
2302 if (status) 2248 if (status)
2303 goto func_cont1; 2249 goto func_cont1;
@@ -2309,7 +2255,7 @@ int node_run(struct node_object *hnode)
2309 /* If node's execute function is not loaded, load it */ 2255 /* If node's execute function is not loaded, load it */
2310 if (!(hnode->loaded) && hnode->phase_split) { 2256 if (!(hnode->loaded) && hnode->phase_split) {
2311 status = 2257 status =
2312 hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj, 2258 hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
2313 NLDR_EXECUTE); 2259 NLDR_EXECUTE);
2314 if (!status) { 2260 if (!status) {
2315 hnode->loaded = true; 2261 hnode->loaded = true;
@@ -2328,14 +2274,14 @@ int node_run(struct node_object *hnode)
2328 } 2274 }
2329 } 2275 }
2330 if (!status) { 2276 if (!status) {
2331 ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSEXECUTENODE]; 2277 ul_fxn_addr = hnode_mgr->fxn_addrs[RMSEXECUTENODE];
2332 status = 2278 status =
2333 disp_node_run(hnode_mgr->disp_obj, hnode, 2279 disp_node_run(hnode_mgr->disp_obj, hnode,
2334 ul_fxn_addr, ul_execute_fxn, 2280 ul_fxn_addr, ul_execute_fxn,
2335 hnode->node_env); 2281 hnode->node_env);
2336 } 2282 }
2337 } else if (state == NODE_PAUSED) { 2283 } else if (state == NODE_PAUSED) {
2338 ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY]; 2284 ul_fxn_addr = hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY];
2339 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode, 2285 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
2340 ul_fxn_addr, hnode->node_env, 2286 ul_fxn_addr, hnode->node_env,
2341 NODE_GET_PRIORITY(hnode)); 2287 NODE_GET_PRIORITY(hnode));
@@ -2353,7 +2299,7 @@ func_cont1:
2353 /* Exit critical section */ 2299 /* Exit critical section */
2354 mutex_unlock(&hnode_mgr->node_mgr_lock); 2300 mutex_unlock(&hnode_mgr->node_mgr_lock);
2355 if (status >= 0) { 2301 if (status >= 0) {
2356 proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE); 2302 proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
2357 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE); 2303 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2358 } 2304 }
2359func_end: 2305func_end:
@@ -2383,18 +2329,18 @@ int node_terminate(struct node_object *hnode, int *pstatus)
2383 DBC_REQUIRE(refs > 0); 2329 DBC_REQUIRE(refs > 0);
2384 DBC_REQUIRE(pstatus != NULL); 2330 DBC_REQUIRE(pstatus != NULL);
2385 2331
2386 if (!hnode || !hnode->hnode_mgr) { 2332 if (!hnode || !hnode->node_mgr) {
2387 status = -EFAULT; 2333 status = -EFAULT;
2388 goto func_end; 2334 goto func_end;
2389 } 2335 }
2390 if (pnode->hprocessor == NULL) { 2336 if (pnode->processor == NULL) {
2391 status = -EFAULT; 2337 status = -EFAULT;
2392 goto func_end; 2338 goto func_end;
2393 } 2339 }
2394 status = proc_get_processor_id(pnode->hprocessor, &proc_id); 2340 status = proc_get_processor_id(pnode->processor, &proc_id);
2395 2341
2396 if (!status) { 2342 if (!status) {
2397 hnode_mgr = hnode->hnode_mgr; 2343 hnode_mgr = hnode->node_mgr;
2398 node_type = node_get_type(hnode); 2344 node_type = node_get_type(hnode);
2399 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) 2345 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
2400 status = -EPERM; 2346 status = -EPERM;
@@ -2421,7 +2367,7 @@ int node_terminate(struct node_object *hnode, int *pstatus)
2421 * Send exit message. Do not change state to NODE_DONE 2367 * Send exit message. Do not change state to NODE_DONE
2422 * here. That will be done in callback. 2368 * here. That will be done in callback.
2423 */ 2369 */
2424 status = proc_get_state(pnode->hprocessor, &proc_state, 2370 status = proc_get_state(pnode->processor, &proc_state,
2425 sizeof(struct dsp_processorstate)); 2371 sizeof(struct dsp_processorstate));
2426 if (status) 2372 if (status)
2427 goto func_cont; 2373 goto func_cont;
@@ -2432,19 +2378,19 @@ int node_terminate(struct node_object *hnode, int *pstatus)
2432 goto func_cont; 2378 goto func_cont;
2433 } 2379 }
2434 2380
2435 msg.dw_cmd = RMS_EXIT; 2381 msg.cmd = RMS_EXIT;
2436 msg.dw_arg1 = hnode->node_env; 2382 msg.arg1 = hnode->node_env;
2437 killmsg.dw_cmd = RMS_KILLTASK; 2383 killmsg.cmd = RMS_KILLTASK;
2438 killmsg.dw_arg1 = hnode->node_env; 2384 killmsg.arg1 = hnode->node_env;
2439 intf_fxns = hnode_mgr->intf_fxns; 2385 intf_fxns = hnode_mgr->intf_fxns;
2440 2386
2441 if (hnode->utimeout > MAXTIMEOUT) 2387 if (hnode->timeout > MAXTIMEOUT)
2442 kill_time_out = MAXTIMEOUT; 2388 kill_time_out = MAXTIMEOUT;
2443 else 2389 else
2444 kill_time_out = (hnode->utimeout) * 2; 2390 kill_time_out = (hnode->timeout) * 2;
2445 2391
2446 status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj, &msg, 2392 status = (*intf_fxns->msg_put) (hnode->msg_queue_obj, &msg,
2447 hnode->utimeout); 2393 hnode->timeout);
2448 if (status) 2394 if (status)
2449 goto func_cont; 2395 goto func_cont;
2450 2396
@@ -2459,8 +2405,8 @@ int node_terminate(struct node_object *hnode, int *pstatus)
2459 if (status != ETIME) 2405 if (status != ETIME)
2460 goto func_cont; 2406 goto func_cont;
2461 2407
2462 status = (*intf_fxns->pfn_msg_put)(hnode->msg_queue_obj, 2408 status = (*intf_fxns->msg_put)(hnode->msg_queue_obj,
2463 &killmsg, hnode->utimeout); 2409 &killmsg, hnode->timeout);
2464 if (status) 2410 if (status)
2465 goto func_cont; 2411 goto func_cont;
2466 status = sync_wait_on_event(hnode->sync_done, 2412 status = sync_wait_on_event(hnode->sync_done,
@@ -2470,7 +2416,7 @@ int node_terminate(struct node_object *hnode, int *pstatus)
2470 * Here it goes the part of the simulation of 2416 * Here it goes the part of the simulation of
2471 * the DSP exception. 2417 * the DSP exception.
2472 */ 2418 */
2473 dev_get_deh_mgr(hnode_mgr->hdev_obj, &hdeh_mgr); 2419 dev_get_deh_mgr(hnode_mgr->dev_obj, &hdeh_mgr);
2474 if (!hdeh_mgr) 2420 if (!hdeh_mgr)
2475 goto func_cont; 2421 goto func_cont;
2476 2422
@@ -2514,12 +2460,12 @@ static void delete_node(struct node_object *hnode,
2514#ifdef DSP_DMM_DEBUG 2460#ifdef DSP_DMM_DEBUG
2515 struct dmm_object *dmm_mgr; 2461 struct dmm_object *dmm_mgr;
2516 struct proc_object *p_proc_object = 2462 struct proc_object *p_proc_object =
2517 (struct proc_object *)hnode->hprocessor; 2463 (struct proc_object *)hnode->processor;
2518#endif 2464#endif
2519 int status; 2465 int status;
2520 if (!hnode) 2466 if (!hnode)
2521 goto func_end; 2467 goto func_end;
2522 hnode_mgr = hnode->hnode_mgr; 2468 hnode_mgr = hnode->node_mgr;
2523 if (!hnode_mgr) 2469 if (!hnode_mgr)
2524 goto func_end; 2470 goto func_end;
2525 2471
@@ -2531,7 +2477,7 @@ static void delete_node(struct node_object *hnode,
2531 /* Free msg_ctrl queue */ 2477 /* Free msg_ctrl queue */
2532 if (hnode->msg_queue_obj) { 2478 if (hnode->msg_queue_obj) {
2533 intf_fxns = hnode_mgr->intf_fxns; 2479 intf_fxns = hnode_mgr->intf_fxns;
2534 (*intf_fxns->pfn_msg_delete_queue) (hnode-> 2480 (*intf_fxns->msg_delete_queue) (hnode->
2535 msg_queue_obj); 2481 msg_queue_obj);
2536 hnode->msg_queue_obj = NULL; 2482 hnode->msg_queue_obj = NULL;
2537 } 2483 }
@@ -2572,15 +2518,15 @@ static void delete_node(struct node_object *hnode,
2572 kfree(task_arg_obj.strm_out_def); 2518 kfree(task_arg_obj.strm_out_def);
2573 task_arg_obj.strm_out_def = NULL; 2519 task_arg_obj.strm_out_def = NULL;
2574 } 2520 }
2575 if (task_arg_obj.udsp_heap_res_addr) { 2521 if (task_arg_obj.dsp_heap_res_addr) {
2576 status = proc_un_map(hnode->hprocessor, (void *) 2522 status = proc_un_map(hnode->processor, (void *)
2577 task_arg_obj.udsp_heap_addr, 2523 task_arg_obj.dsp_heap_addr,
2578 pr_ctxt); 2524 pr_ctxt);
2579 2525
2580 status = proc_un_reserve_memory(hnode->hprocessor, 2526 status = proc_un_reserve_memory(hnode->processor,
2581 (void *) 2527 (void *)
2582 task_arg_obj. 2528 task_arg_obj.
2583 udsp_heap_res_addr, 2529 dsp_heap_res_addr,
2584 pr_ctxt); 2530 pr_ctxt);
2585#ifdef DSP_DMM_DEBUG 2531#ifdef DSP_DMM_DEBUG
2586 status = dmm_get_handle(p_proc_object, &dmm_mgr); 2532 status = dmm_get_handle(p_proc_object, &dmm_mgr);
@@ -2595,8 +2541,8 @@ static void delete_node(struct node_object *hnode,
2595 kfree(hnode->stream_connect); 2541 kfree(hnode->stream_connect);
2596 hnode->stream_connect = NULL; 2542 hnode->stream_connect = NULL;
2597 } 2543 }
2598 kfree(hnode->pstr_dev_name); 2544 kfree(hnode->str_dev_name);
2599 hnode->pstr_dev_name = NULL; 2545 hnode->str_dev_name = NULL;
2600 2546
2601 if (hnode->ntfy_obj) { 2547 if (hnode->ntfy_obj) {
2602 ntfy_delete(hnode->ntfy_obj); 2548 ntfy_delete(hnode->ntfy_obj);
@@ -2605,23 +2551,23 @@ static void delete_node(struct node_object *hnode,
2605 } 2551 }
2606 2552
2607 /* These were allocated in dcd_get_object_def (via node_allocate) */ 2553 /* These were allocated in dcd_get_object_def (via node_allocate) */
2608 kfree(hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn); 2554 kfree(hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn);
2609 hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn = NULL; 2555 hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn = NULL;
2610 2556
2611 kfree(hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn); 2557 kfree(hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn);
2612 hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn = NULL; 2558 hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn = NULL;
2613 2559
2614 kfree(hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn); 2560 kfree(hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn);
2615 hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn = NULL; 2561 hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn = NULL;
2616 2562
2617 kfree(hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name); 2563 kfree(hnode->dcd_props.obj_data.node_obj.str_i_alg_name);
2618 hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name = NULL; 2564 hnode->dcd_props.obj_data.node_obj.str_i_alg_name = NULL;
2619 2565
2620 /* Free all SM address translator resources */ 2566 /* Free all SM address translator resources */
2621 kfree(hnode->xlator); 2567 kfree(hnode->xlator);
2622 kfree(hnode->nldr_node_obj); 2568 kfree(hnode->nldr_node_obj);
2623 hnode->nldr_node_obj = NULL; 2569 hnode->nldr_node_obj = NULL;
2624 hnode->hnode_mgr = NULL; 2570 hnode->node_mgr = NULL;
2625 kfree(hnode); 2571 kfree(hnode);
2626 hnode = NULL; 2572 hnode = NULL;
2627func_end: 2573func_end:
@@ -2635,21 +2581,18 @@ func_end:
2635 */ 2581 */
2636static void delete_node_mgr(struct node_mgr *hnode_mgr) 2582static void delete_node_mgr(struct node_mgr *hnode_mgr)
2637{ 2583{
2638 struct node_object *hnode; 2584 struct node_object *hnode, *tmp;
2639 2585
2640 if (hnode_mgr) { 2586 if (hnode_mgr) {
2641 /* Free resources */ 2587 /* Free resources */
2642 if (hnode_mgr->hdcd_mgr) 2588 if (hnode_mgr->dcd_mgr)
2643 dcd_destroy_manager(hnode_mgr->hdcd_mgr); 2589 dcd_destroy_manager(hnode_mgr->dcd_mgr);
2644 2590
2645 /* Remove any elements remaining in lists */ 2591 /* Remove any elements remaining in lists */
2646 if (hnode_mgr->node_list) { 2592 list_for_each_entry_safe(hnode, tmp, &hnode_mgr->node_list,
2647 while ((hnode = (struct node_object *) 2593 list_elem) {
2648 lst_get_head(hnode_mgr->node_list))) 2594 list_del(&hnode->list_elem);
2649 delete_node(hnode, NULL); 2595 delete_node(hnode, NULL);
2650
2651 DBC_ASSERT(LST_IS_EMPTY(hnode_mgr->node_list));
2652 kfree(hnode_mgr->node_list);
2653 } 2596 }
2654 mutex_destroy(&hnode_mgr->node_mgr_lock); 2597 mutex_destroy(&hnode_mgr->node_mgr_lock);
2655 if (hnode_mgr->ntfy_obj) { 2598 if (hnode_mgr->ntfy_obj) {
@@ -2657,21 +2600,6 @@ static void delete_node_mgr(struct node_mgr *hnode_mgr)
2657 kfree(hnode_mgr->ntfy_obj); 2600 kfree(hnode_mgr->ntfy_obj);
2658 } 2601 }
2659 2602
2660 if (hnode_mgr->pipe_map)
2661 gb_delete(hnode_mgr->pipe_map);
2662
2663 if (hnode_mgr->pipe_done_map)
2664 gb_delete(hnode_mgr->pipe_done_map);
2665
2666 if (hnode_mgr->chnl_map)
2667 gb_delete(hnode_mgr->chnl_map);
2668
2669 if (hnode_mgr->dma_chnl_map)
2670 gb_delete(hnode_mgr->dma_chnl_map);
2671
2672 if (hnode_mgr->zc_chnl_map)
2673 gb_delete(hnode_mgr->zc_chnl_map);
2674
2675 if (hnode_mgr->disp_obj) 2603 if (hnode_mgr->disp_obj)
2676 disp_delete(hnode_mgr->disp_obj); 2604 disp_delete(hnode_mgr->disp_obj);
2677 2605
@@ -2680,10 +2608,10 @@ static void delete_node_mgr(struct node_mgr *hnode_mgr)
2680 2608
2681 /* Delete the loader */ 2609 /* Delete the loader */
2682 if (hnode_mgr->nldr_obj) 2610 if (hnode_mgr->nldr_obj)
2683 hnode_mgr->nldr_fxns.pfn_delete(hnode_mgr->nldr_obj); 2611 hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);
2684 2612
2685 if (hnode_mgr->loader_init) 2613 if (hnode_mgr->loader_init)
2686 hnode_mgr->nldr_fxns.pfn_exit(); 2614 hnode_mgr->nldr_fxns.exit();
2687 2615
2688 kfree(hnode_mgr); 2616 kfree(hnode_mgr);
2689 } 2617 }
@@ -2758,22 +2686,22 @@ static void fill_stream_def(struct node_object *hnode,
2758 struct node_strmdef *pstrm_def, 2686 struct node_strmdef *pstrm_def,
2759 struct dsp_strmattr *pattrs) 2687 struct dsp_strmattr *pattrs)
2760{ 2688{
2761 struct node_mgr *hnode_mgr = hnode->hnode_mgr; 2689 struct node_mgr *hnode_mgr = hnode->node_mgr;
2762 2690
2763 if (pattrs != NULL) { 2691 if (pattrs != NULL) {
2764 pstrm_def->num_bufs = pattrs->num_bufs; 2692 pstrm_def->num_bufs = pattrs->num_bufs;
2765 pstrm_def->buf_size = 2693 pstrm_def->buf_size =
2766 pattrs->buf_size / hnode_mgr->udsp_data_mau_size; 2694 pattrs->buf_size / hnode_mgr->dsp_data_mau_size;
2767 pstrm_def->seg_id = pattrs->seg_id; 2695 pstrm_def->seg_id = pattrs->seg_id;
2768 pstrm_def->buf_alignment = pattrs->buf_alignment; 2696 pstrm_def->buf_alignment = pattrs->buf_alignment;
2769 pstrm_def->utimeout = pattrs->utimeout; 2697 pstrm_def->timeout = pattrs->timeout;
2770 } else { 2698 } else {
2771 pstrm_def->num_bufs = DEFAULTNBUFS; 2699 pstrm_def->num_bufs = DEFAULTNBUFS;
2772 pstrm_def->buf_size = 2700 pstrm_def->buf_size =
2773 DEFAULTBUFSIZE / hnode_mgr->udsp_data_mau_size; 2701 DEFAULTBUFSIZE / hnode_mgr->dsp_data_mau_size;
2774 pstrm_def->seg_id = DEFAULTSEGID; 2702 pstrm_def->seg_id = DEFAULTSEGID;
2775 pstrm_def->buf_alignment = DEFAULTALIGNMENT; 2703 pstrm_def->buf_alignment = DEFAULTALIGNMENT;
2776 pstrm_def->utimeout = DEFAULTTIMEOUT; 2704 pstrm_def->timeout = DEFAULTTIMEOUT;
2777 } 2705 }
2778} 2706}
2779 2707
@@ -2786,25 +2714,25 @@ static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
2786{ 2714{
2787 /* Free up the pipe id unless other node has not yet been deleted. */ 2715 /* Free up the pipe id unless other node has not yet been deleted. */
2788 if (stream.type == NODECONNECT) { 2716 if (stream.type == NODECONNECT) {
2789 if (gb_test(hnode_mgr->pipe_done_map, stream.dev_id)) { 2717 if (test_bit(stream.dev_id, hnode_mgr->pipe_done_map)) {
2790 /* The other node has already been deleted */ 2718 /* The other node has already been deleted */
2791 gb_clear(hnode_mgr->pipe_done_map, stream.dev_id); 2719 clear_bit(stream.dev_id, hnode_mgr->pipe_done_map);
2792 gb_clear(hnode_mgr->pipe_map, stream.dev_id); 2720 clear_bit(stream.dev_id, hnode_mgr->pipe_map);
2793 } else { 2721 } else {
2794 /* The other node has not been deleted yet */ 2722 /* The other node has not been deleted yet */
2795 gb_set(hnode_mgr->pipe_done_map, stream.dev_id); 2723 set_bit(stream.dev_id, hnode_mgr->pipe_done_map);
2796 } 2724 }
2797 } else if (stream.type == HOSTCONNECT) { 2725 } else if (stream.type == HOSTCONNECT) {
2798 if (stream.dev_id < hnode_mgr->ul_num_chnls) { 2726 if (stream.dev_id < hnode_mgr->num_chnls) {
2799 gb_clear(hnode_mgr->chnl_map, stream.dev_id); 2727 clear_bit(stream.dev_id, hnode_mgr->chnl_map);
2800 } else if (stream.dev_id < (2 * hnode_mgr->ul_num_chnls)) { 2728 } else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) {
2801 /* dsp-dma */ 2729 /* dsp-dma */
2802 gb_clear(hnode_mgr->dma_chnl_map, stream.dev_id - 2730 clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls),
2803 (1 * hnode_mgr->ul_num_chnls)); 2731 hnode_mgr->dma_chnl_map);
2804 } else if (stream.dev_id < (3 * hnode_mgr->ul_num_chnls)) { 2732 } else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) {
2805 /* zero-copy */ 2733 /* zero-copy */
2806 gb_clear(hnode_mgr->zc_chnl_map, stream.dev_id - 2734 clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls),
2807 (2 * hnode_mgr->ul_num_chnls)); 2735 hnode_mgr->zc_chnl_map);
2808 } 2736 }
2809 } 2737 }
2810} 2738}
@@ -2818,7 +2746,7 @@ static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
2818 u32 phase) 2746 u32 phase)
2819{ 2747{
2820 char *pstr_fxn_name = NULL; 2748 char *pstr_fxn_name = NULL;
2821 struct node_mgr *hnode_mgr = hnode->hnode_mgr; 2749 struct node_mgr *hnode_mgr = hnode->node_mgr;
2822 int status = 0; 2750 int status = 0;
2823 DBC_REQUIRE(node_get_type(hnode) == NODE_TASK || 2751 DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
2824 node_get_type(hnode) == NODE_DAISSOCKET || 2752 node_get_type(hnode) == NODE_DAISSOCKET ||
@@ -2827,15 +2755,15 @@ static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
2827 switch (phase) { 2755 switch (phase) {
2828 case CREATEPHASE: 2756 case CREATEPHASE:
2829 pstr_fxn_name = 2757 pstr_fxn_name =
2830 hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn; 2758 hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn;
2831 break; 2759 break;
2832 case EXECUTEPHASE: 2760 case EXECUTEPHASE:
2833 pstr_fxn_name = 2761 pstr_fxn_name =
2834 hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn; 2762 hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn;
2835 break; 2763 break;
2836 case DELETEPHASE: 2764 case DELETEPHASE:
2837 pstr_fxn_name = 2765 pstr_fxn_name =
2838 hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn; 2766 hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn;
2839 break; 2767 break;
2840 default: 2768 default:
2841 /* Should never get here */ 2769 /* Should never get here */
@@ -2844,7 +2772,7 @@ static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
2844 } 2772 }
2845 2773
2846 status = 2774 status =
2847 hnode_mgr->nldr_fxns.pfn_get_fxn_addr(hnode->nldr_node_obj, 2775 hnode_mgr->nldr_fxns.get_fxn_addr(hnode->nldr_node_obj,
2848 pstr_fxn_name, fxn_addr); 2776 pstr_fxn_name, fxn_addr);
2849 2777
2850 return status; 2778 return status;
@@ -2923,11 +2851,11 @@ static int get_node_props(struct dcd_manager *hdcd_mgr,
2923 DBC_REQUIRE(pndb_props->ac_name); 2851 DBC_REQUIRE(pndb_props->ac_name);
2924 len = strlen(pndb_props->ac_name); 2852 len = strlen(pndb_props->ac_name);
2925 DBC_ASSERT(len < MAXDEVNAMELEN); 2853 DBC_ASSERT(len < MAXDEVNAMELEN);
2926 hnode->pstr_dev_name = kzalloc(len + 1, GFP_KERNEL); 2854 hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
2927 if (hnode->pstr_dev_name == NULL) { 2855 if (hnode->str_dev_name == NULL) {
2928 status = -ENOMEM; 2856 status = -ENOMEM;
2929 } else { 2857 } else {
2930 strncpy(hnode->pstr_dev_name, 2858 strncpy(hnode->str_dev_name,
2931 pndb_props->ac_name, len); 2859 pndb_props->ac_name, len);
2932 } 2860 }
2933 } 2861 }
@@ -2974,9 +2902,9 @@ static int get_proc_props(struct node_mgr *hnode_mgr,
2974 host_res = pbridge_context->resources; 2902 host_res = pbridge_context->resources;
2975 if (!host_res) 2903 if (!host_res)
2976 return -EPERM; 2904 return -EPERM;
2977 hnode_mgr->ul_chnl_offset = host_res->dw_chnl_offset; 2905 hnode_mgr->chnl_offset = host_res->chnl_offset;
2978 hnode_mgr->ul_chnl_buf_size = host_res->dw_chnl_buf_size; 2906 hnode_mgr->chnl_buf_size = host_res->chnl_buf_size;
2979 hnode_mgr->ul_num_chnls = host_res->dw_num_chnls; 2907 hnode_mgr->num_chnls = host_res->num_chnls;
2980 2908
2981 /* 2909 /*
2982 * PROC will add an API to get dsp_processorinfo. 2910 * PROC will add an API to get dsp_processorinfo.
@@ -2987,9 +2915,9 @@ static int get_proc_props(struct node_mgr *hnode_mgr,
2987 hnode_mgr->proc_type = 6410; 2915 hnode_mgr->proc_type = 6410;
2988 hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY; 2916 hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
2989 hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY; 2917 hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
2990 hnode_mgr->udsp_word_size = DSPWORDSIZE; 2918 hnode_mgr->dsp_word_size = DSPWORDSIZE;
2991 hnode_mgr->udsp_data_mau_size = DSPWORDSIZE; 2919 hnode_mgr->dsp_data_mau_size = DSPWORDSIZE;
2992 hnode_mgr->udsp_mau_size = 1; 2920 hnode_mgr->dsp_mau_size = 1;
2993 2921
2994 } 2922 }
2995 return status; 2923 return status;
@@ -3046,24 +2974,24 @@ int node_get_uuid_props(void *hprocessor,
3046 */ 2974 */
3047 mutex_lock(&hnode_mgr->node_mgr_lock); 2975 mutex_lock(&hnode_mgr->node_mgr_lock);
3048 2976
3049 dcd_node_props.pstr_create_phase_fxn = NULL; 2977 dcd_node_props.str_create_phase_fxn = NULL;
3050 dcd_node_props.pstr_execute_phase_fxn = NULL; 2978 dcd_node_props.str_execute_phase_fxn = NULL;
3051 dcd_node_props.pstr_delete_phase_fxn = NULL; 2979 dcd_node_props.str_delete_phase_fxn = NULL;
3052 dcd_node_props.pstr_i_alg_name = NULL; 2980 dcd_node_props.str_i_alg_name = NULL;
3053 2981
3054 status = dcd_get_object_def(hnode_mgr->hdcd_mgr, 2982 status = dcd_get_object_def(hnode_mgr->dcd_mgr,
3055 (struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE, 2983 (struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
3056 (struct dcd_genericobj *)&dcd_node_props); 2984 (struct dcd_genericobj *)&dcd_node_props);
3057 2985
3058 if (!status) { 2986 if (!status) {
3059 *node_props = dcd_node_props.ndb_props; 2987 *node_props = dcd_node_props.ndb_props;
3060 kfree(dcd_node_props.pstr_create_phase_fxn); 2988 kfree(dcd_node_props.str_create_phase_fxn);
3061 2989
3062 kfree(dcd_node_props.pstr_execute_phase_fxn); 2990 kfree(dcd_node_props.str_execute_phase_fxn);
3063 2991
3064 kfree(dcd_node_props.pstr_delete_phase_fxn); 2992 kfree(dcd_node_props.str_delete_phase_fxn);
3065 2993
3066 kfree(dcd_node_props.pstr_i_alg_name); 2994 kfree(dcd_node_props.str_i_alg_name);
3067 } 2995 }
3068 /* Leave the critical section, we're done. */ 2996 /* Leave the critical section, we're done. */
3069 mutex_unlock(&hnode_mgr->node_mgr_lock); 2997 mutex_unlock(&hnode_mgr->node_mgr_lock);
@@ -3079,7 +3007,7 @@ func_end:
3079static int get_rms_fxns(struct node_mgr *hnode_mgr) 3007static int get_rms_fxns(struct node_mgr *hnode_mgr)
3080{ 3008{
3081 s32 i; 3009 s32 i;
3082 struct dev_object *dev_obj = hnode_mgr->hdev_obj; 3010 struct dev_object *dev_obj = hnode_mgr->dev_obj;
3083 int status = 0; 3011 int status = 0;
3084 3012
3085 static char *psz_fxns[NUMRMSFXNS] = { 3013 static char *psz_fxns[NUMRMSFXNS] = {
@@ -3096,7 +3024,7 @@ static int get_rms_fxns(struct node_mgr *hnode_mgr)
3096 3024
3097 for (i = 0; i < NUMRMSFXNS; i++) { 3025 for (i = 0; i < NUMRMSFXNS; i++) {
3098 status = dev_get_symbol(dev_obj, psz_fxns[i], 3026 status = dev_get_symbol(dev_obj, psz_fxns[i],
3099 &(hnode_mgr->ul_fxn_addrs[i])); 3027 &(hnode_mgr->fxn_addrs[i]));
3100 if (status) { 3028 if (status) {
3101 if (status == -ESPIPE) { 3029 if (status == -ESPIPE) {
3102 /* 3030 /*
@@ -3137,17 +3065,17 @@ static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
3137 3065
3138 DBC_REQUIRE(hnode); 3066 DBC_REQUIRE(hnode);
3139 3067
3140 hnode_mgr = hnode->hnode_mgr; 3068 hnode_mgr = hnode->node_mgr;
3141 3069
3142 ul_size = ul_num_bytes / hnode_mgr->udsp_word_size; 3070 ul_size = ul_num_bytes / hnode_mgr->dsp_word_size;
3143 ul_timeout = hnode->utimeout; 3071 ul_timeout = hnode->timeout;
3144 3072
3145 /* Call new MemCopy function */ 3073 /* Call new MemCopy function */
3146 intf_fxns = hnode_mgr->intf_fxns; 3074 intf_fxns = hnode_mgr->intf_fxns;
3147 status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context); 3075 status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
3148 if (!status) { 3076 if (!status) {
3149 status = 3077 status =
3150 (*intf_fxns->pfn_brd_mem_copy) (hbridge_context, 3078 (*intf_fxns->brd_mem_copy) (hbridge_context,
3151 dsp_run_addr, dsp_load_addr, 3079 dsp_run_addr, dsp_load_addr,
3152 ul_num_bytes, (u32) mem_space); 3080 ul_num_bytes, (u32) mem_space);
3153 if (!status) 3081 if (!status)
@@ -3181,15 +3109,15 @@ static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
3181 DBC_REQUIRE(hnode); 3109 DBC_REQUIRE(hnode);
3182 DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA); 3110 DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
3183 3111
3184 hnode_mgr = hnode->hnode_mgr; 3112 hnode_mgr = hnode->node_mgr;
3185 3113
3186 ul_timeout = hnode->utimeout; 3114 ul_timeout = hnode->timeout;
3187 mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA; 3115 mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
3188 3116
3189 /* Call new MemWrite function */ 3117 /* Call new MemWrite function */
3190 intf_fxns = hnode_mgr->intf_fxns; 3118 intf_fxns = hnode_mgr->intf_fxns;
3191 status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context); 3119 status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
3192 status = (*intf_fxns->pfn_brd_mem_write) (hbridge_context, pbuf, 3120 status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf,
3193 dsp_add, ul_num_bytes, mem_sect_type); 3121 dsp_add, ul_num_bytes, mem_sect_type);
3194 3122
3195 return ul_num_bytes; 3123 return ul_num_bytes;
@@ -3204,23 +3132,17 @@ int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
3204{ 3132{
3205 struct node_object *node_obj; 3133 struct node_object *node_obj;
3206 int status = -ENOENT; 3134 int status = -ENOENT;
3207 u32 n;
3208 3135
3209 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, 3136 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
3210 (unsigned int) node_mgr, 3137 (unsigned int) node_mgr,
3211 sym_addr, offset_range, 3138 sym_addr, offset_range,
3212 (unsigned int) sym_addr_output, sym_name); 3139 (unsigned int) sym_addr_output, sym_name);
3213 3140
3214 node_obj = (struct node_object *)(node_mgr->node_list->head.next); 3141 list_for_each_entry(node_obj, &node_mgr->node_list, list_elem) {
3215
3216 for (n = 0; n < node_mgr->num_nodes; n++) {
3217 status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr, 3142 status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
3218 offset_range, sym_addr_output, sym_name); 3143 offset_range, sym_addr_output, sym_name);
3219
3220 if (!status) 3144 if (!status)
3221 break; 3145 break;
3222
3223 node_obj = (struct node_object *) (node_obj->list_elem.next);
3224 } 3146 }
3225 3147
3226 return status; 3148 return status;
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index b47d7aa747b..54f61336d77 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -29,7 +29,6 @@
29#include <dspbridge/dbc.h> 29#include <dspbridge/dbc.h>
30 30
31/* ----------------------------------- OS Adaptation Layer */ 31/* ----------------------------------- OS Adaptation Layer */
32#include <dspbridge/list.h>
33#include <dspbridge/ntfy.h> 32#include <dspbridge/ntfy.h>
34#include <dspbridge/sync.h> 33#include <dspbridge/sync.h>
35/* ----------------------------------- Bridge Driver */ 34/* ----------------------------------- Bridge Driver */
@@ -81,24 +80,24 @@ extern struct device *bridge;
81/* The proc_object structure. */ 80/* The proc_object structure. */
82struct proc_object { 81struct proc_object {
83 struct list_head link; /* Link to next proc_object */ 82 struct list_head link; /* Link to next proc_object */
84 struct dev_object *hdev_obj; /* Device this PROC represents */ 83 struct dev_object *dev_obj; /* Device this PROC represents */
85 u32 process; /* Process owning this Processor */ 84 u32 process; /* Process owning this Processor */
86 struct mgr_object *hmgr_obj; /* Manager Object Handle */ 85 struct mgr_object *mgr_obj; /* Manager Object Handle */
87 u32 attach_count; /* Processor attach count */ 86 u32 attach_count; /* Processor attach count */
88 u32 processor_id; /* Processor number */ 87 u32 processor_id; /* Processor number */
89 u32 utimeout; /* Time out count */ 88 u32 timeout; /* Time out count */
90 enum dsp_procstate proc_state; /* Processor state */ 89 enum dsp_procstate proc_state; /* Processor state */
91 u32 ul_unit; /* DDSP unit number */ 90 u32 unit; /* DDSP unit number */
92 bool is_already_attached; /* 91 bool is_already_attached; /*
93 * True if the Device below has 92 * True if the Device below has
94 * GPP Client attached 93 * GPP Client attached
95 */ 94 */
96 struct ntfy_object *ntfy_obj; /* Manages notifications */ 95 struct ntfy_object *ntfy_obj; /* Manages notifications */
97 /* Bridge Context Handle */ 96 /* Bridge Context Handle */
98 struct bridge_dev_context *hbridge_context; 97 struct bridge_dev_context *bridge_context;
99 /* Function interface to Bridge driver */ 98 /* Function interface to Bridge driver */
100 struct bridge_drv_interface *intf_fxns; 99 struct bridge_drv_interface *intf_fxns;
101 char *psz_last_coff; 100 char *last_coff;
102 struct list_head proc_list; 101 struct list_head proc_list;
103}; 102};
104 103
@@ -285,8 +284,8 @@ proc_attach(u32 processor_id,
285 DBC_REQUIRE(refs > 0); 284 DBC_REQUIRE(refs > 0);
286 DBC_REQUIRE(ph_processor != NULL); 285 DBC_REQUIRE(ph_processor != NULL);
287 286
288 if (pr_ctxt->hprocessor) { 287 if (pr_ctxt->processor) {
289 *ph_processor = pr_ctxt->hprocessor; 288 *ph_processor = pr_ctxt->processor;
290 return status; 289 return status;
291 } 290 }
292 291
@@ -316,8 +315,8 @@ proc_attach(u32 processor_id,
316 status = -ENOMEM; 315 status = -ENOMEM;
317 goto func_end; 316 goto func_end;
318 } 317 }
319 p_proc_object->hdev_obj = hdev_obj; 318 p_proc_object->dev_obj = hdev_obj;
320 p_proc_object->hmgr_obj = hmgr_obj; 319 p_proc_object->mgr_obj = hmgr_obj;
321 p_proc_object->processor_id = dev_type; 320 p_proc_object->processor_id = dev_type;
322 /* Store TGID instead of process handle */ 321 /* Store TGID instead of process handle */
323 p_proc_object->process = current->tgid; 322 p_proc_object->process = current->tgid;
@@ -325,14 +324,14 @@ proc_attach(u32 processor_id,
325 INIT_LIST_HEAD(&p_proc_object->proc_list); 324 INIT_LIST_HEAD(&p_proc_object->proc_list);
326 325
327 if (attr_in) 326 if (attr_in)
328 p_proc_object->utimeout = attr_in->utimeout; 327 p_proc_object->timeout = attr_in->timeout;
329 else 328 else
330 p_proc_object->utimeout = PROC_DFLT_TIMEOUT; 329 p_proc_object->timeout = PROC_DFLT_TIMEOUT;
331 330
332 status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns); 331 status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
333 if (!status) { 332 if (!status) {
334 status = dev_get_bridge_context(hdev_obj, 333 status = dev_get_bridge_context(hdev_obj,
335 &p_proc_object->hbridge_context); 334 &p_proc_object->bridge_context);
336 if (status) 335 if (status)
337 kfree(p_proc_object); 336 kfree(p_proc_object);
338 } else 337 } else
@@ -357,8 +356,7 @@ proc_attach(u32 processor_id,
357 * Return handle to this Processor Object: 356 * Return handle to this Processor Object:
358 * Find out if the Device is already attached to a 357 * Find out if the Device is already attached to a
359 * Processor. If so, return AlreadyAttached status */ 358 * Processor. If so, return AlreadyAttached status */
360 lst_init_elem(&p_proc_object->link); 359 status = dev_insert_proc_object(p_proc_object->dev_obj,
361 status = dev_insert_proc_object(p_proc_object->hdev_obj,
362 (u32) p_proc_object, 360 (u32) p_proc_object,
363 &p_proc_object-> 361 &p_proc_object->
364 is_already_attached); 362 is_already_attached);
@@ -375,7 +373,7 @@ proc_attach(u32 processor_id,
375 } 373 }
376 if (!status) { 374 if (!status) {
377 *ph_processor = (void *)p_proc_object; 375 *ph_processor = (void *)p_proc_object;
378 pr_ctxt->hprocessor = *ph_processor; 376 pr_ctxt->processor = *ph_processor;
379 (void)proc_notify_clients(p_proc_object, 377 (void)proc_notify_clients(p_proc_object,
380 DSP_PROCESSORATTACH); 378 DSP_PROCESSORATTACH);
381 } 379 }
@@ -465,12 +463,12 @@ int proc_auto_start(struct cfg_devnode *dev_node_obj,
465 status = -ENOMEM; 463 status = -ENOMEM;
466 goto func_end; 464 goto func_end;
467 } 465 }
468 p_proc_object->hdev_obj = hdev_obj; 466 p_proc_object->dev_obj = hdev_obj;
469 p_proc_object->hmgr_obj = hmgr_obj; 467 p_proc_object->mgr_obj = hmgr_obj;
470 status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns); 468 status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
471 if (!status) 469 if (!status)
472 status = dev_get_bridge_context(hdev_obj, 470 status = dev_get_bridge_context(hdev_obj,
473 &p_proc_object->hbridge_context); 471 &p_proc_object->bridge_context);
474 if (status) 472 if (status)
475 goto func_cont; 473 goto func_cont;
476 474
@@ -493,8 +491,8 @@ int proc_auto_start(struct cfg_devnode *dev_node_obj,
493 if (!status) 491 if (!status)
494 status = proc_start(p_proc_object); 492 status = proc_start(p_proc_object);
495 } 493 }
496 kfree(p_proc_object->psz_last_coff); 494 kfree(p_proc_object->last_coff);
497 p_proc_object->psz_last_coff = NULL; 495 p_proc_object->last_coff = NULL;
498func_cont: 496func_cont:
499 kfree(p_proc_object); 497 kfree(p_proc_object);
500func_end: 498func_end:
@@ -542,8 +540,8 @@ int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg)
542 /* timeout = arg->cb_data; */ 540 /* timeout = arg->cb_data; */
543 status = pwr_wake_dsp(timeout); 541 status = pwr_wake_dsp(timeout);
544 } else 542 } else
545 if (!((*p_proc_object->intf_fxns->pfn_dev_cntrl) 543 if (!((*p_proc_object->intf_fxns->dev_cntrl)
546 (p_proc_object->hbridge_context, dw_cmd, 544 (p_proc_object->bridge_context, dw_cmd,
547 arg))) { 545 arg))) {
548 status = 0; 546 status = 0;
549 } else { 547 } else {
@@ -569,7 +567,7 @@ int proc_detach(struct process_context *pr_ctxt)
569 567
570 DBC_REQUIRE(refs > 0); 568 DBC_REQUIRE(refs > 0);
571 569
572 p_proc_object = (struct proc_object *)pr_ctxt->hprocessor; 570 p_proc_object = (struct proc_object *)pr_ctxt->processor;
573 571
574 if (p_proc_object) { 572 if (p_proc_object) {
575 /* Notify the Client */ 573 /* Notify the Client */
@@ -580,14 +578,14 @@ int proc_detach(struct process_context *pr_ctxt)
580 kfree(p_proc_object->ntfy_obj); 578 kfree(p_proc_object->ntfy_obj);
581 } 579 }
582 580
583 kfree(p_proc_object->psz_last_coff); 581 kfree(p_proc_object->last_coff);
584 p_proc_object->psz_last_coff = NULL; 582 p_proc_object->last_coff = NULL;
585 /* Remove the Proc from the DEV List */ 583 /* Remove the Proc from the DEV List */
586 (void)dev_remove_proc_object(p_proc_object->hdev_obj, 584 (void)dev_remove_proc_object(p_proc_object->dev_obj,
587 (u32) p_proc_object); 585 (u32) p_proc_object);
588 /* Free the Processor Object */ 586 /* Free the Processor Object */
589 kfree(p_proc_object); 587 kfree(p_proc_object);
590 pr_ctxt->hprocessor = NULL; 588 pr_ctxt->processor = NULL;
591 } else { 589 } else {
592 status = -EFAULT; 590 status = -EFAULT;
593 } 591 }
@@ -615,7 +613,7 @@ int proc_enum_nodes(void *hprocessor, void **node_tab,
615 DBC_REQUIRE(pu_allocated != NULL); 613 DBC_REQUIRE(pu_allocated != NULL);
616 614
617 if (p_proc_object) { 615 if (p_proc_object) {
618 if (!(dev_get_node_manager(p_proc_object->hdev_obj, 616 if (!(dev_get_node_manager(p_proc_object->dev_obj,
619 &hnode_mgr))) { 617 &hnode_mgr))) {
620 if (hnode_mgr) { 618 if (hnode_mgr) {
621 status = node_enum_nodes(hnode_mgr, node_tab, 619 status = node_enum_nodes(hnode_mgr, node_tab,
@@ -892,7 +890,7 @@ int proc_get_resource_info(void *hprocessor, u32 resource_type,
892 case DSP_RESOURCE_DYNSARAM: 890 case DSP_RESOURCE_DYNSARAM:
893 case DSP_RESOURCE_DYNEXTERNAL: 891 case DSP_RESOURCE_DYNEXTERNAL:
894 case DSP_RESOURCE_DYNSRAM: 892 case DSP_RESOURCE_DYNSRAM:
895 status = dev_get_node_manager(p_proc_object->hdev_obj, 893 status = dev_get_node_manager(p_proc_object->dev_obj,
896 &hnode_mgr); 894 &hnode_mgr);
897 if (!hnode_mgr) { 895 if (!hnode_mgr) {
898 status = -EFAULT; 896 status = -EFAULT;
@@ -915,11 +913,11 @@ int proc_get_resource_info(void *hprocessor, u32 resource_type,
915 } 913 }
916 break; 914 break;
917 case DSP_RESOURCE_PROCLOAD: 915 case DSP_RESOURCE_PROCLOAD:
918 status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr); 916 status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
919 if (hio_mgr) 917 if (hio_mgr)
920 status = 918 status =
921 p_proc_object->intf_fxns-> 919 p_proc_object->intf_fxns->
922 pfn_io_get_proc_load(hio_mgr, 920 io_get_proc_load(hio_mgr,
923 (struct dsp_procloadstat *) 921 (struct dsp_procloadstat *)
924 &(resource_info->result. 922 &(resource_info->result.
925 proc_load_stat)); 923 proc_load_stat));
@@ -965,7 +963,7 @@ int proc_get_dev_object(void *hprocessor,
965 DBC_REQUIRE(device_obj != NULL); 963 DBC_REQUIRE(device_obj != NULL);
966 964
967 if (p_proc_object) { 965 if (p_proc_object) {
968 *device_obj = p_proc_object->hdev_obj; 966 *device_obj = p_proc_object->dev_obj;
969 status = 0; 967 status = 0;
970 } else { 968 } else {
971 *device_obj = NULL; 969 *device_obj = NULL;
@@ -997,8 +995,8 @@ int proc_get_state(void *hprocessor,
997 995
998 if (p_proc_object) { 996 if (p_proc_object) {
999 /* First, retrieve BRD state information */ 997 /* First, retrieve BRD state information */
1000 status = (*p_proc_object->intf_fxns->pfn_brd_status) 998 status = (*p_proc_object->intf_fxns->brd_status)
1001 (p_proc_object->hbridge_context, &brd_status); 999 (p_proc_object->bridge_context, &brd_status);
1002 if (!status) { 1000 if (!status) {
1003 switch (brd_status) { 1001 switch (brd_status) {
1004 case BRD_STOPPED: 1002 case BRD_STOPPED:
@@ -1117,7 +1115,7 @@ int proc_load(void *hprocessor, const s32 argc_index,
1117 status = -EFAULT; 1115 status = -EFAULT;
1118 goto func_end; 1116 goto func_end;
1119 } 1117 }
1120 dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr); 1118 dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
1121 if (!cod_mgr) { 1119 if (!cod_mgr) {
1122 status = -EPERM; 1120 status = -EPERM;
1123 goto func_end; 1121 goto func_end;
@@ -1149,7 +1147,7 @@ int proc_load(void *hprocessor, const s32 argc_index,
1149 prepend_envp(new_envp, (char **)user_envp, 1147 prepend_envp(new_envp, (char **)user_envp,
1150 envp_elems, cnew_envp, sz_proc_id); 1148 envp_elems, cnew_envp, sz_proc_id);
1151 /* Get the DCD Handle */ 1149 /* Get the DCD Handle */
1152 status = mgr_get_dcd_handle(p_proc_object->hmgr_obj, 1150 status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
1153 (u32 *) &hdcd_handle); 1151 (u32 *) &hdcd_handle);
1154 if (!status) { 1152 if (!status) {
1155 /* Before proceeding with new load, 1153 /* Before proceeding with new load,
@@ -1158,16 +1156,16 @@ int proc_load(void *hprocessor, const s32 argc_index,
1158 * If yes, unregister nodes in previously 1156 * If yes, unregister nodes in previously
1159 * registered COFF. If any error occurred, 1157 * registered COFF. If any error occurred,
1160 * set previously registered COFF to NULL. */ 1158 * set previously registered COFF to NULL. */
1161 if (p_proc_object->psz_last_coff != NULL) { 1159 if (p_proc_object->last_coff != NULL) {
1162 status = 1160 status =
1163 dcd_auto_unregister(hdcd_handle, 1161 dcd_auto_unregister(hdcd_handle,
1164 p_proc_object-> 1162 p_proc_object->
1165 psz_last_coff); 1163 last_coff);
1166 /* Regardless of auto unregister status, 1164 /* Regardless of auto unregister status,
1167 * free previously allocated 1165 * free previously allocated
1168 * memory. */ 1166 * memory. */
1169 kfree(p_proc_object->psz_last_coff); 1167 kfree(p_proc_object->last_coff);
1170 p_proc_object->psz_last_coff = NULL; 1168 p_proc_object->last_coff = NULL;
1171 } 1169 }
1172 } 1170 }
1173 /* On success, do cod_open_base() */ 1171 /* On success, do cod_open_base() */
@@ -1180,7 +1178,7 @@ int proc_load(void *hprocessor, const s32 argc_index,
1180 if (!status) { 1178 if (!status) {
1181 /* Auto-register data base */ 1179 /* Auto-register data base */
1182 /* Get the DCD Handle */ 1180 /* Get the DCD Handle */
1183 status = mgr_get_dcd_handle(p_proc_object->hmgr_obj, 1181 status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
1184 (u32 *) &hdcd_handle); 1182 (u32 *) &hdcd_handle);
1185 if (!status) { 1183 if (!status) {
1186 /* Auto register nodes in specified COFF 1184 /* Auto register nodes in specified COFF
@@ -1197,15 +1195,15 @@ int proc_load(void *hprocessor, const s32 argc_index,
1197 if (status) { 1195 if (status) {
1198 status = -EPERM; 1196 status = -EPERM;
1199 } else { 1197 } else {
1200 DBC_ASSERT(p_proc_object->psz_last_coff == 1198 DBC_ASSERT(p_proc_object->last_coff ==
1201 NULL); 1199 NULL);
1202 /* Allocate memory for pszLastCoff */ 1200 /* Allocate memory for pszLastCoff */
1203 p_proc_object->psz_last_coff = 1201 p_proc_object->last_coff =
1204 kzalloc((strlen(user_args[0]) + 1202 kzalloc((strlen(user_args[0]) +
1205 1), GFP_KERNEL); 1203 1), GFP_KERNEL);
1206 /* If memory allocated, save COFF file name */ 1204 /* If memory allocated, save COFF file name */
1207 if (p_proc_object->psz_last_coff) { 1205 if (p_proc_object->last_coff) {
1208 strncpy(p_proc_object->psz_last_coff, 1206 strncpy(p_proc_object->last_coff,
1209 (char *)user_args[0], 1207 (char *)user_args[0],
1210 (strlen((char *)user_args[0]) + 1208 (strlen((char *)user_args[0]) +
1211 1)); 1209 1));
@@ -1217,19 +1215,19 @@ int proc_load(void *hprocessor, const s32 argc_index,
1217 if (!status) { 1215 if (!status) {
1218 /* Create the message manager. This must be done 1216 /* Create the message manager. This must be done
1219 * before calling the IOOnLoaded function. */ 1217 * before calling the IOOnLoaded function. */
1220 dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr); 1218 dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
1221 if (!hmsg_mgr) { 1219 if (!hmsg_mgr) {
1222 status = msg_create(&hmsg_mgr, p_proc_object->hdev_obj, 1220 status = msg_create(&hmsg_mgr, p_proc_object->dev_obj,
1223 (msg_onexit) node_on_exit); 1221 (msg_onexit) node_on_exit);
1224 DBC_ASSERT(!status); 1222 DBC_ASSERT(!status);
1225 dev_set_msg_mgr(p_proc_object->hdev_obj, hmsg_mgr); 1223 dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr);
1226 } 1224 }
1227 } 1225 }
1228 if (!status) { 1226 if (!status) {
1229 /* Set the Device object's message manager */ 1227 /* Set the Device object's message manager */
1230 status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr); 1228 status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
1231 if (hio_mgr) 1229 if (hio_mgr)
1232 status = (*p_proc_object->intf_fxns->pfn_io_on_loaded) 1230 status = (*p_proc_object->intf_fxns->io_on_loaded)
1233 (hio_mgr); 1231 (hio_mgr);
1234 else 1232 else
1235 status = -EFAULT; 1233 status = -EFAULT;
@@ -1244,7 +1242,7 @@ int proc_load(void *hprocessor, const s32 argc_index,
1244#endif 1242#endif
1245 status = cod_load_base(cod_mgr, argc_index, (char **)user_args, 1243 status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
1246 dev_brd_write_fxn, 1244 dev_brd_write_fxn,
1247 p_proc_object->hdev_obj, NULL); 1245 p_proc_object->dev_obj, NULL);
1248 if (status) { 1246 if (status) {
1249 if (status == -EBADF) { 1247 if (status == -EBADF) {
1250 dev_dbg(bridge, "%s: Failure to Load the EXE\n", 1248 dev_dbg(bridge, "%s: Failure to Load the EXE\n",
@@ -1264,8 +1262,8 @@ int proc_load(void *hprocessor, const s32 argc_index,
1264 } 1262 }
1265 if (!status) { 1263 if (!status) {
1266 /* Update the Processor status to loaded */ 1264 /* Update the Processor status to loaded */
1267 status = (*p_proc_object->intf_fxns->pfn_brd_set_state) 1265 status = (*p_proc_object->intf_fxns->brd_set_state)
1268 (p_proc_object->hbridge_context, BRD_LOADED); 1266 (p_proc_object->bridge_context, BRD_LOADED);
1269 if (!status) { 1267 if (!status) {
1270 p_proc_object->proc_state = PROC_LOADED; 1268 p_proc_object->proc_state = PROC_LOADED;
1271 if (p_proc_object->ntfy_obj) 1269 if (p_proc_object->ntfy_obj)
@@ -1285,7 +1283,7 @@ int proc_load(void *hprocessor, const s32 argc_index,
1285 /* Reset DMM structs and add an initial free chunk */ 1283 /* Reset DMM structs and add an initial free chunk */
1286 if (!status) { 1284 if (!status) {
1287 status = 1285 status =
1288 dev_get_dmm_mgr(p_proc_object->hdev_obj, 1286 dev_get_dmm_mgr(p_proc_object->dev_obj,
1289 &dmm_mgr); 1287 &dmm_mgr);
1290 if (dmm_mgr) { 1288 if (dmm_mgr) {
1291 /* Set dw_ext_end to DMM START u8 1289 /* Set dw_ext_end to DMM START u8
@@ -1306,8 +1304,8 @@ int proc_load(void *hprocessor, const s32 argc_index,
1306 kfree(new_envp); 1304 kfree(new_envp);
1307 user_args[0] = pargv0; 1305 user_args[0] = pargv0;
1308 if (!status) { 1306 if (!status) {
1309 if (!((*p_proc_object->intf_fxns->pfn_brd_status) 1307 if (!((*p_proc_object->intf_fxns->brd_status)
1310 (p_proc_object->hbridge_context, &brd_state))) { 1308 (p_proc_object->bridge_context, &brd_state))) {
1311 pr_info("%s: Processor Loaded %s\n", __func__, pargv0); 1309 pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
1312 kfree(drv_datap->base_img); 1310 kfree(drv_datap->base_img);
1313 drv_datap->base_img = kmalloc(strlen(pargv0) + 1, 1311 drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
@@ -1399,8 +1397,8 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1399 if (!map_obj) 1397 if (!map_obj)
1400 status = -ENOMEM; 1398 status = -ENOMEM;
1401 else 1399 else
1402 status = (*p_proc_object->intf_fxns->pfn_brd_mem_map) 1400 status = (*p_proc_object->intf_fxns->brd_mem_map)
1403 (p_proc_object->hbridge_context, pa_align, va_align, 1401 (p_proc_object->bridge_context, pa_align, va_align,
1404 size_align, ul_map_attr, map_obj->pages); 1402 size_align, ul_map_attr, map_obj->pages);
1405 } 1403 }
1406 if (!status) { 1404 if (!status) {
@@ -1477,7 +1475,7 @@ int proc_register_notify(void *hprocessor, u32 event_mask,
1477 */ 1475 */
1478 if ((event_mask == 0) && status) { 1476 if ((event_mask == 0) && status) {
1479 status = 1477 status =
1480 dev_get_deh_mgr(p_proc_object->hdev_obj, 1478 dev_get_deh_mgr(p_proc_object->dev_obj,
1481 &hdeh_mgr); 1479 &hdeh_mgr);
1482 status = 1480 status =
1483 bridge_deh_register_notify(hdeh_mgr, 1481 bridge_deh_register_notify(hdeh_mgr,
@@ -1486,7 +1484,7 @@ int proc_register_notify(void *hprocessor, u32 event_mask,
1486 hnotification); 1484 hnotification);
1487 } 1485 }
1488 } else { 1486 } else {
1489 status = dev_get_deh_mgr(p_proc_object->hdev_obj, 1487 status = dev_get_deh_mgr(p_proc_object->dev_obj,
1490 &hdeh_mgr); 1488 &hdeh_mgr);
1491 status = 1489 status =
1492 bridge_deh_register_notify(hdeh_mgr, 1490 bridge_deh_register_notify(hdeh_mgr,
@@ -1572,7 +1570,7 @@ int proc_start(void *hprocessor)
1572 status = -EBADR; 1570 status = -EBADR;
1573 goto func_end; 1571 goto func_end;
1574 } 1572 }
1575 status = dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr); 1573 status = dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
1576 if (!cod_mgr) { 1574 if (!cod_mgr) {
1577 status = -EFAULT; 1575 status = -EFAULT;
1578 goto func_cont; 1576 goto func_cont;
@@ -1582,13 +1580,13 @@ int proc_start(void *hprocessor)
1582 if (status) 1580 if (status)
1583 goto func_cont; 1581 goto func_cont;
1584 1582
1585 status = (*p_proc_object->intf_fxns->pfn_brd_start) 1583 status = (*p_proc_object->intf_fxns->brd_start)
1586 (p_proc_object->hbridge_context, dw_dsp_addr); 1584 (p_proc_object->bridge_context, dw_dsp_addr);
1587 if (status) 1585 if (status)
1588 goto func_cont; 1586 goto func_cont;
1589 1587
1590 /* Call dev_create2 */ 1588 /* Call dev_create2 */
1591 status = dev_create2(p_proc_object->hdev_obj); 1589 status = dev_create2(p_proc_object->dev_obj);
1592 if (!status) { 1590 if (!status) {
1593 p_proc_object->proc_state = PROC_RUNNING; 1591 p_proc_object->proc_state = PROC_RUNNING;
1594 /* Deep sleep switces off the peripheral clocks. 1592 /* Deep sleep switces off the peripheral clocks.
@@ -1603,13 +1601,13 @@ int proc_start(void *hprocessor)
1603 /* Failed to Create Node Manager and DISP Object 1601 /* Failed to Create Node Manager and DISP Object
1604 * Stop the Processor from running. Put it in STOPPED State */ 1602 * Stop the Processor from running. Put it in STOPPED State */
1605 (void)(*p_proc_object->intf_fxns-> 1603 (void)(*p_proc_object->intf_fxns->
1606 pfn_brd_stop) (p_proc_object->hbridge_context); 1604 brd_stop) (p_proc_object->bridge_context);
1607 p_proc_object->proc_state = PROC_STOPPED; 1605 p_proc_object->proc_state = PROC_STOPPED;
1608 } 1606 }
1609func_cont: 1607func_cont:
1610 if (!status) { 1608 if (!status) {
1611 if (!((*p_proc_object->intf_fxns->pfn_brd_status) 1609 if (!((*p_proc_object->intf_fxns->brd_status)
1612 (p_proc_object->hbridge_context, &brd_state))) { 1610 (p_proc_object->bridge_context, &brd_state))) {
1613 pr_info("%s: dsp in running state\n", __func__); 1611 pr_info("%s: dsp in running state\n", __func__);
1614 DBC_ASSERT(brd_state != BRD_HIBERNATION); 1612 DBC_ASSERT(brd_state != BRD_HIBERNATION);
1615 } 1613 }
@@ -1647,7 +1645,7 @@ int proc_stop(void *hprocessor)
1647 goto func_end; 1645 goto func_end;
1648 } 1646 }
1649 /* check if there are any running nodes */ 1647 /* check if there are any running nodes */
1650 status = dev_get_node_manager(p_proc_object->hdev_obj, &hnode_mgr); 1648 status = dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr);
1651 if (!status && hnode_mgr) { 1649 if (!status && hnode_mgr) {
1652 status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size, 1650 status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
1653 &num_nodes, &nodes_allocated); 1651 &num_nodes, &nodes_allocated);
@@ -1661,21 +1659,21 @@ int proc_stop(void *hprocessor)
1661 /* It is OK to stop a device that does n't have nodes OR not started */ 1659 /* It is OK to stop a device that does n't have nodes OR not started */
1662 status = 1660 status =
1663 (*p_proc_object->intf_fxns-> 1661 (*p_proc_object->intf_fxns->
1664 pfn_brd_stop) (p_proc_object->hbridge_context); 1662 brd_stop) (p_proc_object->bridge_context);
1665 if (!status) { 1663 if (!status) {
1666 dev_dbg(bridge, "%s: processor in standby mode\n", __func__); 1664 dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
1667 p_proc_object->proc_state = PROC_STOPPED; 1665 p_proc_object->proc_state = PROC_STOPPED;
1668 /* Destory the Node Manager, msg_ctrl Manager */ 1666 /* Destory the Node Manager, msg_ctrl Manager */
1669 if (!(dev_destroy2(p_proc_object->hdev_obj))) { 1667 if (!(dev_destroy2(p_proc_object->dev_obj))) {
1670 /* Destroy the msg_ctrl by calling msg_delete */ 1668 /* Destroy the msg_ctrl by calling msg_delete */
1671 dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr); 1669 dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
1672 if (hmsg_mgr) { 1670 if (hmsg_mgr) {
1673 msg_delete(hmsg_mgr); 1671 msg_delete(hmsg_mgr);
1674 dev_set_msg_mgr(p_proc_object->hdev_obj, NULL); 1672 dev_set_msg_mgr(p_proc_object->dev_obj, NULL);
1675 } 1673 }
1676 if (!((*p_proc_object-> 1674 if (!((*p_proc_object->
1677 intf_fxns->pfn_brd_status) (p_proc_object-> 1675 intf_fxns->brd_status) (p_proc_object->
1678 hbridge_context, 1676 bridge_context,
1679 &brd_state))) 1677 &brd_state)))
1680 DBC_ASSERT(brd_state == BRD_STOPPED); 1678 DBC_ASSERT(brd_state == BRD_STOPPED);
1681 } 1679 }
@@ -1722,8 +1720,8 @@ int proc_un_map(void *hprocessor, void *map_addr,
1722 status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align); 1720 status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
1723 /* Remove mapping from the page tables. */ 1721 /* Remove mapping from the page tables. */
1724 if (!status) { 1722 if (!status) {
1725 status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map) 1723 status = (*p_proc_object->intf_fxns->brd_mem_un_map)
1726 (p_proc_object->hbridge_context, va_align, size_align); 1724 (p_proc_object->bridge_context, va_align, size_align);
1727 } 1725 }
1728 1726
1729 mutex_unlock(&proc_lock); 1727 mutex_unlock(&proc_lock);
@@ -1821,20 +1819,20 @@ static int proc_monitor(struct proc_object *proc_obj)
1821 /* This is needed only when Device is loaded when it is 1819 /* This is needed only when Device is loaded when it is
1822 * already 'ACTIVE' */ 1820 * already 'ACTIVE' */
1823 /* Destory the Node Manager, msg_ctrl Manager */ 1821 /* Destory the Node Manager, msg_ctrl Manager */
1824 if (!dev_destroy2(proc_obj->hdev_obj)) { 1822 if (!dev_destroy2(proc_obj->dev_obj)) {
1825 /* Destroy the msg_ctrl by calling msg_delete */ 1823 /* Destroy the msg_ctrl by calling msg_delete */
1826 dev_get_msg_mgr(proc_obj->hdev_obj, &hmsg_mgr); 1824 dev_get_msg_mgr(proc_obj->dev_obj, &hmsg_mgr);
1827 if (hmsg_mgr) { 1825 if (hmsg_mgr) {
1828 msg_delete(hmsg_mgr); 1826 msg_delete(hmsg_mgr);
1829 dev_set_msg_mgr(proc_obj->hdev_obj, NULL); 1827 dev_set_msg_mgr(proc_obj->dev_obj, NULL);
1830 } 1828 }
1831 } 1829 }
1832 /* Place the Board in the Monitor State */ 1830 /* Place the Board in the Monitor State */
1833 if (!((*proc_obj->intf_fxns->pfn_brd_monitor) 1831 if (!((*proc_obj->intf_fxns->brd_monitor)
1834 (proc_obj->hbridge_context))) { 1832 (proc_obj->bridge_context))) {
1835 status = 0; 1833 status = 0;
1836 if (!((*proc_obj->intf_fxns->pfn_brd_status) 1834 if (!((*proc_obj->intf_fxns->brd_status)
1837 (proc_obj->hbridge_context, &brd_state))) 1835 (proc_obj->bridge_context, &brd_state)))
1838 DBC_ASSERT(brd_state == BRD_IDLE); 1836 DBC_ASSERT(brd_state == BRD_IDLE);
1839 } 1837 }
1840 1838
@@ -1931,7 +1929,7 @@ int proc_notify_all_clients(void *proc, u32 events)
1931 goto func_end; 1929 goto func_end;
1932 } 1930 }
1933 1931
1934 dev_notify_clients(p_proc_object->hdev_obj, events); 1932 dev_notify_clients(p_proc_object->dev_obj, events);
1935 1933
1936func_end: 1934func_end:
1937 return status; 1935 return status;
diff --git a/drivers/staging/tidspbridge/rmgr/pwr.c b/drivers/staging/tidspbridge/rmgr/pwr.c
index 85cb1a2bc0b..17748df351b 100644
--- a/drivers/staging/tidspbridge/rmgr/pwr.c
+++ b/drivers/staging/tidspbridge/rmgr/pwr.c
@@ -67,7 +67,7 @@ int pwr_sleep_dsp(const u32 sleep_code, const u32 timeout)
67 status = -EINVAL; 67 status = -EINVAL;
68 68
69 if (status != -EINVAL) { 69 if (status != -EINVAL) {
70 status = (*intf_fxns->pfn_dev_cntrl) (dw_context, 70 status = (*intf_fxns->dev_cntrl) (dw_context,
71 ioctlcode, 71 ioctlcode,
72 (void *)&arg); 72 (void *)&arg);
73 } 73 }
@@ -97,7 +97,7 @@ int pwr_wake_dsp(const u32 timeout)
97 if (!(dev_get_intf_fxns(hdev_obj, 97 if (!(dev_get_intf_fxns(hdev_obj,
98 (struct bridge_drv_interface **)&intf_fxns))) { 98 (struct bridge_drv_interface **)&intf_fxns))) {
99 status = 99 status =
100 (*intf_fxns->pfn_dev_cntrl) (dw_context, 100 (*intf_fxns->dev_cntrl) (dw_context,
101 BRDIOCTL_WAKEUP, 101 BRDIOCTL_WAKEUP,
102 (void *)&arg); 102 (void *)&arg);
103 } 103 }
@@ -131,7 +131,7 @@ int pwr_pm_pre_scale(u16 voltage_domain, u32 level)
131 if (!(dev_get_intf_fxns(hdev_obj, 131 if (!(dev_get_intf_fxns(hdev_obj,
132 (struct bridge_drv_interface **)&intf_fxns))) { 132 (struct bridge_drv_interface **)&intf_fxns))) {
133 status = 133 status =
134 (*intf_fxns->pfn_dev_cntrl) (dw_context, 134 (*intf_fxns->dev_cntrl) (dw_context,
135 BRDIOCTL_PRESCALE_NOTIFY, 135 BRDIOCTL_PRESCALE_NOTIFY,
136 (void *)&arg); 136 (void *)&arg);
137 } 137 }
@@ -165,7 +165,7 @@ int pwr_pm_post_scale(u16 voltage_domain, u32 level)
165 if (!(dev_get_intf_fxns(hdev_obj, 165 if (!(dev_get_intf_fxns(hdev_obj,
166 (struct bridge_drv_interface **)&intf_fxns))) { 166 (struct bridge_drv_interface **)&intf_fxns))) {
167 status = 167 status =
168 (*intf_fxns->pfn_dev_cntrl) (dw_context, 168 (*intf_fxns->dev_cntrl) (dw_context,
169 BRDIOCTL_POSTSCALE_NOTIFY, 169 BRDIOCTL_POSTSCALE_NOTIFY,
170 (void *)&arg); 170 (void *)&arg);
171 } 171 }
diff --git a/drivers/staging/tidspbridge/rmgr/rmm.c b/drivers/staging/tidspbridge/rmgr/rmm.c
index 761e8f4fa46..f3dc0ddbfac 100644
--- a/drivers/staging/tidspbridge/rmgr/rmm.c
+++ b/drivers/staging/tidspbridge/rmgr/rmm.c
@@ -38,6 +38,10 @@
38 */ 38 */
39 39
40#include <linux/types.h> 40#include <linux/types.h>
41#include <linux/list.h>
42
43/* ----------------------------------- Host OS */
44#include <dspbridge/host_os.h>
41 45
42/* ----------------------------------- DSP/BIOS Bridge */ 46/* ----------------------------------- DSP/BIOS Bridge */
43#include <dspbridge/dbdefs.h> 47#include <dspbridge/dbdefs.h>
@@ -45,9 +49,6 @@
45/* ----------------------------------- Trace & Debug */ 49/* ----------------------------------- Trace & Debug */
46#include <dspbridge/dbc.h> 50#include <dspbridge/dbc.h>
47 51
48/* ----------------------------------- OS Adaptation Layer */
49#include <dspbridge/list.h>
50
51/* ----------------------------------- This */ 52/* ----------------------------------- This */
52#include <dspbridge/rmm.h> 53#include <dspbridge/rmm.h>
53 54
@@ -79,7 +80,7 @@ struct rmm_target_obj {
79 struct rmm_segment *seg_tab; 80 struct rmm_segment *seg_tab;
80 struct rmm_header **free_list; 81 struct rmm_header **free_list;
81 u32 num_segs; 82 u32 num_segs;
82 struct lst_list *ovly_list; /* List of overlay memory in use */ 83 struct list_head ovly_list; /* List of overlay memory in use */
83}; 84};
84 85
85static u32 refs; /* module reference count */ 86static u32 refs; /* module reference count */
@@ -95,8 +96,7 @@ static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
95int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size, 96int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
96 u32 align, u32 *dsp_address, bool reserve) 97 u32 align, u32 *dsp_address, bool reserve)
97{ 98{
98 struct rmm_ovly_sect *sect; 99 struct rmm_ovly_sect *sect, *prev_sect = NULL;
99 struct rmm_ovly_sect *prev_sect = NULL;
100 struct rmm_ovly_sect *new_sect; 100 struct rmm_ovly_sect *new_sect;
101 u32 addr; 101 u32 addr;
102 int status = 0; 102 int status = 0;
@@ -120,10 +120,9 @@ int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
120 /* An overlay section - See if block is already in use. If not, 120 /* An overlay section - See if block is already in use. If not,
121 * insert into the list in ascending address size. */ 121 * insert into the list in ascending address size. */
122 addr = *dsp_address; 122 addr = *dsp_address;
123 sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
124 /* Find place to insert new list element. List is sorted from 123 /* Find place to insert new list element. List is sorted from
125 * smallest to largest address. */ 124 * smallest to largest address. */
126 while (sect != NULL) { 125 list_for_each_entry(sect, &target->ovly_list, list_elem) {
127 if (addr <= sect->addr) { 126 if (addr <= sect->addr) {
128 /* Check for overlap with sect */ 127 /* Check for overlap with sect */
129 if ((addr + size > sect->addr) || (prev_sect && 128 if ((addr + size > sect->addr) || (prev_sect &&
@@ -135,9 +134,6 @@ int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
135 break; 134 break;
136 } 135 }
137 prev_sect = sect; 136 prev_sect = sect;
138 sect = (struct rmm_ovly_sect *)lst_next(target->ovly_list,
139 (struct list_head *)
140 sect);
141 } 137 }
142 if (!status) { 138 if (!status) {
143 /* No overlap - allocate list element for new section. */ 139 /* No overlap - allocate list element for new section. */
@@ -145,20 +141,17 @@ int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
145 if (new_sect == NULL) { 141 if (new_sect == NULL) {
146 status = -ENOMEM; 142 status = -ENOMEM;
147 } else { 143 } else {
148 lst_init_elem((struct list_head *)new_sect);
149 new_sect->addr = addr; 144 new_sect->addr = addr;
150 new_sect->size = size; 145 new_sect->size = size;
151 new_sect->page = segid; 146 new_sect->page = segid;
152 if (sect == NULL) { 147 if (list_is_last(&sect->list_elem, &target->ovly_list))
153 /* Put new section at the end of the list */ 148 /* Put new section at the end of the list */
154 lst_put_tail(target->ovly_list, 149 list_add_tail(&new_sect->list_elem,
155 (struct list_head *)new_sect); 150 &target->ovly_list);
156 } else { 151 else
157 /* Put new section just before sect */ 152 /* Put new section just before sect */
158 lst_insert_before(target->ovly_list, 153 list_add_tail(&new_sect->list_elem,
159 (struct list_head *)new_sect, 154 &sect->list_elem);
160 (struct list_head *)sect);
161 }
162 } 155 }
163 } 156 }
164func_end: 157func_end:
@@ -230,14 +223,8 @@ int rmm_create(struct rmm_target_obj **target_obj,
230 } 223 }
231func_cont: 224func_cont:
232 /* Initialize overlay memory list */ 225 /* Initialize overlay memory list */
233 if (!status) { 226 if (!status)
234 target->ovly_list = kzalloc(sizeof(struct lst_list), 227 INIT_LIST_HEAD(&target->ovly_list);
235 GFP_KERNEL);
236 if (target->ovly_list == NULL)
237 status = -ENOMEM;
238 else
239 INIT_LIST_HEAD(&target->ovly_list->head);
240 }
241 228
242 if (!status) { 229 if (!status) {
243 *target_obj = target; 230 *target_obj = target;
@@ -259,7 +246,7 @@ func_cont:
259 */ 246 */
260void rmm_delete(struct rmm_target_obj *target) 247void rmm_delete(struct rmm_target_obj *target)
261{ 248{
262 struct rmm_ovly_sect *ovly_section; 249 struct rmm_ovly_sect *sect, *tmp;
263 struct rmm_header *hptr; 250 struct rmm_header *hptr;
264 struct rmm_header *next; 251 struct rmm_header *next;
265 u32 i; 252 u32 i;
@@ -268,13 +255,9 @@ void rmm_delete(struct rmm_target_obj *target)
268 255
269 kfree(target->seg_tab); 256 kfree(target->seg_tab);
270 257
271 if (target->ovly_list) { 258 list_for_each_entry_safe(sect, tmp, &target->ovly_list, list_elem) {
272 while ((ovly_section = (struct rmm_ovly_sect *)lst_get_head 259 list_del(&sect->list_elem);
273 (target->ovly_list))) { 260 kfree(sect);
274 kfree(ovly_section);
275 }
276 DBC_ASSERT(LST_IS_EMPTY(target->ovly_list));
277 kfree(target->ovly_list);
278 } 261 }
279 262
280 if (target->free_list != NULL) { 263 if (target->free_list != NULL) {
@@ -311,8 +294,8 @@ void rmm_exit(void)
311bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size, 294bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
312 bool reserved) 295 bool reserved)
313{ 296{
314 struct rmm_ovly_sect *sect; 297 struct rmm_ovly_sect *sect, *tmp;
315 bool ret = true; 298 bool ret = false;
316 299
317 DBC_REQUIRE(target); 300 DBC_REQUIRE(target);
318 301
@@ -333,24 +316,16 @@ bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
333 316
334 } else { 317 } else {
335 /* Unreserve memory */ 318 /* Unreserve memory */
336 sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list); 319 list_for_each_entry_safe(sect, tmp, &target->ovly_list,
337 while (sect != NULL) { 320 list_elem) {
338 if (dsp_addr == sect->addr) { 321 if (dsp_addr == sect->addr) {
339 DBC_ASSERT(size == sect->size); 322 DBC_ASSERT(size == sect->size);
340 /* Remove from list */ 323 /* Remove from list */
341 lst_remove_elem(target->ovly_list, 324 list_del(&sect->list_elem);
342 (struct list_head *)sect);
343 kfree(sect); 325 kfree(sect);
344 break; 326 return true;
345 } 327 }
346 sect =
347 (struct rmm_ovly_sect *)lst_next(target->ovly_list,
348 (struct list_head
349 *)sect);
350 } 328 }
351 if (sect == NULL)
352 ret = false;
353
354 } 329 }
355 return ret; 330 return ret;
356} 331}
@@ -394,19 +369,19 @@ bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
394 } 369 }
395 370
396 /* ul_size */ 371 /* ul_size */
397 mem_stat_buf->ul_size = target->seg_tab[segid].length; 372 mem_stat_buf->size = target->seg_tab[segid].length;
398 373
399 /* ul_num_free_blocks */ 374 /* num_free_blocks */
400 mem_stat_buf->ul_num_free_blocks = free_blocks; 375 mem_stat_buf->num_free_blocks = free_blocks;
401 376
402 /* ul_total_free_size */ 377 /* total_free_size */
403 mem_stat_buf->ul_total_free_size = total_free_size; 378 mem_stat_buf->total_free_size = total_free_size;
404 379
405 /* ul_len_max_free_block */ 380 /* len_max_free_block */
406 mem_stat_buf->ul_len_max_free_block = max_free_size; 381 mem_stat_buf->len_max_free_block = max_free_size;
407 382
408 /* ul_num_alloc_blocks */ 383 /* num_alloc_blocks */
409 mem_stat_buf->ul_num_alloc_blocks = 384 mem_stat_buf->num_alloc_blocks =
410 target->seg_tab[segid].number; 385 target->seg_tab[segid].number;
411 386
412 ret = true; 387 ret = true;
diff --git a/drivers/staging/tidspbridge/rmgr/strm.c b/drivers/staging/tidspbridge/rmgr/strm.c
index 2e427149fb6..3fae0e9f511 100644
--- a/drivers/staging/tidspbridge/rmgr/strm.c
+++ b/drivers/staging/tidspbridge/rmgr/strm.c
@@ -55,7 +55,7 @@
55 */ 55 */
56struct strm_mgr { 56struct strm_mgr {
57 struct dev_object *dev_obj; /* Device for this processor */ 57 struct dev_object *dev_obj; /* Device for this processor */
58 struct chnl_mgr *hchnl_mgr; /* Channel manager */ 58 struct chnl_mgr *chnl_mgr; /* Channel manager */
59 /* Function interface to Bridge driver */ 59 /* Function interface to Bridge driver */
60 struct bridge_drv_interface *intf_fxns; 60 struct bridge_drv_interface *intf_fxns;
61}; 61};
@@ -68,16 +68,16 @@ struct strm_object {
68 struct strm_mgr *strm_mgr_obj; 68 struct strm_mgr *strm_mgr_obj;
69 struct chnl_object *chnl_obj; 69 struct chnl_object *chnl_obj;
70 u32 dir; /* DSP_TONODE or DSP_FROMNODE */ 70 u32 dir; /* DSP_TONODE or DSP_FROMNODE */
71 u32 utimeout; 71 u32 timeout;
72 u32 num_bufs; /* Max # of bufs allowed in stream */ 72 u32 num_bufs; /* Max # of bufs allowed in stream */
73 u32 un_bufs_in_strm; /* Current # of bufs in stream */ 73 u32 bufs_in_strm; /* Current # of bufs in stream */
74 u32 ul_n_bytes; /* bytes transferred since idled */ 74 u32 bytes; /* bytes transferred since idled */
75 /* STREAM_IDLE, STREAM_READY, ... */ 75 /* STREAM_IDLE, STREAM_READY, ... */
76 enum dsp_streamstate strm_state; 76 enum dsp_streamstate strm_state;
77 void *user_event; /* Saved for strm_get_info() */ 77 void *user_event; /* Saved for strm_get_info() */
78 enum dsp_strmmode strm_mode; /* STRMMODE_[PROCCOPY][ZEROCOPY]... */ 78 enum dsp_strmmode strm_mode; /* STRMMODE_[PROCCOPY][ZEROCOPY]... */
79 u32 udma_chnl_id; /* DMA chnl id */ 79 u32 dma_chnl_id; /* DMA chnl id */
80 u32 udma_priority; /* DMA priority:DMAPRI_[LOW][HIGH] */ 80 u32 dma_priority; /* DMA priority:DMAPRI_[LOW][HIGH] */
81 u32 segment_id; /* >0 is SM segment.=0 is local heap */ 81 u32 segment_id; /* >0 is SM segment.=0 is local heap */
82 u32 buf_alignment; /* Alignment for stream bufs */ 82 u32 buf_alignment; /* Alignment for stream bufs */
83 /* Stream's SM address translator */ 83 /* Stream's SM address translator */
@@ -102,7 +102,7 @@ int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize,
102 int status = 0; 102 int status = 0;
103 u32 alloc_cnt = 0; 103 u32 alloc_cnt = 0;
104 u32 i; 104 u32 i;
105 struct strm_object *stream_obj = strmres->hstream; 105 struct strm_object *stream_obj = strmres->stream;
106 106
107 DBC_REQUIRE(refs > 0); 107 DBC_REQUIRE(refs > 0);
108 DBC_REQUIRE(ap_buffer != NULL); 108 DBC_REQUIRE(ap_buffer != NULL);
@@ -154,7 +154,7 @@ int strm_close(struct strm_res_object *strmres,
154 struct bridge_drv_interface *intf_fxns; 154 struct bridge_drv_interface *intf_fxns;
155 struct chnl_info chnl_info_obj; 155 struct chnl_info chnl_info_obj;
156 int status = 0; 156 int status = 0;
157 struct strm_object *stream_obj = strmres->hstream; 157 struct strm_object *stream_obj = strmres->stream;
158 158
159 DBC_REQUIRE(refs > 0); 159 DBC_REQUIRE(refs > 0);
160 160
@@ -165,7 +165,7 @@ int strm_close(struct strm_res_object *strmres,
165 * -EPIPE */ 165 * -EPIPE */
166 intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; 166 intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
167 status = 167 status =
168 (*intf_fxns->pfn_chnl_get_info) (stream_obj->chnl_obj, 168 (*intf_fxns->chnl_get_info) (stream_obj->chnl_obj,
169 &chnl_info_obj); 169 &chnl_info_obj);
170 DBC_ASSERT(!status); 170 DBC_ASSERT(!status);
171 171
@@ -213,7 +213,7 @@ int strm_create(struct strm_mgr **strm_man,
213 213
214 /* Get Channel manager and Bridge function interface */ 214 /* Get Channel manager and Bridge function interface */
215 if (!status) { 215 if (!status) {
216 status = dev_get_chnl_mgr(dev_obj, &(strm_mgr_obj->hchnl_mgr)); 216 status = dev_get_chnl_mgr(dev_obj, &(strm_mgr_obj->chnl_mgr));
217 if (!status) { 217 if (!status) {
218 (void)dev_get_intf_fxns(dev_obj, 218 (void)dev_get_intf_fxns(dev_obj,
219 &(strm_mgr_obj->intf_fxns)); 219 &(strm_mgr_obj->intf_fxns));
@@ -268,7 +268,7 @@ int strm_free_buffer(struct strm_res_object *strmres, u8 ** ap_buffer,
268{ 268{
269 int status = 0; 269 int status = 0;
270 u32 i = 0; 270 u32 i = 0;
271 struct strm_object *stream_obj = strmres->hstream; 271 struct strm_object *stream_obj = strmres->stream;
272 272
273 DBC_REQUIRE(refs > 0); 273 DBC_REQUIRE(refs > 0);
274 DBC_REQUIRE(ap_buffer != NULL); 274 DBC_REQUIRE(ap_buffer != NULL);
@@ -323,7 +323,7 @@ int strm_get_info(struct strm_object *stream_obj,
323 323
324 intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; 324 intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
325 status = 325 status =
326 (*intf_fxns->pfn_chnl_get_info) (stream_obj->chnl_obj, 326 (*intf_fxns->chnl_get_info) (stream_obj->chnl_obj,
327 &chnl_info_obj); 327 &chnl_info_obj);
328 if (status) 328 if (status)
329 goto func_end; 329 goto func_end;
@@ -341,10 +341,10 @@ int strm_get_info(struct strm_object *stream_obj,
341 stream_info->user_strm->number_bufs_in_stream = chnl_info_obj.cio_cs + 341 stream_info->user_strm->number_bufs_in_stream = chnl_info_obj.cio_cs +
342 chnl_info_obj.cio_reqs; 342 chnl_info_obj.cio_reqs;
343 /* # of bytes transferred since last call to DSPStream_Idle() */ 343 /* # of bytes transferred since last call to DSPStream_Idle() */
344 stream_info->user_strm->ul_number_bytes = chnl_info_obj.bytes_tx; 344 stream_info->user_strm->number_bytes = chnl_info_obj.bytes_tx;
345 stream_info->user_strm->sync_object_handle = chnl_info_obj.event_obj; 345 stream_info->user_strm->sync_object_handle = chnl_info_obj.event_obj;
346 /* Determine stream state based on channel state and info */ 346 /* Determine stream state based on channel state and info */
347 if (chnl_info_obj.dw_state & CHNL_STATEEOS) { 347 if (chnl_info_obj.state & CHNL_STATEEOS) {
348 stream_info->user_strm->ss_stream_state = STREAM_DONE; 348 stream_info->user_strm->ss_stream_state = STREAM_DONE;
349 } else { 349 } else {
350 if (chnl_info_obj.cio_cs > 0) 350 if (chnl_info_obj.cio_cs > 0)
@@ -377,8 +377,8 @@ int strm_idle(struct strm_object *stream_obj, bool flush_data)
377 } else { 377 } else {
378 intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; 378 intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
379 379
380 status = (*intf_fxns->pfn_chnl_idle) (stream_obj->chnl_obj, 380 status = (*intf_fxns->chnl_idle) (stream_obj->chnl_obj,
381 stream_obj->utimeout, 381 stream_obj->timeout,
382 flush_data); 382 flush_data);
383 } 383 }
384 384
@@ -435,7 +435,7 @@ int strm_issue(struct strm_object *stream_obj, u8 *pbuf, u32 ul_bytes,
435 435
436 } 436 }
437 if (!status) { 437 if (!status) {
438 status = (*intf_fxns->pfn_chnl_add_io_req) 438 status = (*intf_fxns->chnl_add_io_req)
439 (stream_obj->chnl_obj, pbuf, ul_bytes, ul_buf_size, 439 (stream_obj->chnl_obj, pbuf, ul_bytes, ul_buf_size,
440 (u32) tmp_buf, dw_arg); 440 (u32) tmp_buf, dw_arg);
441 } 441 }
@@ -494,8 +494,8 @@ int strm_open(struct node_object *hnode, u32 dir, u32 index,
494 strm_obj->strm_state = STREAM_IDLE; 494 strm_obj->strm_state = STREAM_IDLE;
495 strm_obj->user_event = pattr->user_event; 495 strm_obj->user_event = pattr->user_event;
496 if (pattr->stream_attr_in != NULL) { 496 if (pattr->stream_attr_in != NULL) {
497 strm_obj->utimeout = 497 strm_obj->timeout =
498 pattr->stream_attr_in->utimeout; 498 pattr->stream_attr_in->timeout;
499 strm_obj->num_bufs = 499 strm_obj->num_bufs =
500 pattr->stream_attr_in->num_bufs; 500 pattr->stream_attr_in->num_bufs;
501 strm_obj->strm_mode = 501 strm_obj->strm_mode =
@@ -504,25 +504,25 @@ int strm_open(struct node_object *hnode, u32 dir, u32 index,
504 pattr->stream_attr_in->segment_id; 504 pattr->stream_attr_in->segment_id;
505 strm_obj->buf_alignment = 505 strm_obj->buf_alignment =
506 pattr->stream_attr_in->buf_alignment; 506 pattr->stream_attr_in->buf_alignment;
507 strm_obj->udma_chnl_id = 507 strm_obj->dma_chnl_id =
508 pattr->stream_attr_in->udma_chnl_id; 508 pattr->stream_attr_in->dma_chnl_id;
509 strm_obj->udma_priority = 509 strm_obj->dma_priority =
510 pattr->stream_attr_in->udma_priority; 510 pattr->stream_attr_in->dma_priority;
511 chnl_attr_obj.uio_reqs = 511 chnl_attr_obj.uio_reqs =
512 pattr->stream_attr_in->num_bufs; 512 pattr->stream_attr_in->num_bufs;
513 } else { 513 } else {
514 strm_obj->utimeout = DEFAULTTIMEOUT; 514 strm_obj->timeout = DEFAULTTIMEOUT;
515 strm_obj->num_bufs = DEFAULTNUMBUFS; 515 strm_obj->num_bufs = DEFAULTNUMBUFS;
516 strm_obj->strm_mode = STRMMODE_PROCCOPY; 516 strm_obj->strm_mode = STRMMODE_PROCCOPY;
517 strm_obj->segment_id = 0; /* local mem */ 517 strm_obj->segment_id = 0; /* local mem */
518 strm_obj->buf_alignment = 0; 518 strm_obj->buf_alignment = 0;
519 strm_obj->udma_chnl_id = 0; 519 strm_obj->dma_chnl_id = 0;
520 strm_obj->udma_priority = 0; 520 strm_obj->dma_priority = 0;
521 chnl_attr_obj.uio_reqs = DEFAULTNUMBUFS; 521 chnl_attr_obj.uio_reqs = DEFAULTNUMBUFS;
522 } 522 }
523 chnl_attr_obj.reserved1 = NULL; 523 chnl_attr_obj.reserved1 = NULL;
524 /* DMA chnl flush timeout */ 524 /* DMA chnl flush timeout */
525 chnl_attr_obj.reserved2 = strm_obj->utimeout; 525 chnl_attr_obj.reserved2 = strm_obj->timeout;
526 chnl_attr_obj.event_obj = NULL; 526 chnl_attr_obj.event_obj = NULL;
527 if (pattr->user_event != NULL) 527 if (pattr->user_event != NULL)
528 chnl_attr_obj.event_obj = pattr->user_event; 528 chnl_attr_obj.event_obj = pattr->user_event;
@@ -532,7 +532,7 @@ int strm_open(struct node_object *hnode, u32 dir, u32 index,
532 if (status) 532 if (status)
533 goto func_cont; 533 goto func_cont;
534 534
535 if ((pattr->virt_base == NULL) || !(pattr->ul_virt_size > 0)) 535 if ((pattr->virt_base == NULL) || !(pattr->virt_size > 0))
536 goto func_cont; 536 goto func_cont;
537 537
538 /* No System DMA */ 538 /* No System DMA */
@@ -547,7 +547,7 @@ int strm_open(struct node_object *hnode, u32 dir, u32 index,
547 /* Set translators Virt Addr attributes */ 547 /* Set translators Virt Addr attributes */
548 status = cmm_xlator_info(strm_obj->xlator, 548 status = cmm_xlator_info(strm_obj->xlator,
549 (u8 **) &pattr->virt_base, 549 (u8 **) &pattr->virt_base,
550 pattr->ul_virt_size, 550 pattr->virt_size,
551 strm_obj->segment_id, true); 551 strm_obj->segment_id, true);
552 } 552 }
553 } 553 }
@@ -557,8 +557,8 @@ func_cont:
557 chnl_mode = (dir == DSP_TONODE) ? 557 chnl_mode = (dir == DSP_TONODE) ?
558 CHNL_MODETODSP : CHNL_MODEFROMDSP; 558 CHNL_MODETODSP : CHNL_MODEFROMDSP;
559 intf_fxns = strm_mgr_obj->intf_fxns; 559 intf_fxns = strm_mgr_obj->intf_fxns;
560 status = (*intf_fxns->pfn_chnl_open) (&(strm_obj->chnl_obj), 560 status = (*intf_fxns->chnl_open) (&(strm_obj->chnl_obj),
561 strm_mgr_obj->hchnl_mgr, 561 strm_mgr_obj->chnl_mgr,
562 chnl_mode, ul_chnl_id, 562 chnl_mode, ul_chnl_id,
563 &chnl_attr_obj); 563 &chnl_attr_obj);
564 if (status) { 564 if (status) {
@@ -572,7 +572,7 @@ func_cont:
572 * We got a status that's not return-able. 572 * We got a status that's not return-able.
573 * Assert that we got something we were 573 * Assert that we got something we were
574 * expecting (-EFAULT isn't acceptable, 574 * expecting (-EFAULT isn't acceptable,
575 * strm_mgr_obj->hchnl_mgr better be valid or we 575 * strm_mgr_obj->chnl_mgr better be valid or we
576 * assert here), and then return -EPERM. 576 * assert here), and then return -EPERM.
577 */ 577 */
578 DBC_ASSERT(status == -ENOSR || 578 DBC_ASSERT(status == -ENOSR ||
@@ -631,15 +631,15 @@ int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
631 intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; 631 intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
632 632
633 status = 633 status =
634 (*intf_fxns->pfn_chnl_get_ioc) (stream_obj->chnl_obj, 634 (*intf_fxns->chnl_get_ioc) (stream_obj->chnl_obj,
635 stream_obj->utimeout, 635 stream_obj->timeout,
636 &chnl_ioc_obj); 636 &chnl_ioc_obj);
637 if (!status) { 637 if (!status) {
638 *nbytes = chnl_ioc_obj.byte_size; 638 *nbytes = chnl_ioc_obj.byte_size;
639 if (buff_size) 639 if (buff_size)
640 *buff_size = chnl_ioc_obj.buf_size; 640 *buff_size = chnl_ioc_obj.buf_size;
641 641
642 *pdw_arg = chnl_ioc_obj.dw_arg; 642 *pdw_arg = chnl_ioc_obj.arg;
643 if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) { 643 if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
644 if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) { 644 if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) {
645 status = -ETIME; 645 status = -ETIME;
@@ -655,14 +655,14 @@ int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
655 && (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj)) 655 && (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj))
656 && (stream_obj->strm_mode == STRMMODE_ZEROCOPY)) { 656 && (stream_obj->strm_mode == STRMMODE_ZEROCOPY)) {
657 /* 657 /*
658 * This is a zero-copy channel so chnl_ioc_obj.pbuf 658 * This is a zero-copy channel so chnl_ioc_obj.buf
659 * contains the DSP address of SM. We need to 659 * contains the DSP address of SM. We need to
660 * translate it to a virtual address for the user 660 * translate it to a virtual address for the user
661 * thread to access. 661 * thread to access.
662 * Note: Could add CMM_DSPPA2VA to CMM in the future. 662 * Note: Could add CMM_DSPPA2VA to CMM in the future.
663 */ 663 */
664 tmp_buf = cmm_xlator_translate(stream_obj->xlator, 664 tmp_buf = cmm_xlator_translate(stream_obj->xlator,
665 chnl_ioc_obj.pbuf, 665 chnl_ioc_obj.buf,
666 CMM_DSPPA2PA); 666 CMM_DSPPA2PA);
667 if (tmp_buf != NULL) { 667 if (tmp_buf != NULL) {
668 /* now convert this GPP Pa to Va */ 668 /* now convert this GPP Pa to Va */
@@ -674,9 +674,9 @@ int strm_reclaim(struct strm_object *stream_obj, u8 ** buf_ptr,
674 if (tmp_buf == NULL) 674 if (tmp_buf == NULL)
675 status = -ESRCH; 675 status = -ESRCH;
676 676
677 chnl_ioc_obj.pbuf = tmp_buf; 677 chnl_ioc_obj.buf = tmp_buf;
678 } 678 }
679 *buf_ptr = chnl_ioc_obj.pbuf; 679 *buf_ptr = chnl_ioc_obj.buf;
680 } 680 }
681func_end: 681func_end:
682 /* ensure we return a documented return code */ 682 /* ensure we return a documented return code */
@@ -719,7 +719,7 @@ int strm_register_notify(struct strm_object *stream_obj, u32 event_mask,
719 intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; 719 intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
720 720
721 status = 721 status =
722 (*intf_fxns->pfn_chnl_register_notify) (stream_obj-> 722 (*intf_fxns->chnl_register_notify) (stream_obj->
723 chnl_obj, 723 chnl_obj,
724 event_mask, 724 event_mask,
725 notify_type, 725 notify_type,
@@ -765,7 +765,7 @@ int strm_select(struct strm_object **strm_tab, u32 strms,
765 /* Determine which channels have IO ready */ 765 /* Determine which channels have IO ready */
766 for (i = 0; i < strms; i++) { 766 for (i = 0; i < strms; i++) {
767 intf_fxns = strm_tab[i]->strm_mgr_obj->intf_fxns; 767 intf_fxns = strm_tab[i]->strm_mgr_obj->intf_fxns;
768 status = (*intf_fxns->pfn_chnl_get_info) (strm_tab[i]->chnl_obj, 768 status = (*intf_fxns->chnl_get_info) (strm_tab[i]->chnl_obj,
769 &chnl_info_obj); 769 &chnl_info_obj);
770 if (status) { 770 if (status) {
771 break; 771 break;
@@ -786,7 +786,7 @@ int strm_select(struct strm_object **strm_tab, u32 strms,
786 for (i = 0; i < strms; i++) { 786 for (i = 0; i < strms; i++) {
787 intf_fxns = 787 intf_fxns =
788 strm_tab[i]->strm_mgr_obj->intf_fxns; 788 strm_tab[i]->strm_mgr_obj->intf_fxns;
789 status = (*intf_fxns->pfn_chnl_get_info) 789 status = (*intf_fxns->chnl_get_info)
790 (strm_tab[i]->chnl_obj, &chnl_info_obj); 790 (strm_tab[i]->chnl_obj, &chnl_info_obj);
791 if (status) 791 if (status)
792 break; 792 break;
@@ -832,7 +832,7 @@ static int delete_strm(struct strm_object *stream_obj)
832 intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; 832 intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
833 /* Channel close can fail only if the channel handle 833 /* Channel close can fail only if the channel handle
834 * is invalid. */ 834 * is invalid. */
835 status = (*intf_fxns->pfn_chnl_close) 835 status = (*intf_fxns->chnl_close)
836 (stream_obj->chnl_obj); 836 (stream_obj->chnl_obj);
837 } 837 }
838 /* Free all SM address translator resources */ 838 /* Free all SM address translator resources */