127 files changed, 0 insertions(+), 44757 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index b85ecbea34a8..5ca5b5638495 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8624,11 +8624,6 @@ W: http://www.linux-speakup.org/
 S: Odd Fixes
 F: drivers/staging/speakup/
 
-STAGING - TI DSP BRIDGE DRIVERS
-M: Omar Ramirez Luna <omar.ramirez@copitl.com>
-S: Odd Fixes
-F: drivers/staging/tidspbridge/
-
 STAGING - USB ENE SM/MS CARD READER DRIVER
 M: Al Cho <acho@novell.com>
 S: Odd Fixes
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 0fd87443412a..12b092ce0d0f 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -74,8 +74,6 @@ source "drivers/staging/iio/Kconfig"
 
 source "drivers/staging/xgifb/Kconfig"
 
-source "drivers/staging/tidspbridge/Kconfig"
-
 source "drivers/staging/quickstart/Kconfig"
 
 source "drivers/staging/emxx_udc/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 9aeecc9b07e0..29fa5df5b969 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_VME_BUS) += vme/
 obj-$(CONFIG_DX_SEP) += sep/
 obj-$(CONFIG_IIO) += iio/
 obj-$(CONFIG_FB_XGI) += xgifb/
-obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge/
 obj-$(CONFIG_ACPI_QUICKSTART) += quickstart/
 obj-$(CONFIG_USB_EMXX) += emxx_udc/
 obj-$(CONFIG_USB_ENESTORAGE) += keucr/
diff --git a/drivers/staging/tidspbridge/Documentation/CONTRIBUTORS b/drivers/staging/tidspbridge/Documentation/CONTRIBUTORS
deleted file mode 100644
index 86f578727f91..000000000000
--- a/drivers/staging/tidspbridge/Documentation/CONTRIBUTORS
+++ /dev/null
@@ -1,45 +0,0 @@
1 | TI DSP/Bridge Driver - Contributors File | ||
2 | |||
The DSP/Bridge project wishes to thank all of its contributors; the current
bridge driver is the result of the work of all of them. If any name has been
accidentally omitted, let us know by sending a mail to omar.ramirez@ti.com or
x095840@ti.com.
7 | |||
8 | Please keep the following list in alphabetical order. | ||
9 | |||
10 | Suman Anna | ||
11 | Sripal Bagadia | ||
12 | Felipe Balbi | ||
13 | Ohad Ben-Cohen | ||
14 | Phil Carmody | ||
15 | Deepak Chitriki | ||
16 | Felipe Contreras | ||
17 | Hiroshi Doyu | ||
18 | Seth Forshee | ||
19 | Ivan Gomez Castellanos | ||
20 | Mark Grosen | ||
21 | Ramesh Gupta G | ||
22 | Fernando Guzman Lugo | ||
23 | Axel Haslam | ||
24 | Janet Head | ||
25 | Shivananda Hebbar | ||
26 | Hari Kanigeri | ||
27 | Tony Lindgren | ||
28 | Antonio Luna | ||
29 | Hari Nagalla | ||
30 | Nishanth Menon | ||
31 | Ameya Palande | ||
32 | Vijay Pasam | ||
33 | Gilbert Pitney | ||
34 | Omar Ramirez Luna | ||
35 | Ernesto Ramos | ||
36 | Chris Ring | ||
37 | Larry Schiefer | ||
38 | Rebecca Schultz Zavin | ||
39 | Bhavin Shah | ||
40 | Andy Shevchenko | ||
41 | Jeff Taylor | ||
42 | Roman Tereshonkov | ||
43 | Armando Uribe de Leon | ||
44 | Nischal Varide | ||
45 | Wenbiao Wang | ||
diff --git a/drivers/staging/tidspbridge/Documentation/README b/drivers/staging/tidspbridge/Documentation/README
deleted file mode 100644
index df6d371161e0..000000000000
--- a/drivers/staging/tidspbridge/Documentation/README
+++ /dev/null
@@ -1,70 +0,0 @@
1 | Linux DSP/BIOS Bridge release | ||
2 | |||
3 | DSP/BIOS Bridge overview | ||
4 | ======================== | ||
5 | |||
6 | DSP/BIOS Bridge is designed for platforms that contain a GPP and one or more | ||
7 | attached DSPs. The GPP is considered the master or "host" processor, and the | ||
8 | attached DSPs are processing resources that can be utilized by applications | ||
9 | and drivers running on the GPP. | ||
10 | |||
The abstraction that DSP/BIOS Bridge supplies is a direct link between a GPP
12 | program and a DSP task. This communication link is partitioned into two | ||
13 | types of sub-links: messaging (short, fixed-length packets) and data | ||
14 | streaming (multiple, large buffers). Each sub-link operates independently, | ||
15 | and features in-order delivery of data, meaning that messages are delivered | ||
16 | in the order they were submitted to the message link, and stream buffers are | ||
17 | delivered in the order they were submitted to the stream link. | ||
18 | |||
19 | In addition, a GPP client can specify what inputs and outputs a DSP task | ||
20 | uses. DSP tasks typically use message objects for passing control and status | ||
21 | information and stream objects for efficient streaming of real-time data. | ||
22 | |||
23 | GPP Software Architecture | ||
24 | ========================= | ||
25 | |||
26 | A GPP application communicates with its associated DSP task running on the | ||
27 | DSP subsystem using the DSP/BIOS Bridge API. For example, a GPP audio | ||
28 | application can use the API to pass messages to a DSP task that is managing | ||
29 | data flowing from analog-to-digital converters (ADCs) to digital-to-analog | ||
30 | converters (DACs). | ||
31 | |||
32 | From the perspective of the GPP OS, the DSP is treated as just another | ||
peripheral device. Most high-level GPP OSes typically support a device driver
34 | model, whereby applications can safely access and share a hardware peripheral | ||
35 | through standard driver interfaces. Therefore, to allow multiple GPP | ||
36 | applications to share access to the DSP, the GPP side of DSP/BIOS Bridge | ||
37 | implements a device driver for the DSP. | ||
38 | |||
Since driver interfaces are not always standard across GPP OSes, and to provide
some level of interoperability of application code using DSP/BIOS Bridge
across GPP OSes, DSP/BIOS Bridge provides a standard library of APIs which
wrap calls into the device driver. So, rather than calling GPP-OS-specific
43 | driver interfaces, applications (and even other device drivers) can use the | ||
44 | standard API library directly. | ||
45 | |||
46 | DSP Software Architecture | ||
47 | ========================= | ||
48 | |||
49 | For DSP/BIOS, DSP/BIOS Bridge adds a device-independent streaming I/O (STRM) | ||
50 | interface, a messaging interface (NODE), and a Resource Manager (RM) Server. | ||
51 | The RM Server runs as a task of DSP/BIOS and is subservient to commands | ||
52 | and queries from the GPP. It executes commands to start and stop DSP signal | ||
53 | processing nodes in response to GPP programs making requests through the | ||
54 | (GPP-side) API. | ||
55 | |||
56 | DSP tasks started by the RM Server are similar to any other DSP task with two | ||
57 | important differences: they must follow a specific task model consisting of | ||
58 | three C-callable functions (node create, execute, and delete), with specific | ||
59 | sets of arguments, and they have a pre-defined task environment established | ||
60 | by the RM Server. | ||
61 | |||
62 | Tasks started by the RM Server communicate using the STRM and NODE interfaces | ||
63 | and act as servers for their corresponding GPP clients, performing signal | ||
64 | processing functions as requested by messages sent by their GPP client. | ||
65 | Typically, a DSP task moves data from source devices to sink devices using | ||
66 | device independent I/O streams, performing application-specific processing | ||
67 | and transformations on the data while it is moved. For example, an audio | ||
68 | task might perform audio decompression (ADPCM, MPEG, CELP) on data received | ||
69 | from a GPP audio driver and then send the decompressed linear samples to a | ||
70 | digital-to-analog converter. | ||
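
As a rough illustration of the split described above between short control
messages and bulk stream data, here is a self-contained sketch of a GPP-side
client flow. The bridge_send_message() and bridge_issue_stream_buffer() names
are invented stand-ins for this sketch only; they are not the actual DSP/BIOS
Bridge MPU API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the messaging sub-link: short, fixed-length packets. */
static void bridge_send_message(uint32_t cmd, uint32_t arg1, uint32_t arg2)
{
    printf("msg : cmd=%u arg1=%u arg2=%u\n", cmd, arg1, arg2);
}

/* Stand-in for the streaming sub-link: large buffers, delivered in order. */
static void bridge_issue_stream_buffer(const void *buf, size_t bytes)
{
    printf("strm: issued %zu-byte buffer at %p\n", bytes, buf);
}

int main(void)
{
    uint8_t pcm_block[4096] = { 0 };

    /* Control and status travel over the message link... */
    bridge_send_message(1 /* e.g. "start decode" */, 48000, 2);

    /* ...while real-time data travels over the stream link, in order. */
    for (int i = 0; i < 4; i++)
        bridge_issue_stream_buffer(pcm_block, sizeof(pcm_block));

    bridge_send_message(2 /* e.g. "stop" */, 0, 0);
    return 0;
}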
diff --git a/drivers/staging/tidspbridge/Documentation/error-codes b/drivers/staging/tidspbridge/Documentation/error-codes
deleted file mode 100644
index ad73cba058eb..000000000000
--- a/drivers/staging/tidspbridge/Documentation/error-codes
+++ /dev/null
@@ -1,157 +0,0 @@
1 | DSP/Bridge Error Code Guide | ||
2 | |||
3 | |||
The success code is always 0, except in one case where a success status
different from 0 is possible: when enumerating a series of DSP objects, running
out of objects to enumerate is still considered a successful case. In that case
a positive ENODATA is returned (TODO: change to avoid this case).

Error codes are returned as negative values; if a specific code is expected, it
can be propagated to user space by reading the errno symbol defined in errno.h.
For specific details on the implementation, a copy of the standard used should
be read first.
14 | |||
15 | The error codes used by this driver are: | ||
16 | |||
17 | [EPERM] | ||
18 | General driver failure. | ||
19 | |||
20 | According to the use case the following might apply: | ||
21 | - Device is in 'sleep/suspend' mode due to DPM. | ||
22 | - User cannot mark end of stream on an input channel. | ||
23 | - Requested operation is invalid for the node type. | ||
24 | - Invalid alignment for the node messaging buffer. | ||
25 | - The specified direction is invalid for the stream. | ||
26 | - Invalid stream mode. | ||
27 | |||
28 | [ENOENT] | ||
29 | The specified object or file was not found. | ||
30 | |||
31 | [ESRCH] | ||
32 | A shared memory buffer contained in a message or stream could not be mapped | ||
33 | to the GPP client process's virtual space. | ||
34 | |||
35 | [EIO] | ||
36 | Driver interface I/O error. | ||
37 | |||
38 | or: | ||
39 | - Unable to plug channel ISR for configured IRQ. | ||
40 | - No free I/O request packets are available. | ||
41 | |||
42 | [ENXIO] | ||
43 | Unable to find a named section in DSP executable or a non-existent memory | ||
44 | segment identifier was specified. | ||
45 | |||
46 | [EBADF] | ||
47 | General error for file handling: | ||
48 | |||
49 | - Unable to open file. | ||
50 | - Unable to read file. | ||
51 | - An error occurred while parsing the DSP executable file. | ||
52 | |||
53 | [ENOMEM] | ||
54 | A memory allocation failure occurred. | ||
55 | |||
56 | [EACCES] | ||
57 | - Unable to read content of DCD data section; this is typically caused by | ||
58 | improperly configured nodes. | ||
59 | - Unable to decode DCD data section content; this is typically caused by | ||
60 | changes to DSP/BIOS Bridge data structures. | ||
61 | - Unable to get pointer to DCD data section; this is typically caused by | ||
62 | improperly configured UUIDs. | ||
63 | - Unable to load file containing DCD data section; this is typically | ||
64 | caused by a missing COFF file. | ||
65 | - The specified COFF file does not contain a valid node registration | ||
66 | section. | ||
67 | |||
68 | [EFAULT] | ||
69 | Invalid pointer or handler. | ||
70 | |||
71 | [EEXIST] | ||
72 | Attempted to create a channel manager when one already exists. | ||
73 | |||
74 | [EINVAL] | ||
75 | Invalid argument. | ||
76 | |||
77 | [ESPIPE] | ||
78 | Symbol not found in the COFF file. DSPNode_Create will return this if | ||
79 | the iAlg function table for an xDAIS socket is not found in the COFF file. | ||
80 | In this case, force the symbol to be linked into the COFF file. | ||
81 | DSPNode_Create, DSPNode_Execute, and DSPNode_Delete will return this if | ||
82 | the create, execute, or delete phase function, respectively, could not be | ||
83 | found in the COFF file. | ||
84 | |||
85 | - No symbol table is loaded/found for this board. | ||
86 | - Unable to initialize the ZL COFF parsing module. | ||
87 | |||
88 | [EPIPE] | ||
89 | I/O is currently pending. | ||
90 | |||
91 | - End of stream was already requested on this output channel. | ||
92 | |||
93 | [EDOM] | ||
94 | A parameter is specified outside its valid range. | ||
95 | |||
96 | [ENOSYS] | ||
97 | The indicated operation is not supported. | ||
98 | |||
99 | [EIDRM] | ||
100 | During enumeration a change in the number or properties of the objects | ||
101 | has occurred. | ||
102 | |||
103 | [ECHRNG] | ||
Attempt to create a channel manager with too many channels, or a channel ID
out of range.
106 | |||
107 | [EBADR] | ||
108 | The state of the specified object is incorrect for the requested operation. | ||
109 | |||
110 | - Invalid segment ID. | ||
111 | |||
112 | [ENODATA] | ||
113 | Unable to retrieve resource information from the registry. | ||
114 | |||
115 | - No more registry values. | ||
116 | |||
117 | [ETIME] | ||
118 | A timeout occurred before the requested operation could complete. | ||
119 | |||
120 | [ENOSR] | ||
121 | A stream has been issued the maximum number of buffers allowed in the | ||
122 | stream at once; buffers must be reclaimed from the stream before any more | ||
123 | can be issued. | ||
124 | |||
125 | - No free channels are available. | ||
126 | |||
127 | [EILSEQ] | ||
128 | Error occurred in a dynamic loader library function. | ||
129 | |||
130 | [EISCONN] | ||
The specified connection already exists.
132 | |||
133 | [ENOTCONN] | ||
134 | Nodes not connected. | ||
135 | |||
136 | [ETIMEDOUT] | ||
137 | Timeout occurred waiting for a response from the hardware. | ||
138 | |||
139 | - Wait for flush operation on an output channel timed out. | ||
140 | |||
141 | [ECONNREFUSED] | ||
142 | No more connections can be made for this node. | ||
143 | |||
144 | [EALREADY] | ||
145 | Channel is already in use. | ||
146 | |||
147 | [EREMOTEIO] | ||
148 | dwTimeOut parameter was CHNL_IOCNOWAIT, yet no I/O completions were | ||
149 | queued. | ||
150 | |||
151 | [ECANCELED] | ||
152 | I/O has been cancelled on this channel. | ||
153 | |||
154 | [ENOKEY] | ||
155 | Invalid subkey parameter. | ||
156 | |||
157 | - UUID not found in registry. | ||
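
To illustrate how these codes surface in practice, here is a small,
self-contained user-space sketch. The device path and the ioctl request
number are placeholders for this sketch only; the driver's real interface is
not described in this file.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
    /* Placeholder device node and request number; illustrative only. */
    int fd = open("/dev/example-dsp", O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    if (ioctl(fd, 0 /* placeholder request */, NULL) < 0) {
        switch (errno) {
        case ETIME:
            fprintf(stderr, "timed out waiting for the DSP\n");
            break;
        case ENOSR:
            fprintf(stderr, "too many buffers issued on the stream\n");
            break;
        case ECANCELED:
            fprintf(stderr, "I/O was cancelled on this channel\n");
            break;
        default:
            fprintf(stderr, "ioctl failed: %s\n", strerror(errno));
            break;
        }
        close(fd);
        return 1;
    }

    close(fd);
    return 0;
}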
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
deleted file mode 100644
index b5e74e9de6bd..000000000000
--- a/drivers/staging/tidspbridge/Kconfig
+++ /dev/null
@@ -1,69 +0,0 @@
1 | # | ||
2 | # DSP Bridge Driver Support | ||
3 | # | ||
4 | |||
5 | menuconfig TIDSPBRIDGE | ||
6 | tristate "DSP Bridge driver" | ||
7 | depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM && BROKEN | ||
8 | select MAILBOX | ||
9 | select OMAP2PLUS_MBOX | ||
10 | help | ||
11 | DSP/BIOS Bridge is designed for platforms that contain a GPP and | ||
12 | one or more attached DSPs. The GPP is considered the master or | ||
13 | "host" processor, and the attached DSPs are processing resources | ||
14 | that can be utilized by applications and drivers running on the GPP. | ||
15 | |||
This driver depends on the OMAP mailbox framework (MAILBOX, OMAP2PLUS_MBOX).
17 | |||
18 | config TIDSPBRIDGE_DVFS | ||
19 | bool "Enable Bridge Dynamic Voltage and Frequency Scaling (DVFS)" | ||
20 | depends on TIDSPBRIDGE && CPU_FREQ | ||
21 | help | ||
22 | DVFS allows DSP Bridge to initiate the operating point change to | ||
23 | scale the chip voltage and frequency in order to match the | ||
24 | performance and power consumption to the current processing | ||
25 | requirements. | ||
26 | |||
27 | config TIDSPBRIDGE_MEMPOOL_SIZE | ||
28 | hex "Physical memory pool size (Byte)" | ||
29 | depends on TIDSPBRIDGE | ||
30 | default 0x600000 | ||
31 | help | ||
Allocate the specified amount of memory at boot time to avoid allocation
failures under heavy memory fragmentation after prolonged use.
34 | |||
35 | config TIDSPBRIDGE_RECOVERY | ||
36 | bool "Recovery Support" | ||
37 | depends on TIDSPBRIDGE | ||
38 | default y | ||
39 | help | ||
In case of a fatal DSP error, the bridge driver will try to
recover itself.
42 | |||
43 | config TIDSPBRIDGE_CACHE_LINE_CHECK | ||
44 | bool "Check buffers to be 128 byte aligned" | ||
45 | depends on TIDSPBRIDGE | ||
46 | help | ||
47 | When the DSP processes data, the DSP cache controller loads 128-Byte | ||
48 | chunks (lines) from SDRAM and writes the data back in 128-Byte chunks. | ||
49 | If a DMM buffer does not start and end on a 128-Byte boundary, the data | ||
50 | preceding the start address (SA) from the 128-Byte boundary to the SA | ||
51 | and the data at addresses trailing the end address (EA) from the EA to | ||
52 | the next 128-Byte boundary will be loaded and written back as well. | ||
This can lead to heap corruption. Say Y to enforce the check for 128-byte
alignment; buffers failing this check will be rejected.
55 | |||
56 | config TIDSPBRIDGE_NTFY_PWRERR | ||
57 | bool "Notify power errors" | ||
58 | depends on TIDSPBRIDGE | ||
59 | help | ||
Enable notifications to registered clients in the event of a power error
while trying to suspend the bridge driver. Say Y to signal this event as a
fatal error; this will require a bridge restart to recover.
63 | |||
64 | config TIDSPBRIDGE_BACKTRACE | ||
65 | bool "Dump backtraces on fatal errors" | ||
66 | depends on TIDSPBRIDGE | ||
67 | help | ||
Enable dumping of useful backtrace information on fatal errors. Say Y if
you want to dump this information for testing purposes.
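
The TIDSPBRIDGE_CACHE_LINE_CHECK help text above boils down to a simple
start/size alignment test. A minimal sketch of such a check follows; the
function name is made up for illustration and is not the driver's actual code.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DSP_CACHE_LINE 128

/* A DMM buffer is acceptable only if it both starts and ends on a 128-byte
 * boundary, i.e. its start address and its size are multiples of the DSP
 * cache line. */
static bool dmm_buffer_is_cache_aligned(uintptr_t start, size_t size)
{
    return ((start % DSP_CACHE_LINE) == 0) &&
           ((size % DSP_CACHE_LINE) == 0);
}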
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
deleted file mode 100644
index adb21c53f747..000000000000
--- a/drivers/staging/tidspbridge/Makefile
+++ /dev/null
@@ -1,32 +0,0 @@
1 | obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge.o | ||
2 | |||
3 | libgen = gen/gh.o | ||
4 | libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ | ||
5 | core/tiomap3430_pwr.o core/tiomap_io.o \ | ||
6 | core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o | ||
7 | libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \ | ||
8 | pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o | ||
9 | librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \ | ||
10 | rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \ | ||
11 | rmgr/nldr.o rmgr/drv_interface.o | ||
12 | libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \ | ||
13 | dynload/tramp.o | ||
14 | libhw = hw/hw_mmu.o | ||
15 | |||
16 | tidspbridge-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \ | ||
17 | $(libdload) $(libhw) | ||
18 | |||
19 | #Machine dependent | ||
20 | ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \ | ||
21 | -DTICFG_PROC_VER -DTICFG_EVM_TYPE -DCHNL_SMCLASS \ | ||
22 | -DCHNL_MESSAGES -DUSE_LEVEL_1_MACROS | ||
23 | |||
24 | ccflags-y += -Idrivers/staging/tidspbridge/include | ||
25 | ccflags-y += -Idrivers/staging/tidspbridge/services | ||
26 | ccflags-y += -Idrivers/staging/tidspbridge/core | ||
27 | ccflags-y += -Idrivers/staging/tidspbridge/pmgr | ||
28 | ccflags-y += -Idrivers/staging/tidspbridge/rmgr | ||
29 | ccflags-y += -Idrivers/staging/tidspbridge/dynload | ||
30 | ccflags-y += -Idrivers/staging/tidspbridge/hw | ||
31 | ccflags-y += -Iarch/arm | ||
32 | |||
diff --git a/drivers/staging/tidspbridge/TODO b/drivers/staging/tidspbridge/TODO
deleted file mode 100644
index 1c51e2dc7b56..000000000000
--- a/drivers/staging/tidspbridge/TODO
+++ /dev/null
@@ -1,18 +0,0 @@
1 | * Migrate to (and if necessary, extend) existing upstream code such as | ||
2 | iommu, wdt, mcbsp, gptimers | ||
3 | * Decouple hardware-specific code (e.g. bridge_brd_start/stop/delete/monitor) | ||
* DOFF binary loader: consider pushing it to user space; at the very least,
  eliminate the direct filesystem access
6 | * Eliminate general services and libraries - use or extend existing kernel | ||
7 | libraries instead (e.g. gcf/lcm in nldr.c, global helpers in gen/) | ||
8 | * Eliminate direct manipulation of OMAP_SYSC_BASE | ||
9 | * Eliminate DSP_SUCCEEDED macros and their imposed redundant indentations | ||
10 | (adopt the kernel way of checking for return values) | ||
11 | * Audit interfaces exposed to user space | ||
12 | * Audit and clean up header files folder | ||
13 | * Use kernel coding style | ||
14 | * checkpatch.pl fixes | ||
15 | * allocate ext_mem_pool from consistent memory instead of using ioremap | ||
16 | |||
17 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com> | ||
18 | and Omar Ramirez Luna <omar.ramirez@ti.com>. | ||
diff --git a/drivers/staging/tidspbridge/core/_cmm.h b/drivers/staging/tidspbridge/core/_cmm.h
deleted file mode 100644
index 7660bef6ebb3..000000000000
--- a/drivers/staging/tidspbridge/core/_cmm.h
+++ /dev/null
@@ -1,45 +0,0 @@
1 | /* | ||
2 | * _cmm.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Private header file defining CMM manager objects and defines needed | ||
7 | * by IO manager to register shared memory regions when DSP base image | ||
8 | * is loaded(bridge_io_on_loaded). | ||
9 | * | ||
10 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
11 | * | ||
12 | * This package is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | * | ||
16 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
17 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
18 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
19 | */ | ||
20 | |||
21 | #ifndef _CMM_ | ||
22 | #define _CMM_ | ||
23 | |||
24 | /* | ||
25 | * These target side symbols define the beginning and ending addresses | ||
26 | * of the section of shared memory used for shared memory manager CMM. | ||
27 | * They are defined in the *cfg.cmd file by cdb code. | ||
28 | */ | ||
29 | #define SHM0_SHARED_BASE_SYM "_SHM0_BEG" | ||
30 | #define SHM0_SHARED_END_SYM "_SHM0_END" | ||
31 | #define SHM0_SHARED_RESERVED_BASE_SYM "_SHM0_RSVDSTRT" | ||
32 | |||
33 | /* | ||
34 | * Shared Memory Region #0(SHMSEG0) is used in the following way: | ||
35 | * | ||
36 | * |(_SHM0_BEG) | (_SHM0_RSVDSTRT) | (_SHM0_END) | ||
37 | * V V V | ||
38 | * ------------------------------------------------------------ | ||
39 | * | DSP-side allocations | GPP-side allocations | | ||
40 | * ------------------------------------------------------------ | ||
41 | * | ||
42 | * | ||
43 | */ | ||
44 | |||
45 | #endif /* _CMM_ */ | ||
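
As a worked example of the SHM0 layout sketched above, the three symbols split
the region into a DSP-side part and a GPP-side part. The addresses below are
made up for illustration; the real values are resolved from the DSP image's
*cfg.cmd file.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Example values only. */
    uint32_t shm0_beg  = 0x87000000;    /* _SHM0_BEG */
    uint32_t shm0_rsvd = 0x87200000;    /* _SHM0_RSVDSTRT */
    uint32_t shm0_end  = 0x873fffff;    /* _SHM0_END */

    /* DSP-side allocations live in [_SHM0_BEG, _SHM0_RSVDSTRT),
     * GPP-side allocations in [_SHM0_RSVDSTRT, _SHM0_END]. */
    printf("DSP-side: 0x%08" PRIx32 " bytes\n", shm0_rsvd - shm0_beg);
    printf("GPP-side: 0x%08" PRIx32 " bytes\n", shm0_end - shm0_rsvd + 1);
    return 0;
}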
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h
deleted file mode 100644
index 025d34320e7e..000000000000
--- a/drivers/staging/tidspbridge/core/_deh.h
+++ /dev/null
@@ -1,35 +0,0 @@
1 | /* | ||
2 | * _deh.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Private header for DEH module. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * Copyright (C) 2010 Felipe Contreras | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef _DEH_ | ||
21 | #define _DEH_ | ||
22 | |||
23 | #include <dspbridge/ntfy.h> | ||
24 | #include <dspbridge/dspdefs.h> | ||
25 | |||
26 | /* DEH Manager: only one created per board: */ | ||
27 | struct deh_mgr { | ||
28 | struct bridge_dev_context *bridge_context; /* Bridge context. */ | ||
29 | struct ntfy_object *ntfy_obj; /* NTFY object */ | ||
30 | |||
31 | /* MMU Fault DPC */ | ||
32 | struct tasklet_struct dpc_tasklet; | ||
33 | }; | ||
34 | |||
35 | #endif /* _DEH_ */ | ||
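
The dpc_tasklet member above carries the MMU-fault work out of interrupt
context. A minimal sketch of how such a tasklet is typically wired up is shown
below; the handler and setup names are illustrative and not the driver's real
code, which lives elsewhere in the core.

#include <linux/interrupt.h>

/* Illustrative handler: 'data' carries the deh_mgr pointer. */
static void example_mmu_fault_dpc(unsigned long data)
{
    struct deh_mgr *deh = (struct deh_mgr *)data;

    /* ... inspect the fault and notify deh->ntfy_obj clients ... */
    (void)deh;
}

static void example_deh_init_dpc(struct deh_mgr *deh)
{
    tasklet_init(&deh->dpc_tasklet, example_mmu_fault_dpc,
                 (unsigned long)deh);
}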
diff --git a/drivers/staging/tidspbridge/core/_msg_sm.h b/drivers/staging/tidspbridge/core/_msg_sm.h
deleted file mode 100644
index f6e58e3f3b48..000000000000
--- a/drivers/staging/tidspbridge/core/_msg_sm.h
+++ /dev/null
@@ -1,142 +0,0 @@
1 | /* | ||
2 | * _msg_sm.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Private header file defining msg_ctrl manager objects and defines needed | ||
7 | * by IO manager. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef _MSG_SM_ | ||
21 | #define _MSG_SM_ | ||
22 | |||
23 | #include <linux/list.h> | ||
24 | #include <dspbridge/msgdefs.h> | ||
25 | |||
26 | /* | ||
27 | * These target side symbols define the beginning and ending addresses | ||
28 | * of the section of shared memory used for messages. They are | ||
29 | * defined in the *cfg.cmd file by cdb code. | ||
30 | */ | ||
31 | #define MSG_SHARED_BUFFER_BASE_SYM "_MSG_BEG" | ||
32 | #define MSG_SHARED_BUFFER_LIMIT_SYM "_MSG_END" | ||
33 | |||
34 | #ifndef _CHNL_WORDSIZE | ||
#define _CHNL_WORDSIZE 4 /* default _CHNL_WORDSIZE is 4 bytes/word */
36 | #endif | ||
37 | |||
38 | /* | ||
39 | * ======== msg_ctrl ======== | ||
40 | * There is a control structure for messages to the DSP, and a control | ||
41 | * structure for messages from the DSP. The shared memory region for | ||
42 | * transferring messages is partitioned as follows: | ||
43 | * | ||
44 | * ---------------------------------------------------------- | ||
45 | * |Control | Messages from DSP | Control | Messages to DSP | | ||
46 | * ---------------------------------------------------------- | ||
47 | * | ||
48 | * msg_ctrl control structure for messages to the DSP is used in the following | ||
49 | * way: | ||
50 | * | ||
51 | * buf_empty - This flag is set to FALSE by the GPP after it has output | ||
52 | * messages for the DSP. The DSP host driver sets it to | ||
53 | * TRUE after it has copied the messages. | ||
54 | * post_swi - Set to 1 by the GPP after it has written the messages, | ||
55 | * set the size, and set buf_empty to FALSE. | ||
56 | * The DSP Host driver uses SWI_andn of the post_swi field | ||
57 | * when a host interrupt occurs. The host driver clears | ||
58 | * this after posting the SWI. | ||
59 | * size - Number of messages to be read by the DSP. | ||
60 | * | ||
61 | * For messages from the DSP: | ||
62 | * buf_empty - This flag is set to FALSE by the DSP after it has output | ||
63 | * messages for the GPP. The DPC on the GPP sets it to | ||
64 | * TRUE after it has copied the messages. | ||
65 | * post_swi - Set to 1 the DPC on the GPP after copying the messages. | ||
66 | * size - Number of messages to be read by the GPP. | ||
67 | */ | ||
68 | struct msg_ctrl { | ||
69 | u32 buf_empty; /* to/from DSP buffer is empty */ | ||
70 | u32 post_swi; /* Set to "1" to post msg_ctrl SWI */ | ||
71 | u32 size; /* Number of messages to/from the DSP */ | ||
72 | u32 resvd; | ||
73 | }; | ||
74 | |||
75 | /* | ||
76 | * ======== msg_mgr ======== | ||
77 | * The msg_mgr maintains a list of all MSG_QUEUEs. Each NODE object can | ||
78 | * have msg_queue to hold all messages that come up from the corresponding | ||
79 | * node on the DSP. The msg_mgr also has a shared queue of messages | ||
80 | * ready to go to the DSP. | ||
81 | */ | ||
82 | struct msg_mgr { | ||
83 | /* The first field must match that in msgobj.h */ | ||
84 | |||
85 | /* Function interface to Bridge driver */ | ||
86 | struct bridge_drv_interface *intf_fxns; | ||
87 | |||
88 | struct io_mgr *iomgr; /* IO manager */ | ||
89 | struct list_head queue_list; /* List of MSG_QUEUEs */ | ||
90 | spinlock_t msg_mgr_lock; /* For critical sections */ | ||
91 | /* Signalled when MsgFrame is available */ | ||
92 | struct sync_object *sync_event; | ||
93 | struct list_head msg_free_list; /* Free MsgFrames ready to be filled */ | ||
94 | struct list_head msg_used_list; /* MsgFrames ready to go to DSP */ | ||
95 | u32 msgs_pending; /* # of queued messages to go to DSP */ | ||
96 | u32 max_msgs; /* Max # of msgs that fit in buffer */ | ||
97 | msg_onexit on_exit; /* called when RMS_EXIT is received */ | ||
98 | }; | ||
99 | |||
100 | /* | ||
101 | * ======== msg_queue ======== | ||
102 | * Each NODE has a msg_queue for receiving messages from the | ||
103 | * corresponding node on the DSP. The msg_queue object maintains a list | ||
104 | * of messages that have been sent to the host, but not yet read (MSG_Get), | ||
105 | * and a list of free frames that can be filled when new messages arrive | ||
106 | * from the DSP. | ||
107 | * The msg_queue's hSynEvent gets posted when a message is ready. | ||
108 | */ | ||
109 | struct msg_queue { | ||
110 | struct list_head list_elem; | ||
111 | struct msg_mgr *msg_mgr; | ||
112 | u32 max_msgs; /* Node message depth */ | ||
113 | u32 msgq_id; /* Node environment pointer */ | ||
114 | struct list_head msg_free_list; /* Free MsgFrames ready to be filled */ | ||
115 | /* Filled MsgFramess waiting to be read */ | ||
116 | struct list_head msg_used_list; | ||
117 | void *arg; /* Handle passed to mgr on_exit callback */ | ||
118 | struct sync_object *sync_event; /* Signalled when message is ready */ | ||
119 | struct sync_object *sync_done; /* For synchronizing cleanup */ | ||
120 | struct sync_object *sync_done_ack; /* For synchronizing cleanup */ | ||
121 | struct ntfy_object *ntfy_obj; /* For notification of message ready */ | ||
122 | bool done; /* TRUE <==> deleting the object */ | ||
123 | u32 io_msg_pend; /* Number of pending MSG_get/put calls */ | ||
124 | }; | ||
125 | |||
126 | /* | ||
127 | * ======== msg_dspmsg ======== | ||
128 | */ | ||
129 | struct msg_dspmsg { | ||
130 | struct dsp_msg msg; | ||
131 | u32 msgq_id; /* Identifies the node the message goes to */ | ||
132 | }; | ||
133 | |||
134 | /* | ||
135 | * ======== msg_frame ======== | ||
136 | */ | ||
137 | struct msg_frame { | ||
138 | struct list_head list_elem; | ||
139 | struct msg_dspmsg msg_data; | ||
140 | }; | ||
141 | |||
142 | #endif /* _MSG_SM_ */ | ||
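
A schematic sketch of the GPP-to-DSP handshake described in the msg_ctrl
comment above: publish the message count, mark the buffer as not empty, raise
post_swi, and then interrupt the DSP. This is a simplified illustration built
on the structures defined in this header, not the driver's actual io_sm.c
code; raise_mailbox_interrupt() is a placeholder for the real mailbox write.

extern void raise_mailbox_interrupt(void);

/* 'ctrl' points at the msg_ctrl block that precedes the "messages to DSP"
 * area in shared memory. */
static void example_post_msgs_to_dsp(struct msg_ctrl *ctrl, u32 num_msgs)
{
    /* 1. Messages have already been copied into the shared buffer. */

    /* 2. Tell the DSP how many messages to read. */
    ctrl->size = num_msgs;

    /* 3. Mark the buffer as holding data; the DSP host driver sets it
     *    back to TRUE once it has copied the messages out. */
    ctrl->buf_empty = false;

    /* 4. Ask the DSP-side host driver to run its SWI. */
    ctrl->post_swi = 1;

    /* 5. Kick the DSP via the mailbox. */
    raise_mailbox_interrupt();
}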
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
deleted file mode 100644
index 65971b784b78..000000000000
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ /dev/null
@@ -1,382 +0,0 @@
1 | /* | ||
2 | * _tiomap.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Definitions and types private to this Bridge driver. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef _TIOMAP_ | ||
20 | #define _TIOMAP_ | ||
21 | |||
22 | /* | ||
23 | * XXX These powerdomain.h/clockdomain.h includes are wrong and should | ||
24 | * be removed. No driver should call pwrdm_* or clkdm_* functions | ||
25 | * directly; they should rely on OMAP core code to do this. | ||
26 | */ | ||
27 | #include <mach-omap2/powerdomain.h> | ||
28 | #include <mach-omap2/clockdomain.h> | ||
29 | /* | ||
30 | * XXX These mach-omap2/ includes are wrong and should be removed. No | ||
31 | * driver should read or write to PRM/CM registers directly; they | ||
32 | * should rely on OMAP core code to do this. | ||
33 | */ | ||
34 | #include <mach-omap2/cm3xxx.h> | ||
35 | #include <mach-omap2/prm-regbits-34xx.h> | ||
36 | #include <mach-omap2/cm-regbits-34xx.h> | ||
37 | #include <dspbridge/devdefs.h> | ||
38 | #include <hw_defs.h> | ||
39 | #include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */ | ||
40 | #include <dspbridge/sync.h> | ||
41 | #include <dspbridge/clk.h> | ||
42 | |||
43 | struct map_l4_peripheral { | ||
44 | u32 phys_addr; | ||
45 | u32 dsp_virt_addr; | ||
46 | }; | ||
47 | |||
48 | #define ARM_MAILBOX_START 0xfffcf000 | ||
49 | #define ARM_MAILBOX_LENGTH 0x800 | ||
50 | |||
51 | /* New Registers in OMAP3.1 */ | ||
52 | |||
53 | #define TESTBLOCK_ID_START 0xfffed400 | ||
54 | #define TESTBLOCK_ID_LENGTH 0xff | ||
55 | |||
56 | /* ID Returned by OMAP1510 */ | ||
57 | #define TBC_ID_VALUE 0xB47002F | ||
58 | |||
59 | #define SPACE_LENGTH 0x2000 | ||
60 | #define API_CLKM_DPLL_DMA 0xfffec000 | ||
61 | #define ARM_INTERRUPT_OFFSET 0xb00 | ||
62 | |||
63 | #define BIOS24XX | ||
64 | |||
65 | #define L4_PERIPHERAL_NULL 0x0 | ||
66 | #define DSPVA_PERIPHERAL_NULL 0x0 | ||
67 | |||
68 | #define MAX_LOCK_TLB_ENTRIES 15 | ||
69 | |||
70 | #define L4_PERIPHERAL_PRM 0x48306000 /*PRM L4 Peripheral */ | ||
71 | #define DSPVA_PERIPHERAL_PRM 0x1181e000 | ||
72 | #define L4_PERIPHERAL_SCM 0x48002000 /*SCM L4 Peripheral */ | ||
73 | #define DSPVA_PERIPHERAL_SCM 0x1181f000 | ||
74 | #define L4_PERIPHERAL_MMU 0x5D000000 /*MMU L4 Peripheral */ | ||
75 | #define DSPVA_PERIPHERAL_MMU 0x11820000 | ||
76 | #define L4_PERIPHERAL_CM 0x48004000 /* Core L4, Clock Management */ | ||
77 | #define DSPVA_PERIPHERAL_CM 0x1181c000 | ||
78 | #define L4_PERIPHERAL_PER 0x48005000 /* PER */ | ||
79 | #define DSPVA_PERIPHERAL_PER 0x1181d000 | ||
80 | |||
81 | #define L4_PERIPHERAL_GPIO1 0x48310000 | ||
82 | #define DSPVA_PERIPHERAL_GPIO1 0x11809000 | ||
83 | #define L4_PERIPHERAL_GPIO2 0x49050000 | ||
84 | #define DSPVA_PERIPHERAL_GPIO2 0x1180a000 | ||
85 | #define L4_PERIPHERAL_GPIO3 0x49052000 | ||
86 | #define DSPVA_PERIPHERAL_GPIO3 0x1180b000 | ||
87 | #define L4_PERIPHERAL_GPIO4 0x49054000 | ||
88 | #define DSPVA_PERIPHERAL_GPIO4 0x1180c000 | ||
89 | #define L4_PERIPHERAL_GPIO5 0x49056000 | ||
90 | #define DSPVA_PERIPHERAL_GPIO5 0x1180d000 | ||
91 | |||
92 | #define L4_PERIPHERAL_IVA2WDT 0x49030000 | ||
93 | #define DSPVA_PERIPHERAL_IVA2WDT 0x1180e000 | ||
94 | |||
95 | #define L4_PERIPHERAL_DISPLAY 0x48050000 | ||
96 | #define DSPVA_PERIPHERAL_DISPLAY 0x1180f000 | ||
97 | |||
98 | #define L4_PERIPHERAL_SSI 0x48058000 | ||
99 | #define DSPVA_PERIPHERAL_SSI 0x11804000 | ||
100 | #define L4_PERIPHERAL_GDD 0x48059000 | ||
101 | #define DSPVA_PERIPHERAL_GDD 0x11805000 | ||
102 | #define L4_PERIPHERAL_SS1 0x4805a000 | ||
103 | #define DSPVA_PERIPHERAL_SS1 0x11806000 | ||
104 | #define L4_PERIPHERAL_SS2 0x4805b000 | ||
105 | #define DSPVA_PERIPHERAL_SS2 0x11807000 | ||
106 | |||
107 | #define L4_PERIPHERAL_CAMERA 0x480BC000 | ||
108 | #define DSPVA_PERIPHERAL_CAMERA 0x11819000 | ||
109 | |||
110 | #define L4_PERIPHERAL_SDMA 0x48056000 | ||
111 | #define DSPVA_PERIPHERAL_SDMA 0x11810000 /* 0x1181d000 conflict w/ PER */ | ||
112 | |||
113 | #define L4_PERIPHERAL_UART1 0x4806a000 | ||
114 | #define DSPVA_PERIPHERAL_UART1 0x11811000 | ||
115 | #define L4_PERIPHERAL_UART2 0x4806c000 | ||
116 | #define DSPVA_PERIPHERAL_UART2 0x11812000 | ||
117 | #define L4_PERIPHERAL_UART3 0x49020000 | ||
118 | #define DSPVA_PERIPHERAL_UART3 0x11813000 | ||
119 | |||
120 | #define L4_PERIPHERAL_MCBSP1 0x48074000 | ||
121 | #define DSPVA_PERIPHERAL_MCBSP1 0x11814000 | ||
122 | #define L4_PERIPHERAL_MCBSP2 0x49022000 | ||
123 | #define DSPVA_PERIPHERAL_MCBSP2 0x11815000 | ||
124 | #define L4_PERIPHERAL_MCBSP3 0x49024000 | ||
125 | #define DSPVA_PERIPHERAL_MCBSP3 0x11816000 | ||
126 | #define L4_PERIPHERAL_MCBSP4 0x49026000 | ||
127 | #define DSPVA_PERIPHERAL_MCBSP4 0x11817000 | ||
128 | #define L4_PERIPHERAL_MCBSP5 0x48096000 | ||
129 | #define DSPVA_PERIPHERAL_MCBSP5 0x11818000 | ||
130 | |||
131 | #define L4_PERIPHERAL_GPTIMER5 0x49038000 | ||
132 | #define DSPVA_PERIPHERAL_GPTIMER5 0x11800000 | ||
133 | #define L4_PERIPHERAL_GPTIMER6 0x4903a000 | ||
134 | #define DSPVA_PERIPHERAL_GPTIMER6 0x11801000 | ||
135 | #define L4_PERIPHERAL_GPTIMER7 0x4903c000 | ||
136 | #define DSPVA_PERIPHERAL_GPTIMER7 0x11802000 | ||
137 | #define L4_PERIPHERAL_GPTIMER8 0x4903e000 | ||
138 | #define DSPVA_PERIPHERAL_GPTIMER8 0x11803000 | ||
139 | |||
140 | #define L4_PERIPHERAL_SPI1 0x48098000 | ||
141 | #define DSPVA_PERIPHERAL_SPI1 0x1181a000 | ||
142 | #define L4_PERIPHERAL_SPI2 0x4809a000 | ||
143 | #define DSPVA_PERIPHERAL_SPI2 0x1181b000 | ||
144 | |||
145 | #define L4_PERIPHERAL_MBOX 0x48094000 | ||
146 | #define DSPVA_PERIPHERAL_MBOX 0x11808000 | ||
147 | |||
148 | #define PM_GRPSEL_BASE 0x48307000 | ||
149 | #define DSPVA_GRPSEL_BASE 0x11821000 | ||
150 | |||
151 | #define L4_PERIPHERAL_SIDETONE_MCBSP2 0x49028000 | ||
152 | #define DSPVA_PERIPHERAL_SIDETONE_MCBSP2 0x11824000 | ||
153 | #define L4_PERIPHERAL_SIDETONE_MCBSP3 0x4902a000 | ||
154 | #define DSPVA_PERIPHERAL_SIDETONE_MCBSP3 0x11825000 | ||
155 | |||
156 | /* define a static array with L4 mappings */ | ||
157 | static const struct map_l4_peripheral l4_peripheral_table[] = { | ||
158 | {L4_PERIPHERAL_MBOX, DSPVA_PERIPHERAL_MBOX}, | ||
159 | {L4_PERIPHERAL_SCM, DSPVA_PERIPHERAL_SCM}, | ||
160 | {L4_PERIPHERAL_MMU, DSPVA_PERIPHERAL_MMU}, | ||
161 | {L4_PERIPHERAL_GPTIMER5, DSPVA_PERIPHERAL_GPTIMER5}, | ||
162 | {L4_PERIPHERAL_GPTIMER6, DSPVA_PERIPHERAL_GPTIMER6}, | ||
163 | {L4_PERIPHERAL_GPTIMER7, DSPVA_PERIPHERAL_GPTIMER7}, | ||
164 | {L4_PERIPHERAL_GPTIMER8, DSPVA_PERIPHERAL_GPTIMER8}, | ||
165 | {L4_PERIPHERAL_GPIO1, DSPVA_PERIPHERAL_GPIO1}, | ||
166 | {L4_PERIPHERAL_GPIO2, DSPVA_PERIPHERAL_GPIO2}, | ||
167 | {L4_PERIPHERAL_GPIO3, DSPVA_PERIPHERAL_GPIO3}, | ||
168 | {L4_PERIPHERAL_GPIO4, DSPVA_PERIPHERAL_GPIO4}, | ||
169 | {L4_PERIPHERAL_GPIO5, DSPVA_PERIPHERAL_GPIO5}, | ||
170 | {L4_PERIPHERAL_IVA2WDT, DSPVA_PERIPHERAL_IVA2WDT}, | ||
171 | {L4_PERIPHERAL_DISPLAY, DSPVA_PERIPHERAL_DISPLAY}, | ||
172 | {L4_PERIPHERAL_SSI, DSPVA_PERIPHERAL_SSI}, | ||
173 | {L4_PERIPHERAL_GDD, DSPVA_PERIPHERAL_GDD}, | ||
174 | {L4_PERIPHERAL_SS1, DSPVA_PERIPHERAL_SS1}, | ||
175 | {L4_PERIPHERAL_SS2, DSPVA_PERIPHERAL_SS2}, | ||
176 | {L4_PERIPHERAL_UART1, DSPVA_PERIPHERAL_UART1}, | ||
177 | {L4_PERIPHERAL_UART2, DSPVA_PERIPHERAL_UART2}, | ||
178 | {L4_PERIPHERAL_UART3, DSPVA_PERIPHERAL_UART3}, | ||
179 | {L4_PERIPHERAL_MCBSP1, DSPVA_PERIPHERAL_MCBSP1}, | ||
180 | {L4_PERIPHERAL_MCBSP2, DSPVA_PERIPHERAL_MCBSP2}, | ||
181 | {L4_PERIPHERAL_MCBSP3, DSPVA_PERIPHERAL_MCBSP3}, | ||
182 | {L4_PERIPHERAL_MCBSP4, DSPVA_PERIPHERAL_MCBSP4}, | ||
183 | {L4_PERIPHERAL_MCBSP5, DSPVA_PERIPHERAL_MCBSP5}, | ||
184 | {L4_PERIPHERAL_CAMERA, DSPVA_PERIPHERAL_CAMERA}, | ||
185 | {L4_PERIPHERAL_SPI1, DSPVA_PERIPHERAL_SPI1}, | ||
186 | {L4_PERIPHERAL_SPI2, DSPVA_PERIPHERAL_SPI2}, | ||
187 | {L4_PERIPHERAL_PRM, DSPVA_PERIPHERAL_PRM}, | ||
188 | {L4_PERIPHERAL_CM, DSPVA_PERIPHERAL_CM}, | ||
189 | {L4_PERIPHERAL_PER, DSPVA_PERIPHERAL_PER}, | ||
190 | {PM_GRPSEL_BASE, DSPVA_GRPSEL_BASE}, | ||
191 | {L4_PERIPHERAL_SIDETONE_MCBSP2, DSPVA_PERIPHERAL_SIDETONE_MCBSP2}, | ||
192 | {L4_PERIPHERAL_SIDETONE_MCBSP3, DSPVA_PERIPHERAL_SIDETONE_MCBSP3}, | ||
193 | {L4_PERIPHERAL_NULL, DSPVA_PERIPHERAL_NULL} | ||
194 | }; | ||
195 | |||
196 | /* | ||
197 | * 15 10 0 | ||
198 | * --------------------------------- | ||
199 | * |0|0|1|0|0|0|c|c|c|i|i|i|i|i|i|i| | ||
200 | * --------------------------------- | ||
201 | * | (class) | (module specific) | | ||
202 | * | ||
* where c -> External Clock Command: Clk & Autoidle Disable/Enable
204 | * i -> External Clock ID Timers 5,6,7,8, McBSP1,2 and WDT3 | ||
205 | */ | ||
206 | |||
207 | /* MBX_PM_CLK_IDMASK: DSP External clock id mask. */ | ||
208 | #define MBX_PM_CLK_IDMASK 0x7F | ||
209 | |||
210 | /* MBX_PM_CLK_CMDSHIFT: DSP External clock command shift. */ | ||
211 | #define MBX_PM_CLK_CMDSHIFT 7 | ||
212 | |||
213 | /* MBX_PM_CLK_CMDMASK: DSP External clock command mask. */ | ||
214 | #define MBX_PM_CLK_CMDMASK 7 | ||
215 | |||
216 | /* MBX_PM_MAX_RESOURCES: CORE 1 Clock resources. */ | ||
217 | #define MBX_CORE1_RESOURCES 7 | ||
218 | |||
219 | /* MBX_PM_MAX_RESOURCES: CORE 2 Clock Resources. */ | ||
220 | #define MBX_CORE2_RESOURCES 1 | ||
221 | |||
222 | /* MBX_PM_MAX_RESOURCES: TOTAL Clock Resources. */ | ||
223 | #define MBX_PM_MAX_RESOURCES 11 | ||
224 | |||
225 | /* Power Management Commands */ | ||
226 | #define BPWR_DISABLE_CLOCK 0 | ||
227 | #define BPWR_ENABLE_CLOCK 1 | ||
228 | |||
229 | /* OMAP242x specific resources */ | ||
230 | enum bpwr_ext_clock_id { | ||
231 | BPWR_GP_TIMER5 = 0x10, | ||
232 | BPWR_GP_TIMER6, | ||
233 | BPWR_GP_TIMER7, | ||
234 | BPWR_GP_TIMER8, | ||
235 | BPWR_WD_TIMER3, | ||
236 | BPWR_MCBSP1, | ||
237 | BPWR_MCBSP2, | ||
238 | BPWR_MCBSP3, | ||
239 | BPWR_MCBSP4, | ||
240 | BPWR_MCBSP5, | ||
241 | BPWR_SSI = 0x20 | ||
242 | }; | ||
243 | |||
244 | static const u32 bpwr_clkid[] = { | ||
245 | (u32) BPWR_GP_TIMER5, | ||
246 | (u32) BPWR_GP_TIMER6, | ||
247 | (u32) BPWR_GP_TIMER7, | ||
248 | (u32) BPWR_GP_TIMER8, | ||
249 | (u32) BPWR_WD_TIMER3, | ||
250 | (u32) BPWR_MCBSP1, | ||
251 | (u32) BPWR_MCBSP2, | ||
252 | (u32) BPWR_MCBSP3, | ||
253 | (u32) BPWR_MCBSP4, | ||
254 | (u32) BPWR_MCBSP5, | ||
255 | (u32) BPWR_SSI | ||
256 | }; | ||
257 | |||
258 | struct bpwr_clk_t { | ||
259 | u32 clk_id; | ||
260 | enum dsp_clk_id clk; | ||
261 | }; | ||
262 | |||
263 | static const struct bpwr_clk_t bpwr_clks[] = { | ||
264 | {(u32) BPWR_GP_TIMER5, DSP_CLK_GPT5}, | ||
265 | {(u32) BPWR_GP_TIMER6, DSP_CLK_GPT6}, | ||
266 | {(u32) BPWR_GP_TIMER7, DSP_CLK_GPT7}, | ||
267 | {(u32) BPWR_GP_TIMER8, DSP_CLK_GPT8}, | ||
268 | {(u32) BPWR_WD_TIMER3, DSP_CLK_WDT3}, | ||
269 | {(u32) BPWR_MCBSP1, DSP_CLK_MCBSP1}, | ||
270 | {(u32) BPWR_MCBSP2, DSP_CLK_MCBSP2}, | ||
271 | {(u32) BPWR_MCBSP3, DSP_CLK_MCBSP3}, | ||
272 | {(u32) BPWR_MCBSP4, DSP_CLK_MCBSP4}, | ||
273 | {(u32) BPWR_MCBSP5, DSP_CLK_MCBSP5}, | ||
274 | {(u32) BPWR_SSI, DSP_CLK_SSI} | ||
275 | }; | ||
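
Reading the bit-field diagram earlier in this file together with the
MBX_PM_CLK_* masks, a 16-bit external-clock command word for the DSP mailbox
can be composed as below. This is a sketch derived from the diagram and the
defines above, not necessarily how the power-management code assembles the
value.

/* Sketch: bits 15..10 carry the fixed 0b001000 class pattern from the
 * diagram, bits 9..7 the command, bits 6..0 the external clock id. */
static inline u16 example_mbx_clk_cmd(u16 cmd, u16 clk_id)
{
    return (1 << 13) |
           ((cmd & MBX_PM_CLK_CMDMASK) << MBX_PM_CLK_CMDSHIFT) |
           (clk_id & MBX_PM_CLK_IDMASK);
}

/* e.g. example_mbx_clk_cmd(BPWR_ENABLE_CLOCK, BPWR_GP_TIMER5) */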
276 | |||
277 | /* Interrupt Register Offsets */ | ||
278 | #define INTH_IT_REG_OFFSET 0x00 /* Interrupt register offset */ | ||
279 | #define INTH_MASK_IT_REG_OFFSET 0x04 /* Mask Interrupt reg offset */ | ||
280 | |||
281 | #define DSP_MAILBOX1_INT 10 | ||
282 | /* | ||
283 | * Bit definition of Interrupt Level Registers | ||
284 | */ | ||
285 | |||
286 | /* Mail Box defines */ | ||
287 | #define MB_ARM2DSP1_REG_OFFSET 0x00 | ||
288 | |||
289 | #define MB_ARM2DSP1B_REG_OFFSET 0x04 | ||
290 | |||
291 | #define MB_DSP2ARM1B_REG_OFFSET 0x0C | ||
292 | |||
293 | #define MB_ARM2DSP1_FLAG_REG_OFFSET 0x18 | ||
294 | |||
295 | #define MB_ARM2DSP_FLAG 0x0001 | ||
296 | |||
297 | #define MBOX_ARM2DSP HW_MBOX_ID0 | ||
298 | #define MBOX_DSP2ARM HW_MBOX_ID1 | ||
299 | #define MBOX_ARM HW_MBOX_U0_ARM | ||
300 | #define MBOX_DSP HW_MBOX_U1_DSP1 | ||
301 | |||
302 | #define ENABLE true | ||
303 | #define DISABLE false | ||
304 | |||
305 | #define HIGH_LEVEL true | ||
306 | #define LOW_LEVEL false | ||
307 | |||
/* Macros */
309 | #define CLEAR_BIT(reg, mask) (reg &= ~mask) | ||
310 | #define SET_BIT(reg, mask) (reg |= mask) | ||
311 | |||
312 | #define SET_GROUP_BITS16(reg, position, width, value) \ | ||
313 | do {\ | ||
314 | reg &= ~((0xFFFF >> (16 - (width))) << (position)); \ | ||
315 | reg |= ((value & (0xFFFF >> (16 - (width)))) << (position)); \ | ||
316 | } while (0); | ||
317 | |||
318 | #define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index))) | ||
319 | |||
320 | /* This Bridge driver's device context: */ | ||
321 | struct bridge_dev_context { | ||
322 | struct dev_object *dev_obj; /* Handle to Bridge device object. */ | ||
323 | u32 dsp_base_addr; /* Arm's API to DSP virt base addr */ | ||
324 | /* | ||
325 | * DSP External memory prog address as seen virtually by the OS on | ||
326 | * the host side. | ||
327 | */ | ||
328 | u32 dsp_ext_base_addr; /* See the comment above */ | ||
329 | u32 api_reg_base; /* API mem map'd registers */ | ||
330 | void __iomem *dsp_mmu_base; /* DSP MMU Mapped registers */ | ||
331 | u32 api_clk_base; /* CLK Registers */ | ||
332 | u32 dsp_clk_m2_base; /* DSP Clock Module m2 */ | ||
333 | u32 public_rhea; /* Pub Rhea */ | ||
334 | u32 int_addr; /* MB INTR reg */ | ||
335 | u32 tc_endianism; /* TC Endianism register */ | ||
336 | u32 test_base; /* DSP MMU Mapped registers */ | ||
337 | u32 self_loop; /* Pointer to the selfloop */ | ||
338 | u32 dsp_start_add; /* API Boot vector */ | ||
339 | u32 internal_size; /* Internal memory size */ | ||
340 | |||
341 | struct omap_mbox *mbox; /* Mail box handle */ | ||
342 | |||
343 | struct cfg_hostres *resources; /* Host Resources */ | ||
344 | |||
345 | /* | ||
346 | * Processor specific info is set when prog loaded and read from DCD. | ||
347 | * [See bridge_dev_ctrl()] PROC info contains DSP-MMU TLB entries. | ||
348 | */ | ||
349 | /* DMMU TLB entries */ | ||
350 | struct bridge_ioctl_extproc atlb_entry[BRDIOCTL_NUMOFMMUTLB]; | ||
351 | u32 brd_state; /* Last known board state. */ | ||
352 | |||
353 | /* TC Settings */ | ||
354 | bool tc_word_swap_on; /* Traffic Controller Word Swap */ | ||
355 | struct pg_table_attrs *pt_attrs; | ||
356 | u32 dsp_per_clks; | ||
357 | }; | ||
358 | |||
359 | /* | ||
360 | * If dsp_debug is true, do not branch to the DSP entry | ||
361 | * point and wait for DSP to boot. | ||
362 | */ | ||
363 | extern s32 dsp_debug; | ||
364 | |||
365 | /* | ||
366 | * ======== sm_interrupt_dsp ======== | ||
367 | * Purpose: | ||
368 | * Set interrupt value & send an interrupt to the DSP processor(s). | ||
369 | * This is typically used when mailbox interrupt mechanisms allow data | ||
370 | * to be associated with interrupt such as for OMAP's CMD/DATA regs. | ||
371 | * Parameters: | ||
372 | * dev_context: Handle to Bridge driver defined device info. | ||
373 | * mb_val: Value associated with interrupt(e.g. mailbox value). | ||
374 | * Returns: | ||
375 | * 0: Interrupt sent; | ||
376 | * else: Unable to send interrupt. | ||
377 | * Requires: | ||
378 | * Ensures: | ||
379 | */ | ||
380 | int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val); | ||
381 | |||
382 | #endif /* _TIOMAP_ */ | ||
diff --git a/drivers/staging/tidspbridge/core/_tiomap_pwr.h b/drivers/staging/tidspbridge/core/_tiomap_pwr.h
deleted file mode 100644
index 7bbd3802c15f..000000000000
--- a/drivers/staging/tidspbridge/core/_tiomap_pwr.h
+++ /dev/null
@@ -1,85 +0,0 @@
1 | /* | ||
2 | * _tiomap_pwr.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Definitions and types for the DSP wake/sleep routines. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef _TIOMAP_PWR_ | ||
20 | #define _TIOMAP_PWR_ | ||
21 | |||
22 | #ifdef CONFIG_PM | ||
23 | extern s32 dsp_test_sleepstate; | ||
24 | #endif | ||
25 | |||
26 | extern struct mailbox_context mboxsetting; | ||
27 | |||
28 | /* | ||
29 | * ======== wake_dsp ========= | ||
30 | * Wakes up the DSP from DeepSleep | ||
31 | */ | ||
32 | extern int wake_dsp(struct bridge_dev_context *dev_context, | ||
33 | void *pargs); | ||
34 | |||
35 | /* | ||
36 | * ======== sleep_dsp ========= | ||
37 | * Places the DSP in DeepSleep. | ||
38 | */ | ||
39 | extern int sleep_dsp(struct bridge_dev_context *dev_context, | ||
40 | u32 dw_cmd, void *pargs); | ||
41 | /* | ||
42 | * ========interrupt_dsp======== | ||
43 | * Sends an interrupt to DSP unconditionally. | ||
44 | */ | ||
45 | extern void interrupt_dsp(struct bridge_dev_context *dev_context, | ||
46 | u16 mb_val); | ||
47 | |||
48 | /* | ||
49 | * ======== wake_dsp ========= | ||
50 | * Wakes up the DSP from DeepSleep | ||
51 | */ | ||
52 | extern int dsp_peripheral_clk_ctrl(struct bridge_dev_context | ||
53 | *dev_context, void *pargs); | ||
54 | /* | ||
55 | * ======== handle_hibernation_from_dsp ======== | ||
56 | * Handle Hibernation requested from DSP | ||
57 | */ | ||
58 | int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context); | ||
59 | /* | ||
60 | * ======== post_scale_dsp ======== | ||
61 | * Handle Post Scale notification to DSP | ||
62 | */ | ||
63 | int post_scale_dsp(struct bridge_dev_context *dev_context, | ||
64 | void *pargs); | ||
65 | /* | ||
66 | * ======== pre_scale_dsp ======== | ||
67 | * Handle Pre Scale notification to DSP | ||
68 | */ | ||
69 | int pre_scale_dsp(struct bridge_dev_context *dev_context, | ||
70 | void *pargs); | ||
71 | /* | ||
72 | * ======== handle_constraints_set ======== | ||
73 | * Handle constraints request from DSP | ||
74 | */ | ||
75 | int handle_constraints_set(struct bridge_dev_context *dev_context, | ||
76 | void *pargs); | ||
77 | |||
78 | /* | ||
79 | * ======== dsp_clk_wakeup_event_ctrl ======== | ||
* This function sets the group selection bits while
* enabling/disabling.
82 | */ | ||
83 | void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable); | ||
84 | |||
85 | #endif /* _TIOMAP_PWR_ */ | ||
diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c
deleted file mode 100644
index c855992f5950..000000000000
--- a/drivers/staging/tidspbridge/core/chnl_sm.c
+++ /dev/null
@@ -1,908 +0,0 @@
1 | /* | ||
2 | * chnl_sm.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Implements upper edge functions for Bridge driver channel module. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | /* | ||
20 | * The lower edge functions must be implemented by the Bridge driver | ||
21 | * writer, and are declared in chnl_sm.h. | ||
22 | * | ||
23 | * Care is taken in this code to prevent simultaneous access to channel | ||
24 | * queues from | ||
25 | * 1. Threads. | ||
26 | * 2. io_dpc(), scheduled from the io_isr() as an event. | ||
27 | * | ||
28 | * This is done primarily by: | ||
29 | * - Semaphores. | ||
30 | * - state flags in the channel object; and | ||
31 | * - ensuring the IO_Dispatch() routine, which is called from both | ||
32 | * CHNL_AddIOReq() and the DPC(if implemented), is not re-entered. | ||
33 | * | ||
34 | * Channel Invariant: | ||
35 | * There is an important invariant condition which must be maintained per | ||
36 | * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of | ||
37 | * which may cause timeouts and/or failure of function sync_wait_on_event. | ||
38 | * This invariant condition is: | ||
39 | * | ||
40 | * list_empty(&pchnl->io_completions) ==> pchnl->sync_event is reset | ||
41 | * and | ||
42 | * !list_empty(&pchnl->io_completions) ==> pchnl->sync_event is set. | ||
43 | */ | ||
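
The invariant above pairs the emptiness of the io_completions list with the
state of the channel's sync event. A compact way to state it in code is shown
below; this is a hypothetical helper for illustration only, and
sync_set_event()/sync_reset_event() stand in for whatever the sync layer
actually provides.

/* Hypothetical helper: re-establish the channel invariant after the
 * io_completions list has been modified under the channel manager lock. */
static void example_chnl_fixup_sync_event(struct chnl_object *pchnl)
{
    if (list_empty(&pchnl->io_completions))
        sync_reset_event(pchnl->sync_event);    /* no IOCs pending */
    else
        sync_set_event(pchnl->sync_event);      /* IOCs ready to reap */
}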
44 | |||
45 | #include <linux/types.h> | ||
46 | |||
47 | /* ----------------------------------- OS */ | ||
48 | #include <dspbridge/host_os.h> | ||
49 | |||
50 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
51 | #include <dspbridge/dbdefs.h> | ||
52 | |||
53 | /* ----------------------------------- OS Adaptation Layer */ | ||
54 | #include <dspbridge/sync.h> | ||
55 | |||
56 | /* ----------------------------------- Bridge Driver */ | ||
57 | #include <dspbridge/dspdefs.h> | ||
58 | #include <dspbridge/dspchnl.h> | ||
59 | #include "_tiomap.h" | ||
60 | |||
61 | /* ----------------------------------- Platform Manager */ | ||
62 | #include <dspbridge/dev.h> | ||
63 | |||
64 | /* ----------------------------------- Others */ | ||
65 | #include <dspbridge/io_sm.h> | ||
66 | |||
67 | /* ----------------------------------- Define for This */ | ||
68 | #define USERMODE_ADDR PAGE_OFFSET | ||
69 | |||
70 | #define MAILBOX_IRQ INT_MAIL_MPU_IRQ | ||
71 | |||
72 | /* ----------------------------------- Function Prototypes */ | ||
73 | static int create_chirp_list(struct list_head *list, u32 chirps); | ||
74 | |||
75 | static void free_chirp_list(struct list_head *list); | ||
76 | |||
77 | static int search_free_channel(struct chnl_mgr *chnl_mgr_obj, | ||
78 | u32 *chnl); | ||
79 | |||
80 | /* | ||
81 | * ======== bridge_chnl_add_io_req ======== | ||
82 | * Enqueue an I/O request for data transfer on a channel to the DSP. | ||
83 | * The direction (mode) is specified in the channel object. Note the DSP | ||
84 | * address is specified for channels opened in direct I/O mode. | ||
85 | */ | ||
86 | int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf, | ||
87 | u32 byte_size, u32 buf_size, | ||
88 | u32 dw_dsp_addr, u32 dw_arg) | ||
89 | { | ||
90 | int status = 0; | ||
91 | struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; | ||
92 | struct chnl_irp *chnl_packet_obj = NULL; | ||
93 | struct bridge_dev_context *dev_ctxt; | ||
94 | struct dev_object *dev_obj; | ||
95 | u8 dw_state; | ||
96 | bool is_eos; | ||
97 | struct chnl_mgr *chnl_mgr_obj; | ||
98 | u8 *host_sys_buf = NULL; | ||
99 | bool sched_dpc = false; | ||
100 | u16 mb_val = 0; | ||
101 | |||
102 | is_eos = (byte_size == 0); | ||
103 | |||
104 | /* Validate args */ | ||
105 | if (!host_buf || !pchnl) | ||
106 | return -EFAULT; | ||
107 | |||
108 | if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) | ||
109 | return -EPERM; | ||
110 | |||
111 | /* | ||
112 | * Check the channel state: only queue chirp if channel state | ||
113 | * allows it. | ||
114 | */ | ||
115 | dw_state = pchnl->state; | ||
116 | if (dw_state != CHNL_STATEREADY) { | ||
117 | if (dw_state & CHNL_STATECANCEL) | ||
118 | return -ECANCELED; | ||
119 | if ((dw_state & CHNL_STATEEOS) && | ||
120 | CHNL_IS_OUTPUT(pchnl->chnl_mode)) | ||
121 | return -EPIPE; | ||
122 | /* No other possible states left */ | ||
123 | } | ||
124 | |||
125 | dev_obj = dev_get_first(); | ||
126 | dev_get_bridge_context(dev_obj, &dev_ctxt); | ||
127 | if (!dev_ctxt) | ||
128 | return -EFAULT; | ||
129 | |||
130 | if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) { | ||
131 | if (!(host_buf < (void *)USERMODE_ADDR)) { | ||
132 | host_sys_buf = host_buf; | ||
133 | goto func_cont; | ||
134 | } | ||
135 | /* if addr in user mode, then copy to kernel space */ | ||
136 | host_sys_buf = kmalloc(buf_size, GFP_KERNEL); | ||
137 | if (host_sys_buf == NULL) | ||
138 | return -ENOMEM; | ||
139 | |||
140 | if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) { | ||
141 | status = copy_from_user(host_sys_buf, host_buf, | ||
142 | buf_size); | ||
143 | if (status) { | ||
144 | kfree(host_sys_buf); | ||
145 | host_sys_buf = NULL; | ||
146 | return -EFAULT; | ||
147 | } | ||
148 | } | ||
149 | } | ||
150 | func_cont: | ||
151 | /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY | ||
152 | * channels. DPCCS is held to avoid race conditions with PCPY channels. | ||
153 | * If DPC is scheduled in process context (iosm_schedule) and any | ||
154 | * non-mailbox interrupt occurs, that DPC will run and break CS. Hence | ||
155 | * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */ | ||
156 | chnl_mgr_obj = pchnl->chnl_mgr_obj; | ||
157 | spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock); | ||
158 | omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX); | ||
159 | if (pchnl->chnl_type == CHNL_PCPY) { | ||
160 | /* This is a processor-copy channel. */ | ||
161 | if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) { | ||
162 | /* Check buffer size on output channels for fit. */ | ||
163 | if (byte_size > io_buf_size( | ||
164 | pchnl->chnl_mgr_obj->iomgr)) { | ||
165 | status = -EINVAL; | ||
166 | goto out; | ||
167 | } | ||
168 | } | ||
169 | } | ||
170 | |||
171 | /* Get a free chirp: */ | ||
172 | if (list_empty(&pchnl->free_packets_list)) { | ||
173 | status = -EIO; | ||
174 | goto out; | ||
175 | } | ||
176 | chnl_packet_obj = list_first_entry(&pchnl->free_packets_list, | ||
177 | struct chnl_irp, link); | ||
178 | list_del(&chnl_packet_obj->link); | ||
179 | |||
180 | /* Enqueue the chirp on the chnl's IORequest queue: */ | ||
181 | chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf = | ||
182 | host_buf; | ||
183 | if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1) | ||
184 | chnl_packet_obj->host_sys_buf = host_sys_buf; | ||
185 | |||
186 | /* | ||
187 | * Note: for dma chans dw_dsp_addr contains dsp address | ||
188 | * of SM buffer. | ||
189 | */ | ||
190 | /* DSP address */ | ||
191 | chnl_packet_obj->dsp_tx_addr = dw_dsp_addr / chnl_mgr_obj->word_size; | ||
192 | chnl_packet_obj->byte_size = byte_size; | ||
193 | chnl_packet_obj->buf_size = buf_size; | ||
194 | /* Only valid for output channel */ | ||
195 | chnl_packet_obj->arg = dw_arg; | ||
196 | chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS : | ||
197 | CHNL_IOCSTATCOMPLETE); | ||
198 | list_add_tail(&chnl_packet_obj->link, &pchnl->io_requests); | ||
199 | pchnl->cio_reqs++; | ||
200 | /* | ||
201 | * If end of stream, update the channel state to prevent | ||
202 | * more IOR's. | ||
203 | */ | ||
204 | if (is_eos) | ||
205 | pchnl->state |= CHNL_STATEEOS; | ||
206 | |||
207 | /* Request IO from the DSP */ | ||
208 | io_request_chnl(chnl_mgr_obj->iomgr, pchnl, | ||
209 | (CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT : | ||
210 | IO_OUTPUT), &mb_val); | ||
211 | sched_dpc = true; | ||
212 | out: | ||
213 | omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX); | ||
214 | spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock); | ||
215 | if (mb_val != 0) | ||
216 | sm_interrupt_dsp(dev_ctxt, mb_val); | ||
217 | |||
218 | /* Schedule a DPC, to do the actual data transfer */ | ||
219 | if (sched_dpc) | ||
220 | iosm_schedule(chnl_mgr_obj->iomgr); | ||
221 | |||
222 | return status; | ||
223 | } | ||
224 | |||
225 | /* | ||
226 | * ======== bridge_chnl_cancel_io ======== | ||
227 | * Return all I/O requests to the client which have not yet been | ||
228 | * transferred. The channel's I/O completion object is | ||
229 | * signalled, and all the I/O requests are queued as IOC's, with the | ||
230 | * status field set to CHNL_IOCSTATCANCEL. | ||
231 | * This call is typically used in abort situations, and is a prelude to | ||
232 | * chnl_close(); | ||
233 | */ | ||
234 | int bridge_chnl_cancel_io(struct chnl_object *chnl_obj) | ||
235 | { | ||
236 | struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; | ||
237 | u32 chnl_id = -1; | ||
238 | s8 chnl_mode; | ||
239 | struct chnl_irp *chirp, *tmp; | ||
240 | struct chnl_mgr *chnl_mgr_obj = NULL; | ||
241 | |||
242 | /* Check args: */ | ||
243 | if (!pchnl || !pchnl->chnl_mgr_obj) | ||
244 | return -EFAULT; | ||
245 | |||
246 | chnl_id = pchnl->chnl_id; | ||
247 | chnl_mode = pchnl->chnl_mode; | ||
248 | chnl_mgr_obj = pchnl->chnl_mgr_obj; | ||
249 | |||
250 | /* Mark this channel as cancelled, to prevent further IORequests or | ||
251 | * dispatching. */ | ||
252 | spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock); | ||
253 | |||
254 | pchnl->state |= CHNL_STATECANCEL; | ||
255 | |||
256 | if (list_empty(&pchnl->io_requests)) { | ||
257 | spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock); | ||
258 | return 0; | ||
259 | } | ||
260 | |||
261 | if (pchnl->chnl_type == CHNL_PCPY) { | ||
262 | /* Indicate we have no more buffers available for transfer: */ | ||
263 | if (CHNL_IS_INPUT(pchnl->chnl_mode)) { | ||
264 | io_cancel_chnl(chnl_mgr_obj->iomgr, chnl_id); | ||
265 | } else { | ||
266 | /* Record that we no longer have output buffers | ||
267 | * available: */ | ||
268 | chnl_mgr_obj->output_mask &= ~(1 << chnl_id); | ||
269 | } | ||
270 | } | ||
271 | /* Move all IOR's to IOC queue: */ | ||
272 | list_for_each_entry_safe(chirp, tmp, &pchnl->io_requests, link) { | ||
273 | list_del(&chirp->link); | ||
274 | chirp->byte_size = 0; | ||
275 | chirp->status |= CHNL_IOCSTATCANCEL; | ||
276 | list_add_tail(&chirp->link, &pchnl->io_completions); | ||
277 | pchnl->cio_cs++; | ||
278 | pchnl->cio_reqs--; | ||
279 | } | ||
280 | |||
281 | spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock); | ||
282 | |||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | /* | ||
287 | * ======== bridge_chnl_close ======== | ||
288 | * Purpose: | ||
289 | * Ensures all pending I/O on this channel is cancelled, discards all | ||
290 | * queued I/O completion notifications, then frees the resources allocated | ||
291 | * for this channel, and makes the corresponding logical channel id | ||
292 | * available for subsequent use. | ||
293 | */ | ||
294 | int bridge_chnl_close(struct chnl_object *chnl_obj) | ||
295 | { | ||
296 | int status; | ||
297 | struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; | ||
298 | |||
299 | /* Check args: */ | ||
300 | if (!pchnl) | ||
301 | return -EFAULT; | ||
302 | /* Cancel IO: this ensures no further IO requests or notifications */ | ||
303 | status = bridge_chnl_cancel_io(chnl_obj); | ||
304 | if (status) | ||
305 | return status; | ||
306 | /* Invalidate channel object: Protects from CHNL_GetIOCompletion() */ | ||
307 | /* Free the slot in the channel manager: */ | ||
308 | pchnl->chnl_mgr_obj->channels[pchnl->chnl_id] = NULL; | ||
309 | spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock); | ||
310 | pchnl->chnl_mgr_obj->open_channels -= 1; | ||
311 | spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock); | ||
312 | if (pchnl->ntfy_obj) { | ||
313 | ntfy_delete(pchnl->ntfy_obj); | ||
314 | kfree(pchnl->ntfy_obj); | ||
315 | pchnl->ntfy_obj = NULL; | ||
316 | } | ||
317 | /* Reset channel event: (NOTE: user_event freed in user context) */ | ||
318 | if (pchnl->sync_event) { | ||
319 | sync_reset_event(pchnl->sync_event); | ||
320 | kfree(pchnl->sync_event); | ||
321 | pchnl->sync_event = NULL; | ||
322 | } | ||
323 | /* Free I/O request and I/O completion queues: */ | ||
324 | free_chirp_list(&pchnl->io_completions); | ||
325 | pchnl->cio_cs = 0; | ||
326 | |||
327 | free_chirp_list(&pchnl->io_requests); | ||
328 | pchnl->cio_reqs = 0; | ||
329 | |||
330 | free_chirp_list(&pchnl->free_packets_list); | ||
331 | |||
332 | /* Release channel object. */ | ||
333 | kfree(pchnl); | ||
334 | |||
335 | return status; | ||
336 | } | ||
337 | |||
338 | /* | ||
339 | * ======== bridge_chnl_create ======== | ||
340 | * Create a channel manager object, responsible for opening new channels | ||
341 | * and closing old ones for a given board. | ||
342 | */ | ||
343 | int bridge_chnl_create(struct chnl_mgr **channel_mgr, | ||
344 | struct dev_object *hdev_obj, | ||
345 | const struct chnl_mgrattrs *mgr_attrts) | ||
346 | { | ||
347 | int status = 0; | ||
348 | struct chnl_mgr *chnl_mgr_obj = NULL; | ||
349 | u8 max_channels; | ||
350 | |||
351 | /* Allocate channel manager object */ | ||
352 | chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL); | ||
353 | if (chnl_mgr_obj) { | ||
354 | /* | ||
355 | * The max_channels attr must equal the # of supported chnls for | ||
356 | * each transport (# chnls for PCPY = DDMA = ZCPY): i.e. | ||
357 | * mgr_attrts->max_channels = CHNL_MAXCHANNELS = | ||
358 | * DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS. | ||
359 | */ | ||
360 | max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY; | ||
361 | /* Create array of channels */ | ||
362 | chnl_mgr_obj->channels = kzalloc(sizeof(struct chnl_object *) | ||
363 | * max_channels, GFP_KERNEL); | ||
364 | if (chnl_mgr_obj->channels) { | ||
365 | /* Initialize chnl_mgr object */ | ||
366 | chnl_mgr_obj->type = CHNL_TYPESM; | ||
367 | chnl_mgr_obj->word_size = mgr_attrts->word_size; | ||
368 | /* Total # chnls supported */ | ||
369 | chnl_mgr_obj->max_channels = max_channels; | ||
370 | chnl_mgr_obj->open_channels = 0; | ||
371 | chnl_mgr_obj->output_mask = 0; | ||
372 | chnl_mgr_obj->last_output = 0; | ||
373 | chnl_mgr_obj->dev_obj = hdev_obj; | ||
374 | spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock); | ||
375 | } else { | ||
376 | status = -ENOMEM; | ||
377 | } | ||
378 | } else { | ||
379 | status = -ENOMEM; | ||
380 | } | ||
381 | |||
382 | if (status) { | ||
383 | bridge_chnl_destroy(chnl_mgr_obj); | ||
384 | *channel_mgr = NULL; | ||
385 | } else { | ||
386 | /* Return channel manager object to caller... */ | ||
387 | *channel_mgr = chnl_mgr_obj; | ||
388 | } | ||
389 | return status; | ||
390 | } | ||
391 | |||
392 | /* | ||
393 | * ======== bridge_chnl_destroy ======== | ||
394 | * Purpose: | ||
395 | * Close all open channels, and destroy the channel manager. | ||
396 | */ | ||
397 | int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr) | ||
398 | { | ||
399 | int status = 0; | ||
400 | struct chnl_mgr *chnl_mgr_obj = hchnl_mgr; | ||
401 | u32 chnl_id; | ||
402 | |||
403 | if (hchnl_mgr) { | ||
404 | /* Close all open channels: */ | ||
405 | for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels; | ||
406 | chnl_id++) { | ||
407 | status = | ||
408 | bridge_chnl_close(chnl_mgr_obj->channels | ||
409 | [chnl_id]); | ||
410 | if (status) | ||
411 | dev_dbg(bridge, "%s: Error status 0x%x\n", | ||
412 | __func__, status); | ||
413 | } | ||
414 | |||
415 | /* Free channel manager object: */ | ||
416 | kfree(chnl_mgr_obj->channels); | ||
417 | |||
418 | /* Set hchnl_mgr to NULL in device object. */ | ||
419 | dev_set_chnl_mgr(chnl_mgr_obj->dev_obj, NULL); | ||
420 | /* Free this Chnl Mgr object: */ | ||
421 | kfree(hchnl_mgr); | ||
422 | } else { | ||
423 | status = -EFAULT; | ||
424 | } | ||
425 | return status; | ||
426 | } | ||
427 | |||
428 | /* | ||
429 | * ======== bridge_chnl_flush_io ======== | ||
430 | * Purpose: | ||
431 | * Flushes all the outstanding data requests on a channel. | ||
432 | */ | ||
433 | int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout) | ||
434 | { | ||
435 | int status = 0; | ||
436 | struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; | ||
437 | s8 chnl_mode = -1; | ||
438 | struct chnl_mgr *chnl_mgr_obj; | ||
439 | struct chnl_ioc chnl_ioc_obj; | ||
440 | /* Check args: */ | ||
441 | if (pchnl) { | ||
442 | if ((timeout == CHNL_IOCNOWAIT) | ||
443 | && CHNL_IS_OUTPUT(pchnl->chnl_mode)) { | ||
444 | status = -EINVAL; | ||
445 | } else { | ||
446 | chnl_mode = pchnl->chnl_mode; | ||
447 | chnl_mgr_obj = pchnl->chnl_mgr_obj; | ||
448 | } | ||
449 | } else { | ||
450 | status = -EFAULT; | ||
451 | } | ||
452 | if (!status) { | ||
453 | /* Note: Currently, if another thread continues to add IO | ||
454 | * requests to this channel, this function will continue to | ||
455 | * flush all such queued IO requests. */ | ||
456 | if (CHNL_IS_OUTPUT(chnl_mode) | ||
457 | && (pchnl->chnl_type == CHNL_PCPY)) { | ||
458 | /* Wait for IO completions, up to the specified | ||
459 | * timeout: */ | ||
460 | while (!list_empty(&pchnl->io_requests) && !status) { | ||
461 | status = bridge_chnl_get_ioc(chnl_obj, | ||
462 | timeout, &chnl_ioc_obj); | ||
463 | if (status) | ||
464 | continue; | ||
465 | |||
466 | if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT) | ||
467 | status = -ETIMEDOUT; | ||
468 | |||
469 | } | ||
470 | } else { | ||
471 | status = bridge_chnl_cancel_io(chnl_obj); | ||
472 | /* Now, leave the channel in the ready state: */ | ||
473 | pchnl->state &= ~CHNL_STATECANCEL; | ||
474 | } | ||
475 | } | ||
476 | return status; | ||
477 | } | ||
478 | |||
479 | /* | ||
480 | * ======== bridge_chnl_get_info ======== | ||
481 | * Purpose: | ||
482 | * Retrieve information related to a channel. | ||
483 | */ | ||
484 | int bridge_chnl_get_info(struct chnl_object *chnl_obj, | ||
485 | struct chnl_info *channel_info) | ||
486 | { | ||
487 | int status = 0; | ||
488 | struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; | ||
489 | |||
490 | if (channel_info != NULL) { | ||
491 | if (pchnl) { | ||
492 | /* Return the requested information: */ | ||
493 | channel_info->chnl_mgr = pchnl->chnl_mgr_obj; | ||
494 | channel_info->event_obj = pchnl->user_event; | ||
495 | channel_info->cnhl_id = pchnl->chnl_id; | ||
496 | channel_info->mode = pchnl->chnl_mode; | ||
497 | channel_info->bytes_tx = pchnl->bytes_moved; | ||
498 | channel_info->process = pchnl->process; | ||
499 | channel_info->sync_event = pchnl->sync_event; | ||
500 | channel_info->cio_cs = pchnl->cio_cs; | ||
501 | channel_info->cio_reqs = pchnl->cio_reqs; | ||
502 | channel_info->state = pchnl->state; | ||
503 | } else { | ||
504 | status = -EFAULT; | ||
505 | } | ||
506 | } else { | ||
507 | status = -EFAULT; | ||
508 | } | ||
509 | return status; | ||
510 | } | ||
511 | |||
512 | /* | ||
513 | * ======== bridge_chnl_get_ioc ======== | ||
514 | * Optionally wait for I/O completion on a channel. Dequeue an I/O | ||
515 | * completion record, which contains information about the completed | ||
516 | * I/O request. | ||
517 | * Note: Ensures Channel Invariant (see notes above). | ||
518 | */ | ||
519 | int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout, | ||
520 | struct chnl_ioc *chan_ioc) | ||
521 | { | ||
522 | int status = 0; | ||
523 | struct chnl_object *pchnl = (struct chnl_object *)chnl_obj; | ||
524 | struct chnl_irp *chnl_packet_obj; | ||
525 | int stat_sync; | ||
526 | bool dequeue_ioc = true; | ||
527 | struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 }; | ||
528 | u8 *host_sys_buf = NULL; | ||
529 | struct bridge_dev_context *dev_ctxt; | ||
530 | struct dev_object *dev_obj; | ||
531 | |||
532 | /* Check args: */ | ||
533 | if (!chan_ioc || !pchnl) { | ||
534 | status = -EFAULT; | ||
535 | } else if (timeout == CHNL_IOCNOWAIT) { | ||
536 | if (list_empty(&pchnl->io_completions)) | ||
537 | status = -EREMOTEIO; | ||
538 | |||
539 | } | ||
540 | |||
541 | dev_obj = dev_get_first(); | ||
542 | dev_get_bridge_context(dev_obj, &dev_ctxt); | ||
543 | if (!dev_ctxt) | ||
544 | status = -EFAULT; | ||
545 | |||
546 | if (status) | ||
547 | goto func_end; | ||
548 | |||
549 | ioc.status = CHNL_IOCSTATCOMPLETE; | ||
550 | if (timeout != | ||
551 | CHNL_IOCNOWAIT && list_empty(&pchnl->io_completions)) { | ||
552 | if (timeout == CHNL_IOCINFINITE) | ||
553 | timeout = SYNC_INFINITE; | ||
554 | |||
555 | stat_sync = sync_wait_on_event(pchnl->sync_event, timeout); | ||
556 | if (stat_sync == -ETIME) { | ||
557 | /* No response from DSP */ | ||
558 | ioc.status |= CHNL_IOCSTATTIMEOUT; | ||
559 | dequeue_ioc = false; | ||
560 | } else if (stat_sync == -EPERM) { | ||
561 | /* This can occur when the user mode thread is | ||
562 | * aborted (^C), or when _VWIN32_WaitSingleObject() | ||
563 | * fails due to unknown causes. */ | ||
564 | /* Even though Wait failed, there may be something in | ||
565 | * the Q: */ | ||
566 | if (list_empty(&pchnl->io_completions)) { | ||
567 | ioc.status |= CHNL_IOCSTATCANCEL; | ||
568 | dequeue_ioc = false; | ||
569 | } | ||
570 | } | ||
571 | } | ||
572 | /* See comment in AddIOReq */ | ||
573 | spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock); | ||
574 | omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX); | ||
575 | if (dequeue_ioc) { | ||
576 | /* Dequeue IOC and set chan_ioc; */ | ||
577 | chnl_packet_obj = list_first_entry(&pchnl->io_completions, | ||
578 | struct chnl_irp, link); | ||
579 | list_del(&chnl_packet_obj->link); | ||
580 | /* Update chan_ioc from channel state and chirp: */ | ||
581 | pchnl->cio_cs--; | ||
582 | /* | ||
583 | * If this is a zero-copy channel, then set IOC's pbuf | ||
584 | * to the DSP's address. This DSP address will get | ||
585 | * translated to user's virtual addr later. | ||
586 | */ | ||
587 | host_sys_buf = chnl_packet_obj->host_sys_buf; | ||
588 | ioc.buf = chnl_packet_obj->host_user_buf; | ||
589 | ioc.byte_size = chnl_packet_obj->byte_size; | ||
590 | ioc.buf_size = chnl_packet_obj->buf_size; | ||
591 | ioc.arg = chnl_packet_obj->arg; | ||
592 | ioc.status |= chnl_packet_obj->status; | ||
593 | /* Place the used chirp on the free list: */ | ||
594 | list_add_tail(&chnl_packet_obj->link, | ||
595 | &pchnl->free_packets_list); | ||
596 | } else { | ||
597 | ioc.buf = NULL; | ||
598 | ioc.byte_size = 0; | ||
599 | ioc.arg = 0; | ||
600 | ioc.buf_size = 0; | ||
601 | } | ||
602 | /* Ensure invariant: If any IOC's are queued for this channel... */ | ||
603 | if (!list_empty(&pchnl->io_completions)) { | ||
604 | /* Since DSPStream_Reclaim() does not take a timeout | ||
605 | * parameter, we pass the stream's timeout value to | ||
606 | * bridge_chnl_get_ioc. We cannot determine whether or not | ||
607 | * we have waited in user mode. Since the stream's timeout | ||
608 | * value may be non-zero, we still have to set the event. | ||
609 | * Therefore, this optimization is taken out. | ||
610 | * | ||
611 | * if (timeout == CHNL_IOCNOWAIT) { | ||
612 | * ... ensure event is set.. | ||
613 | * sync_set_event(pchnl->sync_event); | ||
614 | * } */ | ||
615 | sync_set_event(pchnl->sync_event); | ||
616 | } else { | ||
617 | /* else, if list is empty, ensure event is reset. */ | ||
618 | sync_reset_event(pchnl->sync_event); | ||
619 | } | ||
620 | omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX); | ||
621 | spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock); | ||
622 | if (dequeue_ioc | ||
623 | && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) { | ||
624 | if (!(ioc.buf < (void *)USERMODE_ADDR)) | ||
625 | goto func_cont; | ||
626 | |||
627 | /* If the addr is in user mode, then copy it */ | ||
628 | if (!host_sys_buf || !ioc.buf) { | ||
629 | status = -EFAULT; | ||
630 | goto func_cont; | ||
631 | } | ||
632 | if (!CHNL_IS_INPUT(pchnl->chnl_mode)) | ||
633 | goto func_cont1; | ||
634 | |||
635 | /*host_user_buf */ | ||
636 | status = copy_to_user(ioc.buf, host_sys_buf, ioc.byte_size); | ||
637 | if (status) { | ||
638 | if (current->flags & PF_EXITING) | ||
639 | status = 0; | ||
640 | } | ||
641 | if (status) | ||
642 | status = -EFAULT; | ||
643 | func_cont1: | ||
644 | kfree(host_sys_buf); | ||
645 | } | ||
646 | func_cont: | ||
647 | /* Update User's IOC block: */ | ||
648 | *chan_ioc = ioc; | ||
649 | func_end: | ||
650 | return status; | ||
651 | } | ||
652 | |||
653 | /* | ||
654 | * ======== bridge_chnl_get_mgr_info ======== | ||
655 | * Retrieve information related to the channel manager. | ||
656 | */ | ||
657 | int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id, | ||
658 | struct chnl_mgrinfo *mgr_info) | ||
659 | { | ||
660 | struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr; | ||
661 | |||
662 | if (!mgr_info || !hchnl_mgr) | ||
663 | return -EFAULT; | ||
664 | |||
665 | if (ch_id > CHNL_MAXCHANNELS) | ||
666 | return -ECHRNG; | ||
667 | |||
668 | /* Return the requested information: */ | ||
669 | mgr_info->chnl_obj = chnl_mgr_obj->channels[ch_id]; | ||
670 | mgr_info->open_channels = chnl_mgr_obj->open_channels; | ||
671 | mgr_info->type = chnl_mgr_obj->type; | ||
672 | /* total # of chnls */ | ||
673 | mgr_info->max_channels = chnl_mgr_obj->max_channels; | ||
674 | |||
675 | return 0; | ||
676 | } | ||
677 | |||
678 | /* | ||
679 | * ======== bridge_chnl_idle ======== | ||
680 | * Idles a particular channel. | ||
681 | */ | ||
682 | int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout, | ||
683 | bool flush_data) | ||
684 | { | ||
685 | s8 chnl_mode; | ||
686 | struct chnl_mgr *chnl_mgr_obj; | ||
687 | int status = 0; | ||
688 | |||
689 | chnl_mode = chnl_obj->chnl_mode; | ||
690 | chnl_mgr_obj = chnl_obj->chnl_mgr_obj; | ||
691 | |||
692 | if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) { | ||
693 | /* Wait for IO completions, up to the specified timeout: */ | ||
694 | status = bridge_chnl_flush_io(chnl_obj, timeout); | ||
695 | } else { | ||
696 | status = bridge_chnl_cancel_io(chnl_obj); | ||
697 | |||
698 | /* Reset the byte count and put channel back in ready state. */ | ||
699 | chnl_obj->bytes_moved = 0; | ||
700 | chnl_obj->state &= ~CHNL_STATECANCEL; | ||
701 | } | ||
702 | |||
703 | return status; | ||
704 | } | ||
705 | |||
706 | /* | ||
707 | * ======== bridge_chnl_open ======== | ||
708 | * Open a new half-duplex channel to the DSP board. | ||
709 | */ | ||
710 | int bridge_chnl_open(struct chnl_object **chnl, | ||
711 | struct chnl_mgr *hchnl_mgr, s8 chnl_mode, | ||
712 | u32 ch_id, const struct chnl_attr *pattrs) | ||
713 | { | ||
714 | int status = 0; | ||
715 | struct chnl_mgr *chnl_mgr_obj = hchnl_mgr; | ||
716 | struct chnl_object *pchnl = NULL; | ||
717 | struct sync_object *sync_event = NULL; | ||
718 | |||
719 | *chnl = NULL; | ||
720 | |||
721 | /* Validate Args: */ | ||
722 | if (!pattrs->uio_reqs) | ||
723 | return -EINVAL; | ||
724 | |||
725 | if (!hchnl_mgr) | ||
726 | return -EFAULT; | ||
727 | |||
728 | if (ch_id != CHNL_PICKFREE) { | ||
729 | if (ch_id >= chnl_mgr_obj->max_channels) | ||
730 | return -ECHRNG; | ||
731 | if (chnl_mgr_obj->channels[ch_id] != NULL) | ||
732 | return -EALREADY; | ||
733 | } else { | ||
734 | /* Check for free channel */ | ||
735 | status = search_free_channel(chnl_mgr_obj, &ch_id); | ||
736 | if (status) | ||
737 | return status; | ||
738 | } | ||
739 | |||
740 | |||
741 | /* Create channel object: */ | ||
742 | pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL); | ||
743 | if (!pchnl) | ||
744 | return -ENOMEM; | ||
745 | |||
746 | /* Protect queues from io_dpc: */ | ||
747 | pchnl->state = CHNL_STATECANCEL; | ||
748 | |||
749 | /* Allocate initial IOR and IOC queues: */ | ||
750 | status = create_chirp_list(&pchnl->free_packets_list, | ||
751 | pattrs->uio_reqs); | ||
752 | if (status) | ||
753 | goto out_err; | ||
754 | |||
755 | INIT_LIST_HEAD(&pchnl->io_requests); | ||
756 | INIT_LIST_HEAD(&pchnl->io_completions); | ||
757 | |||
758 | pchnl->chnl_packets = pattrs->uio_reqs; | ||
759 | pchnl->cio_cs = 0; | ||
760 | pchnl->cio_reqs = 0; | ||
761 | |||
762 | sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL); | ||
763 | if (!sync_event) { | ||
764 | status = -ENOMEM; | ||
765 | goto out_err; | ||
766 | } | ||
767 | sync_init_event(sync_event); | ||
768 | |||
769 | pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); | ||
770 | if (!pchnl->ntfy_obj) { | ||
771 | status = -ENOMEM; | ||
772 | goto out_err; | ||
773 | } | ||
774 | ntfy_init(pchnl->ntfy_obj); | ||
775 | |||
776 | /* Initialize CHNL object fields: */ | ||
777 | pchnl->chnl_mgr_obj = chnl_mgr_obj; | ||
778 | pchnl->chnl_id = ch_id; | ||
779 | pchnl->chnl_mode = chnl_mode; | ||
780 | pchnl->user_event = sync_event; | ||
781 | pchnl->sync_event = sync_event; | ||
782 | /* Get the process handle */ | ||
783 | pchnl->process = current->tgid; | ||
784 | pchnl->cb_arg = 0; | ||
785 | pchnl->bytes_moved = 0; | ||
786 | /* Default to proc-copy */ | ||
787 | pchnl->chnl_type = CHNL_PCPY; | ||
788 | |||
789 | /* Insert channel object in channel manager: */ | ||
790 | chnl_mgr_obj->channels[pchnl->chnl_id] = pchnl; | ||
791 | spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock); | ||
792 | chnl_mgr_obj->open_channels++; | ||
793 | spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock); | ||
794 | /* Return result... */ | ||
795 | pchnl->state = CHNL_STATEREADY; | ||
796 | *chnl = pchnl; | ||
797 | |||
798 | return status; | ||
799 | |||
800 | out_err: | ||
801 | /* Free memory */ | ||
802 | free_chirp_list(&pchnl->io_completions); | ||
803 | free_chirp_list(&pchnl->io_requests); | ||
804 | free_chirp_list(&pchnl->free_packets_list); | ||
805 | |||
806 | kfree(sync_event); | ||
807 | |||
808 | if (pchnl->ntfy_obj) { | ||
809 | ntfy_delete(pchnl->ntfy_obj); | ||
810 | kfree(pchnl->ntfy_obj); | ||
811 | pchnl->ntfy_obj = NULL; | ||
812 | } | ||
813 | kfree(pchnl); | ||
814 | |||
815 | return status; | ||
816 | } | ||
817 | |||
818 | /* | ||
819 | * ======== bridge_chnl_register_notify ======== | ||
820 | * Registers for events on a particular channel. | ||
821 | */ | ||
822 | int bridge_chnl_register_notify(struct chnl_object *chnl_obj, | ||
823 | u32 event_mask, u32 notify_type, | ||
824 | struct dsp_notification *hnotification) | ||
825 | { | ||
826 | int status = 0; | ||
827 | |||
828 | |||
829 | if (event_mask) | ||
830 | status = ntfy_register(chnl_obj->ntfy_obj, hnotification, | ||
831 | event_mask, notify_type); | ||
832 | else | ||
833 | status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification); | ||
834 | |||
835 | return status; | ||
836 | } | ||
837 | |||
838 | /* | ||
839 | * ======== create_chirp_list ======== | ||
840 | * Purpose: | ||
841 | * Initialize a queue of channel I/O Request/Completion packets. | ||
842 | * Parameters: | ||
843 | * list: Pointer to a list_head | ||
844 | * chirps: Number of Chirps to allocate. | ||
845 | * Returns: | ||
846 | * 0 if successful, error code otherwise. | ||
847 | * Requires: | ||
848 | * Ensures: | ||
849 | */ | ||
850 | static int create_chirp_list(struct list_head *list, u32 chirps) | ||
851 | { | ||
852 | struct chnl_irp *chirp; | ||
853 | u32 i; | ||
854 | |||
855 | INIT_LIST_HEAD(list); | ||
856 | |||
857 | /* Make N chirps and place on queue. */ | ||
858 | for (i = 0; i < chirps; i++) { | ||
859 | chirp = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL); | ||
860 | if (!chirp) | ||
861 | break; | ||
862 | list_add_tail(&chirp->link, list); | ||
863 | } | ||
864 | |||
865 | /* If we couldn't allocate all chirps, free those allocated: */ | ||
866 | if (i != chirps) { | ||
867 | free_chirp_list(list); | ||
868 | return -ENOMEM; | ||
869 | } | ||
870 | |||
871 | return 0; | ||
872 | } | ||
873 | |||
874 | /* | ||
875 | * ======== free_chirp_list ======== | ||
876 | * Purpose: | ||
877 | * Free the queue of Chirps. | ||
878 | */ | ||
879 | static void free_chirp_list(struct list_head *chirp_list) | ||
880 | { | ||
881 | struct chnl_irp *chirp, *tmp; | ||
882 | |||
883 | list_for_each_entry_safe(chirp, tmp, chirp_list, link) { | ||
884 | list_del(&chirp->link); | ||
885 | kfree(chirp); | ||
886 | } | ||
887 | } | ||
888 | |||
889 | /* | ||
890 | * ======== search_free_channel ======== | ||
891 | * Search for a free channel slot in the array of channel pointers. | ||
892 | */ | ||
893 | static int search_free_channel(struct chnl_mgr *chnl_mgr_obj, | ||
894 | u32 *chnl) | ||
895 | { | ||
896 | int status = -ENOSR; | ||
897 | u32 i; | ||
898 | |||
899 | for (i = 0; i < chnl_mgr_obj->max_channels; i++) { | ||
900 | if (chnl_mgr_obj->channels[i] == NULL) { | ||
901 | status = 0; | ||
902 | *chnl = i; | ||
903 | break; | ||
904 | } | ||
905 | } | ||
906 | |||
907 | return status; | ||
908 | } | ||
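For orientation while reading the removed chnl_sm.c entry points above, the following is a minimal, hypothetical usage sketch of the channel API (bridge_chnl_open, bridge_chnl_add_io_req, bridge_chnl_get_ioc, bridge_chnl_close). The channel manager, mode value, buffer and queue depth are caller-supplied and purely illustrative; this is not code from the driver being deleted.

/* Hypothetical caller of the proc-copy channel API removed above. */
static int example_chnl_roundtrip(struct chnl_mgr *mgr, s8 mode,
				  void *buf, u32 len)
{
	struct chnl_attr attrs = { .uio_reqs = 16 };	/* illustrative queue depth */
	struct chnl_object *chnl;
	struct chnl_ioc ioc;
	int status;

	/* Let the manager pick any free channel slot. */
	status = bridge_chnl_open(&chnl, mgr, mode, CHNL_PICKFREE, &attrs);
	if (status)
		return status;

	/* Queue one buffer; dw_dsp_addr and dw_arg are left at 0 in this sketch. */
	status = bridge_chnl_add_io_req(chnl, buf, len, len, 0, 0);
	if (!status)
		/* Block until the DSP completes (or cancels) the request. */
		status = bridge_chnl_get_ioc(chnl, CHNL_IOCINFINITE, &ioc);

	/* Cancels anything still pending and frees the channel slot. */
	bridge_chnl_close(chnl);
	return status;
}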
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c deleted file mode 100644 index a1aca4416ca7..000000000000 --- a/drivers/staging/tidspbridge/core/dsp-clock.c +++ /dev/null | |||
@@ -1,391 +0,0 @@ | |||
1 | /* | ||
2 | * dsp-clock.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Clock and Timer services. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #define L4_34XX_BASE 0x48000000 | ||
20 | |||
21 | #include <linux/types.h> | ||
22 | |||
23 | /* ----------------------------------- Host OS */ | ||
24 | #include <dspbridge/host_os.h> | ||
25 | #include <plat/dmtimer.h> | ||
26 | #include <linux/platform_data/asoc-ti-mcbsp.h> | ||
27 | |||
28 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
29 | #include <dspbridge/dbdefs.h> | ||
30 | #include <dspbridge/drv.h> | ||
31 | #include <dspbridge/dev.h> | ||
32 | #include "_tiomap.h" | ||
33 | |||
34 | /* ----------------------------------- This */ | ||
35 | #include <dspbridge/clk.h> | ||
36 | |||
37 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | ||
38 | |||
39 | #define OMAP_SSI_OFFSET 0x58000 | ||
40 | #define OMAP_SSI_SIZE 0x1000 | ||
41 | #define OMAP_SSI_SYSCONFIG_OFFSET 0x10 | ||
42 | |||
43 | #define SSI_AUTOIDLE (1 << 0) | ||
44 | #define SSI_SIDLE_SMARTIDLE (2 << 3) | ||
45 | #define SSI_MIDLE_NOIDLE (1 << 12) | ||
46 | |||
47 | /* Clk types requested by the dsp */ | ||
48 | #define IVA2_CLK 0 | ||
49 | #define GPT_CLK 1 | ||
50 | #define WDT_CLK 2 | ||
51 | #define MCBSP_CLK 3 | ||
52 | #define SSI_CLK 4 | ||
53 | |||
54 | /* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */ | ||
55 | #define DMT_ID(id) ((id) + 4) | ||
56 | #define DM_TIMER_CLOCKS 4 | ||
57 | |||
58 | /* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */ | ||
59 | #define MCBSP_ID(id) ((id) - 6) | ||
60 | |||
61 | static struct omap_dm_timer *timer[4]; | ||
62 | |||
63 | struct clk *iva2_clk; | ||
64 | |||
65 | struct dsp_ssi { | ||
66 | struct clk *sst_fck; | ||
67 | struct clk *ssr_fck; | ||
68 | struct clk *ick; | ||
69 | }; | ||
70 | |||
71 | static struct dsp_ssi ssi; | ||
72 | |||
73 | static u32 dsp_clocks; | ||
74 | |||
75 | static inline u32 is_dsp_clk_active(u32 clk, u8 id) | ||
76 | { | ||
77 | return clk & (1 << id); | ||
78 | } | ||
79 | |||
80 | static inline void set_dsp_clk_active(u32 *clk, u8 id) | ||
81 | { | ||
82 | *clk |= (1 << id); | ||
83 | } | ||
84 | |||
85 | static inline void set_dsp_clk_inactive(u32 *clk, u8 id) | ||
86 | { | ||
87 | *clk &= ~(1 << id); | ||
88 | } | ||
89 | |||
90 | static s8 get_clk_type(u8 id) | ||
91 | { | ||
92 | s8 type; | ||
93 | |||
94 | if (id == DSP_CLK_IVA2) | ||
95 | type = IVA2_CLK; | ||
96 | else if (id <= DSP_CLK_GPT8) | ||
97 | type = GPT_CLK; | ||
98 | else if (id == DSP_CLK_WDT3) | ||
99 | type = WDT_CLK; | ||
100 | else if (id <= DSP_CLK_MCBSP5) | ||
101 | type = MCBSP_CLK; | ||
102 | else if (id == DSP_CLK_SSI) | ||
103 | type = SSI_CLK; | ||
104 | else | ||
105 | type = -1; | ||
106 | |||
107 | return type; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * ======== dsp_clk_exit ======== | ||
112 | * Purpose: | ||
113 | * Cleanup CLK module. | ||
114 | */ | ||
115 | void dsp_clk_exit(void) | ||
116 | { | ||
117 | int i; | ||
118 | |||
119 | dsp_clock_disable_all(dsp_clocks); | ||
120 | |||
121 | for (i = 0; i < DM_TIMER_CLOCKS; i++) | ||
122 | omap_dm_timer_free(timer[i]); | ||
123 | |||
124 | clk_unprepare(iva2_clk); | ||
125 | clk_put(iva2_clk); | ||
126 | clk_unprepare(ssi.sst_fck); | ||
127 | clk_put(ssi.sst_fck); | ||
128 | clk_unprepare(ssi.ssr_fck); | ||
129 | clk_put(ssi.ssr_fck); | ||
130 | clk_unprepare(ssi.ick); | ||
131 | clk_put(ssi.ick); | ||
132 | } | ||
133 | |||
134 | /* | ||
135 | * ======== dsp_clk_init ======== | ||
136 | * Purpose: | ||
137 | * Initialize CLK module. | ||
138 | */ | ||
139 | void dsp_clk_init(void) | ||
140 | { | ||
141 | static struct platform_device dspbridge_device; | ||
142 | int i, id; | ||
143 | |||
144 | dspbridge_device.dev.bus = &platform_bus_type; | ||
145 | |||
146 | for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++) | ||
147 | timer[i] = omap_dm_timer_request_specific(id); | ||
148 | |||
149 | iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck"); | ||
150 | if (IS_ERR(iva2_clk)) | ||
151 | dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk); | ||
152 | else | ||
153 | clk_prepare(iva2_clk); | ||
154 | |||
155 | ssi.sst_fck = clk_get(&dspbridge_device.dev, "ssi_sst_fck"); | ||
156 | ssi.ssr_fck = clk_get(&dspbridge_device.dev, "ssi_ssr_fck"); | ||
157 | ssi.ick = clk_get(&dspbridge_device.dev, "ssi_ick"); | ||
158 | |||
159 | if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick)) { | ||
160 | dev_err(bridge, "failed to get ssi: sst %p, ssr %p, ick %p\n", | ||
161 | ssi.sst_fck, ssi.ssr_fck, ssi.ick); | ||
162 | } else { | ||
163 | clk_prepare(ssi.sst_fck); | ||
164 | clk_prepare(ssi.ssr_fck); | ||
165 | clk_prepare(ssi.ick); | ||
166 | } | ||
167 | } | ||
168 | |||
169 | /** | ||
170 | * dsp_gpt_wait_overflow - set gpt overflow and wait for fixed timeout | ||
171 | * @clk_id: GP Timer clock id. | ||
172 | * @load: Overflow value. | ||
173 | * | ||
174 | * Enables the overflow interrupt for the requested GPT and waits up to | ||
175 | * 5 msecs for the interrupt to occur. | ||
176 | */ | ||
177 | void dsp_gpt_wait_overflow(short int clk_id, unsigned int load) | ||
178 | { | ||
179 | struct omap_dm_timer *gpt = timer[clk_id - 1]; | ||
180 | unsigned long timeout; | ||
181 | |||
182 | if (!gpt) | ||
183 | return; | ||
184 | |||
185 | /* Enable overflow interrupt */ | ||
186 | omap_dm_timer_set_int_enable(gpt, OMAP_TIMER_INT_OVERFLOW); | ||
187 | |||
188 | /* | ||
189 | * Set counter value to overflow counter after | ||
190 | * one tick and start timer. | ||
191 | */ | ||
192 | omap_dm_timer_set_load_start(gpt, 0, load); | ||
193 | |||
194 | /* Wait 80us for timer to overflow */ | ||
195 | udelay(80); | ||
196 | |||
197 | timeout = jiffies + msecs_to_jiffies(5); | ||
198 | /* Check interrupt status and wait for interrupt */ | ||
199 | while (!(omap_dm_timer_read_status(gpt) & OMAP_TIMER_INT_OVERFLOW)) { | ||
200 | if (time_is_before_jiffies(timeout)) { | ||
201 | pr_err("%s: GPTimer interrupt failed\n", __func__); | ||
202 | break; | ||
203 | } | ||
204 | } | ||
205 | } | ||
206 | |||
207 | /* | ||
208 | * ======== dsp_clk_enable ======== | ||
209 | * Purpose: | ||
210 | * Enable Clock . | ||
211 | * | ||
212 | */ | ||
213 | int dsp_clk_enable(enum dsp_clk_id clk_id) | ||
214 | { | ||
215 | int status = 0; | ||
216 | |||
217 | if (is_dsp_clk_active(dsp_clocks, clk_id)) { | ||
218 | dev_err(bridge, "WARN: clock id %d already enabled\n", clk_id); | ||
219 | goto out; | ||
220 | } | ||
221 | |||
222 | switch (get_clk_type(clk_id)) { | ||
223 | case IVA2_CLK: | ||
224 | clk_enable(iva2_clk); | ||
225 | break; | ||
226 | case GPT_CLK: | ||
227 | status = omap_dm_timer_start(timer[clk_id - 1]); | ||
228 | break; | ||
229 | #ifdef CONFIG_SND_OMAP_SOC_MCBSP | ||
230 | case MCBSP_CLK: | ||
231 | omap_mcbsp_request(MCBSP_ID(clk_id)); | ||
232 | omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC); | ||
233 | break; | ||
234 | #endif | ||
235 | case WDT_CLK: | ||
236 | dev_err(bridge, "ERROR: DSP requested to enable WDT3 clk\n"); | ||
237 | break; | ||
238 | case SSI_CLK: | ||
239 | clk_enable(ssi.sst_fck); | ||
240 | clk_enable(ssi.ssr_fck); | ||
241 | clk_enable(ssi.ick); | ||
242 | |||
243 | /* | ||
244 | * The SSI module needs to be configured so that its master | ||
245 | * interface is not forced idle. If it is forced idle, the SSI | ||
246 | * module transitions to standby, which leaves the DSP-side client | ||
247 | * hanging while it waits for the SSI module to become active | ||
248 | * after the clocks are enabled. | ||
249 | */ | ||
250 | ssi_clk_prepare(true); | ||
251 | break; | ||
252 | default: | ||
253 | dev_err(bridge, "Invalid clock id for enable\n"); | ||
254 | status = -EPERM; | ||
255 | } | ||
256 | |||
257 | if (!status) | ||
258 | set_dsp_clk_active(&dsp_clocks, clk_id); | ||
259 | |||
260 | out: | ||
261 | return status; | ||
262 | } | ||
263 | |||
264 | /** | ||
265 | * dsp_clock_enable_all - Enable clocks used by the DSP | ||
266 | * @dsp_per_clocks: Bitmask of the DSP peripheral clocks to enable | ||
267 | * | ||
268 | * This function enables all the peripheral clocks that were requested by DSP. | ||
269 | */ | ||
270 | u32 dsp_clock_enable_all(u32 dsp_per_clocks) | ||
271 | { | ||
272 | u32 clk_id; | ||
273 | u32 status = -EPERM; | ||
274 | |||
275 | for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) { | ||
276 | if (is_dsp_clk_active(dsp_per_clocks, clk_id)) | ||
277 | status = dsp_clk_enable(clk_id); | ||
278 | } | ||
279 | |||
280 | return status; | ||
281 | } | ||
282 | |||
283 | /* | ||
284 | * ======== dsp_clk_disable ======== | ||
285 | * Purpose: | ||
286 | * Disable the clock. | ||
287 | * | ||
288 | */ | ||
289 | int dsp_clk_disable(enum dsp_clk_id clk_id) | ||
290 | { | ||
291 | int status = 0; | ||
292 | |||
293 | if (!is_dsp_clk_active(dsp_clocks, clk_id)) { | ||
294 | dev_err(bridge, "ERR: clock id %d already disabled\n", clk_id); | ||
295 | goto out; | ||
296 | } | ||
297 | |||
298 | switch (get_clk_type(clk_id)) { | ||
299 | case IVA2_CLK: | ||
300 | clk_disable(iva2_clk); | ||
301 | break; | ||
302 | case GPT_CLK: | ||
303 | status = omap_dm_timer_stop(timer[clk_id - 1]); | ||
304 | break; | ||
305 | #ifdef CONFIG_SND_OMAP_SOC_MCBSP | ||
306 | case MCBSP_CLK: | ||
307 | omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PRCM_SRC); | ||
308 | omap_mcbsp_free(MCBSP_ID(clk_id)); | ||
309 | break; | ||
310 | #endif | ||
311 | case WDT_CLK: | ||
312 | dev_err(bridge, "ERROR: DSP requested to disable WDT3 clk\n"); | ||
313 | break; | ||
314 | case SSI_CLK: | ||
315 | ssi_clk_prepare(false); | ||
317 | clk_disable(ssi.sst_fck); | ||
318 | clk_disable(ssi.ssr_fck); | ||
319 | clk_disable(ssi.ick); | ||
320 | break; | ||
321 | default: | ||
322 | dev_err(bridge, "Invalid clock id for disable\n"); | ||
323 | status = -EPERM; | ||
324 | } | ||
325 | |||
326 | if (!status) | ||
327 | set_dsp_clk_inactive(&dsp_clocks, clk_id); | ||
328 | |||
329 | out: | ||
330 | return status; | ||
331 | } | ||
332 | |||
333 | /** | ||
334 | * dsp_clock_disable_all - Disable all active clocks | ||
335 | * @dsp_per_clocks: Bitmask of the active DSP peripheral clocks to disable | ||
336 | * | ||
337 | * This function disables all the peripheral clocks that were enabled by DSP. | ||
338 | * It is meant to be called only when DSP is entering hibernation or when DSP | ||
339 | * is in error state. | ||
340 | */ | ||
341 | u32 dsp_clock_disable_all(u32 dsp_per_clocks) | ||
342 | { | ||
343 | u32 clk_id; | ||
344 | u32 status = -EPERM; | ||
345 | |||
346 | for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) { | ||
347 | if (is_dsp_clk_active(dsp_per_clocks, clk_id)) | ||
348 | status = dsp_clk_disable(clk_id); | ||
349 | } | ||
350 | |||
351 | return status; | ||
352 | } | ||
353 | |||
354 | u32 dsp_clk_get_iva2_rate(void) | ||
355 | { | ||
356 | u32 clk_speed_khz; | ||
357 | |||
358 | clk_speed_khz = clk_get_rate(iva2_clk); | ||
359 | clk_speed_khz /= 1000; | ||
360 | dev_dbg(bridge, "%s: clk speed Khz = %d\n", __func__, clk_speed_khz); | ||
361 | |||
362 | return clk_speed_khz; | ||
363 | } | ||
364 | |||
365 | void ssi_clk_prepare(bool enable) | ||
366 | { | ||
367 | void __iomem *ssi_base; | ||
368 | unsigned int value; | ||
369 | |||
370 | ssi_base = ioremap(L4_34XX_BASE + OMAP_SSI_OFFSET, OMAP_SSI_SIZE); | ||
371 | if (!ssi_base) { | ||
372 | pr_err("%s: error, SSI not configured\n", __func__); | ||
373 | return; | ||
374 | } | ||
375 | |||
376 | if (enable) { | ||
377 | /* Set Autoidle, SIDLEMode to smart idle, and MIDLEmode to | ||
378 | * no idle | ||
379 | */ | ||
380 | value = SSI_AUTOIDLE | SSI_SIDLE_SMARTIDLE | SSI_MIDLE_NOIDLE; | ||
381 | } else { | ||
382 | /* Set Autoidle, SIDLEMode to forced idle, and MIDLEmode to | ||
383 | * forced idle | ||
384 | */ | ||
385 | value = SSI_AUTOIDLE; | ||
386 | } | ||
387 | |||
388 | __raw_writel(value, ssi_base + OMAP_SSI_SYSCONFIG_OFFSET); | ||
389 | iounmap(ssi_base); | ||
390 | } | ||
391 | |||
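As a brief illustration of the bookkeeping in dsp-clock.c above, here is a hypothetical caller sketch for the _all() helpers. The mask value is illustrative only; in the driver it would be the set of peripheral clocks the DSP actually requested at run time (DSP_CLK_IVA2 and DSP_CLK_SSI are used here simply because they appear in get_clk_type()).

/* Hypothetical power-transition sketch, not part of the deleted file. */
static void example_dsp_power_cycle(void)
{
	u32 dsp_per_clocks = (1 << DSP_CLK_IVA2) | (1 << DSP_CLK_SSI);

	dsp_clock_enable_all(dsp_per_clocks);	/* wake-up: restore DSP clocks */
	/* ... DSP runs ... */
	dsp_clock_disable_all(dsp_per_clocks);	/* hibernation or error state */
}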
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c deleted file mode 100644 index 42f94e157efd..000000000000 --- a/drivers/staging/tidspbridge/core/io_sm.c +++ /dev/null | |||
@@ -1,2246 +0,0 @@ | |||
1 | /* | ||
2 | * io_sm.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * IO dispatcher for a shared memory channel driver. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | /* | ||
20 | * Channel Invariant: | ||
21 | * An important invariant must be maintained per channel outside of | ||
22 | * bridge_chnl_get_ioc() and IO_Dispatch(): the channel's sync_event is | ||
23 | * set if and only if its I/O completion list is non-empty. Violating it | ||
24 | * may cause timeouts and/or failure of the sync_wait_on_event function. | ||
25 | */ | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/list.h> | ||
28 | |||
29 | /* Host OS */ | ||
30 | #include <dspbridge/host_os.h> | ||
31 | #include <linux/workqueue.h> | ||
32 | |||
33 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
34 | #include <dspbridge/dbdefs.h> | ||
35 | |||
36 | /* Services Layer */ | ||
37 | #include <dspbridge/ntfy.h> | ||
38 | #include <dspbridge/sync.h> | ||
39 | |||
40 | /* Hardware Abstraction Layer */ | ||
41 | #include <hw_defs.h> | ||
42 | #include <hw_mmu.h> | ||
43 | |||
44 | /* Bridge Driver */ | ||
45 | #include <dspbridge/dspdeh.h> | ||
46 | #include <dspbridge/dspio.h> | ||
47 | #include <dspbridge/dspioctl.h> | ||
48 | #include <dspbridge/wdt.h> | ||
49 | #include <_tiomap.h> | ||
50 | #include <tiomap_io.h> | ||
51 | #include <_tiomap_pwr.h> | ||
52 | |||
53 | /* Platform Manager */ | ||
54 | #include <dspbridge/cod.h> | ||
55 | #include <dspbridge/node.h> | ||
56 | #include <dspbridge/dev.h> | ||
57 | |||
58 | /* Others */ | ||
59 | #include <dspbridge/rms_sh.h> | ||
60 | #include <dspbridge/mgr.h> | ||
61 | #include <dspbridge/drv.h> | ||
62 | #include "_cmm.h" | ||
63 | #include "module_list.h" | ||
64 | |||
65 | /* This */ | ||
66 | #include <dspbridge/io_sm.h> | ||
67 | #include "_msg_sm.h" | ||
68 | |||
69 | /* Defines, Data Structures, Typedefs */ | ||
70 | #define OUTPUTNOTREADY 0xffff | ||
71 | #define NOTENABLED 0xffff /* Channel(s) not enabled */ | ||
72 | |||
73 | #define EXTEND "_EXT_END" | ||
74 | |||
75 | #define SWAP_WORD(x) (x) | ||
76 | #define UL_PAGE_ALIGN_SIZE 0x10000 /* Page Align Size */ | ||
77 | |||
78 | #define MAX_PM_REQS 32 | ||
79 | |||
80 | #define MMU_FAULT_HEAD1 0xa5a5a5a5 | ||
81 | #define MMU_FAULT_HEAD2 0x96969696 | ||
82 | #define POLL_MAX 1000 | ||
83 | #define MAX_MMU_DBGBUFF 10240 | ||
84 | |||
85 | /* IO Manager: only one created per board */ | ||
86 | struct io_mgr { | ||
87 | /* These four fields must be the first fields in a io_mgr_ struct */ | ||
88 | /* Bridge device context */ | ||
89 | struct bridge_dev_context *bridge_context; | ||
90 | /* Function interface to Bridge driver */ | ||
91 | struct bridge_drv_interface *intf_fxns; | ||
92 | struct dev_object *dev_obj; /* Device this board represents */ | ||
93 | |||
94 | /* These fields initialized in bridge_io_create() */ | ||
95 | struct chnl_mgr *chnl_mgr; | ||
96 | struct shm *shared_mem; /* Shared Memory control */ | ||
97 | u8 *input; /* Address of input channel */ | ||
98 | u8 *output; /* Address of output channel */ | ||
99 | struct msg_mgr *msg_mgr; /* Message manager */ | ||
100 | /* Msg control for from DSP messages */ | ||
101 | struct msg_ctrl *msg_input_ctrl; | ||
102 | /* Msg control for to DSP messages */ | ||
103 | struct msg_ctrl *msg_output_ctrl; | ||
104 | u8 *msg_input; /* Address of input messages */ | ||
105 | u8 *msg_output; /* Address of output messages */ | ||
106 | u32 sm_buf_size; /* Size of a shared memory I/O channel */ | ||
107 | bool shared_irq; /* Is this IRQ shared? */ | ||
108 | u32 word_size; /* Size in bytes of DSP word */ | ||
109 | u16 intr_val; /* Interrupt value */ | ||
110 | /* Private extnd proc info; mmu setup */ | ||
111 | struct mgr_processorextinfo ext_proc_info; | ||
112 | struct cmm_object *cmm_mgr; /* Shared Mem Mngr */ | ||
113 | struct work_struct io_workq; /* workqueue */ | ||
114 | #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) | ||
115 | u32 trace_buffer_begin; /* Trace message start address */ | ||
116 | u32 trace_buffer_end; /* Trace message end address */ | ||
117 | u32 trace_buffer_current; /* Trace message current address */ | ||
118 | u32 gpp_read_pointer; /* GPP Read pointer to Trace buffer */ | ||
119 | u8 *msg; | ||
120 | u32 gpp_va; | ||
121 | u32 dsp_va; | ||
122 | #endif | ||
123 | /* IO Dpc */ | ||
124 | u32 dpc_req; /* Number of requested DPC's. */ | ||
125 | u32 dpc_sched; /* Number of executed DPC's. */ | ||
126 | struct tasklet_struct dpc_tasklet; | ||
127 | spinlock_t dpc_lock; | ||
128 | |||
129 | }; | ||
130 | |||
131 | struct shm_symbol_val { | ||
132 | u32 shm_base; | ||
133 | u32 shm_lim; | ||
134 | u32 msg_base; | ||
135 | u32 msg_lim; | ||
136 | u32 shm0_end; | ||
137 | u32 dyn_ext; | ||
138 | u32 ext_end; | ||
139 | }; | ||
140 | |||
141 | /* Function Prototypes */ | ||
142 | static void io_dispatch_pm(struct io_mgr *pio_mgr); | ||
143 | static void notify_chnl_complete(struct chnl_object *pchnl, | ||
144 | struct chnl_irp *chnl_packet_obj); | ||
145 | static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl, | ||
146 | u8 io_mode); | ||
147 | static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl, | ||
148 | u8 io_mode); | ||
149 | static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr); | ||
150 | static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr); | ||
151 | static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj, | ||
152 | struct chnl_object *pchnl, u32 mask); | ||
153 | |||
154 | /* Bus Addr (cached kernel) */ | ||
155 | static int register_shm_segs(struct io_mgr *hio_mgr, | ||
156 | struct cod_manager *cod_man, | ||
157 | u32 dw_gpp_base_pa); | ||
158 | |||
159 | static inline void set_chnl_free(struct shm *sm, u32 chnl) | ||
160 | { | ||
161 | sm->host_free_mask &= ~(1 << chnl); | ||
162 | } | ||
163 | |||
164 | static inline void set_chnl_busy(struct shm *sm, u32 chnl) | ||
165 | { | ||
166 | sm->host_free_mask |= 1 << chnl; | ||
167 | } | ||
168 | |||
169 | |||
170 | /* | ||
171 | * ======== bridge_io_create ======== | ||
172 | * Create an IO manager object. | ||
173 | */ | ||
174 | int bridge_io_create(struct io_mgr **io_man, | ||
175 | struct dev_object *hdev_obj, | ||
176 | const struct io_attrs *mgr_attrts) | ||
177 | { | ||
178 | struct io_mgr *pio_mgr = NULL; | ||
179 | struct bridge_dev_context *hbridge_context = NULL; | ||
180 | struct cfg_devnode *dev_node_obj; | ||
181 | struct chnl_mgr *hchnl_mgr; | ||
182 | u8 dev_type; | ||
183 | |||
184 | /* Check requirements */ | ||
185 | if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0) | ||
186 | return -EFAULT; | ||
187 | |||
188 | *io_man = NULL; | ||
189 | |||
190 | dev_get_chnl_mgr(hdev_obj, &hchnl_mgr); | ||
191 | if (!hchnl_mgr || hchnl_mgr->iomgr) | ||
192 | return -EFAULT; | ||
193 | |||
194 | /* | ||
195 | * Message manager will be created when a file is loaded, since | ||
196 | * size of message buffer in shared memory is configurable in | ||
197 | * the base image. | ||
198 | */ | ||
199 | dev_get_bridge_context(hdev_obj, &hbridge_context); | ||
200 | if (!hbridge_context) | ||
201 | return -EFAULT; | ||
202 | |||
203 | dev_get_dev_type(hdev_obj, &dev_type); | ||
204 | |||
205 | /* Allocate IO manager object */ | ||
206 | pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL); | ||
207 | if (!pio_mgr) | ||
208 | return -ENOMEM; | ||
209 | |||
210 | /* Initialize chnl_mgr object */ | ||
211 | pio_mgr->chnl_mgr = hchnl_mgr; | ||
212 | pio_mgr->word_size = mgr_attrts->word_size; | ||
213 | |||
214 | if (dev_type == DSP_UNIT) { | ||
215 | /* Create an IO DPC */ | ||
216 | tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr); | ||
217 | |||
218 | /* Initialize DPC counters */ | ||
219 | pio_mgr->dpc_req = 0; | ||
220 | pio_mgr->dpc_sched = 0; | ||
221 | |||
222 | spin_lock_init(&pio_mgr->dpc_lock); | ||
223 | |||
224 | if (dev_get_dev_node(hdev_obj, &dev_node_obj)) { | ||
225 | bridge_io_destroy(pio_mgr); | ||
226 | return -EIO; | ||
227 | } | ||
228 | } | ||
229 | |||
230 | pio_mgr->bridge_context = hbridge_context; | ||
231 | pio_mgr->shared_irq = mgr_attrts->irq_shared; | ||
232 | if (dsp_wdt_init()) { | ||
233 | bridge_io_destroy(pio_mgr); | ||
234 | return -EPERM; | ||
235 | } | ||
236 | |||
237 | /* Return IO manager object to caller... */ | ||
238 | hchnl_mgr->iomgr = pio_mgr; | ||
239 | *io_man = pio_mgr; | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | /* | ||
245 | * ======== bridge_io_destroy ======== | ||
246 | * Purpose: | ||
247 | * Disable interrupts, destroy the IO manager. | ||
248 | */ | ||
249 | int bridge_io_destroy(struct io_mgr *hio_mgr) | ||
250 | { | ||
251 | int status = 0; | ||
252 | |||
253 | if (hio_mgr) { | ||
254 | /* Free IO DPC object */ | ||
255 | tasklet_kill(&hio_mgr->dpc_tasklet); | ||
256 | |||
257 | #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) | ||
258 | kfree(hio_mgr->msg); | ||
259 | #endif | ||
260 | dsp_wdt_exit(); | ||
261 | /* Free this IO manager object */ | ||
262 | kfree(hio_mgr); | ||
263 | } else { | ||
264 | status = -EFAULT; | ||
265 | } | ||
266 | |||
267 | return status; | ||
268 | } | ||
269 | |||
270 | struct shm_symbol_val *_get_shm_symbol_values(struct io_mgr *hio_mgr) | ||
271 | { | ||
272 | struct shm_symbol_val *s; | ||
273 | struct cod_manager *cod_man; | ||
274 | int status; | ||
275 | |||
276 | s = kzalloc(sizeof(*s), GFP_KERNEL); | ||
277 | if (!s) | ||
278 | return ERR_PTR(-ENOMEM); | ||
279 | |||
280 | status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man); | ||
281 | if (status) | ||
282 | goto free_symbol; | ||
283 | |||
284 | /* Get start and length of channel part of shared memory */ | ||
285 | status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM, | ||
286 | &s->shm_base); | ||
287 | if (status) | ||
288 | goto free_symbol; | ||
289 | |||
290 | status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM, | ||
291 | &s->shm_lim); | ||
292 | if (status) | ||
293 | goto free_symbol; | ||
294 | |||
295 | if (s->shm_lim <= s->shm_base) { | ||
296 | status = -EINVAL; | ||
297 | goto free_symbol; | ||
298 | } | ||
299 | |||
300 | /* Get start and length of message part of shared memory */ | ||
301 | status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM, | ||
302 | &s->msg_base); | ||
303 | if (status) | ||
304 | goto free_symbol; | ||
305 | |||
306 | status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM, | ||
307 | &s->msg_lim); | ||
308 | if (status) | ||
309 | goto free_symbol; | ||
310 | |||
311 | if (s->msg_lim <= s->msg_base) { | ||
312 | status = -EINVAL; | ||
313 | goto free_symbol; | ||
314 | } | ||
315 | |||
316 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
317 | status = cod_get_sym_value(cod_man, DSP_TRACESEC_END, &s->shm0_end); | ||
318 | #else | ||
319 | status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM, &s->shm0_end); | ||
320 | #endif | ||
321 | if (status) | ||
322 | goto free_symbol; | ||
323 | |||
324 | status = cod_get_sym_value(cod_man, DYNEXTBASE, &s->dyn_ext); | ||
325 | if (status) | ||
326 | goto free_symbol; | ||
327 | |||
328 | status = cod_get_sym_value(cod_man, EXTEND, &s->ext_end); | ||
329 | if (status) | ||
330 | goto free_symbol; | ||
331 | |||
332 | return s; | ||
333 | |||
334 | free_symbol: | ||
335 | kfree(s); | ||
336 | return ERR_PTR(status); | ||
337 | } | ||
338 | |||
339 | /* | ||
340 | * ======== bridge_io_on_loaded ======== | ||
341 | * Purpose: | ||
342 | * Called when a new program is loaded to get shared memory buffer | ||
343 | * parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit | ||
344 | * are in DSP address units. | ||
345 | */ | ||
346 | int bridge_io_on_loaded(struct io_mgr *hio_mgr) | ||
347 | { | ||
348 | struct bridge_dev_context *dc = hio_mgr->bridge_context; | ||
349 | struct cfg_hostres *cfg_res = dc->resources; | ||
350 | struct bridge_ioctl_extproc *eproc; | ||
351 | struct cod_manager *cod_man; | ||
352 | struct chnl_mgr *hchnl_mgr; | ||
353 | struct msg_mgr *hmsg_mgr; | ||
354 | struct shm_symbol_val *s; | ||
355 | int status; | ||
356 | u8 num_procs; | ||
357 | s32 ndx; | ||
358 | u32 i; | ||
359 | u32 mem_sz, msg_sz, pad_sz, shm_sz, shm_base_offs; | ||
360 | u32 seg0_sz, seg1_sz; | ||
361 | u32 pa, va, da; | ||
362 | u32 pa_curr, va_curr, da_curr; | ||
363 | u32 bytes; | ||
364 | u32 all_bits = 0; | ||
365 | u32 page_size[] = { | ||
366 | HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB, | ||
367 | HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB | ||
368 | }; | ||
369 | u32 map_attrs = DSP_MAPLITTLEENDIAN | DSP_MAPPHYSICALADDR | | ||
370 | DSP_MAPELEMSIZE32 | DSP_MAPDONOTLOCK; | ||
371 | |||
372 | status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man); | ||
373 | if (status) | ||
374 | return status; | ||
375 | |||
376 | hchnl_mgr = hio_mgr->chnl_mgr; | ||
377 | |||
378 | /* The message manager is destroyed when the board is stopped */ | ||
379 | dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr); | ||
380 | hmsg_mgr = hio_mgr->msg_mgr; | ||
381 | if (!hchnl_mgr || !hmsg_mgr) | ||
382 | return -EFAULT; | ||
383 | |||
384 | if (hio_mgr->shared_mem) | ||
385 | hio_mgr->shared_mem = NULL; | ||
386 | |||
387 | s = _get_shm_symbol_values(hio_mgr); | ||
388 | if (IS_ERR(s)) | ||
389 | return PTR_ERR(s); | ||
390 | |||
391 | /* Get total length in bytes */ | ||
392 | shm_sz = (s->shm_lim - s->shm_base + 1) * hio_mgr->word_size; | ||
393 | |||
394 | /* Calculate size of a PROCCOPY shared memory region */ | ||
395 | dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n", | ||
396 | __func__, shm_sz - sizeof(struct shm)); | ||
397 | |||
398 | /* Length (bytes) of messaging part of shared memory */ | ||
399 | msg_sz = (s->msg_lim - s->msg_base + 1) * hio_mgr->word_size; | ||
400 | |||
401 | /* Total length (bytes) of shared memory: chnl + msg */ | ||
402 | mem_sz = shm_sz + msg_sz; | ||
403 | |||
404 | /* Get memory reserved in host resources */ | ||
405 | (void)mgr_enum_processor_info(0, | ||
406 | (struct dsp_processorinfo *) | ||
407 | &hio_mgr->ext_proc_info, | ||
408 | sizeof(struct mgr_processorextinfo), | ||
409 | &num_procs); | ||
410 | |||
411 | /* IO supports only one DSP for now */ | ||
412 | if (num_procs != 1) { | ||
413 | status = -EINVAL; | ||
414 | goto free_symbol; | ||
415 | } | ||
416 | |||
417 | /* The first MMU TLB entry (TLB_0) in DCD is ShmBase */ | ||
418 | pa = cfg_res->mem_phys[1]; | ||
419 | va = cfg_res->mem_base[1]; | ||
420 | |||
421 | /* This is the virtual uncached ioremapped address!!! */ | ||
422 | /* Why can't we directly take the DSPVA from the symbols? */ | ||
423 | da = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt; | ||
424 | seg0_sz = (s->shm0_end - da) * hio_mgr->word_size; | ||
425 | seg1_sz = (s->ext_end - s->dyn_ext) * hio_mgr->word_size; | ||
426 | |||
427 | /* 4K align */ | ||
428 | seg1_sz = (seg1_sz + 0xFFF) & (~0xFFFUL); | ||
429 | |||
430 | /* 64K align */ | ||
431 | seg0_sz = (seg0_sz + 0xFFFF) & (~0xFFFFUL); | ||
432 | |||
433 | pad_sz = UL_PAGE_ALIGN_SIZE - ((pa + seg1_sz) % UL_PAGE_ALIGN_SIZE); | ||
434 | if (pad_sz == UL_PAGE_ALIGN_SIZE) | ||
435 | pad_sz = 0x0; | ||
436 | |||
437 | dev_dbg(bridge, "%s: pa %x, va %x, da %x\n", __func__, pa, va, da); | ||
438 | dev_dbg(bridge, | ||
439 | "shm0_end %x, dyn_ext %x, ext_end %x, seg0_sz %x seg1_sz %x\n", | ||
440 | s->shm0_end, s->dyn_ext, s->ext_end, seg0_sz, seg1_sz); | ||
441 | |||
442 | if ((seg0_sz + seg1_sz + pad_sz) > cfg_res->mem_length[1]) { | ||
443 | pr_err("%s: shm Error, reserved 0x%x required 0x%x\n", | ||
444 | __func__, cfg_res->mem_length[1], | ||
445 | seg0_sz + seg1_sz + pad_sz); | ||
446 | status = -ENOMEM; | ||
447 | goto free_symbol; | ||
448 | } | ||
449 | |||
450 | pa_curr = pa; | ||
451 | va_curr = s->dyn_ext * hio_mgr->word_size; | ||
452 | da_curr = va; | ||
453 | bytes = seg1_sz; | ||
454 | |||
455 | /* | ||
456 | * Try to fit into TLB entries. If not possible, push them to page | ||
457 | * tables. It is quite possible that if sections are not on | ||
458 | * bigger page boundary, we may end up making several small pages. | ||
459 | * So, push them onto page tables, if that is the case. | ||
460 | */ | ||
461 | while (bytes) { | ||
462 | /* | ||
463 | * To find the max. page size with which both PA & VA are | ||
464 | * aligned. | ||
465 | */ | ||
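/*
 * Worked example (illustrative numbers only): with pa_curr = 0x87000000
 * and va_curr = 0x20000000, all_bits = 0xa7000000 has its low 24 bits
 * clear, so the 16 MB entry (HW_PAGE_SIZE16MB) is selected as long as at
 * least 16 MB remain; otherwise the loop falls back to 1 MB, 64 KB and
 * finally 4 KB mappings.
 */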
466 | all_bits = pa_curr | va_curr; | ||
467 | dev_dbg(bridge, | ||
468 | "seg all_bits %x, pa_curr %x, va_curr %x, bytes %x\n", | ||
469 | all_bits, pa_curr, va_curr, bytes); | ||
470 | |||
471 | for (i = 0; i < 4; i++) { | ||
472 | if ((bytes >= page_size[i]) && | ||
473 | ((all_bits & (page_size[i] - 1)) == 0)) { | ||
474 | status = hio_mgr->intf_fxns->brd_mem_map(dc, | ||
475 | pa_curr, va_curr, | ||
476 | page_size[i], map_attrs, | ||
477 | NULL); | ||
478 | if (status) | ||
479 | goto free_symbol; | ||
480 | |||
481 | pa_curr += page_size[i]; | ||
482 | va_curr += page_size[i]; | ||
483 | da_curr += page_size[i]; | ||
484 | bytes -= page_size[i]; | ||
485 | /* | ||
486 | * Don't try smaller sizes. Hopefully we have | ||
487 | * reached an address aligned to a bigger page | ||
488 | * size. | ||
489 | */ | ||
490 | break; | ||
491 | } | ||
492 | } | ||
493 | } | ||
494 | |||
495 | pa_curr += pad_sz; | ||
496 | va_curr += pad_sz; | ||
497 | da_curr += pad_sz; | ||
498 | bytes = seg0_sz; | ||
499 | va_curr = da * hio_mgr->word_size; | ||
500 | |||
501 | eproc = kzalloc(sizeof(*eproc) * BRDIOCTL_NUMOFMMUTLB, GFP_KERNEL); | ||
502 | if (!eproc) { | ||
503 | status = -ENOMEM; | ||
504 | goto free_symbol; | ||
505 | } | ||
506 | |||
507 | ndx = 0; | ||
508 | /* Configure the TLB entries for the next cacheable segment */ | ||
509 | while (bytes) { | ||
510 | /* | ||
511 | * To find the max. page size with which both PA & VA are | ||
512 | * aligned. | ||
513 | */ | ||
514 | all_bits = pa_curr | va_curr; | ||
515 | dev_dbg(bridge, | ||
516 | "seg1 all_bits %x, pa_curr %x, va_curr %x, bytes %x\n", | ||
517 | all_bits, pa_curr, va_curr, bytes); | ||
518 | |||
519 | for (i = 0; i < 4; i++) { | ||
520 | if (!(bytes >= page_size[i]) || | ||
521 | !((all_bits & (page_size[i] - 1)) == 0)) | ||
522 | continue; | ||
523 | |||
524 | if (ndx >= MAX_LOCK_TLB_ENTRIES) { | ||
525 | status = hio_mgr->intf_fxns->brd_mem_map(dc, | ||
526 | pa_curr, va_curr, | ||
527 | page_size[i], map_attrs, | ||
528 | NULL); | ||
529 | dev_dbg(bridge, | ||
530 | "PTE pa %x va %x dsp_va %x sz %x\n", | ||
531 | eproc[ndx].gpp_pa, | ||
532 | eproc[ndx].gpp_va, | ||
533 | eproc[ndx].dsp_va * | ||
534 | hio_mgr->word_size, page_size[i]); | ||
535 | if (status) | ||
536 | goto free_eproc; | ||
537 | } | ||
538 | |||
539 | /* This is the physical address written to DSP MMU */ | ||
540 | eproc[ndx].gpp_pa = pa_curr; | ||
541 | |||
542 | /* | ||
543 | * This is the virtual uncached ioremapped | ||
544 | * address!!! | ||
545 | */ | ||
546 | eproc[ndx].gpp_va = da_curr; | ||
547 | eproc[ndx].dsp_va = va_curr / hio_mgr->word_size; | ||
548 | eproc[ndx].size = page_size[i]; | ||
549 | eproc[ndx].endianism = HW_LITTLE_ENDIAN; | ||
550 | eproc[ndx].elem_size = HW_ELEM_SIZE16BIT; | ||
551 | eproc[ndx].mixed_mode = HW_MMU_CPUES; | ||
552 | dev_dbg(bridge, "%s: tlb pa %x va %x dsp_va %x sz %x\n", | ||
553 | __func__, eproc[ndx].gpp_pa, | ||
554 | eproc[ndx].gpp_va, | ||
555 | eproc[ndx].dsp_va * hio_mgr->word_size, | ||
556 | page_size[i]); | ||
557 | ndx++; | ||
558 | |||
559 | pa_curr += page_size[i]; | ||
560 | va_curr += page_size[i]; | ||
561 | da_curr += page_size[i]; | ||
562 | bytes -= page_size[i]; | ||
563 | /* | ||
564 | * Don't try smaller sizes. Hopefully we have reached | ||
565 | * an address aligned to a bigger page size. | ||
566 | */ | ||
567 | break; | ||
568 | } | ||
569 | } | ||
570 | |||
571 | /* | ||
572 | * Copy remaining entries from CDB. All entries are 1 MB and | ||
573 | * should not conflict with shm entries on MPU or DSP side. | ||
574 | */ | ||
575 | for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) { | ||
576 | struct mgr_processorextinfo *ep = &hio_mgr->ext_proc_info; | ||
577 | u32 word_sz = hio_mgr->word_size; | ||
578 | |||
579 | if (ep->ty_tlb[i].gpp_phys == 0) | ||
580 | continue; | ||
581 | |||
582 | if ((ep->ty_tlb[i].gpp_phys > pa - 0x100000 && | ||
583 | ep->ty_tlb[i].gpp_phys <= pa + seg0_sz) || | ||
584 | (ep->ty_tlb[i].dsp_virt > da - 0x100000 / word_sz && | ||
585 | ep->ty_tlb[i].dsp_virt <= da + seg0_sz / word_sz)) { | ||
586 | dev_dbg(bridge, | ||
587 | "err cdb%d pa %x da %x shm pa %x da %x sz %x\n", | ||
588 | i, ep->ty_tlb[i].gpp_phys, | ||
589 | ep->ty_tlb[i].dsp_virt, pa, da, seg0_sz); | ||
590 | status = -EPERM; | ||
591 | goto free_eproc; | ||
592 | } | ||
593 | |||
594 | if (ndx >= MAX_LOCK_TLB_ENTRIES) { | ||
595 | status = hio_mgr->intf_fxns->brd_mem_map(dc, | ||
596 | ep->ty_tlb[i].gpp_phys, | ||
597 | ep->ty_tlb[i].dsp_virt, | ||
598 | 0x100000, map_attrs, NULL); | ||
599 | if (status) | ||
600 | goto free_eproc; | ||
601 | } | ||
602 | |||
603 | eproc[ndx].dsp_va = ep->ty_tlb[i].dsp_virt; | ||
604 | eproc[ndx].gpp_pa = ep->ty_tlb[i].gpp_phys; | ||
605 | eproc[ndx].gpp_va = 0; | ||
606 | |||
607 | /* 1 MB */ | ||
608 | eproc[ndx].size = 0x100000; | ||
609 | dev_dbg(bridge, "shm MMU entry pa %x da 0x%x\n", | ||
610 | eproc[ndx].gpp_pa, eproc[ndx].dsp_va); | ||
611 | ndx++; | ||
612 | } | ||
613 | |||
614 | /* Map the L4 peripherals */ | ||
615 | i = 0; | ||
616 | while (l4_peripheral_table[i].phys_addr) { | ||
617 | status = hio_mgr->intf_fxns->brd_mem_map(dc, | ||
618 | l4_peripheral_table[i].phys_addr, | ||
619 | l4_peripheral_table[i].dsp_virt_addr, | ||
620 | HW_PAGE_SIZE4KB, map_attrs, NULL); | ||
621 | if (status) | ||
622 | goto free_eproc; | ||
623 | i++; | ||
624 | } | ||
625 | |||
626 | for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { | ||
627 | eproc[i].dsp_va = 0; | ||
628 | eproc[i].gpp_pa = 0; | ||
629 | eproc[i].gpp_va = 0; | ||
630 | eproc[i].size = 0; | ||
631 | } | ||
632 | |||
633 | /* | ||
634 | * Set the shm physical address entry (grayed out in CDB file) | ||
635 | * to the virtual uncached ioremapped address of shm reserved | ||
636 | * on MPU. | ||
637 | */ | ||
638 | hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys = | ||
639 | (va + seg1_sz + pad_sz); | ||
640 | |||
641 | /* | ||
642 | * Need shm Phys addr. IO supports only one DSP for now: | ||
643 | * num_procs = 1. | ||
644 | */ | ||
645 | if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys) | ||
646 | return -EFAULT; | ||
647 | |||
648 | if (eproc[0].dsp_va > s->shm_base) | ||
649 | return -EPERM; | ||
650 | |||
651 | /* shm_base may not be at ul_dsp_va address */ | ||
652 | shm_base_offs = (s->shm_base - eproc[0].dsp_va) * | ||
653 | hio_mgr->word_size; | ||
654 | /* | ||
655 | * bridge_dev_ctrl() will set dev context dsp-mmu info. In | ||
656 | * bridge_brd_start() the MMU will be re-programmed with MMU | ||
657 | * DSPVa-GPPPa pair info while DSP is in a known | ||
658 | * (reset) state. | ||
659 | */ | ||
660 | status = hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context, | ||
661 | BRDIOCTL_SETMMUCONFIG, eproc); | ||
662 | if (status) | ||
663 | goto free_eproc; | ||
664 | |||
665 | s->shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys; | ||
666 | s->shm_base += shm_base_offs; | ||
667 | s->shm_base = (u32) MEM_LINEAR_ADDRESS((void *)s->shm_base, | ||
668 | mem_sz); | ||
669 | if (!s->shm_base) { | ||
670 | status = -EFAULT; | ||
671 | goto free_eproc; | ||
672 | } | ||
673 | |||
674 | /* Register SM */ | ||
675 | status = register_shm_segs(hio_mgr, cod_man, eproc[0].gpp_pa); | ||
676 | |||
677 | hio_mgr->shared_mem = (struct shm *)s->shm_base; | ||
678 | hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm); | ||
679 | hio_mgr->output = hio_mgr->input + (shm_sz - | ||
680 | sizeof(struct shm)) / 2; | ||
681 | hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input; | ||
682 | |||
683 | /* Set up Shared memory addresses for messaging */ | ||
684 | hio_mgr->msg_input_ctrl = | ||
685 | (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem + shm_sz); | ||
686 | hio_mgr->msg_input = | ||
687 | (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl); | ||
688 | hio_mgr->msg_output_ctrl = | ||
689 | (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl + | ||
690 | msg_sz / 2); | ||
691 | hio_mgr->msg_output = | ||
692 | (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl); | ||
693 | hmsg_mgr->max_msgs = | ||
694 | ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input) / | ||
695 | sizeof(struct msg_dspmsg); | ||
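/*
 * Resulting layout of the shared region set up above (derived from
 * the pointer arithmetic, not to scale):
 *
 *   shm_base:                      struct shm | input buf | output buf
 *   shm_base + shm_sz:             msg_input_ctrl | msg_input frames
 *   shm_base + shm_sz + msg_sz/2:  msg_output_ctrl | msg_output frames
 */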
696 | |||
697 | dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, " | ||
698 | "output %p, msg_input_ctrl %p, msg_input %p, " | ||
699 | "msg_output_ctrl %p, msg_output %p\n", | ||
700 | (u8 *) hio_mgr->shared_mem, hio_mgr->input, | ||
701 | hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl, | ||
702 | hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl, | ||
703 | hio_mgr->msg_output); | ||
704 | dev_dbg(bridge, "(proc) Max msgs in shared memory: 0x%x\n", | ||
705 | hmsg_mgr->max_msgs); | ||
706 | memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm)); | ||
707 | |||
708 | #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) | ||
709 | /* Get the start address of trace buffer */ | ||
710 | status = cod_get_sym_value(cod_man, SYS_PUTCBEG, | ||
711 | &hio_mgr->trace_buffer_begin); | ||
712 | if (status) | ||
713 | goto free_eproc; | ||
714 | |||
715 | hio_mgr->gpp_read_pointer = | ||
716 | hio_mgr->trace_buffer_begin = | ||
717 | (va + seg1_sz + pad_sz) + | ||
718 | (hio_mgr->trace_buffer_begin - da); | ||
719 | |||
720 | /* Get the end address of trace buffer */ | ||
721 | status = cod_get_sym_value(cod_man, SYS_PUTCEND, | ||
722 | &hio_mgr->trace_buffer_end); | ||
723 | if (status) | ||
724 | goto free_eproc; | ||
725 | |||
726 | hio_mgr->trace_buffer_end = | ||
727 | (va + seg1_sz + pad_sz) + | ||
728 | (hio_mgr->trace_buffer_end - da); | ||
729 | |||
730 | /* Get the current address of DSP write pointer */ | ||
731 | status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT, | ||
732 | &hio_mgr->trace_buffer_current); | ||
733 | if (status) | ||
734 | goto free_eproc; | ||
735 | |||
736 | hio_mgr->trace_buffer_current = | ||
737 | (va + seg1_sz + pad_sz) + | ||
738 | (hio_mgr->trace_buffer_current - da); | ||
739 | |||
740 | /* Calculate the size of trace buffer */ | ||
741 | kfree(hio_mgr->msg); | ||
742 | hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end - | ||
743 | hio_mgr->trace_buffer_begin) * | ||
744 | hio_mgr->word_size) + 2, GFP_KERNEL); | ||
745 | if (!hio_mgr->msg) { | ||
746 | status = -ENOMEM; | ||
747 | goto free_eproc; | ||
748 | } | ||
749 | |||
750 | hio_mgr->dsp_va = da; | ||
751 | hio_mgr->gpp_va = (va + seg1_sz + pad_sz); | ||
752 | #endif | ||
753 | |||
754 | free_eproc: | ||
755 | kfree(eproc); | ||
756 | free_symbol: | ||
757 | kfree(s); | ||
758 | |||
759 | return status; | ||
760 | } | ||
761 | |||
762 | /* | ||
763 | * ======== io_buf_size ======== | ||
764 | * Size of shared memory I/O channel. | ||
765 | */ | ||
766 | u32 io_buf_size(struct io_mgr *hio_mgr) | ||
767 | { | ||
768 | if (hio_mgr) | ||
769 | return hio_mgr->sm_buf_size; | ||
770 | else | ||
771 | return 0; | ||
772 | } | ||
773 | |||
774 | /* | ||
775 | * ======== io_cancel_chnl ======== | ||
776 | * Cancel IO on a given PCPY channel. | ||
777 | */ | ||
778 | void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl) | ||
779 | { | ||
780 | struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr; | ||
781 | struct shm *sm; | ||
782 | |||
783 | if (!hio_mgr) | ||
784 | goto func_end; | ||
785 | sm = hio_mgr->shared_mem; | ||
786 | |||
787 | /* Inform DSP that we have no more buffers on this channel */ | ||
788 | set_chnl_free(sm, chnl); | ||
789 | |||
790 | sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); | ||
791 | func_end: | ||
792 | return; | ||
793 | } | ||
794 | |||
795 | |||
796 | /* | ||
797 | * ======== io_dispatch_pm ======== | ||
798 | * Performs I/O dispatch on PM related messages from DSP | ||
799 | */ | ||
800 | static void io_dispatch_pm(struct io_mgr *pio_mgr) | ||
801 | { | ||
802 | int status; | ||
803 | u32 parg[2]; | ||
804 | |||
805 | /* Perform Power message processing here */ | ||
806 | parg[0] = pio_mgr->intr_val; | ||
807 | |||
808 | /* Send the command to the Bridge clk/pwr manager to handle */ | ||
809 | if (parg[0] == MBX_PM_HIBERNATE_EN) { | ||
810 | dev_dbg(bridge, "PM: Hibernate command\n"); | ||
811 | status = pio_mgr->intf_fxns-> | ||
812 | dev_cntrl(pio_mgr->bridge_context, | ||
813 | BRDIOCTL_PWR_HIBERNATE, parg); | ||
814 | if (status) | ||
815 | pr_err("%s: hibernate cmd failed 0x%x\n", | ||
816 | __func__, status); | ||
817 | } else if (parg[0] == MBX_PM_OPP_REQ) { | ||
818 | parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt; | ||
819 | dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]); | ||
820 | status = pio_mgr->intf_fxns-> | ||
821 | dev_cntrl(pio_mgr->bridge_context, | ||
822 | BRDIOCTL_CONSTRAINT_REQUEST, parg); | ||
823 | if (status) | ||
824 | dev_dbg(bridge, "PM: Failed to set constraint " | ||
825 | "= 0x%x\n", parg[1]); | ||
826 | } else { | ||
827 | dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n", | ||
828 | parg[0]); | ||
829 | status = pio_mgr->intf_fxns-> | ||
830 | dev_cntrl(pio_mgr->bridge_context, | ||
831 | BRDIOCTL_CLK_CTRL, parg); | ||
832 | if (status) | ||
833 | dev_dbg(bridge, "PM: Failed to ctrl the DSP clk " | ||
834 | "= 0x%x\n", *parg); | ||
835 | } | ||
836 | } | ||
837 | |||
838 | /* | ||
839 | * ======== io_dpc ======== | ||
840 | * Deferred procedure call for shared memory channel driver ISR. Carries | ||
841 | * out the dispatch of I/O as a non-preemptible event. It can only be | ||
842 | * pre-empted by an ISR. | ||
843 | */ | ||
844 | void io_dpc(unsigned long ref_data) | ||
845 | { | ||
846 | struct io_mgr *pio_mgr = (struct io_mgr *)ref_data; | ||
847 | struct chnl_mgr *chnl_mgr_obj; | ||
848 | struct msg_mgr *msg_mgr_obj; | ||
849 | struct deh_mgr *hdeh_mgr; | ||
850 | u32 requested; | ||
851 | u32 serviced; | ||
852 | |||
853 | if (!pio_mgr) | ||
854 | goto func_end; | ||
855 | chnl_mgr_obj = pio_mgr->chnl_mgr; | ||
856 | dev_get_msg_mgr(pio_mgr->dev_obj, &msg_mgr_obj); | ||
857 | dev_get_deh_mgr(pio_mgr->dev_obj, &hdeh_mgr); | ||
858 | if (!chnl_mgr_obj) | ||
859 | goto func_end; | ||
860 | |||
861 | requested = pio_mgr->dpc_req; | ||
862 | serviced = pio_mgr->dpc_sched; | ||
863 | |||
864 | if (serviced == requested) | ||
865 | goto func_end; | ||
866 | |||
867 | /* Process pending DPC's */ | ||
868 | do { | ||
869 | /* Check value of interrupt reg to ensure it's a valid error */ | ||
870 | if ((pio_mgr->intr_val > DEH_BASE) && | ||
871 | (pio_mgr->intr_val < DEH_LIMIT)) { | ||
872 | /* Notify DSP/BIOS exception */ | ||
873 | if (hdeh_mgr) { | ||
874 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
875 | print_dsp_debug_trace(pio_mgr); | ||
876 | #endif | ||
877 | bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, | ||
878 | pio_mgr->intr_val); | ||
879 | } | ||
880 | } | ||
881 | /* Proc-copy channel dispatch */ | ||
882 | input_chnl(pio_mgr, NULL, IO_SERVICE); | ||
883 | output_chnl(pio_mgr, NULL, IO_SERVICE); | ||
884 | |||
885 | #ifdef CHNL_MESSAGES | ||
886 | if (msg_mgr_obj) { | ||
887 | /* Perform I/O dispatch on message queues */ | ||
888 | input_msg(pio_mgr, msg_mgr_obj); | ||
889 | output_msg(pio_mgr, msg_mgr_obj); | ||
890 | } | ||
891 | |||
892 | #endif | ||
893 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
894 | if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) { | ||
895 | /* Notify DSP Trace message */ | ||
896 | print_dsp_debug_trace(pio_mgr); | ||
897 | } | ||
898 | #endif | ||
899 | serviced++; | ||
900 | } while (serviced != requested); | ||
901 | pio_mgr->dpc_sched = requested; | ||
902 | func_end: | ||
903 | return; | ||
904 | } | ||
905 | |||
906 | /* | ||
907 | * ======== io_mbox_msg ======== | ||
908 | * Main interrupt handler for the shared memory IO manager. | ||
909 | * Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then | ||
910 | * schedules a DPC to dispatch I/O. | ||
911 | */ | ||
912 | int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg) | ||
913 | { | ||
914 | struct io_mgr *pio_mgr; | ||
915 | struct dev_object *dev_obj; | ||
916 | unsigned long flags; | ||
917 | |||
918 | dev_obj = dev_get_first(); | ||
919 | dev_get_io_mgr(dev_obj, &pio_mgr); | ||
920 | |||
921 | if (!pio_mgr) | ||
922 | return NOTIFY_BAD; | ||
923 | |||
924 | pio_mgr->intr_val = (u16)((u32)msg); | ||
925 | if (pio_mgr->intr_val & MBX_PM_CLASS) | ||
926 | io_dispatch_pm(pio_mgr); | ||
927 | |||
928 | if (pio_mgr->intr_val == MBX_DEH_RESET) { | ||
929 | pio_mgr->intr_val = 0; | ||
930 | } else { | ||
931 | spin_lock_irqsave(&pio_mgr->dpc_lock, flags); | ||
932 | pio_mgr->dpc_req++; | ||
933 | spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags); | ||
934 | tasklet_schedule(&pio_mgr->dpc_tasklet); | ||
935 | } | ||
936 | return NOTIFY_OK; | ||
937 | } | ||
938 | |||
939 | /* | ||
940 | * ======== io_request_chnl ======== | ||
941 | * Purpose: | ||
942 | * Request channel I/O from the DSP. Sets flags in shared memory, then | ||
943 | * interrupts the DSP. | ||
944 | */ | ||
945 | void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl, | ||
946 | u8 io_mode, u16 *mbx_val) | ||
947 | { | ||
948 | struct chnl_mgr *chnl_mgr_obj; | ||
949 | struct shm *sm; | ||
950 | |||
951 | if (!pchnl || !mbx_val) | ||
952 | goto func_end; | ||
953 | chnl_mgr_obj = io_manager->chnl_mgr; | ||
954 | sm = io_manager->shared_mem; | ||
955 | if (io_mode == IO_INPUT) { | ||
956 | /* Indicate to the DSP we have a buffer available for input */ | ||
957 | set_chnl_busy(sm, pchnl->chnl_id); | ||
958 | *mbx_val = MBX_PCPY_CLASS; | ||
959 | } else if (io_mode == IO_OUTPUT) { | ||
960 | /* | ||
961 | * Record the fact that we have a buffer available for | ||
962 | * output. | ||
963 | */ | ||
964 | chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id); | ||
965 | } else { | ||
966 | } | ||
967 | func_end: | ||
968 | return; | ||
969 | } | ||
970 | |||
971 | /* | ||
972 | * ======== iosm_schedule ======== | ||
973 | * Schedule DPC for IO. | ||
974 | */ | ||
975 | void iosm_schedule(struct io_mgr *io_manager) | ||
976 | { | ||
977 | unsigned long flags; | ||
978 | |||
979 | if (!io_manager) | ||
980 | return; | ||
981 | |||
982 | /* Increment count of DPC's pending. */ | ||
983 | spin_lock_irqsave(&io_manager->dpc_lock, flags); | ||
984 | io_manager->dpc_req++; | ||
985 | spin_unlock_irqrestore(&io_manager->dpc_lock, flags); | ||
986 | |||
987 | /* Schedule DPC */ | ||
988 | tasklet_schedule(&io_manager->dpc_tasklet); | ||
989 | } | ||
990 | |||
991 | /* | ||
992 | * ======== find_ready_output ======== | ||
993 | * Search for a host output channel which is ready to send. If this is | ||
994 | * called as a result of servicing the DPC, then implement a round | ||
995 | * robin search; otherwise, this was called by a client thread (via | ||
996 | * IO_Dispatch()), so just start searching from the current channel id. | ||
997 | */ | ||
998 | static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj, | ||
999 | struct chnl_object *pchnl, u32 mask) | ||
1000 | { | ||
1001 | u32 ret = OUTPUTNOTREADY; | ||
1002 | u32 id, start_id; | ||
1003 | u32 shift; | ||
1004 | |||
1005 | id = (pchnl != | ||
1006 | NULL ? pchnl->chnl_id : (chnl_mgr_obj->last_output + 1)); | ||
1007 | id = ((id == CHNL_MAXCHANNELS) ? 0 : id); | ||
1008 | if (id >= CHNL_MAXCHANNELS) | ||
1009 | goto func_end; | ||
1010 | if (mask) { | ||
1011 | shift = (1 << id); | ||
1012 | start_id = id; | ||
1013 | do { | ||
1014 | if (mask & shift) { | ||
1015 | ret = id; | ||
1016 | if (pchnl == NULL) | ||
1017 | chnl_mgr_obj->last_output = id; | ||
1018 | break; | ||
1019 | } | ||
1020 | id = id + 1; | ||
1021 | id = ((id == CHNL_MAXCHANNELS) ? 0 : id); | ||
1022 | shift = (1 << id); | ||
1023 | } while (id != start_id); | ||
1024 | } | ||
1025 | func_end: | ||
1026 | return ret; | ||
1027 | } | ||
1028 | |||
1029 | /* | ||
1030 | * ======== input_chnl ======== | ||
1031 | * Dispatch a buffer on an input channel. | ||
1032 | */ | ||
1033 | static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl, | ||
1034 | u8 io_mode) | ||
1035 | { | ||
1036 | struct chnl_mgr *chnl_mgr_obj; | ||
1037 | struct shm *sm; | ||
1038 | u32 chnl_id; | ||
1039 | u32 bytes; | ||
1040 | struct chnl_irp *chnl_packet_obj = NULL; | ||
1041 | u32 dw_arg; | ||
1042 | bool clear_chnl = false; | ||
1043 | bool notify_client = false; | ||
1044 | |||
1045 | sm = pio_mgr->shared_mem; | ||
1046 | chnl_mgr_obj = pio_mgr->chnl_mgr; | ||
1047 | |||
1048 | /* Attempt to perform input */ | ||
1049 | if (!sm->input_full) | ||
1050 | goto func_end; | ||
1051 | |||
1052 | bytes = sm->input_size * chnl_mgr_obj->word_size; | ||
1053 | chnl_id = sm->input_id; | ||
1054 | dw_arg = sm->arg; | ||
1055 | if (chnl_id >= CHNL_MAXCHANNELS) { | ||
1056 | /* Shouldn't be here: would indicate corrupted shm. */ | ||
1057 | goto func_end; | ||
1058 | } | ||
1059 | pchnl = chnl_mgr_obj->channels[chnl_id]; | ||
1060 | if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) { | ||
1061 | if ((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY) { | ||
1062 | /* Get the I/O request, and attempt a transfer */ | ||
1063 | if (!list_empty(&pchnl->io_requests)) { | ||
1064 | if (!pchnl->cio_reqs) | ||
1065 | goto func_end; | ||
1066 | |||
1067 | chnl_packet_obj = list_first_entry( | ||
1068 | &pchnl->io_requests, | ||
1069 | struct chnl_irp, link); | ||
1070 | list_del(&chnl_packet_obj->link); | ||
1071 | pchnl->cio_reqs--; | ||
1072 | |||
1073 | /* | ||
1074 | * Ensure we don't overflow the client's | ||
1075 | * buffer. | ||
1076 | */ | ||
1077 | bytes = min(bytes, chnl_packet_obj->byte_size); | ||
1078 | memcpy(chnl_packet_obj->host_sys_buf, | ||
1079 | pio_mgr->input, bytes); | ||
1080 | pchnl->bytes_moved += bytes; | ||
1081 | chnl_packet_obj->byte_size = bytes; | ||
1082 | chnl_packet_obj->arg = dw_arg; | ||
1083 | chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE; | ||
1084 | |||
1085 | if (bytes == 0) { | ||
1086 | /* | ||
1087 | * This assertion fails if the DSP | ||
1088 | * sends EOS more than once on this | ||
1089 | * channel. | ||
1090 | */ | ||
1091 | if (pchnl->state & CHNL_STATEEOS) | ||
1092 | goto func_end; | ||
1093 | /* | ||
1094 | * Zero bytes indicates EOS. Update | ||
1095 | * IOC status for this chirp, and also | ||
1096 | * the channel state. | ||
1097 | */ | ||
1098 | chnl_packet_obj->status |= | ||
1099 | CHNL_IOCSTATEOS; | ||
1100 | pchnl->state |= CHNL_STATEEOS; | ||
1101 | /* | ||
1102 | * Notify that end of stream has | ||
1103 | * occurred. | ||
1104 | */ | ||
1105 | ntfy_notify(pchnl->ntfy_obj, | ||
1106 | DSP_STREAMDONE); | ||
1107 | } | ||
1108 | /* Tell DSP if no more I/O buffers available */ | ||
1109 | if (list_empty(&pchnl->io_requests)) | ||
1110 | set_chnl_free(sm, pchnl->chnl_id); | ||
1111 | clear_chnl = true; | ||
1112 | notify_client = true; | ||
1113 | } else { | ||
1114 | /* | ||
1115 | * Input full for this channel, but we have no | ||
1116 | * buffers available. The channel must be | ||
1117 | * "idling". Clear out the physical input | ||
1118 | * channel. | ||
1119 | */ | ||
1120 | clear_chnl = true; | ||
1121 | } | ||
1122 | } else { | ||
1123 | /* Input channel cancelled: clear input channel */ | ||
1124 | clear_chnl = true; | ||
1125 | } | ||
1126 | } else { | ||
1127 | /* DPC fired after host closed channel: clear input channel */ | ||
1128 | clear_chnl = true; | ||
1129 | } | ||
1130 | if (clear_chnl) { | ||
1131 | /* Indicate to the DSP we have read the input */ | ||
1132 | sm->input_full = 0; | ||
1133 | sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); | ||
1134 | } | ||
1135 | if (notify_client) { | ||
1136 | /* Notify client with IO completion record */ | ||
1137 | notify_chnl_complete(pchnl, chnl_packet_obj); | ||
1138 | } | ||
1139 | func_end: | ||
1140 | return; | ||
1141 | } | ||
1142 | |||
1143 | /* | ||
1144 | * ======== input_msg ======== | ||
1145 | * Copies messages from shared memory to the message queues. | ||
1146 | */ | ||
1147 | static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr) | ||
1148 | { | ||
1149 | u32 num_msgs; | ||
1150 | u32 i; | ||
1151 | u8 *msg_input; | ||
1152 | struct msg_queue *msg_queue_obj; | ||
1153 | struct msg_frame *pmsg; | ||
1154 | struct msg_dspmsg msg; | ||
1155 | struct msg_ctrl *msg_ctr_obj; | ||
1156 | u32 input_empty; | ||
1157 | u32 addr; | ||
1158 | |||
1159 | msg_ctr_obj = pio_mgr->msg_input_ctrl; | ||
1160 | /* Get the number of input messages to be read */ | ||
1161 | input_empty = msg_ctr_obj->buf_empty; | ||
1162 | num_msgs = msg_ctr_obj->size; | ||
1163 | if (input_empty) | ||
1164 | return; | ||
1165 | |||
1166 | msg_input = pio_mgr->msg_input; | ||
1167 | for (i = 0; i < num_msgs; i++) { | ||
1168 | /* Read the next message */ | ||
1169 | addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.cmd); | ||
1170 | msg.msg.cmd = | ||
1171 | read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr); | ||
1172 | addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg1); | ||
1173 | msg.msg.arg1 = | ||
1174 | read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr); | ||
1175 | addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg2); | ||
1176 | msg.msg.arg2 = | ||
1177 | read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr); | ||
1178 | addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id); | ||
1179 | msg.msgq_id = | ||
1180 | read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr); | ||
1181 | msg_input += sizeof(struct msg_dspmsg); | ||
1182 | |||
1183 | /* Determine which queue to put the message in */ | ||
1184 | dev_dbg(bridge, "input msg: cmd=0x%x arg1=0x%x " | ||
1185 | "arg2=0x%x msgq_id=0x%x\n", msg.msg.cmd, | ||
1186 | msg.msg.arg1, msg.msg.arg2, msg.msgq_id); | ||
1187 | /* | ||
1188 | * Interrupt may occur before shared memory and message | ||
1189 | * input locations have been set up. If all nodes were | ||
1190 | * cleaned up, hmsg_mgr->max_msgs should be 0. | ||
1191 | */ | ||
1192 | list_for_each_entry(msg_queue_obj, &hmsg_mgr->queue_list, | ||
1193 | list_elem) { | ||
1194 | if (msg.msgq_id != msg_queue_obj->msgq_id) | ||
1195 | continue; | ||
1196 | /* Found it */ | ||
1197 | if (msg.msg.cmd == RMS_EXITACK) { | ||
1198 | /* | ||
1199 | * Call the node exit notification. | ||
1200 | * The exit message does not get | ||
1201 | * queued. | ||
1202 | */ | ||
1203 | (*hmsg_mgr->on_exit)(msg_queue_obj->arg, | ||
1204 | msg.msg.arg1); | ||
1205 | break; | ||
1206 | } | ||
1207 | /* | ||
1208 | * Not an exit acknowledgement, queue | ||
1209 | * the message. | ||
1210 | */ | ||
1211 | if (list_empty(&msg_queue_obj->msg_free_list)) { | ||
1212 | /* | ||
1213 | * No free frame to copy the | ||
1214 | * message into. | ||
1215 | */ | ||
1216 | pr_err("%s: no free msg frames," | ||
1217 | " discarding msg\n", | ||
1218 | __func__); | ||
1219 | break; | ||
1220 | } | ||
1221 | |||
1222 | pmsg = list_first_entry(&msg_queue_obj->msg_free_list, | ||
1223 | struct msg_frame, list_elem); | ||
1224 | list_del(&pmsg->list_elem); | ||
1225 | pmsg->msg_data = msg; | ||
1226 | list_add_tail(&pmsg->list_elem, | ||
1227 | &msg_queue_obj->msg_used_list); | ||
1228 | ntfy_notify(msg_queue_obj->ntfy_obj, | ||
1229 | DSP_NODEMESSAGEREADY); | ||
1230 | sync_set_event(msg_queue_obj->sync_event); | ||
1231 | } | ||
1232 | } | ||
1233 | /* Set the post SWI flag */ | ||
1234 | if (num_msgs > 0) { | ||
1235 | /* Tell the DSP we've read the messages */ | ||
1236 | msg_ctr_obj->buf_empty = true; | ||
1237 | msg_ctr_obj->post_swi = true; | ||
1238 | sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); | ||
1239 | } | ||
1240 | } | ||
1241 | |||
1242 | /* | ||
1243 | * ======== notify_chnl_complete ======== | ||
1244 | * Purpose: | ||
1245 | * Signal the channel event, notifying the client that I/O has completed. | ||
1246 | */ | ||
1247 | static void notify_chnl_complete(struct chnl_object *pchnl, | ||
1248 | struct chnl_irp *chnl_packet_obj) | ||
1249 | { | ||
1250 | bool signal_event; | ||
1251 | |||
1252 | if (!pchnl || !pchnl->sync_event || !chnl_packet_obj) | ||
1253 | goto func_end; | ||
1254 | |||
1255 | /* | ||
1256 | * Note: we signal the channel event only if the queue of IO | ||
1257 | * completions is empty. If it is not empty, the event is sure to be | ||
1258 | * signalled by the only IO completion list consumer: | ||
1259 | * bridge_chnl_get_ioc(). | ||
1260 | */ | ||
1261 | signal_event = list_empty(&pchnl->io_completions); | ||
1262 | /* Enqueue the IO completion info for the client */ | ||
1263 | list_add_tail(&chnl_packet_obj->link, &pchnl->io_completions); | ||
1264 | pchnl->cio_cs++; | ||
1265 | |||
1266 | if (pchnl->cio_cs > pchnl->chnl_packets) | ||
1267 | goto func_end; | ||
1268 | /* Signal the channel event (if not already set) that IO is complete */ | ||
1269 | if (signal_event) | ||
1270 | sync_set_event(pchnl->sync_event); | ||
1271 | |||
1272 | /* Notify that IO is complete */ | ||
1273 | ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION); | ||
1274 | func_end: | ||
1275 | return; | ||
1276 | } | ||
1277 | |||
1278 | /* | ||
1279 | * ======== output_chnl ======== | ||
1280 | * Purpose: | ||
1281 | * Dispatch a buffer on an output channel. | ||
1282 | */ | ||
1283 | static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl, | ||
1284 | u8 io_mode) | ||
1285 | { | ||
1286 | struct chnl_mgr *chnl_mgr_obj; | ||
1287 | struct shm *sm; | ||
1288 | u32 chnl_id; | ||
1289 | struct chnl_irp *chnl_packet_obj; | ||
1290 | u32 dw_dsp_f_mask; | ||
1291 | |||
1292 | chnl_mgr_obj = pio_mgr->chnl_mgr; | ||
1293 | sm = pio_mgr->shared_mem; | ||
1294 | /* Attempt to perform output */ | ||
1295 | if (sm->output_full) | ||
1296 | goto func_end; | ||
1297 | |||
1298 | if (pchnl && !((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY)) | ||
1299 | goto func_end; | ||
1300 | |||
1301 | /* Look to see if both a PC and DSP output channel are ready */ | ||
1302 | dw_dsp_f_mask = sm->dsp_free_mask; | ||
1303 | chnl_id = | ||
1304 | find_ready_output(chnl_mgr_obj, pchnl, | ||
1305 | (chnl_mgr_obj->output_mask & dw_dsp_f_mask)); | ||
1306 | if (chnl_id == OUTPUTNOTREADY) | ||
1307 | goto func_end; | ||
1308 | |||
1309 | pchnl = chnl_mgr_obj->channels[chnl_id]; | ||
1310 | if (!pchnl || list_empty(&pchnl->io_requests)) { | ||
1311 | /* Shouldn't get here */ | ||
1312 | goto func_end; | ||
1313 | } | ||
1314 | |||
1315 | if (!pchnl->cio_reqs) | ||
1316 | goto func_end; | ||
1317 | |||
1318 | /* Get the I/O request, and attempt a transfer */ | ||
1319 | chnl_packet_obj = list_first_entry(&pchnl->io_requests, | ||
1320 | struct chnl_irp, link); | ||
1321 | list_del(&chnl_packet_obj->link); | ||
1322 | |||
1323 | pchnl->cio_reqs--; | ||
1324 | |||
1325 | /* Record fact that no more I/O buffers available */ | ||
1326 | if (list_empty(&pchnl->io_requests)) | ||
1327 | chnl_mgr_obj->output_mask &= ~(1 << chnl_id); | ||
1328 | |||
1329 | /* Transfer buffer to DSP side */ | ||
1330 | chnl_packet_obj->byte_size = min(pio_mgr->sm_buf_size, | ||
1331 | chnl_packet_obj->byte_size); | ||
1332 | memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf, | ||
1333 | chnl_packet_obj->byte_size); | ||
1334 | pchnl->bytes_moved += chnl_packet_obj->byte_size; | ||
1335 | /* Write all 32 bits of arg */ | ||
1336 | sm->arg = chnl_packet_obj->arg; | ||
1337 | #if _CHNL_WORDSIZE == 2 | ||
1338 | /* Access may use a different SM word size (e.g. 16/32-bit words) */ | ||
1339 | sm->output_id = (u16) chnl_id; | ||
1340 | sm->output_size = (u16) (chnl_packet_obj->byte_size + | ||
1341 | chnl_mgr_obj->word_size - 1) / | ||
1342 | (u16) chnl_mgr_obj->word_size; | ||
1343 | #else | ||
1344 | sm->output_id = chnl_id; | ||
1345 | sm->output_size = (chnl_packet_obj->byte_size + | ||
1346 | chnl_mgr_obj->word_size - 1) / chnl_mgr_obj->word_size; | ||
1347 | #endif | ||
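/*
 * Both branches above round the byte count up to whole SM words,
 * e.g. (hypothetically) byte_size = 10 with word_size = 2 gives
 * (10 + 2 - 1) / 2 = 5 words.
 */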
1348 | sm->output_full = 1; | ||
1349 | /* Indicate to the DSP we have written the output */ | ||
1350 | sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); | ||
1351 | /* Notify client with IO completion record (keep EOS) */ | ||
1352 | chnl_packet_obj->status &= CHNL_IOCSTATEOS; | ||
1353 | notify_chnl_complete(pchnl, chnl_packet_obj); | ||
1354 | /* Notify if stream is done. */ | ||
1355 | if (chnl_packet_obj->status & CHNL_IOCSTATEOS) | ||
1356 | ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE); | ||
1357 | |||
1358 | func_end: | ||
1359 | return; | ||
1360 | } | ||
1361 | |||
1362 | /* | ||
1363 | * ======== output_msg ======== | ||
1364 | * Copies messages from the message queues to the shared memory. | ||
1365 | */ | ||
1366 | static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr) | ||
1367 | { | ||
1368 | u32 num_msgs = 0; | ||
1369 | u32 i; | ||
1370 | struct msg_dspmsg *msg_output; | ||
1371 | struct msg_frame *pmsg; | ||
1372 | struct msg_ctrl *msg_ctr_obj; | ||
1373 | u32 val; | ||
1374 | u32 addr; | ||
1375 | |||
1376 | msg_ctr_obj = pio_mgr->msg_output_ctrl; | ||
1377 | |||
1378 | /* Check if output has been cleared */ | ||
1379 | if (!msg_ctr_obj->buf_empty) | ||
1380 | return; | ||
1381 | |||
1382 | num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ? | ||
1383 | hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending; | ||
1384 | msg_output = (struct msg_dspmsg *) pio_mgr->msg_output; | ||
1385 | |||
1386 | /* Copy num_msgs messages into shared memory */ | ||
1387 | for (i = 0; i < num_msgs; i++) { | ||
1388 | if (list_empty(&hmsg_mgr->msg_used_list)) | ||
1389 | continue; | ||
1390 | |||
1391 | pmsg = list_first_entry(&hmsg_mgr->msg_used_list, | ||
1392 | struct msg_frame, list_elem); | ||
1393 | list_del(&pmsg->list_elem); | ||
1394 | |||
1395 | val = (pmsg->msg_data).msgq_id; | ||
1396 | addr = (u32) &msg_output->msgq_id; | ||
1397 | write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val); | ||
1398 | |||
1399 | val = (pmsg->msg_data).msg.cmd; | ||
1400 | addr = (u32) &msg_output->msg.cmd; | ||
1401 | write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val); | ||
1402 | |||
1403 | val = (pmsg->msg_data).msg.arg1; | ||
1404 | addr = (u32) &msg_output->msg.arg1; | ||
1405 | write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val); | ||
1406 | |||
1407 | val = (pmsg->msg_data).msg.arg2; | ||
1408 | addr = (u32) &msg_output->msg.arg2; | ||
1409 | write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val); | ||
1410 | |||
1411 | msg_output++; | ||
1412 | list_add_tail(&pmsg->list_elem, &hmsg_mgr->msg_free_list); | ||
1413 | sync_set_event(hmsg_mgr->sync_event); | ||
1414 | } | ||
1415 | |||
1416 | if (num_msgs > 0) { | ||
1417 | hmsg_mgr->msgs_pending -= num_msgs; | ||
1418 | #if _CHNL_WORDSIZE == 2 | ||
1419 | /* | ||
1420 | * Access may use a different SM word size | ||
1421 | * (e.g. 16/32-bit words) | ||
1422 | */ | ||
1423 | msg_ctr_obj->size = (u16) num_msgs; | ||
1424 | #else | ||
1425 | msg_ctr_obj->size = num_msgs; | ||
1426 | #endif | ||
1427 | msg_ctr_obj->buf_empty = false; | ||
1428 | /* Set the post SWI flag */ | ||
1429 | msg_ctr_obj->post_swi = true; | ||
1430 | /* Tell the DSP we have written the output. */ | ||
1431 | sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS); | ||
1432 | } | ||
1433 | } | ||
1434 | |||
1435 | /* | ||
1436 | * ======== register_shm_segs ======== | ||
1437 | * Purpose: | ||
1438 | * Registers GPP SM segment with CMM. | ||
1439 | */ | ||
1440 | static int register_shm_segs(struct io_mgr *hio_mgr, | ||
1441 | struct cod_manager *cod_man, | ||
1442 | u32 dw_gpp_base_pa) | ||
1443 | { | ||
1444 | int status = 0; | ||
1445 | u32 ul_shm0_base = 0; | ||
1446 | u32 shm0_end = 0; | ||
1447 | u32 ul_shm0_rsrvd_start = 0; | ||
1448 | u32 ul_rsrvd_size = 0; | ||
1449 | u32 ul_gpp_phys; | ||
1450 | u32 ul_dsp_virt; | ||
1451 | u32 ul_shm_seg_id0 = 0; | ||
1452 | u32 dw_offset, dw_gpp_base_va, ul_dsp_size; | ||
1453 | |||
1454 | /* | ||
1455 | * Read address and size info for first SM region. | ||
1456 | * Get start of 1st SM Heap region. | ||
1457 | */ | ||
1458 | status = | ||
1459 | cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base); | ||
1460 | if (ul_shm0_base == 0) { | ||
1461 | status = -EPERM; | ||
1462 | goto func_end; | ||
1463 | } | ||
1464 | /* Get end of 1st SM Heap region */ | ||
1465 | if (!status) { | ||
1466 | /* Get start and length of message part of shared memory */ | ||
1467 | status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM, | ||
1468 | &shm0_end); | ||
1469 | if (shm0_end == 0) { | ||
1470 | status = -EPERM; | ||
1471 | goto func_end; | ||
1472 | } | ||
1473 | } | ||
1474 | /* Start of Gpp reserved region */ | ||
1475 | if (!status) { | ||
1476 | /* Get start and length of message part of shared memory */ | ||
1477 | status = | ||
1478 | cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM, | ||
1479 | &ul_shm0_rsrvd_start); | ||
1480 | if (ul_shm0_rsrvd_start == 0) { | ||
1481 | status = -EPERM; | ||
1482 | goto func_end; | ||
1483 | } | ||
1484 | } | ||
1485 | /* Register with CMM */ | ||
1486 | if (!status) { | ||
1487 | status = dev_get_cmm_mgr(hio_mgr->dev_obj, &hio_mgr->cmm_mgr); | ||
1488 | if (!status) { | ||
1489 | status = cmm_un_register_gppsm_seg(hio_mgr->cmm_mgr, | ||
1490 | CMM_ALLSEGMENTS); | ||
1491 | } | ||
1492 | } | ||
1493 | /* Register new SM region(s) */ | ||
1494 | if (!status && (shm0_end - ul_shm0_base) > 0) { | ||
1495 | /* Calc size (bytes) of SM the GPP can alloc from */ | ||
1496 | ul_rsrvd_size = | ||
1497 | (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size; | ||
1498 | if (ul_rsrvd_size <= 0) { | ||
1499 | status = -EPERM; | ||
1500 | goto func_end; | ||
1501 | } | ||
1502 | /* Calc size of SM DSP can alloc from */ | ||
1503 | ul_dsp_size = | ||
1504 | (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size; | ||
1505 | if (ul_dsp_size <= 0) { | ||
1506 | status = -EPERM; | ||
1507 | goto func_end; | ||
1508 | } | ||
1509 | /* First TLB entry reserved for Bridge SM use. */ | ||
1510 | ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys; | ||
1511 | /* Get size in bytes */ | ||
1512 | ul_dsp_virt = | ||
1513 | hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt * | ||
1514 | hio_mgr->word_size; | ||
1515 | /* | ||
1516 | * Calc byte offset used to convert GPP phys <-> DSP byte | ||
1517 | * address. | ||
1518 | */ | ||
1519 | if (dw_gpp_base_pa > ul_dsp_virt) | ||
1520 | dw_offset = dw_gpp_base_pa - ul_dsp_virt; | ||
1521 | else | ||
1522 | dw_offset = ul_dsp_virt - dw_gpp_base_pa; | ||
1523 | |||
1524 | if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) { | ||
1525 | status = -EPERM; | ||
1526 | goto func_end; | ||
1527 | } | ||
1528 | /* | ||
1529 | * Calc Gpp phys base of SM region. | ||
1530 | * This is actually uncached kernel virtual address. | ||
1531 | */ | ||
1532 | dw_gpp_base_va = | ||
1533 | ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size - | ||
1534 | ul_dsp_virt; | ||
1535 | /* | ||
1536 | * Calc Gpp phys base of SM region. | ||
1537 | * This is the physical address. | ||
1538 | */ | ||
1539 | dw_gpp_base_pa = | ||
1540 | dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size - | ||
1541 | ul_dsp_virt; | ||
1542 | /* Register SM Segment 0. */ | ||
1543 | status = | ||
1544 | cmm_register_gppsm_seg(hio_mgr->cmm_mgr, dw_gpp_base_pa, | ||
1545 | ul_rsrvd_size, dw_offset, | ||
1546 | (dw_gpp_base_pa > | ||
1547 | ul_dsp_virt) ? CMM_ADDTODSPPA : | ||
1548 | CMM_SUBFROMDSPPA, | ||
1549 | (u32) (ul_shm0_base * | ||
1550 | hio_mgr->word_size), | ||
1551 | ul_dsp_size, &ul_shm_seg_id0, | ||
1552 | dw_gpp_base_va); | ||
1553 | /* First SM region is seg_id = 1 */ | ||
1554 | if (ul_shm_seg_id0 != 1) | ||
1555 | status = -EPERM; | ||
1556 | } | ||
1557 | func_end: | ||
1558 | return status; | ||
1559 | } | ||
1560 | |||
1561 | /* ZCPY IO routines. */ | ||
1562 | /* | ||
1563 | * ======== IO_SHMcontrol ======== | ||
1564 | * Sets the requested shm setting. | ||
1565 | */ | ||
1566 | int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs) | ||
1567 | { | ||
1568 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
1569 | u32 i; | ||
1570 | struct dspbridge_platform_data *pdata = | ||
1571 | omap_dspbridge_dev->dev.platform_data; | ||
1572 | |||
1573 | switch (desc) { | ||
1574 | case SHM_CURROPP: | ||
1575 | /* Update the shared memory with requested OPP information */ | ||
1576 | if (pargs != NULL) | ||
1577 | hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = | ||
1578 | *(u32 *) pargs; | ||
1579 | else | ||
1580 | return -EPERM; | ||
1581 | break; | ||
1582 | case SHM_OPPINFO: | ||
1583 | /* | ||
1584 | * Update the shared memory with the voltage, frequency, | ||
1585 | * min and max frequency values for an OPP. | ||
1586 | */ | ||
1587 | for (i = 0; i <= dsp_max_opps; i++) { | ||
1588 | hio_mgr->shared_mem->opp_table_struct.opp_point[i]. | ||
1589 | voltage = vdd1_dsp_freq[i][0]; | ||
1590 | dev_dbg(bridge, "OPP-shm: voltage: %d\n", | ||
1591 | vdd1_dsp_freq[i][0]); | ||
1592 | hio_mgr->shared_mem->opp_table_struct. | ||
1593 | opp_point[i].frequency = vdd1_dsp_freq[i][1]; | ||
1594 | dev_dbg(bridge, "OPP-shm: frequency: %d\n", | ||
1595 | vdd1_dsp_freq[i][1]); | ||
1596 | hio_mgr->shared_mem->opp_table_struct.opp_point[i]. | ||
1597 | min_freq = vdd1_dsp_freq[i][2]; | ||
1598 | dev_dbg(bridge, "OPP-shm: min freq: %d\n", | ||
1599 | vdd1_dsp_freq[i][2]); | ||
1600 | hio_mgr->shared_mem->opp_table_struct.opp_point[i]. | ||
1601 | max_freq = vdd1_dsp_freq[i][3]; | ||
1602 | dev_dbg(bridge, "OPP-shm: max freq: %d\n", | ||
1603 | vdd1_dsp_freq[i][3]); | ||
1604 | } | ||
1605 | hio_mgr->shared_mem->opp_table_struct.num_opp_pts = | ||
1606 | dsp_max_opps; | ||
1607 | dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps); | ||
1608 | /* Update the current OPP number */ | ||
1609 | if (pdata->dsp_get_opp) | ||
1610 | i = (*pdata->dsp_get_opp) (); | ||
1611 | hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i; | ||
1612 | dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i); | ||
1613 | break; | ||
1614 | case SHM_GETOPP: | ||
1615 | /* Get the OPP that DSP has requested */ | ||
1616 | *(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt; | ||
1617 | break; | ||
1618 | default: | ||
1619 | break; | ||
1620 | } | ||
1621 | #endif | ||
1622 | return 0; | ||
1623 | } | ||
1624 | |||
1625 | /* | ||
1626 | * ======== bridge_io_get_proc_load ======== | ||
1627 | * Gets the Processor's Load information | ||
1628 | */ | ||
1629 | int bridge_io_get_proc_load(struct io_mgr *hio_mgr, | ||
1630 | struct dsp_procloadstat *proc_lstat) | ||
1631 | { | ||
1632 | if (!hio_mgr->shared_mem) | ||
1633 | return -EFAULT; | ||
1634 | |||
1635 | proc_lstat->curr_load = | ||
1636 | hio_mgr->shared_mem->load_mon_info.curr_dsp_load; | ||
1637 | proc_lstat->predicted_load = | ||
1638 | hio_mgr->shared_mem->load_mon_info.pred_dsp_load; | ||
1639 | proc_lstat->curr_dsp_freq = | ||
1640 | hio_mgr->shared_mem->load_mon_info.curr_dsp_freq; | ||
1641 | proc_lstat->predicted_freq = | ||
1642 | hio_mgr->shared_mem->load_mon_info.pred_dsp_freq; | ||
1643 | |||
1644 | dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, " | ||
1645 | "Pred Freq = %d\n", proc_lstat->curr_load, | ||
1646 | proc_lstat->predicted_load, proc_lstat->curr_dsp_freq, | ||
1647 | proc_lstat->predicted_freq); | ||
1648 | return 0; | ||
1649 | } | ||
1650 | |||
1651 | |||
1652 | #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) | ||
1653 | void print_dsp_debug_trace(struct io_mgr *hio_mgr) | ||
1654 | { | ||
1655 | u32 ul_new_message_length = 0, ul_gpp_cur_pointer; | ||
1656 | |||
1657 | while (true) { | ||
1658 | /* Get the DSP current pointer */ | ||
1659 | ul_gpp_cur_pointer = | ||
1660 | *(u32 *) (hio_mgr->trace_buffer_current); | ||
1661 | ul_gpp_cur_pointer = | ||
1662 | hio_mgr->gpp_va + (ul_gpp_cur_pointer - | ||
1663 | hio_mgr->dsp_va); | ||
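/*
 * The DSP write pointer is translated into a GPP-side address by
 * rebasing it from dsp_va to gpp_va; e.g. with hypothetical values
 * dsp_va = 0x20000000, gpp_va = 0xe3000000 and a DSP pointer of
 * 0x20000040, the GPP-side pointer becomes 0xe3000040.
 */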
1664 | |||
1665 | /* No new debug messages available yet */ | ||
1666 | if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) { | ||
1667 | break; | ||
1668 | } else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) { | ||
1669 | /* Continuous data */ | ||
1670 | ul_new_message_length = | ||
1671 | ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer; | ||
1672 | |||
1673 | memcpy(hio_mgr->msg, | ||
1674 | (char *)hio_mgr->gpp_read_pointer, | ||
1675 | ul_new_message_length); | ||
1676 | hio_mgr->msg[ul_new_message_length] = '\0'; | ||
1677 | /* | ||
1678 | * Advance the GPP trace pointer to DSP current | ||
1679 | * pointer. | ||
1680 | */ | ||
1681 | hio_mgr->gpp_read_pointer += ul_new_message_length; | ||
1682 | /* Print the trace messages */ | ||
1683 | pr_info("DSPTrace: %s\n", hio_mgr->msg); | ||
1684 | } else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) { | ||
1685 | /* Handle trace buffer wraparound */ | ||
1686 | memcpy(hio_mgr->msg, | ||
1687 | (char *)hio_mgr->gpp_read_pointer, | ||
1688 | hio_mgr->trace_buffer_end - | ||
1689 | hio_mgr->gpp_read_pointer); | ||
1690 | ul_new_message_length = | ||
1691 | ul_gpp_cur_pointer - hio_mgr->trace_buffer_begin; | ||
1692 | memcpy(&hio_mgr->msg[hio_mgr->trace_buffer_end - | ||
1693 | hio_mgr->gpp_read_pointer], | ||
1694 | (char *)hio_mgr->trace_buffer_begin, | ||
1695 | ul_new_message_length); | ||
1696 | hio_mgr->msg[hio_mgr->trace_buffer_end - | ||
1697 | hio_mgr->gpp_read_pointer + | ||
1698 | ul_new_message_length] = '\0'; | ||
1699 | /* | ||
1700 | * Advance the GPP trace pointer to DSP current | ||
1701 | * pointer. | ||
1702 | */ | ||
1703 | hio_mgr->gpp_read_pointer = | ||
1704 | hio_mgr->trace_buffer_begin + | ||
1705 | ul_new_message_length; | ||
1706 | /* Print the trace messages */ | ||
1707 | pr_info("DSPTrace: %s\n", hio_mgr->msg); | ||
1708 | } | ||
1709 | } | ||
1710 | } | ||
1711 | #endif | ||
1712 | |||
1713 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
1714 | /* | ||
1715 | * ======== print_dsp_trace_buffer ======== | ||
1716 | * Prints the trace buffer returned from the DSP (if DBG_Trace is enabled). | ||
1717 | * Parameters: | ||
1718 | * hbridge_context: Handle to the Bridge driver's device | ||
1719 | * context. | ||
1720 | * Returns: | ||
1721 | * 0: Success. | ||
1722 | * -ENOMEM: Unable to allocate memory. | ||
1723 | * Requires: | ||
1724 | * hbridge_context must be valid. Checked in bridge_deh_notify. | ||
1725 | */ | ||
1726 | int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context) | ||
1727 | { | ||
1728 | int status = 0; | ||
1729 | struct cod_manager *cod_mgr; | ||
1730 | u32 ul_trace_end; | ||
1731 | u32 ul_trace_begin; | ||
1732 | u32 trace_cur_pos; | ||
1733 | u32 ul_num_bytes = 0; | ||
1734 | u32 ul_num_words = 0; | ||
1735 | u32 ul_word_size = 2; | ||
1736 | char *psz_buf; | ||
1737 | char *str_beg; | ||
1738 | char *trace_end; | ||
1739 | char *buf_end; | ||
1740 | char *new_line; | ||
1741 | |||
1742 | struct bridge_dev_context *pbridge_context = hbridge_context; | ||
1743 | struct bridge_drv_interface *intf_fxns; | ||
1744 | struct dev_object *dev_obj = (struct dev_object *) | ||
1745 | pbridge_context->dev_obj; | ||
1746 | |||
1747 | status = dev_get_cod_mgr(dev_obj, &cod_mgr); | ||
1748 | |||
1749 | if (cod_mgr) { | ||
1750 | /* Look for SYS_PUTCBEG/SYS_PUTCEND */ | ||
1751 | status = | ||
1752 | cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin); | ||
1753 | } else { | ||
1754 | status = -EFAULT; | ||
1755 | } | ||
1756 | if (!status) | ||
1757 | status = | ||
1758 | cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end); | ||
1759 | |||
1760 | if (!status) | ||
1761 | /* trace_cur_pos will hold the address of a DSP pointer */ | ||
1762 | status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS, | ||
1763 | &trace_cur_pos); | ||
1764 | |||
1765 | if (status) | ||
1766 | goto func_end; | ||
1767 | |||
1768 | ul_num_bytes = (ul_trace_end - ul_trace_begin); | ||
1769 | |||
1770 | ul_num_words = ul_num_bytes * ul_word_size; | ||
1771 | status = dev_get_intf_fxns(dev_obj, &intf_fxns); | ||
1772 | |||
1773 | if (status) | ||
1774 | goto func_end; | ||
1775 | |||
1776 | psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC); | ||
1777 | if (psz_buf != NULL) { | ||
1778 | /* Read trace buffer data */ | ||
1779 | status = (*intf_fxns->brd_read)(pbridge_context, | ||
1780 | (u8 *)psz_buf, (u32)ul_trace_begin, | ||
1781 | ul_num_bytes, 0); | ||
1782 | |||
1783 | if (status) | ||
1784 | goto func_end; | ||
1785 | |||
1786 | /* Pack and do newline conversion */ | ||
1787 | pr_debug("PrintDspTraceBuffer: " | ||
1788 | "before pack and unpack.\n"); | ||
1789 | pr_debug("%s: DSP Trace Buffer Begin:\n" | ||
1790 | "=======================\n%s\n", | ||
1791 | __func__, psz_buf); | ||
1792 | |||
1793 | /* Read the value at the DSP address in trace_cur_pos. */ | ||
1794 | status = (*intf_fxns->brd_read)(pbridge_context, | ||
1795 | (u8 *)&trace_cur_pos, (u32)trace_cur_pos, | ||
1796 | 4, 0); | ||
1797 | if (status) | ||
1798 | goto func_end; | ||
1799 | /* Pack and do newline conversion */ | ||
1800 | pr_info("DSP Trace Buffer Begin:\n" | ||
1801 | "=======================\n%s\n", | ||
1802 | psz_buf); | ||
1803 | |||
1804 | |||
1805 | /* convert to offset */ | ||
1806 | trace_cur_pos = trace_cur_pos - ul_trace_begin; | ||
1807 | |||
1808 | if (ul_num_bytes) { | ||
1809 | /* | ||
1810 | * The buffer is not full; find the end of the | ||
1811 | * data -- buf_end will be >= psz_buf after the | ||
1812 | * loop below. | ||
1813 | */ | ||
1814 | buf_end = &psz_buf[ul_num_bytes+1]; | ||
1815 | /* DSP print position */ | ||
1816 | trace_end = &psz_buf[trace_cur_pos]; | ||
1817 | |||
1818 | /* | ||
1819 | * Search buffer for a new_line and replace it | ||
1820 | * with '\0', then print as string. | ||
1821 | * Continue until end of buffer is reached. | ||
1822 | */ | ||
1823 | str_beg = trace_end; | ||
1824 | ul_num_bytes = buf_end - str_beg; | ||
1825 | |||
1826 | while (str_beg < buf_end) { | ||
1827 | new_line = strnchr(str_beg, ul_num_bytes, | ||
1828 | '\n'); | ||
1829 | if (new_line && new_line < buf_end) { | ||
1830 | *new_line = 0; | ||
1831 | pr_debug("%s\n", str_beg); | ||
1832 | str_beg = ++new_line; | ||
1833 | ul_num_bytes = buf_end - str_beg; | ||
1834 | } else { | ||
1835 | /* | ||
1836 | * Assume buffer empty if it contains | ||
1837 | * a zero | ||
1838 | */ | ||
1839 | if (*str_beg != '\0') { | ||
1840 | str_beg[ul_num_bytes] = 0; | ||
1841 | pr_debug("%s\n", str_beg); | ||
1842 | } | ||
1843 | str_beg = buf_end; | ||
1844 | ul_num_bytes = 0; | ||
1845 | } | ||
1846 | } | ||
1847 | /* | ||
1848 | * Search buffer for a newline and replace it | ||
1849 | * with '\0', then print as string. | ||
1850 | * Continue until buffer is exhausted. | ||
1851 | */ | ||
1852 | str_beg = psz_buf; | ||
1853 | ul_num_bytes = trace_end - str_beg; | ||
1854 | |||
1855 | while (str_beg < trace_end) { | ||
1856 | new_line = strnchr(str_beg, ul_num_bytes, '\n'); | ||
1857 | if (new_line != NULL && new_line < trace_end) { | ||
1858 | *new_line = 0; | ||
1859 | pr_debug("%s\n", str_beg); | ||
1860 | str_beg = ++new_line; | ||
1861 | ul_num_bytes = trace_end - str_beg; | ||
1862 | } else { | ||
1863 | /* | ||
1864 | * Assume buffer empty if it contains | ||
1865 | * a zero | ||
1866 | */ | ||
1867 | if (*str_beg != '\0') { | ||
1868 | str_beg[ul_num_bytes] = 0; | ||
1869 | pr_debug("%s\n", str_beg); | ||
1870 | } | ||
1871 | str_beg = trace_end; | ||
1872 | ul_num_bytes = 0; | ||
1873 | } | ||
1874 | } | ||
1875 | } | ||
1876 | pr_info("\n=======================\n" | ||
1877 | "DSP Trace Buffer End:\n"); | ||
1878 | kfree(psz_buf); | ||
1879 | } else { | ||
1880 | status = -ENOMEM; | ||
1881 | } | ||
1882 | func_end: | ||
1883 | if (status) | ||
1884 | dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status); | ||
1885 | return status; | ||
1886 | } | ||
1887 | |||
1888 | /** | ||
1889 | * dump_dsp_stack() - This function dumps the data on the DSP stack. | ||
1890 | * @bridge_context: Bridge driver's device context pointer. | ||
1891 | * | ||
1892 | */ | ||
1893 | int dump_dsp_stack(struct bridge_dev_context *bridge_context) | ||
1894 | { | ||
1895 | int status = 0; | ||
1896 | struct cod_manager *code_mgr; | ||
1897 | struct node_mgr *node_mgr; | ||
1898 | u32 trace_begin; | ||
1899 | char name[256]; | ||
1900 | struct { | ||
1901 | u32 head[2]; | ||
1902 | u32 size; | ||
1903 | } mmu_fault_dbg_info; | ||
1904 | u32 *buffer; | ||
1905 | u32 *buffer_beg; | ||
1906 | u32 *buffer_end; | ||
1907 | u32 exc_type; | ||
1908 | u32 dyn_ext_base; | ||
1909 | u32 i; | ||
1910 | u32 offset_output; | ||
1911 | u32 total_size; | ||
1912 | u32 poll_cnt; | ||
1913 | const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR", | ||
1914 | "IRP", "NRP", "AMR", "SSR", | ||
1915 | "ILC", "RILC", "IER", "CSR"}; | ||
1916 | const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"}; | ||
1917 | struct bridge_drv_interface *intf_fxns; | ||
1918 | struct dev_object *dev_object = bridge_context->dev_obj; | ||
1919 | |||
1920 | status = dev_get_cod_mgr(dev_object, &code_mgr); | ||
1921 | if (!code_mgr) { | ||
1922 | pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__); | ||
1923 | status = -EFAULT; | ||
1924 | } | ||
1925 | |||
1926 | if (!status) { | ||
1927 | status = dev_get_node_manager(dev_object, &node_mgr); | ||
1928 | if (!node_mgr) { | ||
1929 | pr_debug("%s: Failed on dev_get_node_manager.\n", | ||
1930 | __func__); | ||
1931 | status = -EFAULT; | ||
1932 | } | ||
1933 | } | ||
1934 | |||
1935 | if (!status) { | ||
1936 | /* Look for SYS_PUTCBEG/SYS_PUTCEND: */ | ||
1937 | status = | ||
1938 | cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin); | ||
1939 | pr_debug("%s: trace_begin Value 0x%x\n", | ||
1940 | __func__, trace_begin); | ||
1941 | if (status) | ||
1942 | pr_debug("%s: Failed on cod_get_sym_value.\n", | ||
1943 | __func__); | ||
1944 | } | ||
1945 | if (!status) | ||
1946 | status = dev_get_intf_fxns(dev_object, &intf_fxns); | ||
1947 | /* | ||
1948 | * Check for the "magic number" in the trace buffer. If it has | ||
1949 | * yet to appear then poll the trace buffer to wait for it. Its | ||
1950 | * appearance signals that the DSP has finished dumping its state. | ||
1951 | */ | ||
1952 | mmu_fault_dbg_info.head[0] = 0; | ||
1953 | mmu_fault_dbg_info.head[1] = 0; | ||
1954 | if (!status) { | ||
1955 | poll_cnt = 0; | ||
1956 | while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 || | ||
1957 | mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) && | ||
1958 | poll_cnt < POLL_MAX) { | ||
1959 | |||
1960 | /* Read DSP dump size from the DSP trace buffer... */ | ||
1961 | status = (*intf_fxns->brd_read)(bridge_context, | ||
1962 | (u8 *)&mmu_fault_dbg_info, (u32)trace_begin, | ||
1963 | sizeof(mmu_fault_dbg_info), 0); | ||
1964 | |||
1965 | if (status) | ||
1966 | break; | ||
1967 | |||
1968 | poll_cnt++; | ||
1969 | } | ||
1970 | |||
1971 | if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 || | ||
1972 | mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) { | ||
1973 | status = -ETIME; | ||
1974 | pr_err("%s:No DSP MMU-Fault information available.\n", | ||
1975 | __func__); | ||
1976 | } | ||
1977 | } | ||
1978 | |||
1979 | if (!status) { | ||
1980 | total_size = mmu_fault_dbg_info.size; | ||
1981 | /* Limit the size in case DSP went crazy */ | ||
1982 | if (total_size > MAX_MMU_DBGBUFF) | ||
1983 | total_size = MAX_MMU_DBGBUFF; | ||
1984 | |||
1985 | buffer = kzalloc(total_size, GFP_ATOMIC); | ||
1986 | if (!buffer) { | ||
1987 | status = -ENOMEM; | ||
1988 | pr_debug("%s: Failed to " | ||
1989 | "allocate stack dump buffer.\n", __func__); | ||
1990 | goto func_end; | ||
1991 | } | ||
1992 | |||
1993 | buffer_beg = buffer; | ||
1994 | buffer_end = buffer + total_size / 4; | ||
1995 | |||
1996 | /* Read bytes from the DSP trace buffer... */ | ||
1997 | status = (*intf_fxns->brd_read)(bridge_context, | ||
1998 | (u8 *)buffer, (u32)trace_begin, | ||
1999 | total_size, 0); | ||
2000 | if (status) { | ||
2001 | pr_debug("%s: Failed to Read Trace Buffer.\n", | ||
2002 | __func__); | ||
2003 | goto func_end; | ||
2004 | } | ||
2005 | |||
2006 | pr_err("\nApproximate Crash Position:\n" | ||
2007 | "--------------------------\n"); | ||
2008 | |||
2009 | exc_type = buffer[3]; | ||
2010 | if (!exc_type) | ||
2011 | i = buffer[79]; /* IRP */ | ||
2012 | else | ||
2013 | i = buffer[80]; /* NRP */ | ||
2014 | |||
2015 | status = | ||
2016 | cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base); | ||
2017 | if (status) { | ||
2018 | status = -EFAULT; | ||
2019 | goto func_end; | ||
2020 | } | ||
2021 | |||
2022 | if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i, | ||
2023 | 0x1000, &offset_output, name) == 0)) | ||
2024 | pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name, | ||
2025 | i - offset_output); | ||
2026 | else | ||
2027 | pr_err("0x%-8x [Unable to match to a symbol.]\n", i); | ||
2028 | |||
2029 | buffer += 4; | ||
2030 | |||
2031 | pr_err("\nExecution Info:\n" | ||
2032 | "---------------\n"); | ||
2033 | |||
2034 | if (*buffer < ARRAY_SIZE(exec_ctxt)) { | ||
2035 | pr_err("Execution context \t%s\n", | ||
2036 | exec_ctxt[*buffer++]); | ||
2037 | } else { | ||
2038 | pr_err("Execution context corrupt\n"); | ||
2039 | kfree(buffer_beg); | ||
2040 | return -EFAULT; | ||
2041 | } | ||
2042 | pr_err("Task Handle\t\t0x%x\n", *buffer++); | ||
2043 | pr_err("Stack Pointer\t\t0x%x\n", *buffer++); | ||
2044 | pr_err("Stack Top\t\t0x%x\n", *buffer++); | ||
2045 | pr_err("Stack Bottom\t\t0x%x\n", *buffer++); | ||
2046 | pr_err("Stack Size\t\t0x%x\n", *buffer++); | ||
2047 | pr_err("Stack Size In Use\t0x%x\n", *buffer++); | ||
2048 | |||
2049 | pr_err("\nCPU Registers\n" | ||
2050 | "---------------\n"); | ||
2051 | |||
2052 | for (i = 0; i < 32; i++) { | ||
2053 | if (i == 4 || i == 6 || i == 8) | ||
2054 | pr_err("A%d 0x%-8x [Function Argument %d]\n", | ||
2055 | i, *buffer++, i-3); | ||
2056 | else if (i == 15) | ||
2057 | pr_err("A15 0x%-8x [Frame Pointer]\n", | ||
2058 | *buffer++); | ||
2059 | else | ||
2060 | pr_err("A%d 0x%x\n", i, *buffer++); | ||
2061 | } | ||
2062 | |||
2063 | pr_err("\nB0 0x%x\n", *buffer++); | ||
2064 | pr_err("B1 0x%x\n", *buffer++); | ||
2065 | pr_err("B2 0x%x\n", *buffer++); | ||
2066 | |||
2067 | if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr, | ||
2068 | *buffer, 0x1000, &offset_output, name) == 0)) | ||
2069 | |||
2070 | pr_err("B3 0x%-8x [Function Return Pointer:" | ||
2071 | " \"%s\" + 0x%x]\n", *buffer, name, | ||
2072 | *buffer - offset_output); | ||
2073 | else | ||
2074 | pr_err("B3 0x%-8x [Function Return Pointer:" | ||
2075 | "Unable to match to a symbol.]\n", *buffer); | ||
2076 | |||
2077 | buffer++; | ||
2078 | |||
2079 | for (i = 4; i < 32; i++) { | ||
2080 | if (i == 4 || i == 6 || i == 8) | ||
2081 | pr_err("B%d 0x%-8x [Function Argument %d]\n", | ||
2082 | i, *buffer++, i-2); | ||
2083 | else if (i == 14) | ||
2084 | pr_err("B14 0x%-8x [Data Page Pointer]\n", | ||
2085 | *buffer++); | ||
2086 | else | ||
2087 | pr_err("B%d 0x%x\n", i, *buffer++); | ||
2088 | } | ||
2089 | |||
2090 | pr_err("\n"); | ||
2091 | |||
2092 | for (i = 0; i < ARRAY_SIZE(dsp_regs); i++) | ||
2093 | pr_err("%s 0x%x\n", dsp_regs[i], *buffer++); | ||
2094 | |||
2095 | pr_err("\nStack:\n" | ||
2096 | "------\n"); | ||
2097 | |||
2098 | for (i = 0; buffer < buffer_end; i++, buffer++) { | ||
2099 | if ((*buffer > dyn_ext_base) && ( | ||
2100 | node_find_addr(node_mgr, *buffer , 0x600, | ||
2101 | &offset_output, name) == 0)) | ||
2102 | pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n", | ||
2103 | i, *buffer, name, | ||
2104 | *buffer - offset_output); | ||
2105 | else | ||
2106 | pr_err("[%d] 0x%x\n", i, *buffer); | ||
2107 | } | ||
2108 | kfree(buffer_beg); | ||
2109 | } | ||
2110 | func_end: | ||
2111 | return status; | ||
2112 | } | ||
2113 | |||
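The parser above addresses the crash-dump image by bare word offsets (buffer[3], buffer[79]/[80], then a run of *buffer++ reads). As a reading aid only, the sketch below reconstructs the implied layout from those offsets; it is not a layout published by TI, and every name in it is hypothetical.

/* Hypothetical word-by-word view of the dump image, inferred solely from
 * the offsets used by the parser above; not an official definition. */
struct dsp_crash_dump_sketch {
	u32 unused[3];		/* words 0-2: skipped by the parser */
	u32 exc_type;		/* word 3: 0 selects IRP, non-zero selects NRP */
	u32 exec_ctxt;		/* word 4: index into the exec_ctxt[] name table */
	u32 task_handle;	/* word 5 */
	u32 stack_pointer;	/* word 6 */
	u32 stack_top;		/* word 7 */
	u32 stack_bottom;	/* word 8 */
	u32 stack_size;		/* word 9 */
	u32 stack_in_use;	/* word 10 */
	u32 a_regs[32];		/* words 11-42: A0..A31 */
	u32 b_regs[32];		/* words 43-74: B0..B31 */
	u32 ctrl_regs[];	/* words 75 onward: the registers named in
				 * dsp_regs[]; the IRP/NRP reads at words 79
				 * and 80 land inside this block, and the
				 * saved DSP stack follows it */
};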
2114 | /** | ||
2115 | * dump_dl_modules() - This function dumps the _DLModules loaded on the DSP side | ||
2116 | * @bridge_context: Bridge driver's device context pointer. | ||
2117 | * | ||
2118 | */ | ||
2119 | void dump_dl_modules(struct bridge_dev_context *bridge_context) | ||
2120 | { | ||
2121 | struct cod_manager *code_mgr; | ||
2122 | struct bridge_drv_interface *intf_fxns; | ||
2123 | struct bridge_dev_context *bridge_ctxt = bridge_context; | ||
2124 | struct dev_object *dev_object = bridge_ctxt->dev_obj; | ||
2125 | struct modules_header modules_hdr; | ||
2126 | struct dll_module *module_struct = NULL; | ||
2127 | u32 module_dsp_addr; | ||
2128 | u32 module_size; | ||
2129 | u32 module_struct_size = 0; | ||
2130 | u32 sect_ndx; | ||
2131 | char *sect_str; | ||
2132 | int status = 0; | ||
2133 | |||
2134 | status = dev_get_intf_fxns(dev_object, &intf_fxns); | ||
2135 | if (status) { | ||
2136 | pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__); | ||
2137 | goto func_end; | ||
2138 | } | ||
2139 | |||
2140 | status = dev_get_cod_mgr(dev_object, &code_mgr); | ||
2141 | if (!code_mgr) { | ||
2142 | pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__); | ||
2143 | status = -EFAULT; | ||
2144 | goto func_end; | ||
2145 | } | ||
2146 | |||
2147 | /* Lookup the address of the modules_header structure */ | ||
2148 | status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr); | ||
2149 | if (status) { | ||
2150 | pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n", | ||
2151 | __func__); | ||
2152 | goto func_end; | ||
2153 | } | ||
2154 | |||
2155 | pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr); | ||
2156 | |||
2157 | /* Copy the modules_header structure from DSP memory. */ | ||
2158 | status = (*intf_fxns->brd_read)(bridge_context, (u8 *) &modules_hdr, | ||
2159 | (u32) module_dsp_addr, sizeof(modules_hdr), 0); | ||
2160 | |||
2161 | if (status) { | ||
2162 | 		pr_debug("%s: Failed to read modules header.\n", | ||
2163 | __func__); | ||
2164 | goto func_end; | ||
2165 | } | ||
2166 | |||
2167 | module_dsp_addr = modules_hdr.first_module; | ||
2168 | module_size = modules_hdr.first_module_size; | ||
2169 | |||
2170 | pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr, | ||
2171 | module_size); | ||
2172 | |||
2173 | pr_err("\nDynamically Loaded Modules:\n" | ||
2174 | "---------------------------\n"); | ||
2175 | |||
2176 | /* For each dll_module structure in the list... */ | ||
2177 | while (module_size) { | ||
2178 | /* | ||
2179 | * Allocate/re-allocate memory to hold the dll_module | ||
2180 | * structure. The memory is re-allocated only if the existing | ||
2181 | * allocation is too small. | ||
2182 | */ | ||
2183 | if (module_size > module_struct_size) { | ||
2184 | kfree(module_struct); | ||
2185 | module_struct = kzalloc(module_size+128, GFP_ATOMIC); | ||
2186 | module_struct_size = module_size+128; | ||
2187 | pr_debug("%s: allocated module struct %p %d\n", | ||
2188 | __func__, module_struct, module_struct_size); | ||
2189 | if (!module_struct) | ||
2190 | goto func_end; | ||
2191 | } | ||
2192 | /* Copy the dll_module structure from DSP memory */ | ||
2193 | status = (*intf_fxns->brd_read)(bridge_context, | ||
2194 | (u8 *)module_struct, module_dsp_addr, module_size, 0); | ||
2195 | |||
2196 | if (status) { | ||
2197 | pr_debug( | ||
2198 | "%s: Failed to read dll_module struct for 0x%x.\n", | ||
2199 | __func__, module_dsp_addr); | ||
2200 | break; | ||
2201 | } | ||
2202 | |||
2203 | /* Update info regarding the _next_ module in the list. */ | ||
2204 | module_dsp_addr = module_struct->next_module; | ||
2205 | module_size = module_struct->next_module_size; | ||
2206 | |||
2207 | pr_debug("%s: next module 0x%x %d, this module num sects %d\n", | ||
2208 | __func__, module_dsp_addr, module_size, | ||
2209 | module_struct->num_sects); | ||
2210 | |||
2211 | /* | ||
2212 | * The section name strings start immediately following | ||
2213 | * the array of dll_sect structures. | ||
2214 | */ | ||
2215 | sect_str = (char *) &module_struct-> | ||
2216 | sects[module_struct->num_sects]; | ||
2217 | pr_err("%s\n", sect_str); | ||
2218 | |||
2219 | /* | ||
2220 | * Advance to the first section name string. | ||
2221 | * Each string follows the one before. | ||
2222 | */ | ||
2223 | sect_str += strlen(sect_str) + 1; | ||
2224 | |||
2225 | /* Access each dll_sect structure and its name string. */ | ||
2226 | for (sect_ndx = 0; | ||
2227 | sect_ndx < module_struct->num_sects; sect_ndx++) { | ||
2228 | pr_err(" Section: 0x%x ", | ||
2229 | module_struct->sects[sect_ndx].sect_load_adr); | ||
2230 | |||
2231 | if (((u32) sect_str - (u32) module_struct) < | ||
2232 | module_struct_size) { | ||
2233 | pr_err("%s\n", sect_str); | ||
2234 | /* Each string follows the one before. */ | ||
2235 | sect_str += strlen(sect_str)+1; | ||
2236 | } else { | ||
2237 | pr_err("<string error>\n"); | ||
2238 | 				pr_debug("%s: section name string address " | ||
2239 | "is invalid %p\n", __func__, sect_str); | ||
2240 | } | ||
2241 | } | ||
2242 | } | ||
2243 | func_end: | ||
2244 | kfree(module_struct); | ||
2245 | } | ||
2246 | #endif | ||
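For readers following dump_dl_modules() above, this is a minimal sketch of the DSP-resident records the walker relies on. It only mirrors the fields the code actually touches; the real definitions live in the driver headers, and the field marked as assumed is not referenced by the code above.

/* Reading aid only, not the driver's authoritative definitions. */
struct modules_header_sketch {
	u32 first_module;	/* DSP address of the first dll_module */
	u32 first_module_size;	/* its size in bytes; 0 ends the list */
};

struct dll_sect_sketch {
	u32 sect_load_adr;	/* load address printed per section */
	u32 sect_run_adr;	/* assumed companion field, unused above */
};

struct dll_module_sketch {
	u32 next_module;	/* DSP address of the next record */
	u32 next_module_size;	/* 0 when this is the last module */
	u32 num_sects;
	struct dll_sect_sketch sects[];	/* num_sects entries */
	/* Immediately after sects[num_sects] comes a packed run of
	 * NUL-terminated strings: the one printed before the loop above,
	 * then one name per section, consumed in order. */
};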
diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c deleted file mode 100644 index 7b517eb827fe..000000000000 --- a/drivers/staging/tidspbridge/core/msg_sm.c +++ /dev/null | |||
@@ -1,564 +0,0 @@ | |||
1 | /* | ||
2 | * msg_sm.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Implements upper edge functions for Bridge message module. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | #include <linux/types.h> | ||
19 | |||
20 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
21 | #include <dspbridge/dbdefs.h> | ||
22 | |||
23 | /* ----------------------------------- OS Adaptation Layer */ | ||
24 | #include <dspbridge/sync.h> | ||
25 | |||
26 | /* ----------------------------------- Platform Manager */ | ||
27 | #include <dspbridge/dev.h> | ||
28 | |||
29 | /* ----------------------------------- Others */ | ||
30 | #include <dspbridge/io_sm.h> | ||
31 | |||
32 | /* ----------------------------------- This */ | ||
33 | #include <_msg_sm.h> | ||
34 | #include <dspbridge/dspmsg.h> | ||
35 | |||
36 | /* ----------------------------------- Function Prototypes */ | ||
37 | static int add_new_msg(struct list_head *msg_list); | ||
38 | static void delete_msg_mgr(struct msg_mgr *hmsg_mgr); | ||
39 | static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp); | ||
40 | static void free_msg_list(struct list_head *msg_list); | ||
41 | |||
42 | /* | ||
43 | * ======== bridge_msg_create ======== | ||
44 | * Create an object to manage message queues. Only one of these objects | ||
45 | * can exist per device object. | ||
46 | */ | ||
47 | int bridge_msg_create(struct msg_mgr **msg_man, | ||
48 | struct dev_object *hdev_obj, | ||
49 | msg_onexit msg_callback) | ||
50 | { | ||
51 | struct msg_mgr *msg_mgr_obj; | ||
52 | struct io_mgr *hio_mgr; | ||
53 | int status = 0; | ||
54 | |||
55 | if (!msg_man || !msg_callback || !hdev_obj) | ||
56 | return -EFAULT; | ||
57 | |||
58 | dev_get_io_mgr(hdev_obj, &hio_mgr); | ||
59 | if (!hio_mgr) | ||
60 | return -EFAULT; | ||
61 | |||
62 | *msg_man = NULL; | ||
63 | /* Allocate msg_ctrl manager object */ | ||
64 | msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL); | ||
65 | if (!msg_mgr_obj) | ||
66 | return -ENOMEM; | ||
67 | |||
68 | msg_mgr_obj->on_exit = msg_callback; | ||
69 | msg_mgr_obj->iomgr = hio_mgr; | ||
70 | /* List of MSG_QUEUEs */ | ||
71 | INIT_LIST_HEAD(&msg_mgr_obj->queue_list); | ||
72 | /* | ||
73 | * Queues of message frames for messages to the DSP. Message | ||
74 | * frames will only be added to the free queue when a | ||
75 | * msg_queue object is created. | ||
76 | */ | ||
77 | INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list); | ||
78 | INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list); | ||
79 | spin_lock_init(&msg_mgr_obj->msg_mgr_lock); | ||
80 | |||
81 | /* | ||
82 | * Create an event to be used by bridge_msg_put() in waiting | ||
83 | * for an available free frame from the message manager. | ||
84 | */ | ||
85 | msg_mgr_obj->sync_event = | ||
86 | kzalloc(sizeof(struct sync_object), GFP_KERNEL); | ||
87 | if (!msg_mgr_obj->sync_event) { | ||
88 | kfree(msg_mgr_obj); | ||
89 | return -ENOMEM; | ||
90 | } | ||
91 | sync_init_event(msg_mgr_obj->sync_event); | ||
92 | |||
93 | *msg_man = msg_mgr_obj; | ||
94 | |||
95 | return status; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * ======== bridge_msg_create_queue ======== | ||
100 | * Create a msg_queue for sending/receiving messages to/from a node | ||
101 | * on the DSP. | ||
102 | */ | ||
103 | int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq, | ||
104 | u32 msgq_id, u32 max_msgs, void *arg) | ||
105 | { | ||
106 | u32 i; | ||
107 | u32 num_allocated = 0; | ||
108 | struct msg_queue *msg_q; | ||
109 | int status = 0; | ||
110 | |||
111 | if (!hmsg_mgr || msgq == NULL) | ||
112 | return -EFAULT; | ||
113 | |||
114 | *msgq = NULL; | ||
115 | /* Allocate msg_queue object */ | ||
116 | msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL); | ||
117 | if (!msg_q) | ||
118 | return -ENOMEM; | ||
119 | |||
120 | msg_q->max_msgs = max_msgs; | ||
121 | msg_q->msg_mgr = hmsg_mgr; | ||
122 | msg_q->arg = arg; /* Node handle */ | ||
123 | msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */ | ||
124 | /* Queues of Message frames for messages from the DSP */ | ||
125 | INIT_LIST_HEAD(&msg_q->msg_free_list); | ||
126 | INIT_LIST_HEAD(&msg_q->msg_used_list); | ||
127 | |||
128 | /* Create event that will be signalled when a message from | ||
129 | * the DSP is available. */ | ||
130 | msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL); | ||
131 | if (!msg_q->sync_event) { | ||
132 | status = -ENOMEM; | ||
133 | goto out_err; | ||
134 | |||
135 | } | ||
136 | sync_init_event(msg_q->sync_event); | ||
137 | |||
138 | /* Create a notification list for message ready notification. */ | ||
139 | msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); | ||
140 | if (!msg_q->ntfy_obj) { | ||
141 | status = -ENOMEM; | ||
142 | goto out_err; | ||
143 | } | ||
144 | ntfy_init(msg_q->ntfy_obj); | ||
145 | |||
146 | /* Create events that will be used to synchronize cleanup | ||
147 | * when the object is deleted. sync_done will be set to | ||
148 | * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack | ||
149 | * will be set by the unblocked thread to signal that it | ||
150 | * is unblocked and will no longer reference the object. */ | ||
151 | msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL); | ||
152 | if (!msg_q->sync_done) { | ||
153 | status = -ENOMEM; | ||
154 | goto out_err; | ||
155 | } | ||
156 | sync_init_event(msg_q->sync_done); | ||
157 | |||
158 | msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL); | ||
159 | if (!msg_q->sync_done_ack) { | ||
160 | status = -ENOMEM; | ||
161 | goto out_err; | ||
162 | } | ||
163 | sync_init_event(msg_q->sync_done_ack); | ||
164 | |||
165 | /* Enter critical section */ | ||
166 | spin_lock_bh(&hmsg_mgr->msg_mgr_lock); | ||
167 | /* Initialize message frames and put in appropriate queues */ | ||
168 | for (i = 0; i < max_msgs && !status; i++) { | ||
169 | status = add_new_msg(&hmsg_mgr->msg_free_list); | ||
170 | if (!status) { | ||
171 | num_allocated++; | ||
172 | status = add_new_msg(&msg_q->msg_free_list); | ||
173 | } | ||
174 | } | ||
175 | if (status) { | ||
176 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
177 | goto out_err; | ||
178 | } | ||
179 | |||
180 | list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list); | ||
181 | *msgq = msg_q; | ||
182 | /* Signal that free frames are now available */ | ||
183 | if (!list_empty(&hmsg_mgr->msg_free_list)) | ||
184 | sync_set_event(hmsg_mgr->sync_event); | ||
185 | |||
186 | /* Exit critical section */ | ||
187 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
188 | |||
189 | return 0; | ||
190 | out_err: | ||
191 | delete_msg_queue(msg_q, num_allocated); | ||
192 | return status; | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * ======== bridge_msg_delete ======== | ||
197 | * Delete a msg_ctrl manager allocated in bridge_msg_create(). | ||
198 | */ | ||
199 | void bridge_msg_delete(struct msg_mgr *hmsg_mgr) | ||
200 | { | ||
201 | delete_msg_mgr(hmsg_mgr); | ||
202 | } | ||
203 | |||
204 | /* | ||
205 | * ======== bridge_msg_delete_queue ======== | ||
206 | * Delete a msg_ctrl queue allocated in bridge_msg_create_queue. | ||
207 | */ | ||
208 | void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj) | ||
209 | { | ||
210 | struct msg_mgr *hmsg_mgr; | ||
211 | u32 io_msg_pend; | ||
212 | |||
213 | if (!msg_queue_obj || !msg_queue_obj->msg_mgr) | ||
214 | return; | ||
215 | |||
216 | hmsg_mgr = msg_queue_obj->msg_mgr; | ||
217 | msg_queue_obj->done = true; | ||
218 | /* Unblock all threads blocked in MSG_Get() or MSG_Put(). */ | ||
219 | io_msg_pend = msg_queue_obj->io_msg_pend; | ||
220 | while (io_msg_pend) { | ||
221 | /* Unblock thread */ | ||
222 | sync_set_event(msg_queue_obj->sync_done); | ||
223 | /* Wait for acknowledgement */ | ||
224 | sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE); | ||
225 | io_msg_pend = msg_queue_obj->io_msg_pend; | ||
226 | } | ||
227 | /* Remove message queue from hmsg_mgr->queue_list */ | ||
228 | spin_lock_bh(&hmsg_mgr->msg_mgr_lock); | ||
229 | list_del(&msg_queue_obj->list_elem); | ||
230 | /* Free the message queue object */ | ||
231 | delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs); | ||
232 | if (list_empty(&hmsg_mgr->msg_free_list)) | ||
233 | sync_reset_event(hmsg_mgr->sync_event); | ||
234 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * ======== bridge_msg_get ======== | ||
239 | * Get a message from a msg_ctrl queue. | ||
240 | */ | ||
241 | int bridge_msg_get(struct msg_queue *msg_queue_obj, | ||
242 | struct dsp_msg *pmsg, u32 utimeout) | ||
243 | { | ||
244 | struct msg_frame *msg_frame_obj; | ||
245 | struct msg_mgr *hmsg_mgr; | ||
246 | struct sync_object *syncs[2]; | ||
247 | u32 index; | ||
248 | int status = 0; | ||
249 | |||
250 | if (!msg_queue_obj || pmsg == NULL) | ||
251 | return -ENOMEM; | ||
252 | |||
253 | hmsg_mgr = msg_queue_obj->msg_mgr; | ||
254 | |||
255 | spin_lock_bh(&hmsg_mgr->msg_mgr_lock); | ||
256 | /* If a message is already there, get it */ | ||
257 | if (!list_empty(&msg_queue_obj->msg_used_list)) { | ||
258 | msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list, | ||
259 | struct msg_frame, list_elem); | ||
260 | list_del(&msg_frame_obj->list_elem); | ||
261 | *pmsg = msg_frame_obj->msg_data.msg; | ||
262 | list_add_tail(&msg_frame_obj->list_elem, | ||
263 | &msg_queue_obj->msg_free_list); | ||
264 | if (list_empty(&msg_queue_obj->msg_used_list)) | ||
265 | sync_reset_event(msg_queue_obj->sync_event); | ||
266 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | if (msg_queue_obj->done) { | ||
271 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
272 | return -EPERM; | ||
273 | } | ||
274 | msg_queue_obj->io_msg_pend++; | ||
275 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
276 | |||
277 | /* | ||
278 | 	 * Wait until a message is available, the timeout expires, or we | ||
279 | 	 * are done. We don't have to schedule the DPC, since the DSP | ||
280 | 	 * will send messages when they are available. | ||
281 | */ | ||
282 | syncs[0] = msg_queue_obj->sync_event; | ||
283 | syncs[1] = msg_queue_obj->sync_done; | ||
284 | status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index); | ||
285 | |||
286 | spin_lock_bh(&hmsg_mgr->msg_mgr_lock); | ||
287 | if (msg_queue_obj->done) { | ||
288 | msg_queue_obj->io_msg_pend--; | ||
289 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
290 | /* | ||
291 | * Signal that we're not going to access msg_queue_obj | ||
292 | * anymore, so it can be deleted. | ||
293 | */ | ||
294 | sync_set_event(msg_queue_obj->sync_done_ack); | ||
295 | return -EPERM; | ||
296 | } | ||
297 | if (!status && !list_empty(&msg_queue_obj->msg_used_list)) { | ||
298 | /* Get msg from used list */ | ||
299 | msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list, | ||
300 | struct msg_frame, list_elem); | ||
301 | list_del(&msg_frame_obj->list_elem); | ||
302 | /* Copy message into pmsg and put frame on the free list */ | ||
303 | *pmsg = msg_frame_obj->msg_data.msg; | ||
304 | list_add_tail(&msg_frame_obj->list_elem, | ||
305 | &msg_queue_obj->msg_free_list); | ||
306 | } | ||
307 | msg_queue_obj->io_msg_pend--; | ||
308 | /* Reset the event if there are still queued messages */ | ||
309 | if (!list_empty(&msg_queue_obj->msg_used_list)) | ||
310 | sync_set_event(msg_queue_obj->sync_event); | ||
311 | |||
312 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
313 | |||
314 | return status; | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * ======== bridge_msg_put ======== | ||
319 | * Put a message onto a msg_ctrl queue. | ||
320 | */ | ||
321 | int bridge_msg_put(struct msg_queue *msg_queue_obj, | ||
322 | const struct dsp_msg *pmsg, u32 utimeout) | ||
323 | { | ||
324 | struct msg_frame *msg_frame_obj; | ||
325 | struct msg_mgr *hmsg_mgr; | ||
326 | struct sync_object *syncs[2]; | ||
327 | u32 index; | ||
328 | int status; | ||
329 | |||
330 | if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr) | ||
331 | return -EFAULT; | ||
332 | |||
333 | hmsg_mgr = msg_queue_obj->msg_mgr; | ||
334 | |||
335 | spin_lock_bh(&hmsg_mgr->msg_mgr_lock); | ||
336 | |||
337 | /* If a message frame is available, use it */ | ||
338 | if (!list_empty(&hmsg_mgr->msg_free_list)) { | ||
339 | msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list, | ||
340 | struct msg_frame, list_elem); | ||
341 | list_del(&msg_frame_obj->list_elem); | ||
342 | msg_frame_obj->msg_data.msg = *pmsg; | ||
343 | msg_frame_obj->msg_data.msgq_id = | ||
344 | msg_queue_obj->msgq_id; | ||
345 | list_add_tail(&msg_frame_obj->list_elem, | ||
346 | &hmsg_mgr->msg_used_list); | ||
347 | hmsg_mgr->msgs_pending++; | ||
348 | |||
349 | if (list_empty(&hmsg_mgr->msg_free_list)) | ||
350 | sync_reset_event(hmsg_mgr->sync_event); | ||
351 | |||
352 | /* Release critical section before scheduling DPC */ | ||
353 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
354 | /* Schedule a DPC, to do the actual data transfer: */ | ||
355 | iosm_schedule(hmsg_mgr->iomgr); | ||
356 | return 0; | ||
357 | } | ||
358 | |||
359 | if (msg_queue_obj->done) { | ||
360 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
361 | return -EPERM; | ||
362 | } | ||
363 | msg_queue_obj->io_msg_pend++; | ||
364 | |||
365 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
366 | |||
367 | 	/* Wait until a free message frame is available, timeout, or done */ | ||
368 | syncs[0] = hmsg_mgr->sync_event; | ||
369 | syncs[1] = msg_queue_obj->sync_done; | ||
370 | status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index); | ||
371 | if (status) | ||
372 | return status; | ||
373 | |||
374 | /* Enter critical section */ | ||
375 | spin_lock_bh(&hmsg_mgr->msg_mgr_lock); | ||
376 | if (msg_queue_obj->done) { | ||
377 | msg_queue_obj->io_msg_pend--; | ||
378 | /* Exit critical section */ | ||
379 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
380 | /* | ||
381 | * Signal that we're not going to access msg_queue_obj | ||
382 | * anymore, so it can be deleted. | ||
383 | */ | ||
384 | sync_set_event(msg_queue_obj->sync_done_ack); | ||
385 | return -EPERM; | ||
386 | } | ||
387 | |||
388 | if (list_empty(&hmsg_mgr->msg_free_list)) { | ||
389 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
390 | return -EFAULT; | ||
391 | } | ||
392 | |||
393 | /* Get msg from free list */ | ||
394 | msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list, | ||
395 | struct msg_frame, list_elem); | ||
396 | /* | ||
397 | * Copy message into pmsg and put frame on the | ||
398 | * used list. | ||
399 | */ | ||
400 | list_del(&msg_frame_obj->list_elem); | ||
401 | msg_frame_obj->msg_data.msg = *pmsg; | ||
402 | msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id; | ||
403 | list_add_tail(&msg_frame_obj->list_elem, &hmsg_mgr->msg_used_list); | ||
404 | hmsg_mgr->msgs_pending++; | ||
405 | /* | ||
406 | * Schedule a DPC, to do the actual | ||
407 | * data transfer. | ||
408 | */ | ||
409 | iosm_schedule(hmsg_mgr->iomgr); | ||
410 | |||
411 | msg_queue_obj->io_msg_pend--; | ||
412 | /* Reset event if there are still frames available */ | ||
413 | if (!list_empty(&hmsg_mgr->msg_free_list)) | ||
414 | sync_set_event(hmsg_mgr->sync_event); | ||
415 | |||
416 | /* Exit critical section */ | ||
417 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | ||
418 | |||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | /* | ||
423 | * ======== bridge_msg_register_notify ======== | ||
424 | */ | ||
425 | int bridge_msg_register_notify(struct msg_queue *msg_queue_obj, | ||
426 | u32 event_mask, u32 notify_type, | ||
427 | struct dsp_notification *hnotification) | ||
428 | { | ||
429 | int status = 0; | ||
430 | |||
431 | if (!msg_queue_obj || !hnotification) { | ||
432 | status = -ENOMEM; | ||
433 | goto func_end; | ||
434 | } | ||
435 | |||
436 | if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) { | ||
437 | status = -EPERM; | ||
438 | goto func_end; | ||
439 | } | ||
440 | |||
441 | if (notify_type != DSP_SIGNALEVENT) { | ||
442 | status = -EBADR; | ||
443 | goto func_end; | ||
444 | } | ||
445 | |||
446 | if (event_mask) | ||
447 | status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification, | ||
448 | event_mask, notify_type); | ||
449 | else | ||
450 | status = ntfy_unregister(msg_queue_obj->ntfy_obj, | ||
451 | hnotification); | ||
452 | |||
453 | if (status == -EINVAL) { | ||
454 | /* Not registered. Ok, since we couldn't have known. Node | ||
455 | * notifications are split between node state change handled | ||
456 | * by NODE, and message ready handled by msg_ctrl. */ | ||
457 | status = 0; | ||
458 | } | ||
459 | func_end: | ||
460 | return status; | ||
461 | } | ||
462 | |||
463 | /* | ||
464 | * ======== bridge_msg_set_queue_id ======== | ||
465 | */ | ||
466 | void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id) | ||
467 | { | ||
468 | /* | ||
469 | * A message queue must be created when a node is allocated, | ||
470 | * so that node_register_notify() can be called before the node | ||
471 | * is created. Since we don't know the node environment until the | ||
472 | * node is created, we need this function to set msg_queue_obj->msgq_id | ||
473 | * to the node environment, after the node is created. | ||
474 | */ | ||
475 | if (msg_queue_obj) | ||
476 | msg_queue_obj->msgq_id = msgq_id; | ||
477 | } | ||
478 | |||
479 | /* | ||
480 | * ======== add_new_msg ======== | ||
481 | * Must be called in message manager critical section. | ||
482 | */ | ||
483 | static int add_new_msg(struct list_head *msg_list) | ||
484 | { | ||
485 | struct msg_frame *pmsg; | ||
486 | |||
487 | pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC); | ||
488 | if (!pmsg) | ||
489 | return -ENOMEM; | ||
490 | |||
491 | list_add_tail(&pmsg->list_elem, msg_list); | ||
492 | |||
493 | return 0; | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * ======== delete_msg_mgr ======== | ||
498 | */ | ||
499 | static void delete_msg_mgr(struct msg_mgr *hmsg_mgr) | ||
500 | { | ||
501 | if (!hmsg_mgr) | ||
502 | return; | ||
503 | |||
504 | /* FIXME: free elements from queue_list? */ | ||
505 | free_msg_list(&hmsg_mgr->msg_free_list); | ||
506 | free_msg_list(&hmsg_mgr->msg_used_list); | ||
507 | kfree(hmsg_mgr->sync_event); | ||
508 | kfree(hmsg_mgr); | ||
509 | } | ||
510 | |||
511 | /* | ||
512 | * ======== delete_msg_queue ======== | ||
513 | */ | ||
514 | static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp) | ||
515 | { | ||
516 | struct msg_mgr *hmsg_mgr; | ||
517 | struct msg_frame *pmsg, *tmp; | ||
518 | u32 i; | ||
519 | |||
520 | if (!msg_queue_obj || !msg_queue_obj->msg_mgr) | ||
521 | return; | ||
522 | |||
523 | hmsg_mgr = msg_queue_obj->msg_mgr; | ||
524 | |||
525 | /* Pull off num_to_dsp message frames from Msg manager and free */ | ||
526 | i = 0; | ||
527 | list_for_each_entry_safe(pmsg, tmp, &hmsg_mgr->msg_free_list, | ||
528 | list_elem) { | ||
529 | list_del(&pmsg->list_elem); | ||
530 | kfree(pmsg); | ||
531 | if (i++ >= num_to_dsp) | ||
532 | break; | ||
533 | } | ||
534 | |||
535 | free_msg_list(&msg_queue_obj->msg_free_list); | ||
536 | free_msg_list(&msg_queue_obj->msg_used_list); | ||
537 | |||
538 | if (msg_queue_obj->ntfy_obj) { | ||
539 | ntfy_delete(msg_queue_obj->ntfy_obj); | ||
540 | kfree(msg_queue_obj->ntfy_obj); | ||
541 | } | ||
542 | |||
543 | kfree(msg_queue_obj->sync_event); | ||
544 | kfree(msg_queue_obj->sync_done); | ||
545 | kfree(msg_queue_obj->sync_done_ack); | ||
546 | |||
547 | kfree(msg_queue_obj); | ||
548 | } | ||
549 | |||
550 | /* | ||
551 | * ======== free_msg_list ======== | ||
552 | */ | ||
553 | static void free_msg_list(struct list_head *msg_list) | ||
554 | { | ||
555 | struct msg_frame *pmsg, *tmp; | ||
556 | |||
557 | if (!msg_list) | ||
558 | return; | ||
559 | |||
560 | list_for_each_entry_safe(pmsg, tmp, msg_list, list_elem) { | ||
561 | list_del(&pmsg->list_elem); | ||
562 | kfree(pmsg); | ||
563 | } | ||
564 | } | ||
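As a caller-side illustration of the message API deleted above: a minimal sketch, assuming a valid dev_object, a node handle and a node-exit callback of the msg_onexit type from the driver headers. The function name, the 16-frame queue depth and the 100 ms timeouts are arbitrary, and error handling is trimmed to the essentials.

#include <dspbridge/dspmsg.h>

/* Sketch only: exercises create/put/get/delete in the order the code
 * above expects; all numeric parameters are illustrative. */
static int msg_roundtrip_sketch(struct dev_object *hdev_obj,
				void *node_handle, msg_onexit on_exit)
{
	struct msg_mgr *mgr;
	struct msg_queue *queue;
	struct dsp_msg out = { 0 };
	struct dsp_msg in;
	int status;

	status = bridge_msg_create(&mgr, hdev_obj, on_exit);
	if (status)
		return status;

	/* One queue per node; the msgq_id of 0 is a placeholder that would
	 * normally be patched later via bridge_msg_set_queue_id(). */
	status = bridge_msg_create_queue(mgr, &queue, 0, 16, node_handle);
	if (status)
		goto err_mgr;

	status = bridge_msg_put(queue, &out, 100);	/* 100 ms timeout */
	if (!status)
		status = bridge_msg_get(queue, &in, 100);

	bridge_msg_delete_queue(queue);
err_mgr:
	bridge_msg_delete(mgr);
	return status;
}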
diff --git a/drivers/staging/tidspbridge/core/sync.c b/drivers/staging/tidspbridge/core/sync.c deleted file mode 100644 index 743ff09d82d2..000000000000 --- a/drivers/staging/tidspbridge/core/sync.c +++ /dev/null | |||
@@ -1,121 +0,0 @@ | |||
1 | /* | ||
2 | * sync.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Synchronization services. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | /* ----------------------------------- Host OS */ | ||
20 | #include <dspbridge/host_os.h> | ||
21 | |||
22 | /* ----------------------------------- This */ | ||
23 | #include <dspbridge/sync.h> | ||
24 | #include <dspbridge/ntfy.h> | ||
25 | |||
26 | DEFINE_SPINLOCK(sync_lock); | ||
27 | |||
28 | /** | ||
29 | * sync_set_event() - set or signal the specified event | ||
30 | * @event: Event to be set. | ||
31 | * | ||
32 | * Set the @event; if there is a thread waiting for the event, | ||
33 | * it will be woken up. This function only wakes one thread. | ||
34 | */ | ||
35 | |||
36 | void sync_set_event(struct sync_object *event) | ||
37 | { | ||
38 | spin_lock_bh(&sync_lock); | ||
39 | complete(&event->comp); | ||
40 | if (event->multi_comp) | ||
41 | complete(event->multi_comp); | ||
42 | spin_unlock_bh(&sync_lock); | ||
43 | } | ||
44 | |||
45 | /** | ||
46 | * sync_wait_on_multiple_events() - waits for multiple events to be set. | ||
47 | * @events: Array of events to wait on. | ||
48 | * @count: number of elements in the array. | ||
49 | * @timeout: timeout for waiting on the events, in milliseconds. | ||
50 | * @index: index of the event that was set. | ||
51 | * | ||
52 | * This function waits until any element of the array is set or the | ||
53 | * timeout expires. On success it returns 0 and @index holds the | ||
54 | * index of the element that was set. On timeout it returns -ETIME, | ||
55 | * and if the wait is interrupted by a signal it returns -EPERM | ||
56 | * instead. | ||
57 | */ | ||
58 | |||
59 | int sync_wait_on_multiple_events(struct sync_object **events, | ||
60 | unsigned count, unsigned timeout, | ||
61 | unsigned *index) | ||
62 | { | ||
63 | unsigned i; | ||
64 | int status = -EPERM; | ||
65 | struct completion m_comp; | ||
66 | |||
67 | init_completion(&m_comp); | ||
68 | |||
69 | if (SYNC_INFINITE == timeout) | ||
70 | timeout = MAX_SCHEDULE_TIMEOUT; | ||
71 | |||
72 | spin_lock_bh(&sync_lock); | ||
73 | for (i = 0; i < count; i++) { | ||
74 | if (completion_done(&events[i]->comp)) { | ||
75 | reinit_completion(&events[i]->comp); | ||
76 | *index = i; | ||
77 | spin_unlock_bh(&sync_lock); | ||
78 | status = 0; | ||
79 | goto func_end; | ||
80 | } | ||
81 | } | ||
82 | |||
83 | for (i = 0; i < count; i++) | ||
84 | events[i]->multi_comp = &m_comp; | ||
85 | |||
86 | spin_unlock_bh(&sync_lock); | ||
87 | |||
88 | if (!wait_for_completion_interruptible_timeout(&m_comp, | ||
89 | msecs_to_jiffies(timeout))) | ||
90 | status = -ETIME; | ||
91 | |||
92 | spin_lock_bh(&sync_lock); | ||
93 | for (i = 0; i < count; i++) { | ||
94 | if (completion_done(&events[i]->comp)) { | ||
95 | reinit_completion(&events[i]->comp); | ||
96 | *index = i; | ||
97 | status = 0; | ||
98 | } | ||
99 | events[i]->multi_comp = NULL; | ||
100 | } | ||
101 | spin_unlock_bh(&sync_lock); | ||
102 | func_end: | ||
103 | return status; | ||
104 | } | ||
105 | |||
106 | /** | ||
107 | * dsp_notifier_event() - callback function to notify events | ||
108 | * @this: pointer to the struct notifier_block itself | ||
109 | * @event: event to be notified. | ||
110 | * @data: Currently not used. | ||
111 | * | ||
112 | */ | ||
113 | int dsp_notifier_event(struct notifier_block *this, unsigned long event, | ||
114 | void *data) | ||
115 | { | ||
116 | struct ntfy_event *ne = container_of(this, struct ntfy_event, | ||
117 | noti_block); | ||
118 | if (ne->event & event) | ||
119 | sync_set_event(&ne->sync_obj); | ||
120 | return NOTIFY_OK; | ||
121 | } | ||
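To make the wake-up contract of sync_wait_on_multiple_events() concrete, here is a minimal caller sketch in the style of bridge_msg_get(): wait on a "ready" event or a "teardown" event, whichever fires first. The function name and the 500 ms timeout are illustrative, and both objects are assumed to have been initialized with sync_init_event().

#include <dspbridge/sync.h>

/* Sketch only: returns 0 when the "ready" event fired, -EPERM when the
 * teardown event fired, or the error returned by the wait itself. */
static int wait_ready_or_done_sketch(struct sync_object *ready,
				     struct sync_object *done)
{
	struct sync_object *events[2] = { ready, done };
	unsigned int which;
	int status;

	status = sync_wait_on_multiple_events(events, 2, 500, &which);
	if (status)
		return status;	/* -ETIME on timeout, -EPERM on signal */

	return (which == 0) ? 0 : -EPERM;
}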
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c deleted file mode 100644 index f63dd8f4dde9..000000000000 --- a/drivers/staging/tidspbridge/core/tiomap3430.c +++ /dev/null | |||
@@ -1,1815 +0,0 @@ | |||
1 | /* | ||
2 | * tiomap.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Processor Manager Driver for TI OMAP3430 EVM. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/platform_data/dsp-omap.h> | ||
20 | |||
21 | #include <linux/types.h> | ||
22 | /* ----------------------------------- Host OS */ | ||
23 | #include <dspbridge/host_os.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/mmzone.h> | ||
26 | |||
27 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
28 | #include <dspbridge/dbdefs.h> | ||
29 | |||
30 | /* ----------------------------------- OS Adaptation Layer */ | ||
31 | #include <dspbridge/drv.h> | ||
32 | #include <dspbridge/sync.h> | ||
33 | |||
34 | /* ------------------------------------ Hardware Abstraction Layer */ | ||
35 | #include <hw_defs.h> | ||
36 | #include <hw_mmu.h> | ||
37 | |||
38 | /* ----------------------------------- Link Driver */ | ||
39 | #include <dspbridge/dspdefs.h> | ||
40 | #include <dspbridge/dspchnl.h> | ||
41 | #include <dspbridge/dspdeh.h> | ||
42 | #include <dspbridge/dspio.h> | ||
43 | #include <dspbridge/dspmsg.h> | ||
44 | #include <dspbridge/pwr.h> | ||
45 | #include <dspbridge/io_sm.h> | ||
46 | |||
47 | /* ----------------------------------- Platform Manager */ | ||
48 | #include <dspbridge/dev.h> | ||
49 | #include <dspbridge/dspapi.h> | ||
50 | #include <dspbridge/dmm.h> | ||
51 | #include <dspbridge/wdt.h> | ||
52 | |||
53 | /* ----------------------------------- Local */ | ||
54 | #include "_tiomap.h" | ||
55 | #include "_tiomap_pwr.h" | ||
56 | #include "tiomap_io.h" | ||
57 | |||
58 | /* Offset in shared mem to write to in order to synchronize start with DSP */ | ||
59 | #define SHMSYNCOFFSET 4 /* GPP byte offset */ | ||
60 | |||
61 | #define BUFFERSIZE 1024 | ||
62 | |||
63 | #define TIHELEN_ACKTIMEOUT 10000 | ||
64 | |||
65 | #define MMU_SECTION_ADDR_MASK 0xFFF00000 | ||
66 | #define MMU_SSECTION_ADDR_MASK 0xFF000000 | ||
67 | #define MMU_LARGE_PAGE_MASK 0xFFFF0000 | ||
68 | #define MMU_SMALL_PAGE_MASK 0xFFFFF000 | ||
69 | #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00 | ||
70 | #define PAGES_II_LVL_TABLE 512 | ||
71 | #define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT) | ||
72 | |||
73 | /* IVA Boot modes */ | ||
74 | #define DIRECT 0 | ||
75 | #define IDLE 1 | ||
76 | |||
77 | /* Forward Declarations: */ | ||
78 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); | ||
79 | static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, | ||
80 | u8 *host_buff, | ||
81 | u32 dsp_addr, u32 ul_num_bytes, | ||
82 | u32 mem_type); | ||
83 | static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | ||
84 | u32 dsp_addr); | ||
85 | static int bridge_brd_status(struct bridge_dev_context *dev_ctxt, | ||
86 | int *board_state); | ||
87 | static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt); | ||
88 | static int bridge_brd_write(struct bridge_dev_context *dev_ctxt, | ||
89 | u8 *host_buff, | ||
90 | u32 dsp_addr, u32 ul_num_bytes, | ||
91 | u32 mem_type); | ||
92 | static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt, | ||
93 | u32 brd_state); | ||
94 | static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt, | ||
95 | u32 dsp_dest_addr, u32 dsp_src_addr, | ||
96 | u32 ul_num_bytes, u32 mem_type); | ||
97 | static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, | ||
98 | u8 *host_buff, u32 dsp_addr, | ||
99 | u32 ul_num_bytes, u32 mem_type); | ||
100 | static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt, | ||
101 | u32 ul_mpu_addr, u32 virt_addr, | ||
102 | u32 ul_num_bytes, u32 ul_map_attr, | ||
103 | struct page **mapped_pages); | ||
104 | static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, | ||
105 | u32 virt_addr, u32 ul_num_bytes); | ||
106 | static int bridge_dev_create(struct bridge_dev_context | ||
107 | **dev_cntxt, | ||
108 | struct dev_object *hdev_obj, | ||
109 | struct cfg_hostres *config_param); | ||
110 | static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, | ||
111 | u32 dw_cmd, void *pargs); | ||
112 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt); | ||
113 | static u32 user_va2_pa(struct mm_struct *mm, u32 address); | ||
114 | static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, | ||
115 | u32 va, u32 size, | ||
116 | struct hw_mmu_map_attrs_t *map_attrs); | ||
117 | static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, | ||
118 | u32 size, struct hw_mmu_map_attrs_t *attrs); | ||
119 | static int mem_map_vmalloc(struct bridge_dev_context *dev_context, | ||
120 | u32 ul_mpu_addr, u32 virt_addr, | ||
121 | u32 ul_num_bytes, | ||
122 | struct hw_mmu_map_attrs_t *hw_attrs); | ||
123 | |||
124 | bool wait_for_start(struct bridge_dev_context *dev_context, | ||
125 | void __iomem *sync_addr); | ||
126 | |||
127 | /* ----------------------------------- Globals */ | ||
128 | |||
129 | /* Attributes of L2 page tables for DSP MMU */ | ||
130 | struct page_info { | ||
131 | u32 num_entries; /* Number of valid PTEs in the L2 PT */ | ||
132 | }; | ||
133 | |||
134 | /* Attributes used to manage the DSP MMU page tables */ | ||
135 | struct pg_table_attrs { | ||
136 | spinlock_t pg_lock; /* Critical section object handle */ | ||
137 | |||
138 | u32 l1_base_pa; /* Physical address of the L1 PT */ | ||
139 | u32 l1_base_va; /* Virtual address of the L1 PT */ | ||
140 | u32 l1_size; /* Size of the L1 PT */ | ||
141 | u32 l1_tbl_alloc_pa; | ||
142 | /* Physical address of Allocated mem for L1 table. May not be aligned */ | ||
143 | u32 l1_tbl_alloc_va; | ||
144 | /* Virtual address of Allocated mem for L1 table. May not be aligned */ | ||
145 | u32 l1_tbl_alloc_sz; | ||
146 | /* Size of consistent memory allocated for L1 table. | ||
147 | * May not be aligned */ | ||
148 | |||
149 | u32 l2_base_pa; /* Physical address of the L2 PT */ | ||
150 | u32 l2_base_va; /* Virtual address of the L2 PT */ | ||
151 | u32 l2_size; /* Size of the L2 PT */ | ||
152 | u32 l2_tbl_alloc_pa; | ||
153 | /* Physical address of Allocated mem for L2 table. May not be aligned */ | ||
154 | u32 l2_tbl_alloc_va; | ||
155 | /* Virtual address of Allocated mem for L2 table. May not be aligned */ | ||
156 | u32 l2_tbl_alloc_sz; | ||
157 | /* Size of consistent memory allocated for L2 table. | ||
158 | * May not be aligned */ | ||
159 | |||
160 | u32 l2_num_pages; /* Number of allocated L2 PT */ | ||
161 | /* Array [l2_num_pages] of L2 PT info structs */ | ||
162 | struct page_info *pg_info; | ||
163 | }; | ||
164 | |||
165 | /* | ||
166 | * This Bridge driver's function interface table. | ||
167 | */ | ||
168 | static struct bridge_drv_interface drv_interface_fxns = { | ||
169 | /* Bridge API ver. for which this bridge driver is built. */ | ||
170 | BRD_API_MAJOR_VERSION, | ||
171 | BRD_API_MINOR_VERSION, | ||
172 | bridge_dev_create, | ||
173 | bridge_dev_destroy, | ||
174 | bridge_dev_ctrl, | ||
175 | bridge_brd_monitor, | ||
176 | bridge_brd_start, | ||
177 | bridge_brd_stop, | ||
178 | bridge_brd_status, | ||
179 | bridge_brd_read, | ||
180 | bridge_brd_write, | ||
181 | bridge_brd_set_state, | ||
182 | bridge_brd_mem_copy, | ||
183 | bridge_brd_mem_write, | ||
184 | bridge_brd_mem_map, | ||
185 | bridge_brd_mem_un_map, | ||
186 | /* The following CHNL functions are provided by chnl_io.lib: */ | ||
187 | bridge_chnl_create, | ||
188 | bridge_chnl_destroy, | ||
189 | bridge_chnl_open, | ||
190 | bridge_chnl_close, | ||
191 | bridge_chnl_add_io_req, | ||
192 | bridge_chnl_get_ioc, | ||
193 | bridge_chnl_cancel_io, | ||
194 | bridge_chnl_flush_io, | ||
195 | bridge_chnl_get_info, | ||
196 | bridge_chnl_get_mgr_info, | ||
197 | bridge_chnl_idle, | ||
198 | bridge_chnl_register_notify, | ||
199 | /* The following IO functions are provided by chnl_io.lib: */ | ||
200 | bridge_io_create, | ||
201 | bridge_io_destroy, | ||
202 | bridge_io_on_loaded, | ||
203 | bridge_io_get_proc_load, | ||
204 | /* The following msg_ctrl functions are provided by chnl_io.lib: */ | ||
205 | bridge_msg_create, | ||
206 | bridge_msg_create_queue, | ||
207 | bridge_msg_delete, | ||
208 | bridge_msg_delete_queue, | ||
209 | bridge_msg_get, | ||
210 | bridge_msg_put, | ||
211 | bridge_msg_register_notify, | ||
212 | bridge_msg_set_queue_id, | ||
213 | }; | ||
214 | |||
215 | static struct notifier_block dsp_mbox_notifier = { | ||
216 | .notifier_call = io_mbox_msg, | ||
217 | }; | ||
218 | |||
219 | static inline void flush_all(struct bridge_dev_context *dev_context) | ||
220 | { | ||
221 | if (dev_context->brd_state == BRD_DSP_HIBERNATION || | ||
222 | dev_context->brd_state == BRD_HIBERNATION) | ||
223 | wake_dsp(dev_context, NULL); | ||
224 | |||
225 | hw_mmu_tlb_flush_all(dev_context->dsp_mmu_base); | ||
226 | } | ||
227 | |||
228 | static void bad_page_dump(u32 pa, struct page *pg) | ||
229 | { | ||
230 | pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa); | ||
231 | pr_emerg("Bad page state in process '%s'\n" | ||
232 | "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" | ||
233 | "Backtrace:\n", | ||
234 | current->comm, pg, (int)(2 * sizeof(unsigned long)), | ||
235 | (unsigned long)pg->flags, pg->mapping, | ||
236 | page_mapcount(pg), page_count(pg)); | ||
237 | dump_stack(); | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * ======== bridge_drv_entry ======== | ||
242 | * purpose: | ||
243 | * Bridge Driver entry point. | ||
244 | */ | ||
245 | void bridge_drv_entry(struct bridge_drv_interface **drv_intf, | ||
246 | const char *driver_file_name) | ||
247 | { | ||
248 | if (strcmp(driver_file_name, "UMA") == 0) | ||
249 | *drv_intf = &drv_interface_fxns; | ||
250 | else | ||
251 | dev_dbg(bridge, "%s Unknown Bridge file name", __func__); | ||
252 | |||
253 | } | ||
254 | |||
255 | /* | ||
256 | * ======== bridge_brd_monitor ======== | ||
257 | * purpose: | ||
258 | * This bridge_brd_monitor puts DSP into a Loadable state. | ||
259 | * i.e Application can load and start the device. | ||
260 | * | ||
261 | * Preconditions: | ||
262 | * Device in 'OFF' state. | ||
263 | */ | ||
264 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt) | ||
265 | { | ||
266 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
267 | u32 temp; | ||
268 | struct omap_dsp_platform_data *pdata = | ||
269 | omap_dspbridge_dev->dev.platform_data; | ||
270 | |||
271 | temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) & | ||
272 | OMAP_POWERSTATEST_MASK; | ||
273 | if (!(temp & 0x02)) { | ||
274 | /* IVA2 is not in ON state */ | ||
275 | /* Read and set PM_PWSTCTRL_IVA2 to ON */ | ||
276 | (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK, | ||
277 | PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL); | ||
278 | /* Set the SW supervised state transition */ | ||
279 | (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, | ||
280 | OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); | ||
281 | |||
282 | /* Wait until the state has moved to ON */ | ||
283 | while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, | ||
284 | OMAP2_PM_PWSTST) & | ||
285 | OMAP_INTRANSITION_MASK) | ||
286 | ; | ||
287 | /* Disable Automatic transition */ | ||
288 | (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, | ||
289 | OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); | ||
290 | } | ||
291 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, | ||
292 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | ||
293 | dsp_clk_enable(DSP_CLK_IVA2); | ||
294 | |||
295 | /* set the device state to IDLE */ | ||
296 | dev_context->brd_state = BRD_IDLE; | ||
297 | |||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | /* | ||
302 | * ======== bridge_brd_read ======== | ||
303 | * purpose: | ||
304 | * Reads buffers for DSP memory. | ||
305 | */ | ||
306 | static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, | ||
307 | u8 *host_buff, u32 dsp_addr, | ||
308 | u32 ul_num_bytes, u32 mem_type) | ||
309 | { | ||
310 | int status = 0; | ||
311 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
312 | u32 offset; | ||
313 | u32 dsp_base_addr = dev_ctxt->dsp_base_addr; | ||
314 | |||
315 | if (dsp_addr < dev_context->dsp_start_add) { | ||
316 | status = -EPERM; | ||
317 | return status; | ||
318 | } | ||
319 | /* change here to account for the 3 bands of the DSP internal memory */ | ||
320 | if ((dsp_addr - dev_context->dsp_start_add) < | ||
321 | dev_context->internal_size) { | ||
322 | offset = dsp_addr - dev_context->dsp_start_add; | ||
323 | } else { | ||
324 | status = read_ext_dsp_data(dev_context, host_buff, dsp_addr, | ||
325 | ul_num_bytes, mem_type); | ||
326 | return status; | ||
327 | } | ||
328 | /* copy the data from DSP memory */ | ||
329 | memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes); | ||
330 | return status; | ||
331 | } | ||
332 | |||
333 | /* | ||
334 | * ======== bridge_brd_set_state ======== | ||
335 | * purpose: | ||
336 | * This routine updates the Board status. | ||
337 | */ | ||
338 | static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt, | ||
339 | u32 brd_state) | ||
340 | { | ||
341 | int status = 0; | ||
342 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
343 | |||
344 | dev_context->brd_state = brd_state; | ||
345 | return status; | ||
346 | } | ||
347 | |||
348 | /* | ||
349 | * ======== bridge_brd_start ======== | ||
350 | * purpose: | ||
351 | * Initializes DSP MMU and Starts DSP. | ||
352 | * | ||
353 | * Preconditions: | ||
354 | * a) DSP domain is 'ACTIVE'. | ||
355 | * b) DSP_RST1 is asserted. | ||
356 | * c) DSP_RST2 is released. | ||
357 | */ | ||
358 | static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | ||
359 | u32 dsp_addr) | ||
360 | { | ||
361 | int status = 0; | ||
362 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
363 | void __iomem *sync_addr; | ||
364 | u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */ | ||
365 | u32 ul_shm_base_virt; /* Dsp Virt SM base addr */ | ||
366 | u32 ul_tlb_base_virt; /* Base of MMU TLB entry */ | ||
367 | u32 shm_sync_pa; | ||
368 | /* Offset of shm_base_virt from tlb_base_virt */ | ||
369 | u32 ul_shm_offset_virt; | ||
370 | s32 entry_ndx; | ||
371 | s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */ | ||
372 | struct cfg_hostres *resources = NULL; | ||
373 | u32 temp; | ||
374 | u32 ul_dsp_clk_rate; | ||
375 | u32 ul_dsp_clk_addr; | ||
376 | u32 ul_bios_gp_timer; | ||
377 | u32 clk_cmd; | ||
378 | struct io_mgr *hio_mgr; | ||
379 | u32 ul_load_monitor_timer; | ||
380 | u32 wdt_en = 0; | ||
381 | struct omap_dsp_platform_data *pdata = | ||
382 | omap_dspbridge_dev->dev.platform_data; | ||
383 | |||
384 | /* The device context contains all the mmu setup info from when the | ||
385 | * last dsp base image was loaded. The first entry is always | ||
386 | * SHMMEM base. */ | ||
387 | /* Get SHM_BEG - convert to byte address */ | ||
388 | (void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME, | ||
389 | &ul_shm_base_virt); | ||
390 | ul_shm_base_virt *= DSPWORDSIZE; | ||
391 | /* DSP Virtual address */ | ||
392 | ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va; | ||
393 | ul_shm_offset_virt = | ||
394 | ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); | ||
395 | /* Kernel logical address */ | ||
396 | ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt; | ||
397 | |||
398 | /* SHM physical sync address */ | ||
399 | shm_sync_pa = dev_context->atlb_entry[0].gpp_pa + ul_shm_offset_virt + | ||
400 | SHMSYNCOFFSET; | ||
401 | |||
402 | /* 2nd wd is used as sync field */ | ||
403 | sync_addr = ioremap(shm_sync_pa, SZ_32); | ||
404 | if (!sync_addr) | ||
405 | return -ENOMEM; | ||
406 | |||
407 | /* Write a signature into the shm base + offset; this will | ||
408 | * get cleared when the DSP program starts. */ | ||
409 | if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) { | ||
410 | pr_err("%s: Illegal SM base\n", __func__); | ||
411 | status = -EPERM; | ||
412 | } else | ||
413 | __raw_writel(0xffffffff, sync_addr); | ||
414 | |||
415 | if (!status) { | ||
416 | resources = dev_context->resources; | ||
417 | if (!resources) | ||
418 | status = -EPERM; | ||
419 | |||
420 | 		/* Assert RST1, i.e. only the reset for the DSP megacell */ | ||
421 | if (!status) { | ||
422 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, | ||
423 | OMAP3430_RST1_IVA2_MASK, | ||
424 | OMAP3430_IVA2_MOD, | ||
425 | OMAP2_RM_RSTCTRL); | ||
426 | |||
427 | /* Mask address with 1K for compatibility */ | ||
428 | pdata->set_bootaddr(dsp_addr & | ||
429 | OMAP3_IVA2_BOOTADDR_MASK); | ||
430 | pdata->set_bootmode(dsp_debug ? IDLE : DIRECT); | ||
431 | } | ||
432 | } | ||
433 | if (!status) { | ||
434 | /* Reset and Unreset the RST2, so that BOOTADDR is copied to | ||
435 | * IVA2 SYSC register */ | ||
436 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, | ||
437 | OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, | ||
438 | OMAP2_RM_RSTCTRL); | ||
439 | udelay(100); | ||
440 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, | ||
441 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | ||
442 | udelay(100); | ||
443 | |||
444 | 		/* Disable the DSP MMU */ | ||
445 | hw_mmu_disable(resources->dmmu_base); | ||
446 | /* Disable TWL */ | ||
447 | hw_mmu_twl_disable(resources->dmmu_base); | ||
448 | |||
449 | /* Only make TLB entry if both addresses are non-zero */ | ||
450 | for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; | ||
451 | entry_ndx++) { | ||
452 | struct bridge_ioctl_extproc *e = | ||
453 | &dev_context->atlb_entry[entry_ndx]; | ||
454 | struct hw_mmu_map_attrs_t map_attrs = { | ||
455 | .endianism = e->endianism, | ||
456 | .element_size = e->elem_size, | ||
457 | .mixed_size = e->mixed_mode, | ||
458 | }; | ||
459 | |||
460 | if (!e->gpp_pa || !e->dsp_va) | ||
461 | continue; | ||
462 | |||
463 | dev_dbg(bridge, | ||
464 | "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x", | ||
465 | itmp_entry_ndx, | ||
466 | e->gpp_pa, | ||
467 | e->dsp_va, | ||
468 | e->size); | ||
469 | |||
470 | hw_mmu_tlb_add(dev_context->dsp_mmu_base, | ||
471 | e->gpp_pa, | ||
472 | e->dsp_va, | ||
473 | e->size, | ||
474 | itmp_entry_ndx, | ||
475 | &map_attrs, 1, 1); | ||
476 | |||
477 | itmp_entry_ndx++; | ||
478 | } | ||
479 | } | ||
480 | |||
481 | /* Lock the above TLB entries and get the BIOS and load monitor timer | ||
482 | * information */ | ||
483 | if (!status) { | ||
484 | hw_mmu_num_locked_set(resources->dmmu_base, itmp_entry_ndx); | ||
485 | hw_mmu_victim_num_set(resources->dmmu_base, itmp_entry_ndx); | ||
486 | hw_mmu_ttb_set(resources->dmmu_base, | ||
487 | dev_context->pt_attrs->l1_base_pa); | ||
488 | hw_mmu_twl_enable(resources->dmmu_base); | ||
489 | /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */ | ||
490 | |||
491 | temp = __raw_readl((resources->dmmu_base) + 0x10); | ||
492 | temp = (temp & 0xFFFFFFEF) | 0x11; | ||
493 | __raw_writel(temp, (resources->dmmu_base) + 0x10); | ||
494 | |||
495 | /* Let the DSP MMU run */ | ||
496 | hw_mmu_enable(resources->dmmu_base); | ||
497 | |||
498 | /* Enable the BIOS clock */ | ||
499 | (void)dev_get_symbol(dev_context->dev_obj, | ||
500 | BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); | ||
501 | (void)dev_get_symbol(dev_context->dev_obj, | ||
502 | BRIDGEINIT_LOADMON_GPTIMER, | ||
503 | &ul_load_monitor_timer); | ||
504 | } | ||
505 | |||
506 | if (!status) { | ||
507 | if (ul_load_monitor_timer != 0xFFFF) { | ||
508 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | | ||
509 | ul_load_monitor_timer; | ||
510 | dsp_peripheral_clk_ctrl(dev_context, &clk_cmd); | ||
511 | } else { | ||
512 | dev_dbg(bridge, "Not able to get the symbol for Load " | ||
513 | "Monitor Timer\n"); | ||
514 | } | ||
515 | } | ||
516 | |||
517 | if (!status) { | ||
518 | if (ul_bios_gp_timer != 0xFFFF) { | ||
519 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | | ||
520 | ul_bios_gp_timer; | ||
521 | dsp_peripheral_clk_ctrl(dev_context, &clk_cmd); | ||
522 | } else { | ||
523 | dev_dbg(bridge, | ||
524 | "Not able to get the symbol for BIOS Timer\n"); | ||
525 | } | ||
526 | } | ||
527 | |||
528 | if (!status) { | ||
529 | /* Set the DSP clock rate */ | ||
530 | (void)dev_get_symbol(dev_context->dev_obj, | ||
531 | "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); | ||
532 | /*Set Autoidle Mode for IVA2 PLL */ | ||
533 | (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT, | ||
534 | OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL); | ||
535 | |||
536 | if ((unsigned int *)ul_dsp_clk_addr != NULL) { | ||
537 | /* Get the clock rate */ | ||
538 | ul_dsp_clk_rate = dsp_clk_get_iva2_rate(); | ||
539 | dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n", | ||
540 | __func__, ul_dsp_clk_rate); | ||
541 | (void)bridge_brd_write(dev_context, | ||
542 | (u8 *) &ul_dsp_clk_rate, | ||
543 | ul_dsp_clk_addr, sizeof(u32), 0); | ||
544 | } | ||
545 | /* | ||
546 | * Enable Mailbox events and also drain any pending | ||
547 | * stale messages. | ||
548 | */ | ||
549 | dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier); | ||
550 | if (IS_ERR(dev_context->mbox)) { | ||
551 | dev_context->mbox = NULL; | ||
552 | pr_err("%s: Failed to get dsp mailbox handle\n", | ||
553 | __func__); | ||
554 | status = -EPERM; | ||
555 | } | ||
556 | |||
557 | } | ||
558 | if (!status) { | ||
559 | /*PM_IVA2GRPSEL_PER = 0xC0;*/ | ||
560 | temp = readl(resources->per_pm_base + 0xA8); | ||
561 | temp = (temp & 0xFFFFFF30) | 0xC0; | ||
562 | writel(temp, resources->per_pm_base + 0xA8); | ||
563 | |||
564 | /*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */ | ||
565 | temp = readl(resources->per_pm_base + 0xA4); | ||
566 | temp = (temp & 0xFFFFFF3F); | ||
567 | writel(temp, resources->per_pm_base + 0xA4); | ||
568 | /*CM_SLEEPDEP_PER |= 0x04; */ | ||
569 | temp = readl(resources->per_base + 0x44); | ||
570 | temp = (temp & 0xFFFFFFFB) | 0x04; | ||
571 | writel(temp, resources->per_base + 0x44); | ||
572 | |||
573 | /*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */ | ||
574 | (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, | ||
575 | OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); | ||
576 | |||
577 | /* Let DSP go */ | ||
578 | dev_dbg(bridge, "%s Unreset\n", __func__); | ||
579 | /* Enable DSP MMU Interrupts */ | ||
580 | hw_mmu_event_enable(resources->dmmu_base, | ||
581 | HW_MMU_ALL_INTERRUPTS); | ||
582 | /* release the RST1, DSP starts executing now .. */ | ||
583 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, | ||
584 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | ||
585 | |||
586 | dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", *(u32 *)sync_addr); | ||
587 | dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr); | ||
588 | if (dsp_debug) | ||
589 | while (__raw_readw(sync_addr)) | ||
590 | ; | ||
591 | |||
592 | /* Wait for DSP to clear word in shared memory */ | ||
593 | /* Read the Location */ | ||
594 | if (!wait_for_start(dev_context, sync_addr)) | ||
595 | status = -ETIMEDOUT; | ||
596 | |||
597 | dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en); | ||
598 | if (wdt_en) { | ||
599 | /* Start wdt */ | ||
600 | dsp_wdt_sm_set((void *)ul_shm_base); | ||
601 | dsp_wdt_enable(true); | ||
602 | } | ||
603 | |||
604 | status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr); | ||
605 | if (hio_mgr) { | ||
606 | io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL); | ||
607 | /* Write the synchronization bit to indicate the | ||
608 | * completion of OPP table update to DSP | ||
609 | */ | ||
610 | __raw_writel(0XCAFECAFE, sync_addr); | ||
611 | |||
612 | /* update board state */ | ||
613 | dev_context->brd_state = BRD_RUNNING; | ||
614 | /* (void)chnlsm_enable_interrupt(dev_context); */ | ||
615 | } else { | ||
616 | dev_context->brd_state = BRD_UNKNOWN; | ||
617 | } | ||
618 | } | ||
619 | |||
620 | iounmap(sync_addr); | ||
621 | |||
622 | return status; | ||
623 | } | ||
624 | |||
625 | /* | ||
626 | * ======== bridge_brd_stop ======== | ||
627 | * Purpose: | ||
628 | * Puts DSP in self loop. | ||
629 | * | ||
630 | * Preconditions : | ||
631 | * a) None | ||
632 | */ | ||
633 | static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt) | ||
634 | { | ||
635 | int status = 0; | ||
636 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
637 | struct pg_table_attrs *pt_attrs; | ||
638 | u32 dsp_pwr_state; | ||
639 | struct omap_dsp_platform_data *pdata = | ||
640 | omap_dspbridge_dev->dev.platform_data; | ||
641 | |||
642 | if (dev_context->brd_state == BRD_STOPPED) | ||
643 | return status; | ||
644 | |||
645 | /* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode, | ||
646 | * before turning off the clocks. This is to ensure that there are no | ||
647 | * pending L3 or other transactions from IVA2 */ | ||
648 | dsp_pwr_state = (*pdata->dsp_prm_read) | ||
649 | (OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK; | ||
650 | if (dsp_pwr_state != PWRDM_POWER_OFF) { | ||
651 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, | ||
652 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | ||
653 | sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE); | ||
654 | mdelay(10); | ||
655 | |||
656 | /* IVA2 is not in OFF state */ | ||
657 | /* Set PM_PWSTCTRL_IVA2 to OFF */ | ||
658 | (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK, | ||
659 | PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL); | ||
660 | /* Set the SW supervised state transition for Sleep */ | ||
661 | (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, | ||
662 | OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); | ||
663 | } | ||
664 | udelay(10); | ||
665 | /* Release the Ext Base virtual Address as the next DSP Program | ||
666 | * may have a different load address */ | ||
667 | if (dev_context->dsp_ext_base_addr) | ||
668 | dev_context->dsp_ext_base_addr = 0; | ||
669 | |||
670 | dev_context->brd_state = BRD_STOPPED; /* update board state */ | ||
671 | |||
672 | dsp_wdt_enable(false); | ||
673 | |||
674 | /* This is a good place to clear the MMU page tables as well */ | ||
675 | if (dev_context->pt_attrs) { | ||
676 | pt_attrs = dev_context->pt_attrs; | ||
677 | memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size); | ||
678 | memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size); | ||
679 | memset((u8 *) pt_attrs->pg_info, 0x00, | ||
680 | (pt_attrs->l2_num_pages * sizeof(struct page_info))); | ||
681 | } | ||
682 | /* Disable the mailbox interrupts */ | ||
683 | if (dev_context->mbox) { | ||
684 | omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); | ||
685 | omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier); | ||
686 | dev_context->mbox = NULL; | ||
687 | } | ||
688 | /* Reset IVA2 clocks*/ | ||
689 | (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | | ||
690 | OMAP3430_RST2_IVA2_MASK | OMAP3430_RST3_IVA2_MASK, | ||
691 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | ||
692 | |||
693 | dsp_clock_disable_all(dev_context->dsp_per_clks); | ||
694 | dsp_clk_disable(DSP_CLK_IVA2); | ||
695 | |||
696 | return status; | ||
697 | } | ||
698 | |||
699 | /* | ||
700 | * ======== bridge_brd_status ======== | ||
701 | * Returns the board status. | ||
702 | */ | ||
703 | static int bridge_brd_status(struct bridge_dev_context *dev_ctxt, | ||
704 | int *board_state) | ||
705 | { | ||
706 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
707 | *board_state = dev_context->brd_state; | ||
708 | return 0; | ||
709 | } | ||
710 | |||
711 | /* | ||
712 | * ======== bridge_brd_write ======== | ||
713 | * Copies the buffers to DSP internal or external memory. | ||
714 | */ | ||
715 | static int bridge_brd_write(struct bridge_dev_context *dev_ctxt, | ||
716 | u8 *host_buff, u32 dsp_addr, | ||
717 | u32 ul_num_bytes, u32 mem_type) | ||
718 | { | ||
719 | int status = 0; | ||
720 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
721 | |||
722 | if (dsp_addr < dev_context->dsp_start_add) { | ||
723 | status = -EPERM; | ||
724 | return status; | ||
725 | } | ||
726 | if ((dsp_addr - dev_context->dsp_start_add) < | ||
727 | dev_context->internal_size) { | ||
728 | status = write_dsp_data(dev_ctxt, host_buff, dsp_addr, | ||
729 | ul_num_bytes, mem_type); | ||
730 | } else { | ||
731 | status = write_ext_dsp_data(dev_context, host_buff, dsp_addr, | ||
732 | ul_num_bytes, mem_type, false); | ||
733 | } | ||
734 | |||
735 | return status; | ||
736 | } | ||
737 | |||
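The routing in bridge_brd_write() is only an address-range check against the internal DSP RAM window. Below is a minimal userspace sketch of the same check; the base and size values are hypothetical stand-ins for OMAP_GEM_BASE and OMAP_DSP_SIZE, not the real ones.

    /* Sketch of the internal/external routing check in bridge_brd_write() */
    #include <stdio.h>

    #define DSP_START_ADD  0x10000000u   /* assumed internal RAM base */
    #define INTERNAL_SIZE  0x00028000u   /* assumed internal RAM size */

    static const char *route(unsigned int dsp_addr)
    {
            if (dsp_addr < DSP_START_ADD)
                    return "rejected (-EPERM)";
            if (dsp_addr - DSP_START_ADD < INTERNAL_SIZE)
                    return "write_dsp_data (internal memory)";
            return "write_ext_dsp_data (external memory)";
    }

    int main(void)
    {
            printf("0x10000100 -> %s\n", route(0x10000100u));
            printf("0x20000000 -> %s\n", route(0x20000000u));
            printf("0x0fff0000 -> %s\n", route(0x0fff0000u));
            return 0;
    }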
738 | /* | ||
739 | * ======== bridge_dev_create ======== | ||
740 | * Creates a driver object. Puts DSP in self loop. | ||
741 | */ | ||
742 | static int bridge_dev_create(struct bridge_dev_context | ||
743 | **dev_cntxt, | ||
744 | struct dev_object *hdev_obj, | ||
745 | struct cfg_hostres *config_param) | ||
746 | { | ||
747 | int status = 0; | ||
748 | struct bridge_dev_context *dev_context = NULL; | ||
749 | s32 entry_ndx; | ||
750 | struct cfg_hostres *resources = config_param; | ||
751 | struct pg_table_attrs *pt_attrs; | ||
752 | u32 pg_tbl_pa; | ||
753 | u32 pg_tbl_va; | ||
754 | u32 align_size; | ||
755 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
756 | |||
757 | /* Allocate and initialize a data structure to contain the bridge driver | ||
758 | * state, which becomes the context for later calls into this driver */ | ||
759 | dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL); | ||
760 | if (!dev_context) { | ||
761 | status = -ENOMEM; | ||
762 | goto func_end; | ||
763 | } | ||
764 | |||
765 | dev_context->dsp_start_add = (u32) OMAP_GEM_BASE; | ||
766 | dev_context->self_loop = (u32) NULL; | ||
767 | dev_context->dsp_per_clks = 0; | ||
768 | dev_context->internal_size = OMAP_DSP_SIZE; | ||
769 | /* Clear dev context MMU table entries. | ||
770 | * These get set on the bridge_io_on_loaded() call after the program is loaded. */ | ||
771 | for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) { | ||
772 | dev_context->atlb_entry[entry_ndx].gpp_pa = | ||
773 | dev_context->atlb_entry[entry_ndx].dsp_va = 0; | ||
774 | } | ||
775 | dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *) | ||
776 | (config_param-> | ||
777 | mem_base | ||
778 | [3]), | ||
779 | config_param-> | ||
780 | mem_length | ||
781 | [3]); | ||
782 | if (!dev_context->dsp_base_addr) | ||
783 | status = -EPERM; | ||
784 | |||
785 | pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL); | ||
786 | if (pt_attrs != NULL) { | ||
787 | pt_attrs->l1_size = SZ_16K; /* 4096 entries of 32 bits */ | ||
788 | align_size = pt_attrs->l1_size; | ||
789 | /* Align sizes are expected to be power of 2 */ | ||
790 | /* we like to get aligned on L1 table size */ | ||
791 | pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size, | ||
792 | align_size, &pg_tbl_pa); | ||
793 | |||
794 | /* Check if the PA is aligned for us */ | ||
795 | if ((pg_tbl_pa) & (align_size - 1)) { | ||
796 | /* PA not aligned to page table size, | ||
797 | * try with more allocation and align */ | ||
798 | mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa, | ||
799 | pt_attrs->l1_size); | ||
800 | /* we like to get aligned on L1 table size */ | ||
801 | pg_tbl_va = | ||
802 | (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2, | ||
803 | align_size, &pg_tbl_pa); | ||
804 | /* We should be able to get aligned table now */ | ||
805 | pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa; | ||
806 | pt_attrs->l1_tbl_alloc_va = pg_tbl_va; | ||
807 | pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2; | ||
808 | /* Align the PA to the next 'align' boundary */ | ||
809 | pt_attrs->l1_base_pa = | ||
810 | ((pg_tbl_pa) + | ||
811 | (align_size - 1)) & (~(align_size - 1)); | ||
812 | pt_attrs->l1_base_va = | ||
813 | pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa); | ||
814 | } else { | ||
815 | /* We got aligned PA, cool */ | ||
816 | pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa; | ||
817 | pt_attrs->l1_tbl_alloc_va = pg_tbl_va; | ||
818 | pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size; | ||
819 | pt_attrs->l1_base_pa = pg_tbl_pa; | ||
820 | pt_attrs->l1_base_va = pg_tbl_va; | ||
821 | } | ||
822 | if (pt_attrs->l1_base_va) | ||
823 | memset((u8 *) pt_attrs->l1_base_va, 0x00, | ||
824 | pt_attrs->l1_size); | ||
825 | |||
826 | /* number of L2 page tables = DMM pool used + SHMMEM + EXTMEM + | ||
827 | * L4 pages */ | ||
828 | pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6); | ||
829 | pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE * | ||
830 | pt_attrs->l2_num_pages; | ||
831 | align_size = 4; /* Make it u32 aligned */ | ||
832 | /* u32 alignment is enough for the L2 tables */ | ||
833 | pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size, | ||
834 | align_size, &pg_tbl_pa); | ||
835 | pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa; | ||
836 | pt_attrs->l2_tbl_alloc_va = pg_tbl_va; | ||
837 | pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size; | ||
838 | pt_attrs->l2_base_pa = pg_tbl_pa; | ||
839 | pt_attrs->l2_base_va = pg_tbl_va; | ||
840 | |||
841 | if (pt_attrs->l2_base_va) | ||
842 | memset((u8 *) pt_attrs->l2_base_va, 0x00, | ||
843 | pt_attrs->l2_size); | ||
844 | |||
845 | pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages * | ||
846 | sizeof(struct page_info), GFP_KERNEL); | ||
847 | dev_dbg(bridge, | ||
848 | "L1 pa %x, va %x, size %x\n L2 pa %x, va " | ||
849 | "%x, size %x\n", pt_attrs->l1_base_pa, | ||
850 | pt_attrs->l1_base_va, pt_attrs->l1_size, | ||
851 | pt_attrs->l2_base_pa, pt_attrs->l2_base_va, | ||
852 | pt_attrs->l2_size); | ||
853 | dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n", | ||
854 | pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info); | ||
855 | } | ||
856 | if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) && | ||
857 | (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL)) | ||
858 | dev_context->pt_attrs = pt_attrs; | ||
859 | else | ||
860 | status = -ENOMEM; | ||
861 | |||
862 | if (!status) { | ||
863 | spin_lock_init(&pt_attrs->pg_lock); | ||
864 | dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; | ||
865 | |||
866 | /* Set the Clock Divisor for the DSP module */ | ||
867 | udelay(5); | ||
868 | /* MMU address is obtained from the host | ||
869 | * resources struct */ | ||
870 | dev_context->dsp_mmu_base = resources->dmmu_base; | ||
871 | } | ||
872 | if (!status) { | ||
873 | dev_context->dev_obj = hdev_obj; | ||
874 | /* Store current board state. */ | ||
875 | dev_context->brd_state = BRD_UNKNOWN; | ||
876 | dev_context->resources = resources; | ||
877 | dsp_clk_enable(DSP_CLK_IVA2); | ||
878 | bridge_brd_stop(dev_context); | ||
879 | /* Return ptr to our device state to the DSP API for storage */ | ||
880 | *dev_cntxt = dev_context; | ||
881 | } else { | ||
882 | if (pt_attrs != NULL) { | ||
883 | kfree(pt_attrs->pg_info); | ||
884 | |||
885 | if (pt_attrs->l2_tbl_alloc_va) { | ||
886 | mem_free_phys_mem((void *) | ||
887 | pt_attrs->l2_tbl_alloc_va, | ||
888 | pt_attrs->l2_tbl_alloc_pa, | ||
889 | pt_attrs->l2_tbl_alloc_sz); | ||
890 | } | ||
891 | if (pt_attrs->l1_tbl_alloc_va) { | ||
892 | mem_free_phys_mem((void *) | ||
893 | pt_attrs->l1_tbl_alloc_va, | ||
894 | pt_attrs->l1_tbl_alloc_pa, | ||
895 | pt_attrs->l1_tbl_alloc_sz); | ||
896 | } | ||
897 | } | ||
898 | kfree(pt_attrs); | ||
899 | kfree(dev_context); | ||
900 | } | ||
901 | func_end: | ||
902 | return status; | ||
903 | } | ||
904 | |||
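The L1 page-table handling in bridge_dev_create() relies on a common trick: if the physical allocation does not come back 16 KB aligned, allocate twice the size and round the base up to the next boundary, applying the same offset to the kernel virtual address. A small standalone sketch of the arithmetic, with made-up addresses:

    /* Illustration of the "allocate 2x and round up" alignment trick */
    #include <stdio.h>

    int main(void)
    {
            unsigned int align_size = 0x4000;       /* 16 KB L1 table */
            unsigned int pg_tbl_pa  = 0x8100a000;   /* hypothetical unaligned PA */
            unsigned int pg_tbl_va  = 0xc100a000;   /* hypothetical matching VA */

            /* Round the PA up to the next 16 KB boundary */
            unsigned int l1_base_pa = (pg_tbl_pa + (align_size - 1)) &
                                      ~(align_size - 1);
            /* Apply the same offset to the VA so the pair stays in sync */
            unsigned int l1_base_va = pg_tbl_va + (l1_base_pa - pg_tbl_pa);

            printf("l1_base_pa = 0x%x\n", l1_base_pa);  /* 0x8100c000 */
            printf("l1_base_va = 0x%x\n", l1_base_va);  /* 0xc100c000 */
            return 0;
    }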
905 | /* | ||
906 | * ======== bridge_dev_ctrl ======== | ||
907 | * Receives device specific commands. | ||
908 | */ | ||
909 | static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, | ||
910 | u32 dw_cmd, void *pargs) | ||
911 | { | ||
912 | int status = 0; | ||
913 | struct bridge_ioctl_extproc *pa_ext_proc = | ||
914 | (struct bridge_ioctl_extproc *)pargs; | ||
915 | s32 ndx; | ||
916 | |||
917 | switch (dw_cmd) { | ||
918 | case BRDIOCTL_CHNLREAD: | ||
919 | break; | ||
920 | case BRDIOCTL_CHNLWRITE: | ||
921 | break; | ||
922 | case BRDIOCTL_SETMMUCONFIG: | ||
923 | /* store away dsp-mmu setup values for later use */ | ||
924 | for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++) | ||
925 | dev_context->atlb_entry[ndx] = *pa_ext_proc; | ||
926 | break; | ||
927 | case BRDIOCTL_DEEPSLEEP: | ||
928 | case BRDIOCTL_EMERGENCYSLEEP: | ||
929 | /* Currently only DSP Idle is supported. Need to update for | ||
930 | * later releases */ | ||
931 | status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs); | ||
932 | break; | ||
933 | case BRDIOCTL_WAKEUP: | ||
934 | status = wake_dsp(dev_context, pargs); | ||
935 | break; | ||
936 | case BRDIOCTL_CLK_CTRL: | ||
937 | status = 0; | ||
938 | /* Looking For Baseport Fix for Clocks */ | ||
939 | status = dsp_peripheral_clk_ctrl(dev_context, pargs); | ||
940 | break; | ||
941 | case BRDIOCTL_PWR_HIBERNATE: | ||
942 | status = handle_hibernation_from_dsp(dev_context); | ||
943 | break; | ||
944 | case BRDIOCTL_PRESCALE_NOTIFY: | ||
945 | status = pre_scale_dsp(dev_context, pargs); | ||
946 | break; | ||
947 | case BRDIOCTL_POSTSCALE_NOTIFY: | ||
948 | status = post_scale_dsp(dev_context, pargs); | ||
949 | break; | ||
950 | case BRDIOCTL_CONSTRAINT_REQUEST: | ||
951 | status = handle_constraints_set(dev_context, pargs); | ||
952 | break; | ||
953 | default: | ||
954 | status = -EPERM; | ||
955 | break; | ||
956 | } | ||
957 | return status; | ||
958 | } | ||
959 | |||
960 | /* | ||
961 | * ======== bridge_dev_destroy ======== | ||
962 | * Destroys the driver object. | ||
963 | */ | ||
964 | static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) | ||
965 | { | ||
966 | struct pg_table_attrs *pt_attrs; | ||
967 | int status = 0; | ||
968 | struct bridge_dev_context *dev_context = (struct bridge_dev_context *) | ||
969 | dev_ctxt; | ||
970 | struct cfg_hostres *host_res; | ||
971 | u32 shm_size; | ||
972 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
973 | |||
974 | /* It should never happen */ | ||
975 | if (!dev_ctxt) | ||
976 | return -EFAULT; | ||
977 | |||
978 | /* first put the device to stop state */ | ||
979 | bridge_brd_stop(dev_context); | ||
980 | if (dev_context->pt_attrs) { | ||
981 | pt_attrs = dev_context->pt_attrs; | ||
982 | kfree(pt_attrs->pg_info); | ||
983 | |||
984 | if (pt_attrs->l2_tbl_alloc_va) { | ||
985 | mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va, | ||
986 | pt_attrs->l2_tbl_alloc_pa, | ||
987 | pt_attrs->l2_tbl_alloc_sz); | ||
988 | } | ||
989 | if (pt_attrs->l1_tbl_alloc_va) { | ||
990 | mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va, | ||
991 | pt_attrs->l1_tbl_alloc_pa, | ||
992 | pt_attrs->l1_tbl_alloc_sz); | ||
993 | } | ||
994 | kfree(pt_attrs); | ||
995 | |||
996 | } | ||
997 | |||
998 | if (dev_context->resources) { | ||
999 | host_res = dev_context->resources; | ||
1000 | shm_size = drv_datap->shm_size; | ||
1001 | if (shm_size >= 0x10000) { | ||
1002 | if ((host_res->mem_base[1]) && | ||
1003 | (host_res->mem_phys[1])) { | ||
1004 | mem_free_phys_mem((void *) | ||
1005 | host_res->mem_base | ||
1006 | [1], | ||
1007 | host_res->mem_phys | ||
1008 | [1], shm_size); | ||
1009 | } | ||
1010 | } else { | ||
1011 | dev_dbg(bridge, "%s: Error getting shm size " | ||
1012 | "from registry: %x. Not calling " | ||
1013 | "mem_free_phys_mem\n", __func__, | ||
1014 | status); | ||
1015 | } | ||
1016 | host_res->mem_base[1] = 0; | ||
1017 | host_res->mem_phys[1] = 0; | ||
1018 | |||
1019 | if (host_res->mem_base[0]) | ||
1020 | iounmap((void *)host_res->mem_base[0]); | ||
1021 | if (host_res->mem_base[2]) | ||
1022 | iounmap((void *)host_res->mem_base[2]); | ||
1023 | if (host_res->mem_base[3]) | ||
1024 | iounmap((void *)host_res->mem_base[3]); | ||
1025 | if (host_res->mem_base[4]) | ||
1026 | iounmap((void *)host_res->mem_base[4]); | ||
1027 | if (host_res->dmmu_base) | ||
1028 | iounmap(host_res->dmmu_base); | ||
1029 | if (host_res->per_base) | ||
1030 | iounmap(host_res->per_base); | ||
1031 | if (host_res->per_pm_base) | ||
1032 | iounmap((void *)host_res->per_pm_base); | ||
1033 | if (host_res->core_pm_base) | ||
1034 | iounmap((void *)host_res->core_pm_base); | ||
1035 | |||
1036 | host_res->mem_base[0] = (u32) NULL; | ||
1037 | host_res->mem_base[2] = (u32) NULL; | ||
1038 | host_res->mem_base[3] = (u32) NULL; | ||
1039 | host_res->mem_base[4] = (u32) NULL; | ||
1040 | host_res->dmmu_base = NULL; | ||
1041 | |||
1042 | kfree(host_res); | ||
1043 | } | ||
1044 | |||
1045 | /* Free the driver's device context: */ | ||
1046 | kfree(drv_datap->base_img); | ||
1047 | kfree((void *)dev_ctxt); | ||
1048 | return status; | ||
1049 | } | ||
1050 | |||
1051 | static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt, | ||
1052 | u32 dsp_dest_addr, u32 dsp_src_addr, | ||
1053 | u32 ul_num_bytes, u32 mem_type) | ||
1054 | { | ||
1055 | int status = 0; | ||
1056 | u32 src_addr = dsp_src_addr; | ||
1057 | u32 dest_addr = dsp_dest_addr; | ||
1058 | u32 copy_bytes = 0; | ||
1059 | u32 total_bytes = ul_num_bytes; | ||
1060 | u8 host_buf[BUFFERSIZE]; | ||
1061 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
1062 | |||
1063 | while (total_bytes > 0 && !status) { | ||
1064 | copy_bytes = | ||
1065 | total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes; | ||
1066 | /* Read from External memory */ | ||
1067 | status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr, | ||
1068 | copy_bytes, mem_type); | ||
1069 | if (!status) { | ||
1070 | if (dest_addr < (dev_context->dsp_start_add + | ||
1071 | dev_context->internal_size)) { | ||
1072 | /* Write to Internal memory */ | ||
1073 | status = write_dsp_data(dev_ctxt, host_buf, | ||
1074 | dest_addr, copy_bytes, | ||
1075 | mem_type); | ||
1076 | } else { | ||
1077 | /* Write to External memory */ | ||
1078 | status = | ||
1079 | write_ext_dsp_data(dev_ctxt, host_buf, | ||
1080 | dest_addr, copy_bytes, | ||
1081 | mem_type, false); | ||
1082 | } | ||
1083 | } | ||
1084 | total_bytes -= copy_bytes; | ||
1085 | src_addr += copy_bytes; | ||
1086 | dest_addr += copy_bytes; | ||
1087 | } | ||
1088 | return status; | ||
1089 | } | ||
1090 | |||
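bridge_brd_mem_copy() streams the DSP-to-DSP copy through a fixed bounce buffer, so each iteration moves min(remaining, BUFFERSIZE) bytes. A trivial sketch of that chunking; the buffer size here is arbitrary, not the driver's BUFFERSIZE:

    /* Chunked copy through a fixed-size bounce buffer */
    #include <stdio.h>

    #define BUFSZ 1024u

    int main(void)
    {
            unsigned int total = 2600, copied = 0;

            while (total > 0) {
                    unsigned int chunk = total > BUFSZ ? BUFSZ : total;

                    printf("copy %u bytes at offset %u\n", chunk, copied);
                    copied += chunk;
                    total -= chunk;
            }
            return 0;
    }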
1091 | /* Mem Write does not halt the DSP to write unlike bridge_brd_write */ | ||
1092 | static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, | ||
1093 | u8 *host_buff, u32 dsp_addr, | ||
1094 | u32 ul_num_bytes, u32 mem_type) | ||
1095 | { | ||
1096 | int status = 0; | ||
1097 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
1098 | u32 ul_remain_bytes = 0; | ||
1099 | u32 ul_bytes = 0; | ||
1100 | |||
1101 | ul_remain_bytes = ul_num_bytes; | ||
1102 | while (ul_remain_bytes > 0 && !status) { | ||
1103 | ul_bytes = | ||
1104 | ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes; | ||
1105 | if (dsp_addr < (dev_context->dsp_start_add + | ||
1106 | dev_context->internal_size)) { | ||
1107 | status = | ||
1108 | write_dsp_data(dev_ctxt, host_buff, dsp_addr, | ||
1109 | ul_bytes, mem_type); | ||
1110 | } else { | ||
1111 | status = write_ext_dsp_data(dev_ctxt, host_buff, | ||
1112 | dsp_addr, ul_bytes, | ||
1113 | mem_type, true); | ||
1114 | } | ||
1115 | ul_remain_bytes -= ul_bytes; | ||
1116 | dsp_addr += ul_bytes; | ||
1117 | host_buff = host_buff + ul_bytes; | ||
1118 | } | ||
1119 | return status; | ||
1120 | } | ||
1121 | |||
1122 | /* | ||
1123 | * ======== bridge_brd_mem_map ======== | ||
1124 | * This function maps MPU buffer to the DSP address space. It performs | ||
1125 | * linear to physical address translation if required. It translates each | ||
1126 | * page since linear addresses can be physically non-contiguous | ||
1127 | * All address & size arguments are assumed to be page aligned (in proc.c) | ||
1128 | * | ||
1129 | * TODO: Disable MMU while updating the page tables (but that'll stall DSP) | ||
1130 | */ | ||
1131 | static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt, | ||
1132 | u32 ul_mpu_addr, u32 virt_addr, | ||
1133 | u32 ul_num_bytes, u32 ul_map_attr, | ||
1134 | struct page **mapped_pages) | ||
1135 | { | ||
1136 | u32 attrs; | ||
1137 | int status = 0; | ||
1138 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
1139 | struct hw_mmu_map_attrs_t hw_attrs; | ||
1140 | struct vm_area_struct *vma; | ||
1141 | struct mm_struct *mm = current->mm; | ||
1142 | u32 write = 0; | ||
1143 | u32 num_usr_pgs = 0; | ||
1144 | struct page *mapped_page, *pg; | ||
1145 | s32 pg_num; | ||
1146 | u32 va = virt_addr; | ||
1147 | struct task_struct *curr_task = current; | ||
1148 | u32 pg_i = 0; | ||
1149 | u32 mpu_addr, pa; | ||
1150 | |||
1151 | dev_dbg(bridge, | ||
1152 | "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n", | ||
1153 | __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes, | ||
1154 | ul_map_attr); | ||
1155 | if (ul_num_bytes == 0) | ||
1156 | return -EINVAL; | ||
1157 | |||
1158 | if (ul_map_attr & DSP_MAP_DIR_MASK) { | ||
1159 | attrs = ul_map_attr; | ||
1160 | } else { | ||
1161 | /* Assign default attributes */ | ||
1162 | attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16); | ||
1163 | } | ||
1164 | /* Take mapping properties */ | ||
1165 | if (attrs & DSP_MAPBIGENDIAN) | ||
1166 | hw_attrs.endianism = HW_BIG_ENDIAN; | ||
1167 | else | ||
1168 | hw_attrs.endianism = HW_LITTLE_ENDIAN; | ||
1169 | |||
1170 | hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t) | ||
1171 | ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2); | ||
1172 | /* Ignore element_size if mixed_size is enabled */ | ||
1173 | if (hw_attrs.mixed_size == 0) { | ||
1174 | if (attrs & DSP_MAPELEMSIZE8) { | ||
1175 | /* Size is 8 bit */ | ||
1176 | hw_attrs.element_size = HW_ELEM_SIZE8BIT; | ||
1177 | } else if (attrs & DSP_MAPELEMSIZE16) { | ||
1178 | /* Size is 16 bit */ | ||
1179 | hw_attrs.element_size = HW_ELEM_SIZE16BIT; | ||
1180 | } else if (attrs & DSP_MAPELEMSIZE32) { | ||
1181 | /* Size is 32 bit */ | ||
1182 | hw_attrs.element_size = HW_ELEM_SIZE32BIT; | ||
1183 | } else if (attrs & DSP_MAPELEMSIZE64) { | ||
1184 | /* Size is 64 bit */ | ||
1185 | hw_attrs.element_size = HW_ELEM_SIZE64BIT; | ||
1186 | } else { | ||
1187 | /* | ||
1188 | * Mixedsize isn't enabled, so size can't be | ||
1189 | * zero here | ||
1190 | */ | ||
1191 | return -EINVAL; | ||
1192 | } | ||
1193 | } | ||
1194 | if (attrs & DSP_MAPDONOTLOCK) | ||
1195 | hw_attrs.donotlockmpupage = 1; | ||
1196 | else | ||
1197 | hw_attrs.donotlockmpupage = 0; | ||
1198 | |||
1199 | if (attrs & DSP_MAPVMALLOCADDR) { | ||
1200 | return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr, | ||
1201 | ul_num_bytes, &hw_attrs); | ||
1202 | } | ||
1203 | /* | ||
1204 | * Do OS-specific user-va to pa translation. | ||
1205 | * Combine physically contiguous regions to reduce TLBs. | ||
1206 | * Pass the translated pa to pte_update. | ||
1207 | */ | ||
1208 | if ((attrs & DSP_MAPPHYSICALADDR)) { | ||
1209 | status = pte_update(dev_context, ul_mpu_addr, virt_addr, | ||
1210 | ul_num_bytes, &hw_attrs); | ||
1211 | goto func_cont; | ||
1212 | } | ||
1213 | |||
1214 | /* | ||
1215 | * Important Note: ul_mpu_addr is mapped from user application process | ||
1216 | * to current process - it must lie completely within the current | ||
1217 | * virtual memory address space in order to be of use to us here! | ||
1218 | */ | ||
1219 | down_read(&mm->mmap_sem); | ||
1220 | vma = find_vma(mm, ul_mpu_addr); | ||
1221 | if (vma) | ||
1222 | dev_dbg(bridge, | ||
1223 | "VMA for UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, " | ||
1224 | "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr, | ||
1225 | ul_num_bytes, vma->vm_start, vma->vm_end, | ||
1226 | vma->vm_flags); | ||
1227 | |||
1228 | /* | ||
1229 | * It is observed that under some circumstances, the user buffer is | ||
1230 | * spread across several VMAs. So loop through and check if the entire | ||
1231 | * user buffer is covered | ||
1232 | */ | ||
1233 | while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) { | ||
1234 | /* jump to the next VMA region */ | ||
1235 | vma = find_vma(mm, vma->vm_end + 1); | ||
1236 | dev_dbg(bridge, | ||
1237 | "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, " | ||
1238 | "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr, | ||
1239 | ul_num_bytes, vma->vm_start, vma->vm_end, | ||
1240 | vma->vm_flags); | ||
1241 | } | ||
1242 | if (!vma) { | ||
1243 | pr_err("%s: Failed to get VMA region for 0x%x (%d)\n", | ||
1244 | __func__, ul_mpu_addr, ul_num_bytes); | ||
1245 | status = -EINVAL; | ||
1246 | up_read(&mm->mmap_sem); | ||
1247 | goto func_cont; | ||
1248 | } | ||
1249 | |||
1250 | if (vma->vm_flags & VM_IO) { | ||
1251 | num_usr_pgs = ul_num_bytes / PG_SIZE4K; | ||
1252 | mpu_addr = ul_mpu_addr; | ||
1253 | |||
1254 | /* Get the physical addresses for user buffer */ | ||
1255 | for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) { | ||
1256 | pa = user_va2_pa(mm, mpu_addr); | ||
1257 | if (!pa) { | ||
1258 | status = -EPERM; | ||
1259 | pr_err("DSPBRIDGE: VM_IO mapping physical " | ||
1260 | "address is invalid\n"); | ||
1261 | break; | ||
1262 | } | ||
1263 | if (pfn_valid(__phys_to_pfn(pa))) { | ||
1264 | pg = PHYS_TO_PAGE(pa); | ||
1265 | get_page(pg); | ||
1266 | if (page_count(pg) < 1) { | ||
1267 | pr_err("Bad page in VM_IO buffer\n"); | ||
1268 | bad_page_dump(pa, pg); | ||
1269 | } | ||
1270 | } | ||
1271 | status = pte_set(dev_context->pt_attrs, pa, | ||
1272 | va, HW_PAGE_SIZE4KB, &hw_attrs); | ||
1273 | if (status) | ||
1274 | break; | ||
1275 | |||
1276 | va += HW_PAGE_SIZE4KB; | ||
1277 | mpu_addr += HW_PAGE_SIZE4KB; | ||
1278 | pa += HW_PAGE_SIZE4KB; | ||
1279 | } | ||
1280 | } else { | ||
1281 | num_usr_pgs = ul_num_bytes / PG_SIZE4K; | ||
1282 | if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) | ||
1283 | write = 1; | ||
1284 | |||
1285 | for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) { | ||
1286 | pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1, | ||
1287 | write, 1, &mapped_page, NULL); | ||
1288 | if (pg_num > 0) { | ||
1289 | if (page_count(mapped_page) < 1) { | ||
1290 | pr_err("Bad page count after doing " | ||
1291 | "get_user_pages on " | ||
1292 | "user buffer\n"); | ||
1293 | bad_page_dump(page_to_phys(mapped_page), | ||
1294 | mapped_page); | ||
1295 | } | ||
1296 | status = pte_set(dev_context->pt_attrs, | ||
1297 | page_to_phys(mapped_page), va, | ||
1298 | HW_PAGE_SIZE4KB, &hw_attrs); | ||
1299 | if (status) | ||
1300 | break; | ||
1301 | |||
1302 | if (mapped_pages) | ||
1303 | mapped_pages[pg_i] = mapped_page; | ||
1304 | |||
1305 | va += HW_PAGE_SIZE4KB; | ||
1306 | ul_mpu_addr += HW_PAGE_SIZE4KB; | ||
1307 | } else { | ||
1308 | pr_err("DSPBRIDGE: get_user_pages FAILED, " | ||
1309 | "MPU addr = 0x%x, " | ||
1310 | "vma->vm_flags = 0x%lx, " | ||
1311 | "get_user_pages Err " | ||
1312 | "Value = %d, Buffer " | ||
1313 | "size=0x%x\n", ul_mpu_addr, | ||
1314 | vma->vm_flags, pg_num, ul_num_bytes); | ||
1315 | status = -EPERM; | ||
1316 | break; | ||
1317 | } | ||
1318 | } | ||
1319 | } | ||
1320 | up_read(&mm->mmap_sem); | ||
1321 | func_cont: | ||
1322 | if (status) { | ||
1323 | /* | ||
1324 | * Roll back the pages already mapped in case mapping | ||
1325 | * failed midway | ||
1326 | */ | ||
1327 | if (pg_i) { | ||
1328 | bridge_brd_mem_un_map(dev_context, virt_addr, | ||
1329 | (pg_i * PG_SIZE4K)); | ||
1330 | } | ||
1331 | status = -EPERM; | ||
1332 | } | ||
1333 | /* | ||
1334 | * In any case, flush the TLB | ||
1335 | * This is called from here instead of from pte_update to avoid unnecessary | ||
1336 | * repetition while mapping non-contiguous physical regions of a virtual | ||
1337 | * region | ||
1338 | */ | ||
1339 | flush_all(dev_context); | ||
1340 | dev_dbg(bridge, "%s status %x\n", __func__, status); | ||
1341 | return status; | ||
1342 | } | ||
1343 | |||
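When pte_set() fails partway through the per-page loop in bridge_brd_mem_map(), the pages already mapped (pg_i of them) are unmapped at func_cont before returning. A generic userspace sketch of that rollback-on-partial-failure pattern, with stand-in map_one()/unmap_one() helpers:

    /* Undo the work already done when a later step fails */
    #include <stdbool.h>
    #include <stdio.h>

    static bool map_one(unsigned int i)
    {
            return i != 3;                  /* pretend page 3 fails */
    }

    static void unmap_one(unsigned int i)
    {
            printf("unmapping page %u\n", i);
    }

    int main(void)
    {
            unsigned int i, num_pages = 6;

            for (i = 0; i < num_pages; i++) {
                    if (!map_one(i)) {
                            while (i--)
                                    unmap_one(i);
                            return 1;       /* report the failure */
                    }
            }
            return 0;
    }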
1344 | /* | ||
1345 | * ======== bridge_brd_mem_un_map ======== | ||
1346 | * Invalidate the PTEs for the DSP VA block to be unmapped. | ||
1347 | * | ||
1348 | * PTEs of a mapped memory block are contiguous in any page table | ||
1349 | * So, instead of looking up the PTE address for every 4K block, | ||
1350 | * we clear consecutive PTEs until we unmap all the bytes | ||
1351 | */ | ||
1352 | static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, | ||
1353 | u32 virt_addr, u32 ul_num_bytes) | ||
1354 | { | ||
1355 | u32 l1_base_va; | ||
1356 | u32 l2_base_va; | ||
1357 | u32 l2_base_pa; | ||
1358 | u32 l2_page_num; | ||
1359 | u32 pte_val; | ||
1360 | u32 pte_size; | ||
1361 | u32 pte_count; | ||
1362 | u32 pte_addr_l1; | ||
1363 | u32 pte_addr_l2 = 0; | ||
1364 | u32 rem_bytes; | ||
1365 | u32 rem_bytes_l2; | ||
1366 | u32 va_curr; | ||
1367 | struct page *pg = NULL; | ||
1368 | int status = 0; | ||
1369 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
1370 | struct pg_table_attrs *pt = dev_context->pt_attrs; | ||
1371 | u32 temp; | ||
1372 | u32 paddr; | ||
1373 | u32 numof4k_pages = 0; | ||
1374 | |||
1375 | va_curr = virt_addr; | ||
1376 | rem_bytes = ul_num_bytes; | ||
1377 | rem_bytes_l2 = 0; | ||
1378 | l1_base_va = pt->l1_base_va; | ||
1379 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr); | ||
1380 | dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, " | ||
1381 | "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr, | ||
1382 | ul_num_bytes, l1_base_va, pte_addr_l1); | ||
1383 | |||
1384 | while (rem_bytes && !status) { | ||
1385 | u32 va_curr_orig = va_curr; | ||
1386 | /* Find whether the L1 PTE points to a valid L2 PT */ | ||
1387 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr); | ||
1388 | pte_val = *(u32 *) pte_addr_l1; | ||
1389 | pte_size = hw_mmu_pte_size_l1(pte_val); | ||
1390 | |||
1391 | if (pte_size != HW_MMU_COARSE_PAGE_SIZE) | ||
1392 | goto skip_coarse_page; | ||
1393 | |||
1394 | /* | ||
1395 | * Get the L2 PA from the L1 PTE, and find | ||
1396 | * corresponding L2 VA | ||
1397 | */ | ||
1398 | l2_base_pa = hw_mmu_pte_coarse_l1(pte_val); | ||
1399 | l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va; | ||
1400 | l2_page_num = | ||
1401 | (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE; | ||
1402 | /* | ||
1403 | * Find the L2 PTE address from which we will start | ||
1404 | * clearing, the number of PTEs to be cleared on this | ||
1405 | * page, and the size of VA space that needs to be | ||
1406 | * cleared on this L2 page | ||
1407 | */ | ||
1408 | pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr); | ||
1409 | pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1); | ||
1410 | pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32); | ||
1411 | if (rem_bytes < (pte_count * PG_SIZE4K)) | ||
1412 | pte_count = rem_bytes / PG_SIZE4K; | ||
1413 | rem_bytes_l2 = pte_count * PG_SIZE4K; | ||
1414 | |||
1415 | /* | ||
1416 | * Unmap the VA space on this L2 PT. A quicker way | ||
1417 | * would be to clear pte_count entries starting from | ||
1418 | * pte_addr_l2. However, the code below checks that we don't | ||
1419 | * clear invalid entries or less than 64KB for a 64KB | ||
1420 | * entry. Similar checking is done for L1 PTEs too | ||
1421 | * below | ||
1422 | */ | ||
1423 | while (rem_bytes_l2 && !status) { | ||
1424 | pte_val = *(u32 *) pte_addr_l2; | ||
1425 | pte_size = hw_mmu_pte_size_l2(pte_val); | ||
1426 | /* va_curr aligned to pte_size? */ | ||
1427 | if (pte_size == 0 || rem_bytes_l2 < pte_size || | ||
1428 | va_curr & (pte_size - 1)) { | ||
1429 | status = -EPERM; | ||
1430 | break; | ||
1431 | } | ||
1432 | |||
1433 | /* Collect Physical addresses from VA */ | ||
1434 | paddr = (pte_val & ~(pte_size - 1)); | ||
1435 | if (pte_size == HW_PAGE_SIZE64KB) | ||
1436 | numof4k_pages = 16; | ||
1437 | else | ||
1438 | numof4k_pages = 1; | ||
1439 | temp = 0; | ||
1440 | while (temp++ < numof4k_pages) { | ||
1441 | if (!pfn_valid(__phys_to_pfn(paddr))) { | ||
1442 | paddr += HW_PAGE_SIZE4KB; | ||
1443 | continue; | ||
1444 | } | ||
1445 | pg = PHYS_TO_PAGE(paddr); | ||
1446 | if (page_count(pg) < 1) { | ||
1447 | pr_info("DSPBRIDGE: UNMAP function: " | ||
1448 | "COUNT 0 FOR PA 0x%x, size = " | ||
1449 | "0x%x\n", paddr, ul_num_bytes); | ||
1450 | bad_page_dump(paddr, pg); | ||
1451 | } else { | ||
1452 | set_page_dirty(pg); | ||
1453 | page_cache_release(pg); | ||
1454 | } | ||
1455 | paddr += HW_PAGE_SIZE4KB; | ||
1456 | } | ||
1457 | if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) { | ||
1458 | status = -EPERM; | ||
1459 | goto EXIT_LOOP; | ||
1460 | } | ||
1461 | |||
1462 | status = 0; | ||
1463 | rem_bytes_l2 -= pte_size; | ||
1464 | va_curr += pte_size; | ||
1465 | pte_addr_l2 += (pte_size >> 12) * sizeof(u32); | ||
1466 | } | ||
1467 | spin_lock(&pt->pg_lock); | ||
1468 | if (rem_bytes_l2 == 0) { | ||
1469 | pt->pg_info[l2_page_num].num_entries -= pte_count; | ||
1470 | if (pt->pg_info[l2_page_num].num_entries == 0) { | ||
1471 | /* | ||
1472 | * Clear the L1 PTE pointing to the L2 PT | ||
1473 | */ | ||
1474 | if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig, | ||
1475 | HW_MMU_COARSE_PAGE_SIZE)) | ||
1476 | status = 0; | ||
1477 | else { | ||
1478 | status = -EPERM; | ||
1479 | spin_unlock(&pt->pg_lock); | ||
1480 | goto EXIT_LOOP; | ||
1481 | } | ||
1482 | } | ||
1483 | rem_bytes -= pte_count * PG_SIZE4K; | ||
1484 | } else | ||
1485 | status = -EPERM; | ||
1486 | |||
1487 | spin_unlock(&pt->pg_lock); | ||
1488 | continue; | ||
1489 | skip_coarse_page: | ||
1490 | /* va_curr aligned to pte_size? */ | ||
1491 | /* pte_size = 1 MB or 16 MB */ | ||
1492 | if (pte_size == 0 || rem_bytes < pte_size || | ||
1493 | va_curr & (pte_size - 1)) { | ||
1494 | status = -EPERM; | ||
1495 | break; | ||
1496 | } | ||
1497 | |||
1498 | if (pte_size == HW_PAGE_SIZE1MB) | ||
1499 | numof4k_pages = 256; | ||
1500 | else | ||
1501 | numof4k_pages = 4096; | ||
1502 | temp = 0; | ||
1503 | /* Collect Physical addresses from VA */ | ||
1504 | paddr = (pte_val & ~(pte_size - 1)); | ||
1505 | while (temp++ < numof4k_pages) { | ||
1506 | if (pfn_valid(__phys_to_pfn(paddr))) { | ||
1507 | pg = PHYS_TO_PAGE(paddr); | ||
1508 | if (page_count(pg) < 1) { | ||
1509 | pr_info("DSPBRIDGE: UNMAP function: " | ||
1510 | "COUNT 0 FOR PA 0x%x, size = " | ||
1511 | "0x%x\n", paddr, ul_num_bytes); | ||
1512 | bad_page_dump(paddr, pg); | ||
1513 | } else { | ||
1514 | set_page_dirty(pg); | ||
1515 | page_cache_release(pg); | ||
1516 | } | ||
1517 | } | ||
1518 | paddr += HW_PAGE_SIZE4KB; | ||
1519 | } | ||
1520 | if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) { | ||
1521 | status = 0; | ||
1522 | rem_bytes -= pte_size; | ||
1523 | va_curr += pte_size; | ||
1524 | } else { | ||
1525 | status = -EPERM; | ||
1526 | goto EXIT_LOOP; | ||
1527 | } | ||
1528 | } | ||
1529 | /* | ||
1530 | * It is better to flush the TLB here, so that any stale old entries | ||
1531 | * get flushed | ||
1532 | */ | ||
1533 | EXIT_LOOP: | ||
1534 | flush_all(dev_context); | ||
1535 | dev_dbg(bridge, | ||
1536 | "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x," | ||
1537 | " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1, | ||
1538 | pte_addr_l2, rem_bytes, rem_bytes_l2, status); | ||
1539 | return status; | ||
1540 | } | ||
1541 | |||
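The pte_count arithmetic in the unmap loop deserves a worked example: it converts the byte offset of the current PTE inside its L2 (coarse) table into the number of entries remaining on that table, then caps it by the bytes still to be unmapped. The sketch below assumes the usual ARM coarse-table size of 1 KB (256 u32 entries) for HW_MMU_COARSE_PAGE_SIZE; the addresses are invented.

    /* Worked example of the pte_count computation */
    #include <stdio.h>

    #define COARSE_SIZE 1024u               /* bytes per L2 table (assumed) */
    #define PG_4K       4096u

    int main(void)
    {
            unsigned int pte_addr_l2 = 0xd0000390; /* hypothetical L2 PTE address */
            unsigned int rem_bytes   = 0x200000;   /* 2 MB still to unmap */

            /* offset of this PTE within its 1 KB table */
            unsigned int pte_count = pte_addr_l2 & (COARSE_SIZE - 1);
            /* entries from here to the end of the table */
            pte_count = (COARSE_SIZE - pte_count) / sizeof(unsigned int);
            /* never clear more than what is left to unmap */
            if (rem_bytes < pte_count * PG_4K)
                    pte_count = rem_bytes / PG_4K;

            /* offset 0x390 -> (1024 - 912) / 4 = 28 entries = 112 KB of VA */
            printf("entries to clear on this L2 table: %u\n", pte_count);
            return 0;
    }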
1542 | /* | ||
1543 | * ======== user_va2_pa ======== | ||
1544 | * Purpose: | ||
1545 | * This function walks through the page tables to convert a userland | ||
1546 | * virtual address to physical address | ||
1547 | */ | ||
1548 | static u32 user_va2_pa(struct mm_struct *mm, u32 address) | ||
1549 | { | ||
1550 | pgd_t *pgd; | ||
1551 | pud_t *pud; | ||
1552 | pmd_t *pmd; | ||
1553 | pte_t *ptep, pte; | ||
1554 | |||
1555 | pgd = pgd_offset(mm, address); | ||
1556 | if (pgd_none(*pgd) || pgd_bad(*pgd)) | ||
1557 | return 0; | ||
1558 | |||
1559 | pud = pud_offset(pgd, address); | ||
1560 | if (pud_none(*pud) || pud_bad(*pud)) | ||
1561 | return 0; | ||
1562 | |||
1563 | pmd = pmd_offset(pud, address); | ||
1564 | if (pmd_none(*pmd) || pmd_bad(*pmd)) | ||
1565 | return 0; | ||
1566 | |||
1567 | ptep = pte_offset_map(pmd, address); | ||
1568 | if (ptep) { | ||
1569 | pte = *ptep; | ||
1570 | if (pte_present(pte)) | ||
1571 | return pte & PAGE_MASK; | ||
1572 | } | ||
1573 | |||
1574 | return 0; | ||
1575 | } | ||
1576 | |||
1577 | /* | ||
1578 | * ======== pte_update ======== | ||
1579 | * This function calculates the optimum page-aligned addresses and sizes | ||
1580 | * Caller must pass page-aligned values | ||
1581 | */ | ||
1582 | static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, | ||
1583 | u32 va, u32 size, | ||
1584 | struct hw_mmu_map_attrs_t *map_attrs) | ||
1585 | { | ||
1586 | u32 i; | ||
1587 | u32 all_bits; | ||
1588 | u32 pa_curr = pa; | ||
1589 | u32 va_curr = va; | ||
1590 | u32 num_bytes = size; | ||
1591 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
1592 | int status = 0; | ||
1593 | u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB, | ||
1594 | HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB | ||
1595 | }; | ||
1596 | |||
1597 | while (num_bytes && !status) { | ||
1598 | /* To find the max. page size with which both PA & VA are | ||
1599 | * aligned */ | ||
1600 | all_bits = pa_curr | va_curr; | ||
1601 | |||
1602 | for (i = 0; i < 4; i++) { | ||
1603 | if ((num_bytes >= page_size[i]) && ((all_bits & | ||
1604 | (page_size[i] - | ||
1605 | 1)) == 0)) { | ||
1606 | status = | ||
1607 | pte_set(dev_context->pt_attrs, pa_curr, | ||
1608 | va_curr, page_size[i], map_attrs); | ||
1609 | pa_curr += page_size[i]; | ||
1610 | va_curr += page_size[i]; | ||
1611 | num_bytes -= page_size[i]; | ||
1612 | /* Don't try smaller sizes. Hopefully we have | ||
1613 | * reached an address aligned to a bigger page | ||
1614 | * size */ | ||
1615 | break; | ||
1616 | } | ||
1617 | } | ||
1618 | } | ||
1619 | |||
1620 | return status; | ||
1621 | } | ||
1622 | |||
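pte_update() is a greedy chunker: on each pass it picks the largest page size from {16 MB, 1 MB, 64 KB, 4 KB} that both fits the remaining length and matches the combined PA|VA alignment. A standalone trace with made-up addresses, which ends up mapping one 64 KB page followed by two 1 MB pages:

    /* Greedy page-size selection, as in pte_update() */
    #include <stdio.h>

    int main(void)
    {
            unsigned int sizes[] = { 0x1000000, 0x100000, 0x10000, 0x1000 };
            unsigned int pa = 0x820f0000, va = 0x200f0000;  /* 64 KB aligned */
            unsigned int num_bytes = 0x210000;              /* 64 KB + 2 MB */
            unsigned int i;

            while (num_bytes) {
                    unsigned int all_bits = pa | va;

                    for (i = 0; i < 4; i++) {
                            if (num_bytes >= sizes[i] &&
                                (all_bits & (sizes[i] - 1)) == 0) {
                                    printf("map 0x%x bytes at pa 0x%x\n",
                                           sizes[i], pa);
                                    pa += sizes[i];
                                    va += sizes[i];
                                    num_bytes -= sizes[i];
                                    break;
                            }
                    }
            }
            return 0;
    }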
1623 | /* | ||
1624 | * ======== pte_set ======== | ||
1625 | * This function calculates PTE address (MPU virtual) to be updated | ||
1626 | * It also manages the L2 page tables | ||
1627 | */ | ||
1628 | static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, | ||
1629 | u32 size, struct hw_mmu_map_attrs_t *attrs) | ||
1630 | { | ||
1631 | u32 i; | ||
1632 | u32 pte_val; | ||
1633 | u32 pte_addr_l1; | ||
1634 | u32 pte_size; | ||
1635 | /* Base address of the PT that will be updated */ | ||
1636 | u32 pg_tbl_va; | ||
1637 | u32 l1_base_va; | ||
1638 | /* Compiler warns that the next three variables might be used | ||
1639 | * uninitialized in this function. Doesn't seem so. Working around, | ||
1640 | * anyways. */ | ||
1641 | u32 l2_base_va = 0; | ||
1642 | u32 l2_base_pa = 0; | ||
1643 | u32 l2_page_num = 0; | ||
1644 | int status = 0; | ||
1645 | |||
1646 | l1_base_va = pt->l1_base_va; | ||
1647 | pg_tbl_va = l1_base_va; | ||
1648 | if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) { | ||
1649 | /* Find whether the L1 PTE points to a valid L2 PT */ | ||
1650 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va); | ||
1651 | if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) { | ||
1652 | pte_val = *(u32 *) pte_addr_l1; | ||
1653 | pte_size = hw_mmu_pte_size_l1(pte_val); | ||
1654 | } else { | ||
1655 | return -EPERM; | ||
1656 | } | ||
1657 | spin_lock(&pt->pg_lock); | ||
1658 | if (pte_size == HW_MMU_COARSE_PAGE_SIZE) { | ||
1659 | /* Get the L2 PA from the L1 PTE, and find | ||
1660 | * corresponding L2 VA */ | ||
1661 | l2_base_pa = hw_mmu_pte_coarse_l1(pte_val); | ||
1662 | l2_base_va = | ||
1663 | l2_base_pa - pt->l2_base_pa + pt->l2_base_va; | ||
1664 | l2_page_num = | ||
1665 | (l2_base_pa - | ||
1666 | pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE; | ||
1667 | } else if (pte_size == 0) { | ||
1668 | /* L1 PTE is invalid. Allocate a L2 PT and | ||
1669 | * point the L1 PTE to it */ | ||
1670 | /* Find a free L2 PT. */ | ||
1671 | for (i = 0; (i < pt->l2_num_pages) && | ||
1672 | (pt->pg_info[i].num_entries != 0); i++) | ||
1673 | ; | ||
1674 | if (i < pt->l2_num_pages) { | ||
1675 | l2_page_num = i; | ||
1676 | l2_base_pa = pt->l2_base_pa + (l2_page_num * | ||
1677 | HW_MMU_COARSE_PAGE_SIZE); | ||
1678 | l2_base_va = pt->l2_base_va + (l2_page_num * | ||
1679 | HW_MMU_COARSE_PAGE_SIZE); | ||
1680 | /* Endianness attributes are ignored for | ||
1681 | * HW_MMU_COARSE_PAGE_SIZE */ | ||
1682 | status = | ||
1683 | hw_mmu_pte_set(l1_base_va, l2_base_pa, va, | ||
1684 | HW_MMU_COARSE_PAGE_SIZE, | ||
1685 | attrs); | ||
1686 | } else { | ||
1687 | status = -ENOMEM; | ||
1688 | } | ||
1689 | } else { | ||
1690 | /* Found valid L1 PTE of another size. | ||
1691 | * Should not overwrite it. */ | ||
1692 | status = -EPERM; | ||
1693 | } | ||
1694 | if (!status) { | ||
1695 | pg_tbl_va = l2_base_va; | ||
1696 | if (size == HW_PAGE_SIZE64KB) | ||
1697 | pt->pg_info[l2_page_num].num_entries += 16; | ||
1698 | else | ||
1699 | pt->pg_info[l2_page_num].num_entries++; | ||
1700 | dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum " | ||
1701 | "%x, num_entries %x\n", l2_base_va, | ||
1702 | l2_base_pa, l2_page_num, | ||
1703 | pt->pg_info[l2_page_num].num_entries); | ||
1704 | } | ||
1705 | spin_unlock(&pt->pg_lock); | ||
1706 | } | ||
1707 | if (!status) { | ||
1708 | dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n", | ||
1709 | pg_tbl_va, pa, va, size); | ||
1710 | dev_dbg(bridge, "PTE: endianism %x, element_size %x, " | ||
1711 | "mixed_size %x\n", attrs->endianism, | ||
1712 | attrs->element_size, attrs->mixed_size); | ||
1713 | status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs); | ||
1714 | } | ||
1715 | |||
1716 | return status; | ||
1717 | } | ||
1718 | |||
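When the L1 entry is still invalid, pte_set() claims the first L2 table whose num_entries counter is zero. A tiny sketch of that scan; the table count and counter values are invented:

    /* Find the first free L2 table by its entry counter */
    #include <stdio.h>

    int main(void)
    {
            unsigned int num_entries[8] = { 256, 16, 0, 0, 0, 0, 0, 0 };
            unsigned int i, l2_num_pages = 8;

            for (i = 0; i < l2_num_pages && num_entries[i] != 0; i++)
                    ;
            if (i < l2_num_pages)
                    printf("using free L2 table %u\n", i);  /* table 2 */
            else
                    printf("no free L2 table (-ENOMEM)\n");
            return 0;
    }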
1719 | /* Memory map kernel VA -- memory allocated with vmalloc */ | ||
1720 | static int mem_map_vmalloc(struct bridge_dev_context *dev_context, | ||
1721 | u32 ul_mpu_addr, u32 virt_addr, | ||
1722 | u32 ul_num_bytes, | ||
1723 | struct hw_mmu_map_attrs_t *hw_attrs) | ||
1724 | { | ||
1725 | int status = 0; | ||
1726 | struct page *page[1]; | ||
1727 | u32 i; | ||
1728 | u32 pa_curr; | ||
1729 | u32 pa_next; | ||
1730 | u32 va_curr; | ||
1731 | u32 size_curr; | ||
1732 | u32 num_pages; | ||
1733 | u32 pa; | ||
1734 | u32 num_of4k_pages; | ||
1735 | u32 temp = 0; | ||
1736 | |||
1737 | /* | ||
1738 | * Do Kernel va to pa translation. | ||
1739 | * Combine physically contiguous regions to reduce TLBs. | ||
1740 | * Pass the translated pa to pte_update. | ||
1741 | */ | ||
1742 | num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */ | ||
1743 | i = 0; | ||
1744 | va_curr = ul_mpu_addr; | ||
1745 | page[0] = vmalloc_to_page((void *)va_curr); | ||
1746 | pa_next = page_to_phys(page[0]); | ||
1747 | while (!status && (i < num_pages)) { | ||
1748 | /* | ||
1749 | * Reuse pa_next from the previous iteration to avoid | ||
1750 | * an extra va2pa call | ||
1751 | */ | ||
1752 | pa_curr = pa_next; | ||
1753 | size_curr = PAGE_SIZE; | ||
1754 | /* | ||
1755 | * If the next page is physically contiguous, | ||
1756 | * map it with the current one by increasing | ||
1757 | * the size of the region to be mapped | ||
1758 | */ | ||
1759 | while (++i < num_pages) { | ||
1760 | page[0] = | ||
1761 | vmalloc_to_page((void *)(va_curr + size_curr)); | ||
1762 | pa_next = page_to_phys(page[0]); | ||
1763 | |||
1764 | if (pa_next == (pa_curr + size_curr)) | ||
1765 | size_curr += PAGE_SIZE; | ||
1766 | else | ||
1767 | break; | ||
1768 | |||
1769 | } | ||
1770 | if (pa_next == 0) { | ||
1771 | status = -ENOMEM; | ||
1772 | break; | ||
1773 | } | ||
1774 | pa = pa_curr; | ||
1775 | num_of4k_pages = size_curr / HW_PAGE_SIZE4KB; | ||
1776 | while (temp++ < num_of4k_pages) { | ||
1777 | get_page(PHYS_TO_PAGE(pa)); | ||
1778 | pa += HW_PAGE_SIZE4KB; | ||
1779 | } | ||
1780 | status = pte_update(dev_context, pa_curr, virt_addr + | ||
1781 | (va_curr - ul_mpu_addr), size_curr, | ||
1782 | hw_attrs); | ||
1783 | va_curr += size_curr; | ||
1784 | } | ||
1785 | /* | ||
1786 | * In any case, flush the TLB | ||
1787 | * This is called from here instead of from pte_update to avoid unnecessary | ||
1788 | * repetition while mapping non-contiguous physical regions of a virtual | ||
1789 | * region | ||
1790 | */ | ||
1791 | flush_all(dev_context); | ||
1792 | dev_dbg(bridge, "%s status %x\n", __func__, status); | ||
1793 | return status; | ||
1794 | } | ||
1795 | |||
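mem_map_vmalloc() folds physically contiguous vmalloc pages into a single region before calling pte_update(), so fewer (and potentially larger) PTEs are needed. A userspace sketch of the merging, with an invented physical-address list:

    /* Merge physically contiguous pages into regions */
    #include <stdio.h>

    #define PAGE_SZ 4096u

    int main(void)
    {
            /* physical address of each vmalloc page, in VA order */
            unsigned int pa[] = { 0x81000000, 0x81001000, 0x81002000,
                                  0x85400000, 0x85401000, 0x9a000000 };
            unsigned int num_pages = 6, i = 0;

            while (i < num_pages) {
                    unsigned int pa_curr = pa[i];
                    unsigned int size_curr = PAGE_SZ;

                    while (++i < num_pages && pa[i] == pa_curr + size_curr)
                            size_curr += PAGE_SZ;

                    printf("region pa 0x%x, size 0x%x\n", pa_curr, size_curr);
            }
            return 0;
    }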
1796 | /* | ||
1797 | * ======== wait_for_start ======== | ||
1798 | * Wait for the signal from DSP that it has started, or time out. | ||
1799 | */ | ||
1800 | bool wait_for_start(struct bridge_dev_context *dev_context, | ||
1801 | void __iomem *sync_addr) | ||
1802 | { | ||
1803 | u16 timeout = TIHELEN_ACKTIMEOUT; | ||
1804 | |||
1805 | /* Wait for response from board */ | ||
1806 | while (__raw_readw(sync_addr) && --timeout) | ||
1807 | udelay(10); | ||
1808 | |||
1809 | /* If timed out: return false */ | ||
1810 | if (!timeout) { | ||
1811 | pr_err("%s: Timed out waiting for DSP to start\n", __func__); | ||
1812 | return false; | ||
1813 | } | ||
1814 | return true; | ||
1815 | } | ||
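The start-up handshake is a bounded poll: the host seeds a non-zero sync word in shared memory, releases RST1, and waits for the DSP's c_int00 code to clear it; only then is 0xCAFECAFE written back to signal that the OPP table update is done. A minimal sketch of the polling shape (the timeout count is arbitrary, not TIHELEN_ACKTIMEOUT):

    /* Bounded poll on a shared flag, as in wait_for_start() */
    #include <stdbool.h>
    #include <stdio.h>

    static volatile unsigned short sync_word = 0xffff;

    static bool wait_for_clear(void)
    {
            unsigned short timeout = 10000;

            while (sync_word && --timeout)
                    ;                       /* the driver udelay()s here */

            return timeout != 0;
    }

    int main(void)
    {
            sync_word = 0;                  /* pretend the DSP cleared it */
            printf("started: %s\n", wait_for_clear() ? "yes" : "timed out");
            return 0;
    }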
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c deleted file mode 100644 index 657104f37f7d..000000000000 --- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c +++ /dev/null | |||
@@ -1,556 +0,0 @@ | |||
1 | /* | ||
2 | * tiomap_pwr.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Implementation of DSP wake/sleep routines. | ||
7 | * | ||
8 | * Copyright (C) 2007-2008 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | /* ----------------------------------- Host OS */ | ||
20 | #include <dspbridge/host_os.h> | ||
21 | |||
22 | #include <linux/platform_data/dsp-omap.h> | ||
23 | |||
24 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
25 | #include <dspbridge/dbdefs.h> | ||
26 | #include <dspbridge/drv.h> | ||
27 | #include <dspbridge/io_sm.h> | ||
28 | |||
29 | /* ----------------------------------- Platform Manager */ | ||
30 | #include <dspbridge/brddefs.h> | ||
31 | #include <dspbridge/dev.h> | ||
32 | #include <dspbridge/io.h> | ||
33 | |||
34 | /* ------------------------------------ Hardware Abstraction Layer */ | ||
35 | #include <hw_defs.h> | ||
36 | #include <hw_mmu.h> | ||
37 | |||
38 | #include <dspbridge/pwr.h> | ||
39 | |||
40 | /* ----------------------------------- Bridge Driver */ | ||
41 | #include <dspbridge/dspdeh.h> | ||
42 | #include <dspbridge/wdt.h> | ||
43 | |||
44 | /* ----------------------------------- specific to this file */ | ||
45 | #include "_tiomap.h" | ||
46 | #include "_tiomap_pwr.h" | ||
47 | #include <mach-omap2/prm-regbits-34xx.h> | ||
48 | #include <mach-omap2/cm-regbits-34xx.h> | ||
49 | |||
50 | #define PWRSTST_TIMEOUT 200 | ||
51 | |||
52 | /* | ||
53 | * ======== handle_constraints_set ======== | ||
54 | * Sets new DSP constraint | ||
55 | */ | ||
56 | int handle_constraints_set(struct bridge_dev_context *dev_context, | ||
57 | void *pargs) | ||
58 | { | ||
59 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
60 | u32 *constraint_val; | ||
61 | struct omap_dsp_platform_data *pdata = | ||
62 | omap_dspbridge_dev->dev.platform_data; | ||
63 | |||
64 | constraint_val = (u32 *) (pargs); | ||
65 | /* Read the target value requested by DSP */ | ||
66 | dev_dbg(bridge, "OPP: %s opp requested = 0x%x\n", __func__, | ||
67 | (u32) *(constraint_val + 1)); | ||
68 | |||
69 | /* Set the new opp value */ | ||
70 | if (pdata->dsp_set_min_opp) | ||
71 | (*pdata->dsp_set_min_opp) ((u32) *(constraint_val + 1)); | ||
72 | #endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */ | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * ======== handle_hibernation_from_dsp ======== | ||
78 | * Handle Hibernation requested from DSP | ||
79 | */ | ||
80 | int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context) | ||
81 | { | ||
82 | int status = 0; | ||
83 | #ifdef CONFIG_PM | ||
84 | u16 timeout = PWRSTST_TIMEOUT / 10; | ||
85 | u32 pwr_state; | ||
86 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
87 | u32 opplevel; | ||
88 | struct io_mgr *hio_mgr; | ||
89 | #endif | ||
90 | struct omap_dsp_platform_data *pdata = | ||
91 | omap_dspbridge_dev->dev.platform_data; | ||
92 | |||
93 | pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) & | ||
94 | OMAP_POWERSTATEST_MASK; | ||
95 | /* Wait for DSP to move into OFF state */ | ||
96 | while ((pwr_state != PWRDM_POWER_OFF) && --timeout) { | ||
97 | if (msleep_interruptible(10)) { | ||
98 | pr_err("Waiting for DSP OFF mode interrupted\n"); | ||
99 | return -EPERM; | ||
100 | } | ||
101 | pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, | ||
102 | OMAP2_PM_PWSTST) & | ||
103 | OMAP_POWERSTATEST_MASK; | ||
104 | } | ||
105 | if (timeout == 0) { | ||
106 | pr_err("%s: Timed out waiting for DSP off mode\n", __func__); | ||
107 | status = -ETIMEDOUT; | ||
108 | return status; | ||
109 | } else { | ||
110 | |||
111 | /* Save mailbox settings */ | ||
112 | omap_mbox_save_ctx(dev_context->mbox); | ||
113 | |||
114 | /* Turn off DSP Peripheral clocks and DSP Load monitor timer */ | ||
115 | status = dsp_clock_disable_all(dev_context->dsp_per_clks); | ||
116 | |||
117 | /* Disable wdt on hibernation. */ | ||
118 | dsp_wdt_enable(false); | ||
119 | |||
120 | if (!status) { | ||
121 | /* Update the Bridge driver state */ | ||
122 | dev_context->brd_state = BRD_DSP_HIBERNATION; | ||
123 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
124 | status = | ||
125 | dev_get_io_mgr(dev_context->dev_obj, &hio_mgr); | ||
126 | if (!hio_mgr) { | ||
127 | status = DSP_EHANDLE; | ||
128 | return status; | ||
129 | } | ||
130 | io_sh_msetting(hio_mgr, SHM_GETOPP, &opplevel); | ||
131 | |||
132 | /* | ||
133 | * Set the OPP to low level before moving to OFF | ||
134 | * mode | ||
135 | */ | ||
136 | if (pdata->dsp_set_min_opp) | ||
137 | (*pdata->dsp_set_min_opp) (VDD1_OPP1); | ||
138 | status = 0; | ||
139 | #endif /* CONFIG_TIDSPBRIDGE_DVFS */ | ||
140 | } | ||
141 | } | ||
142 | #endif | ||
143 | return status; | ||
144 | } | ||
145 | |||
146 | /* | ||
147 | * ======== sleep_dsp ======== | ||
148 | * Put DSP in low power consuming state. | ||
149 | */ | ||
150 | int sleep_dsp(struct bridge_dev_context *dev_context, u32 dw_cmd, | ||
151 | void *pargs) | ||
152 | { | ||
153 | int status = 0; | ||
154 | #ifdef CONFIG_PM | ||
155 | #ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR | ||
156 | struct deh_mgr *hdeh_mgr; | ||
157 | #endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */ | ||
158 | u16 timeout = PWRSTST_TIMEOUT / 10; | ||
159 | u32 pwr_state, target_pwr_state; | ||
160 | struct omap_dsp_platform_data *pdata = | ||
161 | omap_dspbridge_dev->dev.platform_data; | ||
162 | |||
163 | /* Check if sleep code is valid */ | ||
164 | if ((dw_cmd != PWR_DEEPSLEEP) && (dw_cmd != PWR_EMERGENCYDEEPSLEEP)) | ||
165 | return -EINVAL; | ||
166 | |||
167 | switch (dev_context->brd_state) { | ||
168 | case BRD_RUNNING: | ||
169 | omap_mbox_save_ctx(dev_context->mbox); | ||
170 | if (dsp_test_sleepstate == PWRDM_POWER_OFF) { | ||
171 | sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE); | ||
172 | dev_dbg(bridge, "PM: %s - sent hibernate cmd to DSP\n", | ||
173 | __func__); | ||
174 | target_pwr_state = PWRDM_POWER_OFF; | ||
175 | } else { | ||
176 | sm_interrupt_dsp(dev_context, MBX_PM_DSPRETENTION); | ||
177 | target_pwr_state = PWRDM_POWER_RET; | ||
178 | } | ||
179 | break; | ||
180 | case BRD_RETENTION: | ||
181 | omap_mbox_save_ctx(dev_context->mbox); | ||
182 | if (dsp_test_sleepstate == PWRDM_POWER_OFF) { | ||
183 | sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE); | ||
184 | target_pwr_state = PWRDM_POWER_OFF; | ||
185 | } else | ||
186 | return 0; | ||
187 | break; | ||
188 | case BRD_HIBERNATION: | ||
189 | case BRD_DSP_HIBERNATION: | ||
190 | /* Already in Hibernation, so just return */ | ||
191 | dev_dbg(bridge, "PM: %s - DSP already in hibernation\n", | ||
192 | __func__); | ||
193 | return 0; | ||
194 | case BRD_STOPPED: | ||
195 | dev_dbg(bridge, "PM: %s - Board in STOP state\n", __func__); | ||
196 | return 0; | ||
197 | default: | ||
198 | dev_dbg(bridge, "PM: %s - Bridge in Illegal state\n", __func__); | ||
199 | return -EPERM; | ||
200 | } | ||
201 | |||
202 | /* Get the PRCM DSP power domain status */ | ||
203 | pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) & | ||
204 | OMAP_POWERSTATEST_MASK; | ||
205 | |||
206 | /* Wait for DSP to move into target power state */ | ||
207 | while ((pwr_state != target_pwr_state) && --timeout) { | ||
208 | if (msleep_interruptible(10)) { | ||
209 | pr_err("Waiting for DSP to Suspend interrupted\n"); | ||
210 | return -EPERM; | ||
211 | } | ||
212 | pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, | ||
213 | OMAP2_PM_PWSTST) & | ||
214 | OMAP_POWERSTATEST_MASK; | ||
215 | } | ||
216 | |||
217 | if (!timeout) { | ||
218 | pr_err("%s: Timed out waiting for DSP off mode, state %x\n", | ||
219 | __func__, pwr_state); | ||
220 | #ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR | ||
221 | dev_get_deh_mgr(dev_context->dev_obj, &hdeh_mgr); | ||
222 | bridge_deh_notify(hdeh_mgr, DSP_PWRERROR, 0); | ||
223 | #endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */ | ||
224 | return -ETIMEDOUT; | ||
225 | } else { | ||
226 | /* Update the Bridge driver state */ | ||
227 | if (dsp_test_sleepstate == PWRDM_POWER_OFF) | ||
228 | dev_context->brd_state = BRD_HIBERNATION; | ||
229 | else | ||
230 | dev_context->brd_state = BRD_RETENTION; | ||
231 | |||
232 | /* Disable wdt on hibernation. */ | ||
233 | dsp_wdt_enable(false); | ||
234 | |||
235 | /* Turn off DSP Peripheral clocks */ | ||
236 | status = dsp_clock_disable_all(dev_context->dsp_per_clks); | ||
237 | if (status) | ||
238 | return status; | ||
239 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
240 | else if (target_pwr_state == PWRDM_POWER_OFF) { | ||
241 | /* | ||
242 | * Set the OPP to low level before moving to OFF mode | ||
243 | */ | ||
244 | if (pdata->dsp_set_min_opp) | ||
245 | (*pdata->dsp_set_min_opp) (VDD1_OPP1); | ||
246 | } | ||
247 | #endif /* CONFIG_TIDSPBRIDGE_DVFS */ | ||
248 | } | ||
249 | #endif /* CONFIG_PM */ | ||
250 | return status; | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * ======== wake_dsp ======== | ||
255 | * Wake up DSP from sleep. | ||
256 | */ | ||
257 | int wake_dsp(struct bridge_dev_context *dev_context, void *pargs) | ||
258 | { | ||
259 | int status = 0; | ||
260 | #ifdef CONFIG_PM | ||
261 | |||
262 | /* Check the board state; if the board is not asleep, just return */ | ||
263 | if (dev_context->brd_state == BRD_RUNNING || | ||
264 | dev_context->brd_state == BRD_STOPPED) { | ||
265 | /* The board is already running or stopped, so there is | ||
266 | * nothing to wake up; just return */ | ||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | /* Send a wakeup message to DSP */ | ||
271 | sm_interrupt_dsp(dev_context, MBX_PM_DSPWAKEUP); | ||
272 | |||
273 | /* Set the device state to RUNNING */ | ||
274 | dev_context->brd_state = BRD_RUNNING; | ||
275 | #endif /* CONFIG_PM */ | ||
276 | return status; | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * ======== dsp_peripheral_clk_ctrl ======== | ||
281 | * Enable/Disable the DSP peripheral clocks as needed. | ||
282 | */ | ||
283 | int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context, | ||
284 | void *pargs) | ||
285 | { | ||
286 | u32 ext_clk = 0; | ||
287 | u32 ext_clk_id = 0; | ||
288 | u32 ext_clk_cmd = 0; | ||
289 | u32 clk_id_index = MBX_PM_MAX_RESOURCES; | ||
290 | u32 tmp_index; | ||
291 | u32 dsp_per_clks_before; | ||
292 | int status = 0; | ||
293 | |||
294 | dsp_per_clks_before = dev_context->dsp_per_clks; | ||
295 | |||
296 | ext_clk = (u32) *((u32 *) pargs); | ||
297 | ext_clk_id = ext_clk & MBX_PM_CLK_IDMASK; | ||
298 | |||
299 | /* process the power message -- TODO, keep it in a separate function */ | ||
300 | for (tmp_index = 0; tmp_index < MBX_PM_MAX_RESOURCES; tmp_index++) { | ||
301 | if (ext_clk_id == bpwr_clkid[tmp_index]) { | ||
302 | clk_id_index = tmp_index; | ||
303 | break; | ||
304 | } | ||
305 | } | ||
306 | /* TODO -- An assert may be too hard a restriction here. Maybe we should | ||
307 | * just return with failure when the CLK ID does not match */ | ||
308 | if (clk_id_index == MBX_PM_MAX_RESOURCES) { | ||
309 | /* return with a more meaningful error code */ | ||
310 | return -EPERM; | ||
311 | } | ||
312 | ext_clk_cmd = (ext_clk >> MBX_PM_CLK_CMDSHIFT) & MBX_PM_CLK_CMDMASK; | ||
313 | switch (ext_clk_cmd) { | ||
314 | case BPWR_DISABLE_CLOCK: | ||
315 | status = dsp_clk_disable(bpwr_clks[clk_id_index].clk); | ||
316 | dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, | ||
317 | false); | ||
318 | if (!status) { | ||
319 | (dev_context->dsp_per_clks) &= | ||
320 | (~((u32) (1 << bpwr_clks[clk_id_index].clk))); | ||
321 | } | ||
322 | break; | ||
323 | case BPWR_ENABLE_CLOCK: | ||
324 | status = dsp_clk_enable(bpwr_clks[clk_id_index].clk); | ||
325 | dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, true); | ||
326 | if (!status) | ||
327 | (dev_context->dsp_per_clks) |= | ||
328 | (1 << bpwr_clks[clk_id_index].clk); | ||
329 | break; | ||
330 | default: | ||
331 | dev_dbg(bridge, "%s: Unsupported CMD\n", __func__); | ||
332 | /* unsupported cmd */ | ||
333 | /* TODO -- provide support for AUTOIDLE Enable/Disable | ||
334 | * commands */ | ||
335 | } | ||
336 | return status; | ||
337 | } | ||
338 | |||
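
The power message handled by dsp_peripheral_clk_ctrl() above packs a clock identifier and a command into a single mailbox word, and the TODO in the code asks for the decoding to live in its own function. Below is a minimal sketch of such a helper; it reuses the MBX_PM_* masks/shifts and the bpwr_clkid[] table from the removed code, while the helper name and signature are invented for illustration only.

	static int decode_pm_clk_msg(u32 msg, u32 *clk_index, u32 *cmd)
	{
		u32 clk_id = msg & MBX_PM_CLK_IDMASK;
		u32 i;

		for (i = 0; i < MBX_PM_MAX_RESOURCES; i++) {
			if (clk_id == bpwr_clkid[i]) {
				*clk_index = i;
				*cmd = (msg >> MBX_PM_CLK_CMDSHIFT) &
					MBX_PM_CLK_CMDMASK;
				return 0;
			}
		}
		/* Unknown clock ID: mirror the original's -EPERM return */
		return -EPERM;
	}
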
339 | /* | ||
340 | * ========pre_scale_dsp======== | ||
341 | * Sends prescale notification to DSP | ||
342 | * | ||
343 | */ | ||
344 | int pre_scale_dsp(struct bridge_dev_context *dev_context, void *pargs) | ||
345 | { | ||
346 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
347 | u32 level; | ||
348 | u32 voltage_domain; | ||
349 | |||
350 | voltage_domain = *((u32 *) pargs); | ||
351 | level = *((u32 *) pargs + 1); | ||
352 | |||
353 | dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n", | ||
354 | __func__, voltage_domain, level); | ||
355 | if ((dev_context->brd_state == BRD_HIBERNATION) || | ||
356 | (dev_context->brd_state == BRD_RETENTION) || | ||
357 | (dev_context->brd_state == BRD_DSP_HIBERNATION)) { | ||
358 | dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n", __func__); | ||
359 | return 0; | ||
360 | } else if (dev_context->brd_state == BRD_RUNNING) { | ||
361 | /* Send a prenotification to DSP */ | ||
362 | dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__); | ||
363 | sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY); | ||
364 | return 0; | ||
365 | } else { | ||
366 | return -EPERM; | ||
367 | } | ||
368 | #endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */ | ||
369 | return 0; | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * ========post_scale_dsp======== | ||
374 | * Sends postscale notification to DSP | ||
375 | * | ||
376 | */ | ||
377 | int post_scale_dsp(struct bridge_dev_context *dev_context, | ||
378 | void *pargs) | ||
379 | { | ||
380 | int status = 0; | ||
381 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
382 | u32 level; | ||
383 | u32 voltage_domain; | ||
384 | struct io_mgr *hio_mgr; | ||
385 | |||
386 | status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr); | ||
387 | if (!hio_mgr) | ||
388 | return -EFAULT; | ||
389 | |||
390 | voltage_domain = *((u32 *) pargs); | ||
391 | level = *((u32 *) pargs + 1); | ||
392 | dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n", | ||
393 | __func__, voltage_domain, level); | ||
394 | if ((dev_context->brd_state == BRD_HIBERNATION) || | ||
395 | (dev_context->brd_state == BRD_RETENTION) || | ||
396 | (dev_context->brd_state == BRD_DSP_HIBERNATION)) { | ||
397 | /* Update the OPP value in shared memory */ | ||
398 | io_sh_msetting(hio_mgr, SHM_CURROPP, &level); | ||
399 | dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n", | ||
400 | __func__); | ||
401 | } else if (dev_context->brd_state == BRD_RUNNING) { | ||
402 | /* Update the OPP value in shared memory */ | ||
403 | io_sh_msetting(hio_mgr, SHM_CURROPP, &level); | ||
404 | /* Send a post notification to DSP */ | ||
405 | sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_POSTNOTIFY); | ||
406 | dev_dbg(bridge, | ||
407 | "OPP: %s wrote to shm. Sent post notification to DSP\n", | ||
408 | __func__); | ||
409 | } else { | ||
410 | status = -EPERM; | ||
411 | } | ||
412 | #endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */ | ||
413 | return status; | ||
414 | } | ||
415 | |||
416 | void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable) | ||
417 | { | ||
418 | struct cfg_hostres *resources; | ||
419 | int status = 0; | ||
420 | u32 iva2_grpsel; | ||
421 | u32 mpu_grpsel; | ||
422 | struct dev_object *hdev_object = NULL; | ||
423 | struct bridge_dev_context *bridge_context = NULL; | ||
424 | |||
425 | hdev_object = (struct dev_object *)drv_get_first_dev_object(); | ||
426 | if (!hdev_object) | ||
427 | return; | ||
428 | |||
429 | status = dev_get_bridge_context(hdev_object, &bridge_context); | ||
430 | if (!bridge_context) | ||
431 | return; | ||
432 | |||
433 | resources = bridge_context->resources; | ||
434 | if (!resources) | ||
435 | return; | ||
436 | |||
437 | switch (clock_id) { | ||
438 | case BPWR_GP_TIMER5: | ||
439 | iva2_grpsel = readl(resources->per_pm_base + 0xA8); | ||
440 | mpu_grpsel = readl(resources->per_pm_base + 0xA4); | ||
441 | if (enable) { | ||
442 | iva2_grpsel |= OMAP3430_GRPSEL_GPT5_MASK; | ||
443 | mpu_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK; | ||
444 | } else { | ||
445 | mpu_grpsel |= OMAP3430_GRPSEL_GPT5_MASK; | ||
446 | iva2_grpsel &= ~OMAP3430_GRPSEL_GPT5_MASK; | ||
447 | } | ||
448 | writel(iva2_grpsel, resources->per_pm_base + 0xA8); | ||
449 | writel(mpu_grpsel, resources->per_pm_base + 0xA4); | ||
450 | break; | ||
451 | case BPWR_GP_TIMER6: | ||
452 | iva2_grpsel = readl(resources->per_pm_base + 0xA8); | ||
453 | mpu_grpsel = readl(resources->per_pm_base + 0xA4); | ||
454 | if (enable) { | ||
455 | iva2_grpsel |= OMAP3430_GRPSEL_GPT6_MASK; | ||
456 | mpu_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK; | ||
457 | } else { | ||
458 | mpu_grpsel |= OMAP3430_GRPSEL_GPT6_MASK; | ||
459 | iva2_grpsel &= ~OMAP3430_GRPSEL_GPT6_MASK; | ||
460 | } | ||
461 | writel(iva2_grpsel, resources->per_pm_base + 0xA8); | ||
462 | writel(mpu_grpsel, resources->per_pm_base + 0xA4); | ||
463 | break; | ||
464 | case BPWR_GP_TIMER7: | ||
465 | iva2_grpsel = readl(resources->per_pm_base + 0xA8); | ||
466 | mpu_grpsel = readl(resources->per_pm_base + 0xA4); | ||
467 | if (enable) { | ||
468 | iva2_grpsel |= OMAP3430_GRPSEL_GPT7_MASK; | ||
469 | mpu_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK; | ||
470 | } else { | ||
471 | mpu_grpsel |= OMAP3430_GRPSEL_GPT7_MASK; | ||
472 | iva2_grpsel &= ~OMAP3430_GRPSEL_GPT7_MASK; | ||
473 | } | ||
474 | writel(iva2_grpsel, resources->per_pm_base + 0xA8); | ||
475 | writel(mpu_grpsel, resources->per_pm_base + 0xA4); | ||
476 | break; | ||
477 | case BPWR_GP_TIMER8: | ||
478 | iva2_grpsel = readl(resources->per_pm_base + 0xA8); | ||
479 | mpu_grpsel = readl(resources->per_pm_base + 0xA4); | ||
480 | if (enable) { | ||
481 | iva2_grpsel |= OMAP3430_GRPSEL_GPT8_MASK; | ||
482 | mpu_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK; | ||
483 | } else { | ||
484 | mpu_grpsel |= OMAP3430_GRPSEL_GPT8_MASK; | ||
485 | iva2_grpsel &= ~OMAP3430_GRPSEL_GPT8_MASK; | ||
486 | } | ||
487 | writel(iva2_grpsel, resources->per_pm_base + 0xA8); | ||
488 | writel(mpu_grpsel, resources->per_pm_base + 0xA4); | ||
489 | break; | ||
490 | case BPWR_MCBSP1: | ||
491 | iva2_grpsel = readl(resources->core_pm_base + 0xA8); | ||
492 | mpu_grpsel = readl(resources->core_pm_base + 0xA4); | ||
493 | if (enable) { | ||
494 | iva2_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK; | ||
495 | mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK; | ||
496 | } else { | ||
497 | mpu_grpsel |= OMAP3430_GRPSEL_MCBSP1_MASK; | ||
498 | iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP1_MASK; | ||
499 | } | ||
500 | writel(iva2_grpsel, resources->core_pm_base + 0xA8); | ||
501 | writel(mpu_grpsel, resources->core_pm_base + 0xA4); | ||
502 | break; | ||
503 | case BPWR_MCBSP2: | ||
504 | iva2_grpsel = readl(resources->per_pm_base + 0xA8); | ||
505 | mpu_grpsel = readl(resources->per_pm_base + 0xA4); | ||
506 | if (enable) { | ||
507 | iva2_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK; | ||
508 | mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK; | ||
509 | } else { | ||
510 | mpu_grpsel |= OMAP3430_GRPSEL_MCBSP2_MASK; | ||
511 | iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP2_MASK; | ||
512 | } | ||
513 | writel(iva2_grpsel, resources->per_pm_base + 0xA8); | ||
514 | writel(mpu_grpsel, resources->per_pm_base + 0xA4); | ||
515 | break; | ||
516 | case BPWR_MCBSP3: | ||
517 | iva2_grpsel = readl(resources->per_pm_base + 0xA8); | ||
518 | mpu_grpsel = readl(resources->per_pm_base + 0xA4); | ||
519 | if (enable) { | ||
520 | iva2_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK; | ||
521 | mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK; | ||
522 | } else { | ||
523 | mpu_grpsel |= OMAP3430_GRPSEL_MCBSP3_MASK; | ||
524 | iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP3_MASK; | ||
525 | } | ||
526 | writel(iva2_grpsel, resources->per_pm_base + 0xA8); | ||
527 | writel(mpu_grpsel, resources->per_pm_base + 0xA4); | ||
528 | break; | ||
529 | case BPWR_MCBSP4: | ||
530 | iva2_grpsel = readl(resources->per_pm_base + 0xA8); | ||
531 | mpu_grpsel = readl(resources->per_pm_base + 0xA4); | ||
532 | if (enable) { | ||
533 | iva2_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK; | ||
534 | mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK; | ||
535 | } else { | ||
536 | mpu_grpsel |= OMAP3430_GRPSEL_MCBSP4_MASK; | ||
537 | iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP4_MASK; | ||
538 | } | ||
539 | writel(iva2_grpsel, resources->per_pm_base + 0xA8); | ||
540 | writel(mpu_grpsel, resources->per_pm_base + 0xA4); | ||
541 | break; | ||
542 | case BPWR_MCBSP5: | ||
543 | iva2_grpsel = readl(resources->per_pm_base + 0xA8); | ||
544 | mpu_grpsel = readl(resources->per_pm_base + 0xA4); | ||
545 | if (enable) { | ||
546 | iva2_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK; | ||
547 | mpu_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK; | ||
548 | } else { | ||
549 | mpu_grpsel |= OMAP3430_GRPSEL_MCBSP5_MASK; | ||
550 | iva2_grpsel &= ~OMAP3430_GRPSEL_MCBSP5_MASK; | ||
551 | } | ||
552 | writel(iva2_grpsel, resources->per_pm_base + 0xA8); | ||
553 | writel(mpu_grpsel, resources->per_pm_base + 0xA4); | ||
554 | break; | ||
555 | } | ||
556 | } | ||
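
dsp_clk_wakeup_event_ctrl() above repeats the same read-modify-write of the IVA2 and MPU group-select registers (offsets 0xA8 and 0xA4) for every clock; only the GRPSEL mask and the PRM base (PER for the timers and McBSP2-5, CORE for McBSP1) differ. A table-driven helper would collapse the switch. The sketch below is illustrative only: the struct, table and helper names are made up, the masks, offsets and cfg_hostres fields come from the code above, and it assumes per_pm_base/core_pm_base are __iomem pointers, as the readl()/writel() calls suggest.

	struct grpsel_entry {
		u32 clk_id;		/* BPWR_* identifier */
		u32 mask;		/* OMAP3430_GRPSEL_*_MASK */
		bool core_pm;		/* true: core_pm_base, false: per_pm_base */
	};

	static const struct grpsel_entry grpsel_table[] = {
		{ BPWR_GP_TIMER5, OMAP3430_GRPSEL_GPT5_MASK,   false },
		{ BPWR_GP_TIMER6, OMAP3430_GRPSEL_GPT6_MASK,   false },
		{ BPWR_MCBSP1,    OMAP3430_GRPSEL_MCBSP1_MASK, true  },
		/* ... remaining timers and McBSPs ... */
	};

	static void grpsel_route(struct cfg_hostres *res,
				 const struct grpsel_entry *e, bool to_iva2)
	{
		void __iomem *base = e->core_pm ? res->core_pm_base
						: res->per_pm_base;
		u32 iva2 = readl(base + 0xA8);
		u32 mpu = readl(base + 0xA4);

		if (to_iva2) {		/* route the wakeup event to the DSP */
			iva2 |= e->mask;
			mpu &= ~e->mask;
		} else {		/* route it back to the MPU */
			mpu |= e->mask;
			iva2 &= ~e->mask;
		}
		writel(iva2, base + 0xA8);
		writel(mpu, base + 0xA4);
	}
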
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c deleted file mode 100644 index 28364672c7f8..000000000000 --- a/drivers/staging/tidspbridge/core/tiomap_io.c +++ /dev/null | |||
@@ -1,440 +0,0 @@ | |||
1 | /* | ||
2 | * tiomap_io.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Implementation for the io read/write routines. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/platform_data/dsp-omap.h> | ||
20 | |||
21 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
22 | #include <dspbridge/dbdefs.h> | ||
23 | |||
24 | /* ----------------------------------- Platform Manager */ | ||
25 | #include <dspbridge/dev.h> | ||
26 | #include <dspbridge/drv.h> | ||
27 | |||
28 | /* ----------------------------------- OS Adaptation Layer */ | ||
29 | #include <dspbridge/wdt.h> | ||
30 | |||
31 | /* ----------------------------------- specific to this file */ | ||
32 | #include "_tiomap.h" | ||
33 | #include "_tiomap_pwr.h" | ||
34 | #include "tiomap_io.h" | ||
35 | |||
36 | static u32 ul_ext_base; | ||
37 | static u32 ul_ext_end; | ||
38 | |||
39 | static u32 shm0_end; | ||
40 | static u32 ul_dyn_ext_base; | ||
41 | static u32 ul_trace_sec_beg; | ||
42 | static u32 ul_trace_sec_end; | ||
43 | static u32 ul_shm_base_virt; | ||
44 | |||
45 | bool symbols_reloaded = true; | ||
46 | |||
47 | /* | ||
48 | * ======== read_ext_dsp_data ======== | ||
49 | * Copies DSP external memory buffers to the host side buffers. | ||
50 | */ | ||
51 | int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt, | ||
52 | u8 *host_buff, u32 dsp_addr, | ||
53 | u32 ul_num_bytes, u32 mem_type) | ||
54 | { | ||
55 | int status = 0; | ||
56 | struct bridge_dev_context *dev_context = dev_ctxt; | ||
57 | u32 offset; | ||
58 | u32 ul_tlb_base_virt = 0; | ||
59 | u32 ul_shm_offset_virt = 0; | ||
60 | u32 dw_ext_prog_virt_mem; | ||
61 | u32 dw_base_addr = dev_context->dsp_ext_base_addr; | ||
62 | bool trace_read = false; | ||
63 | |||
64 | if (!ul_shm_base_virt) { | ||
65 | status = dev_get_symbol(dev_context->dev_obj, | ||
66 | SHMBASENAME, &ul_shm_base_virt); | ||
67 | } | ||
68 | |||
69 | /* Check if it is a read of Trace section */ | ||
70 | if (!status && !ul_trace_sec_beg) { | ||
71 | status = dev_get_symbol(dev_context->dev_obj, | ||
72 | DSP_TRACESEC_BEG, &ul_trace_sec_beg); | ||
73 | } | ||
74 | |||
75 | if (!status && !ul_trace_sec_end) { | ||
76 | status = dev_get_symbol(dev_context->dev_obj, | ||
77 | DSP_TRACESEC_END, &ul_trace_sec_end); | ||
78 | } | ||
79 | |||
80 | if (!status) { | ||
81 | if ((dsp_addr <= ul_trace_sec_end) && | ||
82 | (dsp_addr >= ul_trace_sec_beg)) | ||
83 | trace_read = true; | ||
84 | } | ||
85 | |||
86 | /* If reading from TRACE, force remap/unmap */ | ||
87 | if (trace_read && dw_base_addr) { | ||
88 | dw_base_addr = 0; | ||
89 | dev_context->dsp_ext_base_addr = 0; | ||
90 | } | ||
91 | |||
92 | if (!dw_base_addr) { | ||
93 | /* Initialize ul_ext_base and ul_ext_end */ | ||
94 | ul_ext_base = 0; | ||
95 | ul_ext_end = 0; | ||
96 | |||
97 | /* Get DYNEXT_BEG, EXT_BEG and EXT_END. */ | ||
98 | if (!status && !ul_dyn_ext_base) { | ||
99 | status = dev_get_symbol(dev_context->dev_obj, | ||
100 | DYNEXTBASE, &ul_dyn_ext_base); | ||
101 | } | ||
102 | |||
103 | if (!status) { | ||
104 | status = dev_get_symbol(dev_context->dev_obj, | ||
105 | EXTBASE, &ul_ext_base); | ||
106 | } | ||
107 | |||
108 | if (!status) { | ||
109 | status = dev_get_symbol(dev_context->dev_obj, | ||
110 | EXTEND, &ul_ext_end); | ||
111 | } | ||
112 | |||
113 | /* Trace buffer is right after the shm SEG0, | ||
114 | * so set the base address to SHMBASE */ | ||
115 | if (trace_read) { | ||
116 | ul_ext_base = ul_shm_base_virt; | ||
117 | ul_ext_end = ul_trace_sec_end; | ||
118 | } | ||
119 | |||
120 | |||
121 | if (ul_ext_end < ul_ext_base) | ||
122 | status = -EPERM; | ||
123 | |||
124 | if (!status) { | ||
125 | ul_tlb_base_virt = | ||
126 | dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE; | ||
127 | dw_ext_prog_virt_mem = | ||
128 | dev_context->atlb_entry[0].gpp_va; | ||
129 | |||
130 | if (!trace_read) { | ||
131 | ul_shm_offset_virt = | ||
132 | ul_shm_base_virt - ul_tlb_base_virt; | ||
133 | ul_shm_offset_virt += | ||
134 | PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base + | ||
135 | 1, HW_PAGE_SIZE64KB); | ||
136 | dw_ext_prog_virt_mem -= ul_shm_offset_virt; | ||
137 | dw_ext_prog_virt_mem += | ||
138 | (ul_ext_base - ul_dyn_ext_base); | ||
139 | dev_context->dsp_ext_base_addr = | ||
140 | dw_ext_prog_virt_mem; | ||
141 | |||
142 | /* | ||
143 | * This dsp_ext_base_addr will get cleared | ||
144 | * only when the board is stopped. | ||
145 | */ | ||
146 | if (!dev_context->dsp_ext_base_addr) | ||
147 | status = -EPERM; | ||
148 | } | ||
149 | |||
150 | dw_base_addr = dw_ext_prog_virt_mem; | ||
151 | } | ||
152 | } | ||
153 | |||
154 | if (!dw_base_addr || !ul_ext_base || !ul_ext_end) | ||
155 | status = -EPERM; | ||
156 | |||
157 | offset = dsp_addr - ul_ext_base; | ||
158 | |||
159 | if (!status) | ||
160 | memcpy(host_buff, (u8 *) dw_base_addr + offset, ul_num_bytes); | ||
161 | |||
162 | return status; | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * ======== write_dsp_data ======== | ||
167 | * purpose: | ||
168 | * Copies buffers to the DSP internal/external memory. | ||
169 | */ | ||
170 | int write_dsp_data(struct bridge_dev_context *dev_context, | ||
171 | u8 *host_buff, u32 dsp_addr, u32 ul_num_bytes, | ||
172 | u32 mem_type) | ||
173 | { | ||
174 | u32 offset; | ||
175 | u32 dw_base_addr = dev_context->dsp_base_addr; | ||
176 | struct cfg_hostres *resources = dev_context->resources; | ||
177 | int status = 0; | ||
178 | u32 base1, base2, base3; | ||
179 | |||
180 | base1 = OMAP_DSP_MEM1_SIZE; | ||
181 | base2 = OMAP_DSP_MEM2_BASE - OMAP_DSP_MEM1_BASE; | ||
182 | base3 = OMAP_DSP_MEM3_BASE - OMAP_DSP_MEM1_BASE; | ||
183 | |||
184 | if (!resources) | ||
185 | return -EPERM; | ||
186 | |||
187 | offset = dsp_addr - dev_context->dsp_start_add; | ||
188 | if (offset < base1) { | ||
189 | dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[2], | ||
190 | resources->mem_length[2]); | ||
191 | } else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) { | ||
192 | dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[3], | ||
193 | resources->mem_length[3]); | ||
194 | offset = offset - base2; | ||
195 | } else if (offset >= base2 + OMAP_DSP_MEM2_SIZE && | ||
196 | offset < base3 + OMAP_DSP_MEM3_SIZE) { | ||
197 | dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[4], | ||
198 | resources->mem_length[4]); | ||
199 | offset = offset - base3; | ||
200 | } else { | ||
201 | return -EPERM; | ||
202 | } | ||
203 | if (ul_num_bytes) | ||
204 | memcpy((u8 *) (dw_base_addr + offset), host_buff, ul_num_bytes); | ||
205 | else | ||
206 | *((u32 *) host_buff) = dw_base_addr + offset; | ||
207 | |||
208 | return status; | ||
209 | } | ||
210 | |||
211 | /* | ||
212 | * ======== write_ext_dsp_data ======== | ||
213 | * purpose: | ||
214 | * Copies buffers to the external memory. | ||
215 | * | ||
216 | */ | ||
217 | int write_ext_dsp_data(struct bridge_dev_context *dev_context, | ||
218 | u8 *host_buff, u32 dsp_addr, | ||
219 | u32 ul_num_bytes, u32 mem_type, | ||
220 | bool dynamic_load) | ||
221 | { | ||
222 | u32 dw_base_addr = dev_context->dsp_ext_base_addr; | ||
223 | u32 dw_offset = 0; | ||
224 | u8 temp_byte1, temp_byte2; | ||
225 | u8 remain_byte[4]; | ||
226 | s32 i; | ||
227 | int ret = 0; | ||
228 | u32 dw_ext_prog_virt_mem; | ||
229 | u32 ul_tlb_base_virt = 0; | ||
230 | u32 ul_shm_offset_virt = 0; | ||
231 | struct cfg_hostres *host_res = dev_context->resources; | ||
232 | bool trace_load = false; | ||
233 | |||
234 | temp_byte1 = 0x0; | ||
235 | temp_byte2 = 0x0; | ||
236 | |||
237 | if (symbols_reloaded) { | ||
238 | /* Check if it is a load to Trace section */ | ||
239 | ret = dev_get_symbol(dev_context->dev_obj, | ||
240 | DSP_TRACESEC_BEG, &ul_trace_sec_beg); | ||
241 | if (!ret) | ||
242 | ret = dev_get_symbol(dev_context->dev_obj, | ||
243 | DSP_TRACESEC_END, | ||
244 | &ul_trace_sec_end); | ||
245 | } | ||
246 | if (!ret) { | ||
247 | if ((dsp_addr <= ul_trace_sec_end) && | ||
248 | (dsp_addr >= ul_trace_sec_beg)) | ||
249 | trace_load = true; | ||
250 | } | ||
251 | |||
252 | /* If dynamic, force remap/unmap */ | ||
253 | if ((dynamic_load || trace_load) && dw_base_addr) { | ||
254 | dw_base_addr = 0; | ||
255 | MEM_UNMAP_LINEAR_ADDRESS((void *) | ||
256 | dev_context->dsp_ext_base_addr); | ||
257 | dev_context->dsp_ext_base_addr = 0x0; | ||
258 | } | ||
259 | if (!dw_base_addr) { | ||
260 | if (symbols_reloaded) | ||
261 | /* Get SHM_BEG EXT_BEG and EXT_END. */ | ||
262 | ret = dev_get_symbol(dev_context->dev_obj, | ||
263 | SHMBASENAME, &ul_shm_base_virt); | ||
264 | if (dynamic_load) { | ||
265 | if (!ret) { | ||
266 | if (symbols_reloaded) | ||
267 | ret = | ||
268 | dev_get_symbol | ||
269 | (dev_context->dev_obj, DYNEXTBASE, | ||
270 | &ul_ext_base); | ||
271 | } | ||
272 | if (!ret) { | ||
273 | /* DR OMAPS00013235 : DLModules array may be | ||
274 | * in EXTMEM. It is expected that DYNEXTMEM and | ||
275 | * EXTMEM are contiguous, so checking for the | ||
276 | * upper bound at EXTEND should be Ok. */ | ||
277 | if (symbols_reloaded) | ||
278 | ret = | ||
279 | dev_get_symbol | ||
280 | (dev_context->dev_obj, EXTEND, | ||
281 | &ul_ext_end); | ||
282 | } | ||
283 | } else { | ||
284 | if (symbols_reloaded) { | ||
285 | if (!ret) | ||
286 | ret = | ||
287 | dev_get_symbol | ||
288 | (dev_context->dev_obj, EXTBASE, | ||
289 | &ul_ext_base); | ||
290 | if (!ret) | ||
291 | ret = | ||
292 | dev_get_symbol | ||
293 | (dev_context->dev_obj, EXTEND, | ||
294 | &ul_ext_end); | ||
295 | } | ||
296 | } | ||
297 | /* Trace buffer is right after the shm SEG0, so set the | ||
298 | * base address to SHMBASE */ | ||
299 | if (trace_load) | ||
300 | ul_ext_base = ul_shm_base_virt; | ||
301 | |||
302 | if (ul_ext_end < ul_ext_base) | ||
303 | ret = -EPERM; | ||
304 | |||
305 | if (!ret) { | ||
306 | ul_tlb_base_virt = | ||
307 | dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE; | ||
308 | |||
309 | if (symbols_reloaded) { | ||
310 | ret = dev_get_symbol | ||
311 | (dev_context->dev_obj, | ||
312 | DSP_TRACESEC_END, &shm0_end); | ||
313 | if (!ret) { | ||
314 | ret = | ||
315 | dev_get_symbol | ||
316 | (dev_context->dev_obj, DYNEXTBASE, | ||
317 | &ul_dyn_ext_base); | ||
318 | } | ||
319 | } | ||
320 | ul_shm_offset_virt = | ||
321 | ul_shm_base_virt - ul_tlb_base_virt; | ||
322 | if (trace_load) { | ||
323 | dw_ext_prog_virt_mem = | ||
324 | dev_context->atlb_entry[0].gpp_va; | ||
325 | } else { | ||
326 | dw_ext_prog_virt_mem = host_res->mem_base[1]; | ||
327 | dw_ext_prog_virt_mem += | ||
328 | (ul_ext_base - ul_dyn_ext_base); | ||
329 | } | ||
330 | |||
331 | dev_context->dsp_ext_base_addr = | ||
332 | (u32) MEM_LINEAR_ADDRESS((void *) | ||
333 | dw_ext_prog_virt_mem, | ||
334 | ul_ext_end - ul_ext_base); | ||
335 | dw_base_addr += dev_context->dsp_ext_base_addr; | ||
336 | /* This dsp_ext_base_addr will get cleared only when | ||
337 | * the board is stopped. */ | ||
338 | if (!dev_context->dsp_ext_base_addr) | ||
339 | ret = -EPERM; | ||
340 | } | ||
341 | } | ||
342 | if (!dw_base_addr || !ul_ext_base || !ul_ext_end) | ||
343 | ret = -EPERM; | ||
344 | |||
345 | if (!ret) { | ||
346 | for (i = 0; i < 4; i++) | ||
347 | remain_byte[i] = 0x0; | ||
348 | |||
349 | dw_offset = dsp_addr - ul_ext_base; | ||
350 | /* Also make sure the dsp_addr is < ul_ext_end */ | ||
351 | if (dsp_addr > ul_ext_end || dw_offset > dsp_addr) | ||
352 | ret = -EPERM; | ||
353 | } | ||
354 | if (!ret) { | ||
355 | if (ul_num_bytes) | ||
356 | memcpy((u8 *) dw_base_addr + dw_offset, host_buff, | ||
357 | ul_num_bytes); | ||
358 | else | ||
359 | *((u32 *) host_buff) = dw_base_addr + dw_offset; | ||
360 | } | ||
361 | /* Unmap here to force remap for other Ext loads */ | ||
362 | if ((dynamic_load || trace_load) && dev_context->dsp_ext_base_addr) { | ||
363 | MEM_UNMAP_LINEAR_ADDRESS((void *) | ||
364 | dev_context->dsp_ext_base_addr); | ||
365 | dev_context->dsp_ext_base_addr = 0x0; | ||
366 | } | ||
367 | symbols_reloaded = false; | ||
368 | return ret; | ||
369 | } | ||
370 | |||
371 | int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val) | ||
372 | { | ||
373 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
374 | u32 opplevel = 0; | ||
375 | #endif | ||
376 | struct omap_dsp_platform_data *pdata = | ||
377 | omap_dspbridge_dev->dev.platform_data; | ||
378 | struct cfg_hostres *resources = dev_context->resources; | ||
379 | int status = 0; | ||
380 | u32 temp; | ||
381 | |||
382 | if (!dev_context->mbox) | ||
383 | return 0; | ||
384 | |||
385 | if (!resources) | ||
386 | return -EPERM; | ||
387 | |||
388 | if (dev_context->brd_state == BRD_DSP_HIBERNATION || | ||
389 | dev_context->brd_state == BRD_HIBERNATION) { | ||
390 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
391 | if (pdata->dsp_get_opp) | ||
392 | opplevel = (*pdata->dsp_get_opp) (); | ||
393 | if (opplevel == VDD1_OPP1) { | ||
394 | if (pdata->dsp_set_min_opp) | ||
395 | (*pdata->dsp_set_min_opp) (VDD1_OPP2); | ||
396 | } | ||
397 | #endif | ||
398 | /* Restart the peripheral clocks */ | ||
399 | dsp_clock_enable_all(dev_context->dsp_per_clks); | ||
400 | dsp_wdt_enable(true); | ||
401 | |||
402 | /* | ||
403 | * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control | ||
404 | * in CM_AUTOIDLE_PLL_IVA2 register | ||
405 | */ | ||
406 | (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT, | ||
407 | OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL); | ||
408 | |||
409 | /* | ||
410 | * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal frequency set to | ||
411 | * 0.75 MHz - 1.0 MHz | ||
412 | * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode | ||
413 | */ | ||
414 | (*pdata->dsp_cm_rmw_bits)(OMAP3430_IVA2_DPLL_FREQSEL_MASK | | ||
415 | OMAP3430_EN_IVA2_DPLL_MASK, | ||
416 | 0x3 << OMAP3430_IVA2_DPLL_FREQSEL_SHIFT | | ||
417 | 0x7 << OMAP3430_EN_IVA2_DPLL_SHIFT, | ||
418 | OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL); | ||
419 | |||
420 | /* Restore mailbox settings */ | ||
421 | omap_mbox_restore_ctx(dev_context->mbox); | ||
422 | |||
423 | /* Access MMU SYS CONFIG register to generate a short wakeup */ | ||
424 | temp = readl(resources->dmmu_base + 0x10); | ||
425 | |||
426 | dev_context->brd_state = BRD_RUNNING; | ||
427 | } else if (dev_context->brd_state == BRD_RETENTION) { | ||
428 | /* Restart the peripheral clocks */ | ||
429 | dsp_clock_enable_all(dev_context->dsp_per_clks); | ||
430 | } | ||
431 | |||
432 | status = omap_mbox_msg_send(dev_context->mbox, mb_val); | ||
433 | |||
434 | if (status) { | ||
435 | pr_err("omap_mbox_msg_send Fail and status = %d\n", status); | ||
436 | status = -EPERM; | ||
437 | } | ||
438 | |||
439 | return status; | ||
440 | } | ||
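
read_ext_dsp_data() and write_ext_dsp_data() above repeat the same pattern for every DSP-side symbol they need (SHM_BEG, EXT_BEG, EXT_END, the trace markers): call dev_get_symbol() only when the cached static is still zero. A small wrapper would make the pattern explicit; dev_get_symbol() is the real driver call, while get_cached_symbol() is a name invented here as a sketch.

	static int get_cached_symbol(struct dev_object *dev, const char *name,
				     u32 *cached)
	{
		if (*cached)		/* already resolved on an earlier call */
			return 0;
		return dev_get_symbol(dev, name, cached);
	}

	/* e.g. in read_ext_dsp_data():
	 *	status = get_cached_symbol(dev_context->dev_obj, SHMBASENAME,
	 *				   &ul_shm_base_virt);
	 */
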
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.h b/drivers/staging/tidspbridge/core/tiomap_io.h deleted file mode 100644 index a3f19c7b79f3..000000000000 --- a/drivers/staging/tidspbridge/core/tiomap_io.h +++ /dev/null | |||
@@ -1,104 +0,0 @@ | |||
1 | /* | ||
2 | * tiomap_io.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Definitions, types and function prototypes for the io (r/w external mem). | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef _TIOMAP_IO_ | ||
20 | #define _TIOMAP_IO_ | ||
21 | |||
22 | /* | ||
23 | * Symbol that defines beginning of shared memory. | ||
24 | * For OMAP (Helen) this is the DSP Virtual base address of SDRAM. | ||
25 | * This will be used to program DSP MMU to map DSP Virt to GPP phys. | ||
26 | * (see dspMmuTlbEntry()). | ||
27 | */ | ||
28 | #define SHMBASENAME "SHM_BEG" | ||
29 | #define EXTBASE "EXT_BEG" | ||
30 | #define EXTEND "_EXT_END" | ||
31 | #define DYNEXTBASE "_DYNEXT_BEG" | ||
32 | #define DYNEXTEND "_DYNEXT_END" | ||
33 | #define IVAEXTMEMBASE "_IVAEXTMEM_BEG" | ||
34 | #define IVAEXTMEMEND "_IVAEXTMEM_END" | ||
35 | |||
36 | #define DSP_TRACESEC_BEG "_BRIDGE_TRACE_BEG" | ||
37 | #define DSP_TRACESEC_END "_BRIDGE_TRACE_END" | ||
38 | |||
39 | #define SYS_PUTCBEG "_SYS_PUTCBEG" | ||
40 | #define SYS_PUTCEND "_SYS_PUTCEND" | ||
41 | #define BRIDGE_SYS_PUTC_CURRENT "_BRIDGE_SYS_PUTC_current" | ||
42 | |||
43 | #define WORDSWAP_ENABLE 0x3 /* Enable word swap */ | ||
44 | |||
45 | /* | ||
46 | * ======== read_ext_dsp_data ======== | ||
47 | * Reads it from DSP External memory. The external memory for the DSP | ||
48 | * is configured by the combination of DSP MMU and shm Memory manager in the CDB | ||
49 | */ | ||
50 | extern int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt, | ||
51 | u8 *host_buff, u32 dsp_addr, | ||
52 | u32 ul_num_bytes, u32 mem_type); | ||
53 | |||
54 | /* | ||
55 | * ======== write_dsp_data ======== | ||
56 | */ | ||
57 | extern int write_dsp_data(struct bridge_dev_context *dev_context, | ||
58 | u8 *host_buff, u32 dsp_addr, | ||
59 | u32 ul_num_bytes, u32 mem_type); | ||
60 | |||
61 | /* | ||
62 | * ======== write_ext_dsp_data ======== | ||
63 | * Writes to the DSP External memory for external program. | ||
64 | * The ext mem for program is configured by the combination of DSP MMU and | ||
65 | * shm Memory manager in the CDB | ||
66 | */ | ||
67 | extern int write_ext_dsp_data(struct bridge_dev_context *dev_context, | ||
68 | u8 *host_buff, u32 dsp_addr, | ||
69 | u32 ul_num_bytes, u32 mem_type, | ||
70 | bool dynamic_load); | ||
71 | |||
72 | /* | ||
73 | * ======== write_ext32_bit_dsp_data ======== | ||
74 | * Writes 32 bit data to the external memory | ||
75 | */ | ||
76 | extern inline void write_ext32_bit_dsp_data(const | ||
77 | struct bridge_dev_context *dev_context, | ||
78 | u32 dsp_addr, u32 val) | ||
79 | { | ||
80 | *(u32 *) dsp_addr = ((dev_context->tc_word_swap_on) ? (((val << 16) & | ||
81 | 0xFFFF0000) | | ||
82 | ((val >> 16) & | ||
83 | 0x0000FFFF)) : | ||
84 | val); | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * ======== read_ext32_bit_dsp_data ======== | ||
89 | * Reads 32 bit data from the external memory | ||
90 | */ | ||
91 | extern inline u32 read_ext32_bit_dsp_data(const struct bridge_dev_context | ||
92 | *dev_context, u32 dsp_addr) | ||
93 | { | ||
94 | u32 ret; | ||
95 | ret = *(u32 *) dsp_addr; | ||
96 | |||
97 | ret = ((dev_context->tc_word_swap_on) ? (((ret << 16) | ||
98 | & 0xFFFF0000) | ((ret >> 16) & | ||
99 | 0x0000FFFF)) | ||
100 | : ret); | ||
101 | return ret; | ||
102 | } | ||
103 | |||
104 | #endif /* _TIOMAP_IO_ */ | ||
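
The two inline helpers above implement the TC word swap: when tc_word_swap_on is set, the 32-bit value is rotated by 16 bits so the upper and lower halves trade places. A worked value, for illustration:

	val     = 0x12345678
	swapped = ((val << 16) & 0xFFFF0000) | ((val >> 16) & 0x0000FFFF)
	        = 0x56781234

Reading the same location back through read_ext32_bit_dsp_data() applies the identical rotation and recovers 0x12345678, so the swap is its own inverse.
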
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c deleted file mode 100644 index e68f0ba8e12b..000000000000 --- a/drivers/staging/tidspbridge/core/ue_deh.c +++ /dev/null | |||
@@ -1,272 +0,0 @@ | |||
1 | /* | ||
2 | * ue_deh.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Implements upper edge DSP exception handling (DEH) functions. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * Copyright (C) 2010 Felipe Contreras | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | |||
23 | #include <dspbridge/dbdefs.h> | ||
24 | #include <dspbridge/dspdeh.h> | ||
25 | #include <dspbridge/dev.h> | ||
26 | #include "_tiomap.h" | ||
27 | #include "_deh.h" | ||
28 | |||
29 | #include <dspbridge/io_sm.h> | ||
30 | #include <dspbridge/drv.h> | ||
31 | #include <dspbridge/wdt.h> | ||
32 | |||
33 | static u32 fault_addr; | ||
34 | |||
35 | static void mmu_fault_dpc(unsigned long data) | ||
36 | { | ||
37 | struct deh_mgr *deh = (void *)data; | ||
38 | |||
39 | if (!deh) | ||
40 | return; | ||
41 | |||
42 | bridge_deh_notify(deh, DSP_MMUFAULT, 0); | ||
43 | } | ||
44 | |||
45 | static irqreturn_t mmu_fault_isr(int irq, void *data) | ||
46 | { | ||
47 | struct deh_mgr *deh = data; | ||
48 | struct cfg_hostres *resources; | ||
49 | u32 event; | ||
50 | |||
51 | if (!deh) | ||
52 | return IRQ_HANDLED; | ||
53 | |||
54 | resources = deh->bridge_context->resources; | ||
55 | if (!resources) { | ||
56 | dev_dbg(bridge, "%s: Failed to get Host Resources\n", | ||
57 | __func__); | ||
58 | return IRQ_HANDLED; | ||
59 | } | ||
60 | |||
61 | hw_mmu_event_status(resources->dmmu_base, &event); | ||
62 | if (event == HW_MMU_TRANSLATION_FAULT) { | ||
63 | hw_mmu_fault_addr_read(resources->dmmu_base, &fault_addr); | ||
64 | dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__, | ||
65 | event, fault_addr); | ||
66 | /* | ||
67 | * Schedule a DPC directly. In the future, it may be | ||
68 | * necessary to check if DSP MMU fault is intended for | ||
69 | * Bridge. | ||
70 | */ | ||
71 | tasklet_schedule(&deh->dpc_tasklet); | ||
72 | |||
73 | /* Disable the MMU events, else once we clear it will | ||
74 | * start to raise INTs again */ | ||
75 | hw_mmu_event_disable(resources->dmmu_base, | ||
76 | HW_MMU_TRANSLATION_FAULT); | ||
77 | } else { | ||
78 | hw_mmu_event_disable(resources->dmmu_base, | ||
79 | HW_MMU_ALL_INTERRUPTS); | ||
80 | } | ||
81 | return IRQ_HANDLED; | ||
82 | } | ||
83 | |||
84 | int bridge_deh_create(struct deh_mgr **ret_deh, | ||
85 | struct dev_object *hdev_obj) | ||
86 | { | ||
87 | int status; | ||
88 | struct deh_mgr *deh; | ||
89 | struct bridge_dev_context *hbridge_context = NULL; | ||
90 | |||
91 | /* Message manager will be created when a file is loaded, since | ||
92 | * size of message buffer in shared memory is configurable in | ||
93 | * the base image. */ | ||
94 | /* Get Bridge context info. */ | ||
95 | dev_get_bridge_context(hdev_obj, &hbridge_context); | ||
96 | /* Allocate the DEH manager object */ | ||
97 | deh = kzalloc(sizeof(*deh), GFP_KERNEL); | ||
98 | if (!deh) { | ||
99 | status = -ENOMEM; | ||
100 | goto err; | ||
101 | } | ||
102 | |||
103 | /* Create an NTFY object to manage notifications */ | ||
104 | deh->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); | ||
105 | if (!deh->ntfy_obj) { | ||
106 | status = -ENOMEM; | ||
107 | goto err; | ||
108 | } | ||
109 | ntfy_init(deh->ntfy_obj); | ||
110 | |||
111 | /* Create a MMUfault DPC */ | ||
112 | tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh); | ||
113 | |||
114 | /* Fill in context structure */ | ||
115 | deh->bridge_context = hbridge_context; | ||
116 | |||
117 | /* Install ISR function for DSP MMU fault */ | ||
118 | status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0, | ||
119 | "DspBridge\tiommu fault", deh); | ||
120 | if (status < 0) | ||
121 | goto err; | ||
122 | |||
123 | *ret_deh = deh; | ||
124 | return 0; | ||
125 | |||
126 | err: | ||
127 | bridge_deh_destroy(deh); | ||
128 | *ret_deh = NULL; | ||
129 | return status; | ||
130 | } | ||
131 | |||
132 | int bridge_deh_destroy(struct deh_mgr *deh) | ||
133 | { | ||
134 | if (!deh) | ||
135 | return -EFAULT; | ||
136 | |||
137 | /* If notification object exists, delete it */ | ||
138 | if (deh->ntfy_obj) { | ||
139 | ntfy_delete(deh->ntfy_obj); | ||
140 | kfree(deh->ntfy_obj); | ||
141 | } | ||
142 | /* Disable DSP MMU fault */ | ||
143 | free_irq(INT_DSP_MMU_IRQ, deh); | ||
144 | |||
145 | /* Free DPC object */ | ||
146 | tasklet_kill(&deh->dpc_tasklet); | ||
147 | |||
148 | /* Deallocate the DEH manager object */ | ||
149 | kfree(deh); | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask, | ||
155 | u32 notify_type, | ||
156 | struct dsp_notification *hnotification) | ||
157 | { | ||
158 | if (!deh) | ||
159 | return -EFAULT; | ||
160 | |||
161 | if (event_mask) | ||
162 | return ntfy_register(deh->ntfy_obj, hnotification, | ||
163 | event_mask, notify_type); | ||
164 | else | ||
165 | return ntfy_unregister(deh->ntfy_obj, hnotification); | ||
166 | } | ||
167 | |||
168 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
169 | static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) | ||
170 | { | ||
171 | struct cfg_hostres *resources; | ||
172 | struct hw_mmu_map_attrs_t map_attrs = { | ||
173 | .endianism = HW_LITTLE_ENDIAN, | ||
174 | .element_size = HW_ELEM_SIZE16BIT, | ||
175 | .mixed_size = HW_MMU_CPUES, | ||
176 | }; | ||
177 | void *dummy_va_addr; | ||
178 | |||
179 | resources = dev_context->resources; | ||
180 | dummy_va_addr = (void *)__get_free_page(GFP_ATOMIC); | ||
181 | |||
182 | /* | ||
183 | * Before acking the MMU fault, let's make sure MMU can only | ||
184 | * access entry #0. Then add a new entry so that the DSP OS | ||
185 | * can continue in order to dump the stack. | ||
186 | */ | ||
187 | hw_mmu_twl_disable(resources->dmmu_base); | ||
188 | hw_mmu_tlb_flush_all(resources->dmmu_base); | ||
189 | |||
190 | hw_mmu_tlb_add(resources->dmmu_base, | ||
191 | virt_to_phys(dummy_va_addr), fault_addr, | ||
192 | HW_PAGE_SIZE4KB, 1, | ||
193 | &map_attrs, HW_SET, HW_SET); | ||
194 | |||
195 | dsp_clk_enable(DSP_CLK_GPT8); | ||
196 | |||
197 | dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); | ||
198 | |||
199 | /* Clear MMU interrupt */ | ||
200 | hw_mmu_event_ack(resources->dmmu_base, | ||
201 | HW_MMU_TRANSLATION_FAULT); | ||
202 | dump_dsp_stack(dev_context); | ||
203 | dsp_clk_disable(DSP_CLK_GPT8); | ||
204 | |||
205 | hw_mmu_disable(resources->dmmu_base); | ||
206 | free_page((unsigned long)dummy_va_addr); | ||
207 | } | ||
208 | #endif | ||
209 | |||
210 | static inline const char *event_to_string(int event) | ||
211 | { | ||
212 | switch (event) { | ||
213 | case DSP_SYSERROR: return "DSP_SYSERROR"; | ||
214 | case DSP_MMUFAULT: return "DSP_MMUFAULT"; | ||
215 | case DSP_PWRERROR: return "DSP_PWRERROR"; | ||
216 | case DSP_WDTOVERFLOW: return "DSP_WDTOVERFLOW"; | ||
217 | default: return "unknown event"; | ||
218 | } | ||
219 | } | ||
220 | |||
221 | void bridge_deh_notify(struct deh_mgr *deh, int event, int info) | ||
222 | { | ||
223 | struct bridge_dev_context *dev_context; | ||
224 | const char *str = event_to_string(event); | ||
225 | |||
226 | if (!deh) | ||
227 | return; | ||
228 | |||
229 | dev_dbg(bridge, "%s: device exception", __func__); | ||
230 | dev_context = deh->bridge_context; | ||
231 | |||
232 | switch (event) { | ||
233 | case DSP_SYSERROR: | ||
234 | dev_err(bridge, "%s: %s, info=0x%x", __func__, | ||
235 | str, info); | ||
236 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
237 | dump_dl_modules(dev_context); | ||
238 | dump_dsp_stack(dev_context); | ||
239 | #endif | ||
240 | break; | ||
241 | case DSP_MMUFAULT: | ||
242 | dev_err(bridge, "%s: %s, addr=0x%x", __func__, | ||
243 | str, fault_addr); | ||
244 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
245 | print_dsp_trace_buffer(dev_context); | ||
246 | dump_dl_modules(dev_context); | ||
247 | mmu_fault_print_stack(dev_context); | ||
248 | #endif | ||
249 | break; | ||
250 | default: | ||
251 | dev_err(bridge, "%s: %s", __func__, str); | ||
252 | break; | ||
253 | } | ||
254 | |||
255 | /* Filter subsequent notifications when an error occurs */ | ||
256 | if (dev_context->brd_state != BRD_ERROR) { | ||
257 | ntfy_notify(deh->ntfy_obj, event); | ||
258 | #ifdef CONFIG_TIDSPBRIDGE_RECOVERY | ||
259 | bridge_recover_schedule(); | ||
260 | #endif | ||
261 | } | ||
262 | |||
263 | /* Set the Board state as ERROR */ | ||
264 | dev_context->brd_state = BRD_ERROR; | ||
265 | /* Disable all the clocks that were enabled by DSP */ | ||
266 | dsp_clock_disable_all(dev_context->dsp_per_clks); | ||
267 | /* | ||
268 | * Avoid the subsequent WDT if it happens once, | ||
269 | * also if fatal error occurs. | ||
270 | */ | ||
271 | dsp_wdt_enable(false); | ||
272 | } | ||
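
bridge_deh_register_notify() above is the hook through which clients subscribe to the exceptions that bridge_deh_notify() later raises. A minimal usage sketch follows, assuming a deh manager obtained from bridge_deh_create(); the event codes come from the switch in this file, while the DSP_SIGNALEVENT notify type and the wrapper function are assumptions made for illustration.

	static int deh_subscribe_example(struct deh_mgr *deh)
	{
		/* in real code the notification object must outlive the
		 * registration; a stack variable is only for the sketch */
		struct dsp_notification notification = { 0 };

		/* DSP_SIGNALEVENT as notify_type is an assumption here */
		return bridge_deh_register_notify(deh,
						  DSP_SYSERROR | DSP_MMUFAULT,
						  DSP_SIGNALEVENT,
						  &notification);
	}
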
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c deleted file mode 100644 index b19f887dfd88..000000000000 --- a/drivers/staging/tidspbridge/core/wdt.c +++ /dev/null | |||
@@ -1,144 +0,0 @@ | |||
1 | /* | ||
2 | * wdt.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Watchdog timer (WDT3) support for the DSP Bridge driver. | ||
7 | * | ||
8 | * Copyright (C) 2010 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | #include <linux/types.h> | ||
19 | |||
20 | #include <dspbridge/dbdefs.h> | ||
21 | #include <dspbridge/dspdeh.h> | ||
22 | #include <dspbridge/dev.h> | ||
23 | #include <dspbridge/_chnl_sm.h> | ||
24 | #include <dspbridge/wdt.h> | ||
25 | #include <dspbridge/host_os.h> | ||
26 | |||
27 | |||
28 | #define OMAP34XX_WDT3_BASE (0x49000000 + 0x30000) | ||
29 | #define INT_34XX_WDT3_IRQ (36 + NR_IRQS) | ||
30 | |||
31 | static struct dsp_wdt_setting dsp_wdt; | ||
32 | |||
33 | void dsp_wdt_dpc(unsigned long data) | ||
34 | { | ||
35 | struct deh_mgr *deh_mgr; | ||
36 | |||
37 | dev_get_deh_mgr(dev_get_first(), &deh_mgr); | ||
38 | if (deh_mgr) | ||
39 | bridge_deh_notify(deh_mgr, DSP_WDTOVERFLOW, 0); | ||
40 | } | ||
41 | |||
42 | irqreturn_t dsp_wdt_isr(int irq, void *data) | ||
43 | { | ||
44 | u32 value; | ||
45 | /* ack wdt3 interrupt */ | ||
46 | value = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET); | ||
47 | __raw_writel(value, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET); | ||
48 | |||
49 | tasklet_schedule(&dsp_wdt.wdt3_tasklet); | ||
50 | return IRQ_HANDLED; | ||
51 | } | ||
52 | |||
53 | int dsp_wdt_init(void) | ||
54 | { | ||
55 | int ret = 0; | ||
56 | |||
57 | dsp_wdt.sm_wdt = NULL; | ||
58 | dsp_wdt.reg_base = ioremap(OMAP34XX_WDT3_BASE, SZ_4K); | ||
59 | if (!dsp_wdt.reg_base) | ||
60 | return -ENOMEM; | ||
61 | |||
62 | tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0); | ||
63 | |||
64 | dsp_wdt.fclk = clk_get(NULL, "wdt3_fck"); | ||
65 | |||
66 | if (!IS_ERR(dsp_wdt.fclk)) { | ||
67 | clk_prepare(dsp_wdt.fclk); | ||
68 | |||
69 | dsp_wdt.iclk = clk_get(NULL, "wdt3_ick"); | ||
70 | if (IS_ERR(dsp_wdt.iclk)) { | ||
71 | clk_put(dsp_wdt.fclk); | ||
72 | dsp_wdt.fclk = NULL; | ||
73 | ret = -EFAULT; | ||
74 | } else { | ||
75 | clk_prepare(dsp_wdt.iclk); | ||
76 | } | ||
77 | } else | ||
78 | ret = -EFAULT; | ||
79 | |||
80 | if (!ret) | ||
81 | ret = request_irq(INT_34XX_WDT3_IRQ, dsp_wdt_isr, 0, | ||
82 | "dsp_wdt", &dsp_wdt); | ||
83 | |||
84 | /* Disable at this moment, it will be enabled when DSP starts */ | ||
85 | if (!ret) | ||
86 | disable_irq(INT_34XX_WDT3_IRQ); | ||
87 | |||
88 | return ret; | ||
89 | } | ||
90 | |||
91 | void dsp_wdt_sm_set(void *data) | ||
92 | { | ||
93 | dsp_wdt.sm_wdt = data; | ||
94 | dsp_wdt.sm_wdt->wdt_overflow = 5; /* in seconds */ | ||
95 | } | ||
96 | |||
97 | |||
98 | void dsp_wdt_exit(void) | ||
99 | { | ||
100 | free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt); | ||
101 | tasklet_kill(&dsp_wdt.wdt3_tasklet); | ||
102 | |||
103 | if (dsp_wdt.fclk) { | ||
104 | clk_unprepare(dsp_wdt.fclk); | ||
105 | clk_put(dsp_wdt.fclk); | ||
106 | } | ||
107 | if (dsp_wdt.iclk) { | ||
108 | clk_unprepare(dsp_wdt.iclk); | ||
109 | clk_put(dsp_wdt.iclk); | ||
110 | } | ||
111 | |||
112 | dsp_wdt.fclk = NULL; | ||
113 | dsp_wdt.iclk = NULL; | ||
114 | dsp_wdt.sm_wdt = NULL; | ||
115 | |||
116 | if (dsp_wdt.reg_base) | ||
117 | iounmap(dsp_wdt.reg_base); | ||
118 | dsp_wdt.reg_base = NULL; | ||
119 | } | ||
120 | |||
121 | void dsp_wdt_enable(bool enable) | ||
122 | { | ||
123 | u32 tmp; | ||
124 | static bool wdt_enable; | ||
125 | |||
126 | if (wdt_enable == enable || !dsp_wdt.fclk || !dsp_wdt.iclk) | ||
127 | return; | ||
128 | |||
129 | wdt_enable = enable; | ||
130 | |||
131 | if (enable) { | ||
132 | clk_enable(dsp_wdt.fclk); | ||
133 | clk_enable(dsp_wdt.iclk); | ||
134 | dsp_wdt.sm_wdt->wdt_setclocks = 1; | ||
135 | tmp = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET); | ||
136 | __raw_writel(tmp, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET); | ||
137 | enable_irq(INT_34XX_WDT3_IRQ); | ||
138 | } else { | ||
139 | disable_irq(INT_34XX_WDT3_IRQ); | ||
140 | dsp_wdt.sm_wdt->wdt_setclocks = 0; | ||
141 | clk_disable(dsp_wdt.iclk); | ||
142 | clk_disable(dsp_wdt.fclk); | ||
143 | } | ||
144 | } | ||
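
The watchdog helpers above form a small lifecycle: dsp_wdt_init() maps WDT3 and claims its clocks and IRQ, dsp_wdt_sm_set() points the code at the watchdog control words in shared memory, and dsp_wdt_enable() gates the clocks and IRQ as the DSP starts and stops. An ordering sketch, where the caller and the shared-memory pointer are hypothetical:

	static int wdt_lifecycle_example(void *shm_wdt_block)
	{
		int ret;

		ret = dsp_wdt_init();	/* ioremap WDT3, get clocks, request IRQ */
		if (ret)
			return ret;

		dsp_wdt_sm_set(shm_wdt_block);	/* control words in shared memory */
		dsp_wdt_enable(true);		/* DSP running: clocks on, IRQ on */

		/* ... DSP firmware must service WDT3, otherwise dsp_wdt_isr()
		 * fires and bridge_deh_notify(DSP_WDTOVERFLOW) is raised ... */

		dsp_wdt_enable(false);		/* DSP hibernating or stopped */
		dsp_wdt_exit();			/* free IRQ, kill tasklet, drop clocks */
		return 0;
	}
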
diff --git a/drivers/staging/tidspbridge/dynload/cload.c b/drivers/staging/tidspbridge/dynload/cload.c deleted file mode 100644 index 83f2106ff8a7..000000000000 --- a/drivers/staging/tidspbridge/dynload/cload.c +++ /dev/null | |||
@@ -1,1959 +0,0 @@ | |||
1 | /* | ||
2 | * cload.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #include <linux/slab.h> | ||
18 | |||
19 | #include "header.h" | ||
20 | |||
21 | #include "module_list.h" | ||
22 | #define LINKER_MODULES_HEADER ("_" MODULES_HEADER) | ||
23 | |||
24 | /* | ||
25 | * forward references | ||
26 | */ | ||
27 | static void dload_symbols(struct dload_state *dlthis); | ||
28 | static void dload_data(struct dload_state *dlthis); | ||
29 | static void allocate_sections(struct dload_state *dlthis); | ||
30 | static void string_table_free(struct dload_state *dlthis); | ||
31 | static void symbol_table_free(struct dload_state *dlthis); | ||
32 | static void section_table_free(struct dload_state *dlthis); | ||
33 | static void init_module_handle(struct dload_state *dlthis); | ||
34 | #if BITS_PER_AU > BITS_PER_BYTE | ||
35 | static char *unpack_name(struct dload_state *dlthis, u32 soffset); | ||
36 | #endif | ||
37 | |||
38 | static const char cinitname[] = { ".cinit" }; | ||
39 | static const char loader_dllview_root[] = { "?DLModules?" }; | ||
40 | |||
41 | /* | ||
42 | * Error strings | ||
43 | */ | ||
44 | static const char readstrm[] = { "Error reading %s from input stream" }; | ||
45 | static const char err_alloc[] = { "Syms->dload_allocate( %d ) failed" }; | ||
46 | static const char tgtalloc[] = { | ||
47 | "Target memory allocate failed, section %s size " FMT_UI32 }; | ||
48 | static const char initfail[] = { "%s to target address " FMT_UI32 " failed" }; | ||
49 | static const char dlvwrite[] = { "Write to DLLview list failed" }; | ||
50 | static const char iconnect[] = { "Connect call to init interface failed" }; | ||
51 | static const char err_checksum[] = { "Checksum failed on %s" }; | ||
52 | |||
53 | /************************************************************************* | ||
54 | * Procedure dload_error | ||
55 | * | ||
56 | * Parameters: | ||
57 | * errtxt description of the error, printf style | ||
58 | * ... additional information | ||
59 | * | ||
60 | * Effect: | ||
61 | * Reports or records the error as appropriate. | ||
62 | *********************************************************************** */ | ||
63 | void dload_error(struct dload_state *dlthis, const char *errtxt, ...) | ||
64 | { | ||
65 | va_list args; | ||
66 | |||
67 | va_start(args, errtxt); | ||
68 | dlthis->mysym->error_report(dlthis->mysym, errtxt, args); | ||
69 | va_end(args); | ||
70 | dlthis->dload_errcount += 1; | ||
71 | |||
72 | } /* dload_error */ | ||
73 | |||
74 | #define DL_ERROR(zza, zzb) dload_error(dlthis, zza, zzb) | ||
75 | |||
76 | /************************************************************************* | ||
77 | * Procedure dload_syms_error | ||
78 | * | ||
79 | * Parameters: | ||
80 | * errtxt description of the error, printf style | ||
81 | * ... additional information | ||
82 | * | ||
83 | * Effect: | ||
84 | * Reports or records the error as appropriate. | ||
85 | *********************************************************************** */ | ||
86 | void dload_syms_error(struct dynamic_loader_sym *syms, const char *errtxt, ...) | ||
87 | { | ||
88 | va_list args; | ||
89 | |||
90 | va_start(args, errtxt); | ||
91 | syms->error_report(syms, errtxt, args); | ||
92 | va_end(args); | ||
93 | } | ||
94 | |||
95 | /************************************************************************* | ||
96 | * Procedure dynamic_load_module | ||
97 | * | ||
98 | * Parameters: | ||
99 | * module The input stream that supplies the module image | ||
100 | * syms Host-side symbol table and malloc/free functions | ||
101 | * alloc Target-side memory allocation | ||
102 | * init Target-side memory initialization | ||
103 | * options Option flags DLOAD_* | ||
104 | * mhandle A module handle for use with Dynamic_Unload | ||
105 | * | ||
106 | * Effect: | ||
107 | * The module image is read using *module. Target storage for the new | ||
108 | * image is | ||
109 | * obtained from *alloc. Symbols defined and referenced by the module are | ||
110 | * managed using *syms. The image is then relocated and references | ||
111 | * resolved as necessary, and the resulting executable bits are placed | ||
112 | * into target memory using *init. | ||
113 | * | ||
114 | * Returns: | ||
115 | * On a successful load, a module handle is placed in *mhandle, | ||
116 | * and zero is returned. On error, the number of errors detected is | ||
117 | * returned. Individual errors are reported during the load process | ||
118 | * using syms->error_report(). | ||
119 | ********************************************************************** */ | ||
120 | int dynamic_load_module(struct dynamic_loader_stream *module, | ||
121 | struct dynamic_loader_sym *syms, | ||
122 | struct dynamic_loader_allocate *alloc, | ||
123 | struct dynamic_loader_initialize *init, | ||
124 | unsigned options, void **mhandle) | ||
125 | { | ||
126 | register unsigned *dp, sz; | ||
127 | struct dload_state dl_state; /* internal state for this call */ | ||
128 | |||
129 | /* blast our internal state */ | ||
130 | dp = (unsigned *)&dl_state; | ||
131 | for (sz = sizeof(dl_state) / sizeof(unsigned); sz > 0; sz -= 1) | ||
132 | *dp++ = 0; | ||
133 | |||
134 | /* Enable _only_ BSS initialization if enabled by user */ | ||
135 | if ((options & DLOAD_INITBSS) == DLOAD_INITBSS) | ||
136 | dl_state.myoptions = DLOAD_INITBSS; | ||
137 | |||
138 | /* Check that mandatory arguments are present */ | ||
139 | if (!module || !syms) { | ||
140 | dload_error(&dl_state, "Required parameter is NULL"); | ||
141 | } else { | ||
142 | dl_state.strm = module; | ||
143 | dl_state.mysym = syms; | ||
144 | dload_headers(&dl_state); | ||
145 | if (!dl_state.dload_errcount) | ||
146 | dload_strings(&dl_state, false); | ||
147 | if (!dl_state.dload_errcount) | ||
148 | dload_sections(&dl_state); | ||
149 | |||
150 | if (init && !dl_state.dload_errcount) { | ||
151 | if (init->connect(init)) { | ||
152 | dl_state.myio = init; | ||
153 | dl_state.myalloc = alloc; | ||
154 | /* do now, before reducing symbols */ | ||
155 | allocate_sections(&dl_state); | ||
156 | } else | ||
157 | dload_error(&dl_state, iconnect); | ||
158 | } | ||
159 | |||
160 | if (!dl_state.dload_errcount) { | ||
161 | /* fix up entry point address */ | ||
162 | unsigned sref = dl_state.dfile_hdr.df_entry_secn - 1; | ||
163 | |||
164 | if (sref < dl_state.allocated_secn_count) | ||
165 | dl_state.dfile_hdr.df_entrypt += | ||
166 | dl_state.ldr_sections[sref].run_addr; | ||
167 | |||
168 | dload_symbols(&dl_state); | ||
169 | } | ||
170 | |||
171 | if (init && !dl_state.dload_errcount) | ||
172 | dload_data(&dl_state); | ||
173 | |||
174 | init_module_handle(&dl_state); | ||
175 | |||
176 | /* dl_state.myio is init or 0 at this point. */ | ||
177 | if (dl_state.myio) { | ||
178 | if ((!dl_state.dload_errcount) && | ||
179 | (dl_state.dfile_hdr.df_entry_secn != DN_UNDEF) && | ||
180 | (!init->execute(init, | ||
181 | dl_state.dfile_hdr.df_entrypt))) | ||
182 | dload_error(&dl_state, "Init->Execute Failed"); | ||
183 | init->release(init); | ||
184 | } | ||
185 | |||
186 | symbol_table_free(&dl_state); | ||
187 | section_table_free(&dl_state); | ||
188 | string_table_free(&dl_state); | ||
189 | dload_tramp_cleanup(&dl_state); | ||
190 | |||
191 | if (dl_state.dload_errcount) { | ||
192 | dynamic_unload_module(dl_state.myhandle, syms, alloc, | ||
193 | init); | ||
194 | dl_state.myhandle = NULL; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | if (mhandle) | ||
199 | *mhandle = dl_state.myhandle; /* give back the handle */ | ||
200 | |||
201 | return dl_state.dload_errcount; | ||
202 | } /* DLOAD_File */ | ||
203 | |||
204 | /************************************************************************* | ||
205 | * Procedure dynamic_open_module | ||
206 | * | ||
207 | * Parameters: | ||
208 | * module The input stream that supplies the module image | ||
209 | * syms Host-side symbol table and malloc/free functions | ||
210 | * alloc Target-side memory allocation | ||
211 | * init Target-side memory initialization | ||
212 | * options Option flags DLOAD_* | ||
213 | * mhandle A module handle for use with Dynamic_Unload | ||
214 | * | ||
215 | * Effect: | ||
216 | * The module image is read using *module. Target storage for the new | ||
217 | * image is | ||
218 | * obtained from *alloc. Symbols defined and referenced by the module are | ||
219 | * managed using *syms. The image is then relocated and references | ||
220 | * resolved as necessary, and the resulting executable bits are placed | ||
221 | * into target memory using *init. | ||
222 | * | ||
223 | * Returns: | ||
224 | * On a successful load, a module handle is placed in *mhandle, | ||
225 | * and zero is returned. On error, the number of errors detected is | ||
226 | * returned. Individual errors are reported during the load process | ||
227 | * using syms->error_report(). | ||
228 | ********************************************************************** */ | ||
229 | int | ||
230 | dynamic_open_module(struct dynamic_loader_stream *module, | ||
231 | struct dynamic_loader_sym *syms, | ||
232 | struct dynamic_loader_allocate *alloc, | ||
233 | struct dynamic_loader_initialize *init, | ||
234 | unsigned options, void **mhandle) | ||
235 | { | ||
236 | register unsigned *dp, sz; | ||
237 | struct dload_state dl_state; /* internal state for this call */ | ||
238 | |||
239 | /* blast our internal state */ | ||
240 | dp = (unsigned *)&dl_state; | ||
241 | for (sz = sizeof(dl_state) / sizeof(unsigned); sz > 0; sz -= 1) | ||
242 | *dp++ = 0; | ||
243 | |||
244 | /* Enable _only_ BSS initialization if enabled by user */ | ||
245 | if ((options & DLOAD_INITBSS) == DLOAD_INITBSS) | ||
246 | dl_state.myoptions = DLOAD_INITBSS; | ||
247 | |||
248 | /* Check that mandatory arguments are present */ | ||
249 | if (!module || !syms) { | ||
250 | dload_error(&dl_state, "Required parameter is NULL"); | ||
251 | } else { | ||
252 | dl_state.strm = module; | ||
253 | dl_state.mysym = syms; | ||
254 | dload_headers(&dl_state); | ||
255 | if (!dl_state.dload_errcount) | ||
256 | dload_strings(&dl_state, false); | ||
257 | if (!dl_state.dload_errcount) | ||
258 | dload_sections(&dl_state); | ||
259 | |||
260 | if (init && !dl_state.dload_errcount) { | ||
261 | if (init->connect(init)) { | ||
262 | dl_state.myio = init; | ||
263 | dl_state.myalloc = alloc; | ||
264 | /* do now, before reducing symbols */ | ||
265 | allocate_sections(&dl_state); | ||
266 | } else | ||
267 | dload_error(&dl_state, iconnect); | ||
268 | } | ||
269 | |||
270 | if (!dl_state.dload_errcount) { | ||
271 | /* fix up entry point address */ | ||
272 | unsigned sref = dl_state.dfile_hdr.df_entry_secn - 1; | ||
273 | |||
274 | if (sref < dl_state.allocated_secn_count) | ||
275 | dl_state.dfile_hdr.df_entrypt += | ||
276 | dl_state.ldr_sections[sref].run_addr; | ||
277 | |||
278 | dload_symbols(&dl_state); | ||
279 | } | ||
280 | |||
281 | init_module_handle(&dl_state); | ||
282 | |||
283 | /* dl_state.myio is either 0 or init at this point. */ | ||
284 | if (dl_state.myio) { | ||
285 | if ((!dl_state.dload_errcount) && | ||
286 | (dl_state.dfile_hdr.df_entry_secn != DN_UNDEF) && | ||
287 | (!init->execute(init, | ||
288 | dl_state.dfile_hdr.df_entrypt))) | ||
289 | dload_error(&dl_state, "Init->Execute Failed"); | ||
290 | init->release(init); | ||
291 | } | ||
292 | |||
293 | symbol_table_free(&dl_state); | ||
294 | section_table_free(&dl_state); | ||
295 | string_table_free(&dl_state); | ||
296 | |||
297 | if (dl_state.dload_errcount) { | ||
298 | dynamic_unload_module(dl_state.myhandle, syms, alloc, | ||
299 | init); | ||
300 | dl_state.myhandle = NULL; | ||
301 | } | ||
302 | } | ||
303 | |||
304 | if (mhandle) | ||
305 | *mhandle = dl_state.myhandle; /* give back the handle */ | ||
306 | |||
307 | return dl_state.dload_errcount; | ||
308 | } /* DLOAD_File */ | ||
309 | |||
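
Both dynamic_load_module() and dynamic_open_module() above zero their struct dload_state by hand with a word-sized loop ("blast our internal state"). A plain memset() expresses the same intent more directly, and also clears any trailing bytes if the struct size is not a multiple of sizeof(unsigned). The two lines that would replace the loop inside either function:

	struct dload_state dl_state;

	memset(&dl_state, 0, sizeof(dl_state));	/* replaces the word loop */
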
310 | /************************************************************************* | ||
311 | * Procedure dload_headers | ||
312 | * | ||
313 | * Parameters: | ||
314 | * none | ||
315 | * | ||
316 | * Effect: | ||
317 | * Loads the DOFF header and verify record. Deals with any byte-order | ||
318 | * issues and checks them for validity. | ||
319 | *********************************************************************** */ | ||
320 | #define COMBINED_HEADER_SIZE (sizeof(struct doff_filehdr_t)+ \ | ||
321 | sizeof(struct doff_verify_rec_t)) | ||
322 | |||
323 | void dload_headers(struct dload_state *dlthis) | ||
324 | { | ||
325 | u32 map; | ||
326 | |||
327 | /* Read the header and the verify record as one. If we don't get it | ||
328 | all, we're done */ | ||
329 | if (dlthis->strm->read_buffer(dlthis->strm, &dlthis->dfile_hdr, | ||
330 | COMBINED_HEADER_SIZE) != | ||
331 | COMBINED_HEADER_SIZE) { | ||
332 | DL_ERROR(readstrm, "File Headers"); | ||
333 | return; | ||
334 | } | ||
335 | /* | ||
336 | * Verify that we have the byte order of the file correct. | ||
337 | * If not, must fix it before we can continue | ||
338 | */ | ||
339 | map = REORDER_MAP(dlthis->dfile_hdr.df_byte_reshuffle); | ||
340 | if (map != REORDER_MAP(BYTE_RESHUFFLE_VALUE)) { | ||
341 | /* input is either byte-shuffled or bad */ | ||
342 | if ((map & 0xFCFCFCFC) == 0) { /* no obviously bogus bits */ | ||
343 | dload_reorder(&dlthis->dfile_hdr, COMBINED_HEADER_SIZE, | ||
344 | map); | ||
345 | } | ||
346 | if (dlthis->dfile_hdr.df_byte_reshuffle != | ||
347 | BYTE_RESHUFFLE_VALUE) { | ||
348 | /* didn't fix the problem, the byte swap map is bad */ | ||
349 | dload_error(dlthis, | ||
350 | "Bad byte swap map " FMT_UI32 " in header", | ||
351 | dlthis->dfile_hdr.df_byte_reshuffle); | ||
352 | return; | ||
353 | } | ||
354 | dlthis->reorder_map = map; /* keep map for future use */ | ||
355 | } | ||
356 | |||
357 | /* | ||
358 | * Verify checksum of header and verify record | ||
359 | */ | ||
360 | if (~dload_checksum(&dlthis->dfile_hdr, | ||
361 | sizeof(struct doff_filehdr_t)) || | ||
362 | ~dload_checksum(&dlthis->verify, | ||
363 | sizeof(struct doff_verify_rec_t))) { | ||
364 | DL_ERROR(err_checksum, "header or verify record"); | ||
365 | return; | ||
366 | } | ||
367 | #if HOST_ENDIANNESS | ||
368 | dlthis->dfile_hdr.df_byte_reshuffle = map; /* put back for later */ | ||
369 | #endif | ||
370 | |||
371 | /* Check for valid target ID */ | ||
372 | if ((dlthis->dfile_hdr.df_target_id != TARGET_ID) && | ||
373 | (dlthis->dfile_hdr.df_target_id != TMS470_ID)) { | ||
374 | dload_error(dlthis, "Bad target ID 0x%x (expected 0x%x)", | ||
375 | dlthis->dfile_hdr.df_target_id, TARGET_ID); | ||
376 | return; | ||
377 | } | ||
378 | /* Check for valid file format */ | ||
379 | if (dlthis->dfile_hdr.df_doff_version != DOFF0) { | ||
380 | dload_error(dlthis, "Bad DOFF version 0x%x", | ||
381 | dlthis->dfile_hdr.df_doff_version); | ||
382 | return; | ||
383 | } | ||
384 | |||
385 | /* | ||
386 | * Apply reasonableness checks to count fields | ||
387 | */ | ||
388 | if (dlthis->dfile_hdr.df_strtab_size > MAX_REASONABLE_STRINGTAB) { | ||
389 | dload_error(dlthis, "Excessive string table size " FMT_UI32, | ||
390 | dlthis->dfile_hdr.df_strtab_size); | ||
391 | return; | ||
392 | } | ||
393 | if (dlthis->dfile_hdr.df_no_scns > MAX_REASONABLE_SECTIONS) { | ||
394 | dload_error(dlthis, "Excessive section count 0x%x", | ||
395 | dlthis->dfile_hdr.df_no_scns); | ||
396 | return; | ||
397 | } | ||
398 | #ifndef TARGET_ENDIANNESS | ||
399 | /* | ||
400 | * Check that endianness does not disagree with explicit specification | ||
401 | */ | ||
402 | if ((dlthis->dfile_hdr.df_flags >> ALIGN_COFF_ENDIANNESS) & | ||
403 | dlthis->myoptions & ENDIANNESS_MASK) { | ||
404 | dload_error(dlthis, | ||
405 | "Input endianness disagrees with specified option"); | ||
406 | return; | ||
407 | } | ||
408 | dlthis->big_e_target = dlthis->dfile_hdr.df_flags & DF_BIG; | ||
409 | #endif | ||
410 | |||
411 | } /* dload_headers */ | ||
412 | |||
413 | /* COFF Section Processing | ||
414 | * | ||
415 | * COFF sections are read in and retained intact. Each record is embedded | ||
416 | * in a new structure that records the updated load and | ||
417 | * run addresses of the section */ | ||
418 | |||
419 | static const char secn_errid[] = { "section" }; | ||
420 | |||
421 | /************************************************************************* | ||
422 | * Procedure dload_sections | ||
423 | * | ||
424 | * Parameters: | ||
425 | * none | ||
426 | * | ||
427 | * Effect: | ||
428 | * Loads the section records into an internal table. | ||
429 | *********************************************************************** */ | ||
430 | void dload_sections(struct dload_state *dlthis) | ||
431 | { | ||
432 | s16 siz; | ||
433 | struct doff_scnhdr_t *shp; | ||
434 | unsigned nsecs = dlthis->dfile_hdr.df_no_scns; | ||
435 | |||
436 | /* allocate space for the DOFF section records */ | ||
437 | siz = nsecs * sizeof(struct doff_scnhdr_t); | ||
438 | shp = | ||
439 | (struct doff_scnhdr_t *)dlthis->mysym->dload_allocate(dlthis->mysym, | ||
440 | siz); | ||
441 | if (!shp) { /* not enough storage */ | ||
442 | DL_ERROR(err_alloc, siz); | ||
443 | return; | ||
444 | } | ||
445 | dlthis->sect_hdrs = shp; | ||
446 | |||
447 | /* read in the section records */ | ||
448 | if (dlthis->strm->read_buffer(dlthis->strm, shp, siz) != siz) { | ||
449 | DL_ERROR(readstrm, secn_errid); | ||
450 | return; | ||
451 | } | ||
452 | |||
453 | /* if we need to fix up byte order, do it now */ | ||
454 | if (dlthis->reorder_map) | ||
455 | dload_reorder(shp, siz, dlthis->reorder_map); | ||
456 | |||
457 | /* check for validity */ | ||
458 | if (~dload_checksum(dlthis->sect_hdrs, siz) != | ||
459 | dlthis->verify.dv_scn_rec_checksum) { | ||
460 | DL_ERROR(err_checksum, secn_errid); | ||
461 | return; | ||
462 | } | ||
463 | |||
464 | } /* dload_sections */ | ||
465 | |||
466 | /***************************************************************************** | ||
467 | * Procedure allocate_sections | ||
468 | * | ||
469 | * Parameters: | ||
470 | * none (target storage is obtained through dlthis->myalloc) | ||
471 | * | ||
472 | * Effect: | ||
473 | * Assigns new (target) addresses for sections | ||
474 | **************************************************************************** */ | ||
475 | static void allocate_sections(struct dload_state *dlthis) | ||
476 | { | ||
477 | u16 curr_sect, nsecs, siz; | ||
478 | struct doff_scnhdr_t *shp; | ||
479 | struct ldr_section_info *asecs; | ||
480 | struct my_handle *hndl; | ||
481 | |||
482 | nsecs = dlthis->dfile_hdr.df_no_scns; | ||
483 | if (!nsecs) | ||
484 | return; | ||
485 | if ((dlthis->myalloc == NULL) && | ||
486 | (dlthis->dfile_hdr.df_target_scns > 0)) { | ||
487 | DL_ERROR("Arg 3 (alloc) required but NULL", 0); | ||
488 | return; | ||
489 | } | ||
490 | /* | ||
491 | * allocate space for the module handle, which we will keep for unload | ||
492 | * purposes include an additional section store for an auto-generated | ||
493 | * trampoline section in case we need it. | ||
494 | */ | ||
495 | siz = (dlthis->dfile_hdr.df_target_scns + 1) * | ||
496 | sizeof(struct ldr_section_info) + MY_HANDLE_SIZE; | ||
497 | |||
498 | hndl = | ||
499 | (struct my_handle *)dlthis->mysym->dload_allocate(dlthis->mysym, | ||
500 | siz); | ||
501 | if (!hndl) { /* not enough storage */ | ||
502 | DL_ERROR(err_alloc, siz); | ||
503 | return; | ||
504 | } | ||
505 | /* initialize the handle header */ | ||
506 | hndl->dm.next = hndl->dm.prev = hndl; /* circular list */ | ||
507 | hndl->dm.root = NULL; | ||
508 | hndl->dm.dbthis = 0; | ||
509 | dlthis->myhandle = hndl; /* save away for return */ | ||
510 | /* pointer to the section list of allocated sections */ | ||
511 | dlthis->ldr_sections = asecs = hndl->secns; | ||
512 | /* Insert names into all sections, and make copies of | ||
513 | * the sections we allocate */ | ||
514 | shp = dlthis->sect_hdrs; | ||
515 | for (curr_sect = 0; curr_sect < nsecs; curr_sect++) { | ||
516 | u32 soffset = shp->ds_offset; | ||
517 | #if BITS_PER_AU <= BITS_PER_BYTE | ||
518 | /* attempt to insert the name of this section */ | ||
519 | if (soffset < dlthis->dfile_hdr.df_strtab_size) | ||
520 | ((struct ldr_section_info *)shp)->name = | ||
521 | dlthis->str_head + soffset; | ||
522 | else { | ||
523 | dload_error(dlthis, "Bad name offset in section %d", | ||
524 | curr_sect); | ||
525 | ((struct ldr_section_info *)shp)->name = NULL; | ||
526 | } | ||
527 | #endif | ||
528 | /* allocate target storage for sections that require it */ | ||
529 | if (ds_needs_allocation(shp)) { | ||
530 | *asecs = *(struct ldr_section_info *)shp; | ||
531 | asecs->context = 0; /* zero the context field */ | ||
532 | #if BITS_PER_AU > BITS_PER_BYTE | ||
533 | asecs->name = unpack_name(dlthis, soffset); | ||
534 | dlthis->debug_string_size = soffset + dlthis->temp_len; | ||
535 | #else | ||
536 | dlthis->debug_string_size = soffset; | ||
537 | #endif | ||
538 | if (dlthis->myalloc != NULL) { | ||
539 | if (!dlthis->myalloc-> | ||
540 | dload_allocate(dlthis->myalloc, asecs, | ||
541 | ds_alignment(asecs->type))) { | ||
542 | dload_error(dlthis, tgtalloc, | ||
543 | asecs->name, asecs->size); | ||
544 | return; | ||
545 | } | ||
546 | } | ||
547 | /* keep address deltas in original section table */ | ||
548 | shp->ds_vaddr = asecs->load_addr - shp->ds_vaddr; | ||
549 | shp->ds_paddr = asecs->run_addr - shp->ds_paddr; | ||
550 | dlthis->allocated_secn_count += 1; | ||
551 | } /* allocate target storage */ | ||
552 | shp += 1; | ||
553 | asecs += 1; | ||
554 | } | ||
555 | #if BITS_PER_AU <= BITS_PER_BYTE | ||
556 | dlthis->debug_string_size += | ||
557 | strlen(dlthis->str_head + dlthis->debug_string_size) + 1; | ||
558 | #endif | ||
559 | } /* allocate_sections */ | ||
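/*
 * Editor's note: after allocation the original section headers no longer
 * hold addresses but load/run-address *deltas* (new address minus the
 * link-time address), which dload_symbols() later adds to symbol values.
 * Illustrative numbers: a section linked at ds_vaddr 0x1000 that the
 * allocator places at load_addr 0x80001000 stores the delta
 *
 *	ds_vaddr = 0x80001000 - 0x1000 = 0x80000000
 *
 * so a symbol defined in that section with dn_value 0x1234 relocates to
 * 0x1234 + 0x80000000 = 0x80001234 (the "val += delta" step in
 * dload_symbols below).
 */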
560 | |||
561 | /************************************************************************* | ||
562 | * Procedure section_table_free | ||
563 | * | ||
564 | * Parameters: | ||
565 | * none | ||
566 | * | ||
567 | * Effect: | ||
568 | * Frees any state used by the section table. | ||
569 | * | ||
570 | * WARNING: | ||
571 | * This routine is not allowed to declare errors! | ||
572 | *********************************************************************** */ | ||
573 | static void section_table_free(struct dload_state *dlthis) | ||
574 | { | ||
575 | struct doff_scnhdr_t *shp; | ||
576 | |||
577 | shp = dlthis->sect_hdrs; | ||
578 | if (shp) | ||
579 | dlthis->mysym->dload_deallocate(dlthis->mysym, shp); | ||
580 | |||
581 | } /* section_table_free */ | ||
582 | |||
583 | /************************************************************************* | ||
584 | * Procedure dload_strings | ||
585 | * | ||
586 | * Parameters: | ||
587 | * sec_names_only If true only read in the "section names" | ||
588 | * portion of the string table | ||
589 | * | ||
590 | * Effect: | ||
591 | * Loads the DOFF string table into memory. DOFF keeps all strings in a | ||
592 | * big unsorted array. We just read that array into memory in bulk. | ||
593 | *********************************************************************** */ | ||
594 | static const char stringtbl[] = { "string table" }; | ||
595 | |||
596 | void dload_strings(struct dload_state *dlthis, bool sec_names_only) | ||
597 | { | ||
598 | u32 ssiz; | ||
599 | char *strbuf; | ||
600 | |||
601 | if (sec_names_only) { | ||
602 | ssiz = BYTE_TO_HOST(DOFF_ALIGN | ||
603 | (dlthis->dfile_hdr.df_scn_name_size)); | ||
604 | } else { | ||
605 | ssiz = BYTE_TO_HOST(DOFF_ALIGN | ||
606 | (dlthis->dfile_hdr.df_strtab_size)); | ||
607 | } | ||
608 | if (ssiz == 0) | ||
609 | return; | ||
610 | |||
611 | /* get some memory for the string table */ | ||
612 | #if BITS_PER_AU > BITS_PER_BYTE | ||
613 | strbuf = (char *)dlthis->mysym->dload_allocate(dlthis->mysym, ssiz + | ||
614 | dlthis->dfile_hdr. | ||
615 | df_max_str_len); | ||
616 | #else | ||
617 | strbuf = (char *)dlthis->mysym->dload_allocate(dlthis->mysym, ssiz); | ||
618 | #endif | ||
619 | if (strbuf == NULL) { | ||
620 | DL_ERROR(err_alloc, ssiz); | ||
621 | return; | ||
622 | } | ||
623 | dlthis->str_head = strbuf; | ||
624 | #if BITS_PER_AU > BITS_PER_BYTE | ||
625 | dlthis->str_temp = strbuf + ssiz; | ||
626 | #endif | ||
627 | /* read in the strings and verify them */ | ||
628 | if ((unsigned)(dlthis->strm->read_buffer(dlthis->strm, strbuf, | ||
629 | ssiz)) != ssiz) { | ||
630 | DL_ERROR(readstrm, stringtbl); | ||
631 | } | ||
632 | /* if we need to fix up byte order, do it now */ | ||
633 | #ifndef _BIG_ENDIAN | ||
634 | if (dlthis->reorder_map) | ||
635 | dload_reorder(strbuf, ssiz, dlthis->reorder_map); | ||
636 | |||
637 | if ((!sec_names_only) && (~dload_checksum(strbuf, ssiz) != | ||
638 | dlthis->verify.dv_str_tab_checksum)) { | ||
639 | DL_ERROR(err_checksum, stringtbl); | ||
640 | } | ||
641 | #else | ||
642 | if (dlthis->dfile_hdr.df_byte_reshuffle != | ||
643 | HOST_BYTE_ORDER(REORDER_MAP(BYTE_RESHUFFLE_VALUE))) { | ||
644 | /* put strings in big-endian order, not in PC order */ | ||
645 | dload_reorder(strbuf, ssiz, | ||
646 | HOST_BYTE_ORDER(dlthis-> | ||
647 | dfile_hdr.df_byte_reshuffle)); | ||
648 | } | ||
649 | if ((!sec_names_only) && (~dload_reverse_checksum(strbuf, ssiz) != | ||
650 | dlthis->verify.dv_str_tab_checksum)) { | ||
651 | DL_ERROR(err_checksum, stringtbl); | ||
652 | } | ||
653 | #endif | ||
654 | } /* dload_strings */ | ||
655 | |||
656 | /************************************************************************* | ||
657 | * Procedure string_table_free | ||
658 | * | ||
659 | * Parameters: | ||
660 | * none | ||
661 | * | ||
662 | * Effect: | ||
663 | * Frees any state used by the string table. | ||
664 | * | ||
665 | * WARNING: | ||
666 | * This routine is not allowed to declare errors! | ||
667 | ************************************************************************ */ | ||
668 | static void string_table_free(struct dload_state *dlthis) | ||
669 | { | ||
670 | if (dlthis->str_head) | ||
671 | dlthis->mysym->dload_deallocate(dlthis->mysym, | ||
672 | dlthis->str_head); | ||
673 | |||
674 | } /* string_table_free */ | ||
675 | |||
676 | /* | ||
677 | * Symbol Table Maintenance Functions | ||
678 | * | ||
679 | * COFF symbols are read by dload_symbols(), which is called after | ||
680 | * sections have been allocated. Symbols which might be used in | ||
681 | * relocation (ie, not debug info) are retained in an internal temporary | ||
682 | * compressed table (type local_symbol). A particular symbol is recovered | ||
683 | * by index by calling dload_find_symbol(). dload_find_symbol | ||
684 | * reconstructs a more explicit representation (type SLOTVEC) which is | ||
685 | * used by reloc.c | ||
686 | */ | ||
687 | /* real size of debug header */ | ||
688 | #define DBG_HDR_SIZE (sizeof(struct dll_module) - sizeof(struct dll_sect)) | ||
689 | |||
690 | static const char sym_errid[] = { "symbol" }; | ||
691 | |||
692 | /************************************************************************** | ||
693 | * Procedure dload_symbols | ||
694 | * | ||
695 | * Parameters: | ||
696 | * none | ||
697 | * | ||
698 | * Effect: | ||
699 | * Reads in symbols and retains ones that might be needed for relocation | ||
700 | * purposes. | ||
701 | *********************************************************************** */ | ||
702 | /* size of symbol buffer no bigger than target data buffer, to limit stack | ||
703 | * usage */ | ||
704 | #define MY_SYM_BUF_SIZ (BYTE_TO_HOST(IMAGE_PACKET_SIZE)/\ | ||
705 | sizeof(struct doff_syment_t)) | ||
706 | |||
707 | static void dload_symbols(struct dload_state *dlthis) | ||
708 | { | ||
709 | u32 sym_count, siz, dsiz, symbols_left; | ||
710 | u32 checks; | ||
711 | struct local_symbol *sp; | ||
712 | struct dynload_symbol *symp; | ||
713 | struct dynload_symbol *newsym; | ||
714 | struct doff_syment_t *my_sym_buf; | ||
715 | |||
716 | sym_count = dlthis->dfile_hdr.df_no_syms; | ||
717 | if (sym_count == 0) | ||
718 | return; | ||
719 | |||
720 | /* | ||
721 | * We keep a local symbol table for all of the symbols in the input. | ||
722 | * This table contains only section & value info, as we do not have | ||
723 | * to do any name processing for locals. We reuse this storage | ||
724 | * as a temporary for .dllview record construction. | ||
725 | * Allocate storage for the whole table. Add 1 to the section count | ||
726 | * in case a trampoline section is auto-generated as well as the | ||
727 | * size of the trampoline section name so DLLView doesn't get lost. | ||
728 | */ | ||
729 | |||
730 | siz = sym_count * sizeof(struct local_symbol); | ||
731 | dsiz = DBG_HDR_SIZE + | ||
732 | (sizeof(struct dll_sect) * dlthis->allocated_secn_count) + | ||
733 | BYTE_TO_HOST_ROUND(dlthis->debug_string_size + 1); | ||
734 | if (dsiz > siz) | ||
735 | siz = dsiz; /* larger of symbols and .dllview temp */ | ||
736 | sp = (struct local_symbol *)dlthis->mysym->dload_allocate(dlthis->mysym, | ||
737 | siz); | ||
738 | if (!sp) { | ||
739 | DL_ERROR(err_alloc, siz); | ||
740 | return; | ||
741 | } | ||
742 | dlthis->local_symtab = sp; | ||
743 | /* Read the symbols in the input, store them in the table, and post any | ||
744 | * globals to the global symbol table. In the process, externals | ||
745 | * become defined from the global symbol table */ | ||
746 | checks = dlthis->verify.dv_sym_tab_checksum; | ||
747 | symbols_left = sym_count; | ||
748 | |||
749 | my_sym_buf = kzalloc(sizeof(*my_sym_buf) * MY_SYM_BUF_SIZ, GFP_KERNEL); | ||
750 | if (!my_sym_buf) | ||
751 | return; | ||
752 | |||
753 | do { /* read all symbols */ | ||
754 | char *sname; | ||
755 | u32 val; | ||
756 | s32 delta; | ||
757 | struct doff_syment_t *input_sym; | ||
758 | unsigned syms_in_buf; | ||
759 | |||
760 | input_sym = my_sym_buf; | ||
761 | syms_in_buf = symbols_left > MY_SYM_BUF_SIZ ? | ||
762 | MY_SYM_BUF_SIZ : symbols_left; | ||
763 | siz = syms_in_buf * sizeof(struct doff_syment_t); | ||
764 | if (dlthis->strm->read_buffer(dlthis->strm, input_sym, siz) != | ||
765 | siz) { | ||
766 | DL_ERROR(readstrm, sym_errid); | ||
767 | goto free_sym_buf; | ||
768 | } | ||
769 | if (dlthis->reorder_map) | ||
770 | dload_reorder(input_sym, siz, dlthis->reorder_map); | ||
771 | |||
772 | checks += dload_checksum(input_sym, siz); | ||
773 | do { /* process symbols in buffer */ | ||
774 | symbols_left -= 1; | ||
775 | /* attempt to derive the name of this symbol */ | ||
776 | sname = NULL; | ||
777 | if (input_sym->dn_offset > 0) { | ||
778 | #if BITS_PER_AU <= BITS_PER_BYTE | ||
779 | if ((u32) input_sym->dn_offset < | ||
780 | dlthis->dfile_hdr.df_strtab_size) | ||
781 | sname = dlthis->str_head + | ||
782 | BYTE_TO_HOST(input_sym->dn_offset); | ||
783 | else | ||
784 | dload_error(dlthis, | ||
785 | "Bad name offset in symbol " | ||
786 | " %d", symbols_left); | ||
787 | #else | ||
788 | sname = unpack_name(dlthis, | ||
789 | input_sym->dn_offset); | ||
790 | #endif | ||
791 | } | ||
792 | val = input_sym->dn_value; | ||
793 | delta = 0; | ||
794 | sp->sclass = input_sym->dn_sclass; | ||
795 | sp->secnn = input_sym->dn_scnum; | ||
796 | /* if this is an undefined symbol, | ||
797 | * define it (or fail) now */ | ||
798 | if (sp->secnn == DN_UNDEF) { | ||
799 | /* pointless for static undefined */ | ||
800 | if (input_sym->dn_sclass != DN_EXT) | ||
801 | goto loop_cont; | ||
802 | |||
803 | /* try to define symbol from previously | ||
804 | * loaded images */ | ||
805 | symp = dlthis->mysym->find_matching_symbol | ||
806 | (dlthis->mysym, sname); | ||
807 | if (!symp) { | ||
808 | DL_ERROR | ||
809 | ("Undefined external symbol %s", | ||
810 | sname); | ||
811 | goto loop_cont; | ||
812 | } | ||
813 | val = delta = symp->value; | ||
814 | #ifdef ENABLE_TRAMP_DEBUG | ||
815 | dload_syms_error(dlthis->mysym, | ||
816 | "===> ext sym [%s] at %x", | ||
817 | sname, val); | ||
818 | #endif | ||
819 | |||
820 | goto loop_cont; | ||
821 | } | ||
822 | /* symbol defined by this module */ | ||
823 | if (sp->secnn > 0) { | ||
824 | /* symbol references a section */ | ||
825 | if ((unsigned)sp->secnn <= | ||
826 | dlthis->allocated_secn_count) { | ||
827 | /* section was allocated */ | ||
828 | struct doff_scnhdr_t *srefp = | ||
829 | &dlthis->sect_hdrs[sp->secnn - 1]; | ||
830 | |||
831 | if (input_sym->dn_sclass == | ||
832 | DN_STATLAB || | ||
833 | input_sym->dn_sclass == DN_EXTLAB) { | ||
834 | /* load */ | ||
835 | delta = srefp->ds_vaddr; | ||
836 | } else { | ||
837 | /* run */ | ||
838 | delta = srefp->ds_paddr; | ||
839 | } | ||
840 | val += delta; | ||
841 | } | ||
842 | goto loop_itr; | ||
843 | } | ||
844 | /* This symbol is an absolute symbol */ | ||
845 | if (sp->secnn == DN_ABS && ((sp->sclass == DN_EXT) || | ||
846 | (sp->sclass == | ||
847 | DN_EXTLAB))) { | ||
848 | symp = | ||
849 | dlthis->mysym->find_matching_symbol(dlthis-> | ||
850 | mysym, | ||
851 | sname); | ||
852 | if (!symp) | ||
853 | goto loop_itr; | ||
854 | /* This absolute symbol is already defined. */ | ||
855 | if (symp->value == input_sym->dn_value) { | ||
856 | /* If symbol values are equal, continue | ||
857 | * but don't add to the global symbol | ||
858 | * table */ | ||
859 | sp->value = val; | ||
860 | sp->delta = delta; | ||
861 | sp += 1; | ||
862 | input_sym += 1; | ||
863 | continue; | ||
864 | } else { | ||
865 | /* If symbol values are not equal, | ||
866 | * return with redefinition error */ | ||
867 | DL_ERROR("Absolute symbol %s is " | ||
868 | "defined multiple times with " | ||
869 | "different values", sname); | ||
870 | goto free_sym_buf; | ||
871 | } | ||
872 | } | ||
873 | loop_itr: | ||
874 | /* if this is a global symbol, post it to the | ||
875 | * global table */ | ||
876 | if (input_sym->dn_sclass == DN_EXT || | ||
877 | input_sym->dn_sclass == DN_EXTLAB) { | ||
878 | /* Keep this global symbol for subsequent | ||
879 | * modules. Don't complain on error, to allow | ||
880 | * symbol API to suppress global symbols */ | ||
881 | if (!sname) | ||
882 | goto loop_cont; | ||
883 | |||
884 | newsym = dlthis->mysym->add_to_symbol_table | ||
885 | (dlthis->mysym, sname, | ||
886 | (unsigned)dlthis->myhandle); | ||
887 | if (newsym) | ||
888 | newsym->value = val; | ||
889 | |||
890 | } /* global */ | ||
891 | loop_cont: | ||
892 | sp->value = val; | ||
893 | sp->delta = delta; | ||
894 | sp += 1; | ||
895 | input_sym += 1; | ||
896 | } while ((syms_in_buf -= 1) > 0); /* process sym in buf */ | ||
897 | } while (symbols_left > 0); /* read all symbols */ | ||
898 | if (~checks) | ||
899 | dload_error(dlthis, "Checksum of symbols failed"); | ||
900 | |||
901 | free_sym_buf: | ||
902 | kfree(my_sym_buf); | ||
903 | return; | ||
904 | } /* dload_symbols */ | ||
905 | |||
906 | /***************************************************************************** | ||
907 | * Procedure symbol_table_free | ||
908 | * | ||
909 | * Parameters: | ||
910 | * none | ||
911 | * | ||
912 | * Effect: | ||
913 | * Frees any state used by the symbol table. | ||
914 | * | ||
915 | * WARNING: | ||
916 | * This routine is not allowed to declare errors! | ||
917 | **************************************************************************** */ | ||
918 | static void symbol_table_free(struct dload_state *dlthis) | ||
919 | { | ||
920 | if (dlthis->local_symtab) { | ||
921 | if (dlthis->dload_errcount) { /* blow off our symbols */ | ||
922 | dlthis->mysym->purge_symbol_table(dlthis->mysym, | ||
923 | (unsigned) | ||
924 | dlthis->myhandle); | ||
925 | } | ||
926 | dlthis->mysym->dload_deallocate(dlthis->mysym, | ||
927 | dlthis->local_symtab); | ||
928 | } | ||
929 | } /* symbol_table_free */ | ||
930 | |||
931 | /* .cinit Processing | ||
932 | * | ||
933 | * The dynamic loader does .cinit interpretation. cload_cinit() | ||
934 | * acts as a special write-to-target function, in that it takes relocated | ||
935 | * data from the normal data flow, and interprets it as .cinit actions. | ||
936 | * Because the normal data flow does not necessarily process the whole | ||
937 | * .cinit section in one buffer, cload_cinit() must be prepared to | ||
938 | * interpret the data piecemeal. A state machine is used for this | ||
939 | * purpose. | ||
940 | */ | ||
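/*
 * Editor's note: as the state machine below interprets it, the relocated
 * .cinit data arrives as a sequence of records of the form
 *
 *	[count] [address (optionally carrying page bits)] [count data units]
 *
 * with each record's data padded up to CINIT_ALIGN, and the whole stream
 * possibly split across several image packets.  Processing stops at a
 * count <= 0 (the CI_COUNT case notes that a negative count denotes the
 * BSS table and zero means done).  CI_PARTADDRESS exists only to stitch an
 * address field back together when it was split between two packets.
 */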
941 | |||
942 | /* The following are only for use by reloc.c and things it calls */ | ||
943 | static const struct ldr_section_info cinit_info_init = { cinitname, 0, 0, | ||
944 | (ldr_addr)-1, 0, DLOAD_BSS, 0 | ||
945 | }; | ||
946 | |||
947 | /************************************************************************* | ||
948 | * Procedure cload_cinit | ||
949 | * | ||
950 | * Parameters: | ||
951 | * ipacket Pointer to data packet to be loaded | ||
952 | * | ||
953 | * Effect: | ||
954 | * Interprets the data in the buffer as .cinit data, and performs the | ||
955 | * appropriate initializations. | ||
956 | *********************************************************************** */ | ||
957 | static void cload_cinit(struct dload_state *dlthis, | ||
958 | struct image_packet_t *ipacket) | ||
959 | { | ||
960 | #if TDATA_TO_HOST(CINIT_COUNT)*BITS_PER_AU > 16 | ||
961 | s32 init_count, left; | ||
962 | #else | ||
963 | s16 init_count, left; | ||
964 | #endif | ||
965 | unsigned char *pktp = ipacket->img_data; | ||
966 | unsigned char *pktend = pktp + BYTE_TO_HOST_ROUND(ipacket->packet_size); | ||
967 | int temp; | ||
968 | ldr_addr atmp; | ||
969 | struct ldr_section_info cinit_info; | ||
970 | |||
971 | /* PROCESS ALL THE INITIALIZATION RECORDS IN THE BUFFER. */ | ||
972 | while (true) { | ||
973 | left = pktend - pktp; | ||
974 | switch (dlthis->cinit_state) { | ||
975 | case CI_COUNT: /* count field */ | ||
976 | if (left < TDATA_TO_HOST(CINIT_COUNT)) | ||
977 | goto loopexit; | ||
978 | temp = dload_unpack(dlthis, (tgt_au_t *) pktp, | ||
979 | CINIT_COUNT * TDATA_AU_BITS, 0, | ||
980 | ROP_SGN); | ||
981 | pktp += TDATA_TO_HOST(CINIT_COUNT); | ||
982 | /* negative signifies BSS table, zero means done */ | ||
983 | if (temp <= 0) { | ||
984 | dlthis->cinit_state = CI_DONE; | ||
985 | break; | ||
986 | } | ||
987 | dlthis->cinit_count = temp; | ||
988 | dlthis->cinit_state = CI_ADDRESS; | ||
989 | break; | ||
990 | #if CINIT_ALIGN < CINIT_ADDRESS | ||
991 | case CI_PARTADDRESS: | ||
992 | pktp -= TDATA_TO_HOST(CINIT_ALIGN); | ||
993 | /* back up pointer into space courtesy of caller */ | ||
994 | *(uint16_t *) pktp = dlthis->cinit_addr; | ||
995 | /* stuff in saved bits !! FALL THRU !! */ | ||
996 | #endif | ||
997 | case CI_ADDRESS: /* Address field for a copy packet */ | ||
998 | if (left < TDATA_TO_HOST(CINIT_ADDRESS)) { | ||
999 | #if CINIT_ALIGN < CINIT_ADDRESS | ||
1000 | if (left == TDATA_TO_HOST(CINIT_ALIGN)) { | ||
1001 | /* address broken into halves */ | ||
1002 | dlthis->cinit_addr = *(uint16_t *) pktp; | ||
1003 | /* remember 1st half */ | ||
1004 | dlthis->cinit_state = CI_PARTADDRESS; | ||
1005 | left = 0; | ||
1006 | } | ||
1007 | #endif | ||
1008 | goto loopexit; | ||
1009 | } | ||
1010 | atmp = dload_unpack(dlthis, (tgt_au_t *) pktp, | ||
1011 | CINIT_ADDRESS * TDATA_AU_BITS, 0, | ||
1012 | ROP_UNS); | ||
1013 | pktp += TDATA_TO_HOST(CINIT_ADDRESS); | ||
1014 | #if CINIT_PAGE_BITS > 0 | ||
1015 | dlthis->cinit_page = atmp & | ||
1016 | ((1 << CINIT_PAGE_BITS) - 1); | ||
1017 | atmp >>= CINIT_PAGE_BITS; | ||
1018 | #else | ||
1019 | dlthis->cinit_page = CINIT_DEFAULT_PAGE; | ||
1020 | #endif | ||
1021 | dlthis->cinit_addr = atmp; | ||
1022 | dlthis->cinit_state = CI_COPY; | ||
1023 | break; | ||
1024 | case CI_COPY: /* copy bits to the target */ | ||
1025 | init_count = HOST_TO_TDATA(left); | ||
1026 | if (init_count > dlthis->cinit_count) | ||
1027 | init_count = dlthis->cinit_count; | ||
1028 | if (init_count == 0) | ||
1029 | goto loopexit; /* get more bits */ | ||
1030 | cinit_info = cinit_info_init; | ||
1031 | cinit_info.page = dlthis->cinit_page; | ||
1032 | if (!dlthis->myio->writemem(dlthis->myio, pktp, | ||
1033 | TDATA_TO_TADDR | ||
1034 | (dlthis->cinit_addr), | ||
1035 | &cinit_info, | ||
1036 | TDATA_TO_HOST(init_count))) { | ||
1037 | dload_error(dlthis, initfail, "write", | ||
1038 | dlthis->cinit_addr); | ||
1039 | } | ||
1040 | dlthis->cinit_count -= init_count; | ||
1041 | if (dlthis->cinit_count <= 0) { | ||
1042 | dlthis->cinit_state = CI_COUNT; | ||
1043 | init_count = (init_count + CINIT_ALIGN - 1) & | ||
1044 | -CINIT_ALIGN; | ||
1045 | /* align to next init */ | ||
1046 | } | ||
1047 | pktp += TDATA_TO_HOST(init_count); | ||
1048 | dlthis->cinit_addr += init_count; | ||
1049 | break; | ||
1050 | case CI_DONE: /* no more .cinit to do */ | ||
1051 | return; | ||
1052 | } /* switch (cinit_state) */ | ||
1053 | } /* while */ | ||
1054 | |||
1055 | loopexit: | ||
1056 | if (left > 0) { | ||
1057 | dload_error(dlthis, "%d bytes left over in cinit packet", left); | ||
1058 | dlthis->cinit_state = CI_DONE; /* left over bytes are bad */ | ||
1059 | } | ||
1060 | } /* cload_cinit */ | ||
1061 | |||
1062 | /* Functions to interface to reloc.c | ||
1063 | * | ||
1064 | * reloc.c is the relocation module borrowed from the linker, with | ||
1065 | * minimal (we hope) changes for our purposes. cload_sect_data() invokes | ||
1066 | * this module on a section to relocate and load the image data for that | ||
1067 | * section. The actual read and write actions are supplied by the global | ||
1068 | * routines below. | ||
1069 | */ | ||
1070 | |||
1071 | /************************************************************************ | ||
1072 | * Procedure relocate_packet | ||
1073 | * | ||
1074 | * Parameters: | ||
1075 | * ipacket Pointer to an image packet to relocate | ||
1076 | * | ||
1077 | * Effect: | ||
1078 | * Performs the required relocations on the packet. Returns a checksum | ||
1079 | * of the relocation operations. | ||
1080 | *********************************************************************** */ | ||
1081 | #define MY_RELOC_BUF_SIZ 8 | ||
1082 | /* careful! exists at the same time as the image buffer */ | ||
1083 | static int relocate_packet(struct dload_state *dlthis, | ||
1084 | struct image_packet_t *ipacket, | ||
1085 | u32 *checks, bool *tramps_generated) | ||
1086 | { | ||
1087 | u32 rnum; | ||
1088 | *tramps_generated = false; | ||
1089 | |||
1090 | rnum = ipacket->num_relocs; | ||
1091 | do { /* all relocs */ | ||
1092 | unsigned rinbuf; | ||
1093 | int siz; | ||
1094 | struct reloc_record_t *rp, rrec[MY_RELOC_BUF_SIZ]; | ||
1095 | |||
1096 | rp = rrec; | ||
1097 | rinbuf = rnum > MY_RELOC_BUF_SIZ ? MY_RELOC_BUF_SIZ : rnum; | ||
1098 | siz = rinbuf * sizeof(struct reloc_record_t); | ||
1099 | if (dlthis->strm->read_buffer(dlthis->strm, rp, siz) != siz) { | ||
1100 | DL_ERROR(readstrm, "relocation"); | ||
1101 | return 0; | ||
1102 | } | ||
1103 | /* reorder the bytes if need be */ | ||
1104 | if (dlthis->reorder_map) | ||
1105 | dload_reorder(rp, siz, dlthis->reorder_map); | ||
1106 | |||
1107 | *checks += dload_checksum(rp, siz); | ||
1108 | do { | ||
1109 | /* perform the relocation operation */ | ||
1110 | dload_relocate(dlthis, (tgt_au_t *) ipacket->img_data, | ||
1111 | rp, tramps_generated, false); | ||
1112 | rp += 1; | ||
1113 | rnum -= 1; | ||
1114 | } while ((rinbuf -= 1) > 0); | ||
1115 | } while (rnum > 0); /* all relocs */ | ||
1116 | /* If trampoline(s) were generated, we need to do an update of the | ||
1117 | * trampoline copy of the packet since a 2nd phase relo will be done | ||
1118 | * later. */ | ||
1119 | if (*tramps_generated == true) { | ||
1120 | dload_tramp_pkt_udpate(dlthis, | ||
1121 | (dlthis->image_secn - | ||
1122 | dlthis->ldr_sections), | ||
1123 | dlthis->image_offset, ipacket); | ||
1124 | } | ||
1125 | |||
1126 | return 1; | ||
1127 | } /* relocate_packet */ | ||
1128 | |||
1129 | #define IPH_SIZE (sizeof(struct image_packet_t) - sizeof(u32)) | ||
1130 | |||
1131 | /* VERY dangerous */ | ||
1132 | static const char imagepak[] = { "image packet" }; | ||
1133 | |||
1134 | struct img_buffer { | ||
1135 | struct image_packet_t ipacket; | ||
1136 | u8 bufr[BYTE_TO_HOST(IMAGE_PACKET_SIZE)]; | ||
1137 | }; | ||
1138 | |||
1139 | /************************************************************************* | ||
1140 | * Procedure dload_data | ||
1141 | * | ||
1142 | * Parameters: | ||
1143 | * none | ||
1144 | * | ||
1145 | * Effect: | ||
1146 | * Read image data from input file, relocate it, and download it to the | ||
1147 | * target. | ||
1148 | *********************************************************************** */ | ||
1149 | static void dload_data(struct dload_state *dlthis) | ||
1150 | { | ||
1151 | u16 curr_sect; | ||
1152 | struct doff_scnhdr_t *sptr = dlthis->sect_hdrs; | ||
1153 | struct ldr_section_info *lptr = dlthis->ldr_sections; | ||
1154 | struct img_buffer *ibuf; | ||
1155 | u8 *dest; | ||
1156 | |||
1157 | /* Indicates whether CINIT processing has occurred */ | ||
1158 | bool cinit_processed = false; | ||
1159 | |||
1160 | ibuf = kzalloc(sizeof(*ibuf), GFP_KERNEL); | ||
1161 | if (!ibuf) | ||
1162 | return; | ||
1163 | |||
1164 | /* Loop through the sections and load them one at a time. | ||
1165 | */ | ||
1166 | for (curr_sect = 0; curr_sect < dlthis->dfile_hdr.df_no_scns; | ||
1167 | curr_sect += 1) { | ||
1168 | if (ds_needs_download(sptr)) { | ||
1169 | s32 nip; | ||
1170 | ldr_addr image_offset = 0; | ||
1171 | /* set relocation info for this section */ | ||
1172 | if (curr_sect < dlthis->allocated_secn_count) | ||
1173 | dlthis->delta_runaddr = sptr->ds_paddr; | ||
1174 | else { | ||
1175 | lptr = (struct ldr_section_info *)sptr; | ||
1176 | dlthis->delta_runaddr = 0; | ||
1177 | } | ||
1178 | dlthis->image_secn = lptr; | ||
1179 | #if BITS_PER_AU > BITS_PER_BYTE | ||
1180 | lptr->name = unpack_name(dlthis, sptr->ds_offset); | ||
1181 | #endif | ||
1182 | nip = sptr->ds_nipacks; | ||
1183 | while ((nip -= 1) >= 0) { /* process packets */ | ||
1184 | |||
1185 | s32 ipsize; | ||
1186 | u32 checks; | ||
1187 | bool tramp_generated = false; | ||
1188 | |||
1189 | /* get the fixed header bits */ | ||
1190 | if (dlthis->strm->read_buffer(dlthis->strm, | ||
1191 | &ibuf->ipacket, | ||
1192 | IPH_SIZE) != | ||
1193 | IPH_SIZE) { | ||
1194 | DL_ERROR(readstrm, imagepak); | ||
1195 | goto free_ibuf; | ||
1196 | } | ||
1197 | /* reorder the header if need be */ | ||
1198 | if (dlthis->reorder_map) { | ||
1199 | dload_reorder(&ibuf->ipacket, IPH_SIZE, | ||
1200 | dlthis->reorder_map); | ||
1201 | } | ||
1202 | /* now read the rest of the packet */ | ||
1203 | ipsize = | ||
1204 | BYTE_TO_HOST(DOFF_ALIGN | ||
1205 | (ibuf->ipacket.packet_size)); | ||
1206 | if (ipsize > BYTE_TO_HOST(IMAGE_PACKET_SIZE)) { | ||
1207 | DL_ERROR("Bad image packet size %d", | ||
1208 | ipsize); | ||
1209 | goto free_ibuf; | ||
1210 | } | ||
1211 | dest = ibuf->bufr; | ||
1212 | /* End of determination */ | ||
1213 | |||
1214 | if (dlthis->strm->read_buffer(dlthis->strm, | ||
1215 | ibuf->bufr, | ||
1216 | ipsize) != | ||
1217 | ipsize) { | ||
1218 | DL_ERROR(readstrm, imagepak); | ||
1219 | goto free_ibuf; | ||
1220 | } | ||
1221 | ibuf->ipacket.img_data = dest; | ||
1222 | |||
1223 | /* reorder the bytes if need be */ | ||
1224 | #if !defined(_BIG_ENDIAN) || (TARGET_AU_BITS > 16) | ||
1225 | if (dlthis->reorder_map) { | ||
1226 | dload_reorder(dest, ipsize, | ||
1227 | dlthis->reorder_map); | ||
1228 | } | ||
1229 | checks = dload_checksum(dest, ipsize); | ||
1230 | #else | ||
1231 | if (dlthis->dfile_hdr.df_byte_reshuffle != | ||
1232 | TARGET_ORDER(REORDER_MAP | ||
1233 | (BYTE_RESHUFFLE_VALUE))) { | ||
1234 | /* put image bytes in big-endian order, | ||
1235 | * not PC order */ | ||
1236 | dload_reorder(dest, ipsize, | ||
1237 | TARGET_ORDER | ||
1238 | (dlthis->dfile_hdr. | ||
1239 | df_byte_reshuffle)); | ||
1240 | } | ||
1241 | #if TARGET_AU_BITS > 8 | ||
1242 | checks = dload_reverse_checksum16(dest, ipsize); | ||
1243 | #else | ||
1244 | checks = dload_reverse_checksum(dest, ipsize); | ||
1245 | #endif | ||
1246 | #endif | ||
1247 | |||
1248 | checks += dload_checksum(&ibuf->ipacket, | ||
1249 | IPH_SIZE); | ||
1250 | /* relocate the image bits as needed */ | ||
1251 | if (ibuf->ipacket.num_relocs) { | ||
1252 | dlthis->image_offset = image_offset; | ||
1253 | if (!relocate_packet(dlthis, | ||
1254 | &ibuf->ipacket, | ||
1255 | &checks, | ||
1256 | &tramp_generated)) | ||
1257 | goto free_ibuf; /* error */ | ||
1258 | } | ||
1259 | if (~checks) | ||
1260 | DL_ERROR(err_checksum, imagepak); | ||
1261 | /* Only write the result to the target if no | ||
1262 | * trampoline was generated. Otherwise it | ||
1263 | * will be done during trampoline finalize. */ | ||
1264 | |||
1265 | if (tramp_generated == false) { | ||
1266 | |||
1267 | /* stuff the result into target | ||
1268 | * memory */ | ||
1269 | if (dload_check_type(sptr, | ||
1270 | DLOAD_CINIT)) { | ||
1271 | cload_cinit(dlthis, | ||
1272 | &ibuf->ipacket); | ||
1273 | cinit_processed = true; | ||
1274 | } else { | ||
1275 | /* FIXME */ | ||
1276 | if (!dlthis->myio-> | ||
1277 | writemem(dlthis-> | ||
1278 | myio, | ||
1279 | ibuf->bufr, | ||
1280 | lptr-> | ||
1281 | load_addr + | ||
1282 | image_offset, | ||
1283 | lptr, | ||
1284 | BYTE_TO_HOST | ||
1285 | (ibuf-> | ||
1286 | ipacket. | ||
1287 | packet_size))) { | ||
1288 | DL_ERROR | ||
1289 | ("Write to " | ||
1290 | FMT_UI32 | ||
1291 | " failed", | ||
1292 | lptr-> | ||
1293 | load_addr + | ||
1294 | image_offset); | ||
1295 | } | ||
1296 | } | ||
1297 | } | ||
1298 | image_offset += | ||
1299 | BYTE_TO_TADDR(ibuf->ipacket.packet_size); | ||
1300 | } /* process packets */ | ||
1301 | /* if this is a BSS section, we may want to fill it */ | ||
1302 | if (!dload_check_type(sptr, DLOAD_BSS)) | ||
1303 | goto loop_cont; | ||
1304 | |||
1305 | if (!(dlthis->myoptions & DLOAD_INITBSS)) | ||
1306 | goto loop_cont; | ||
1307 | |||
1308 | if (cinit_processed) { | ||
1309 | /* Don't clear BSS after load-time | ||
1310 | * initialization */ | ||
1311 | DL_ERROR | ||
1312 | ("Zero-initialization at " FMT_UI32 | ||
1313 | " after " "load-time initialization!", | ||
1314 | lptr->load_addr); | ||
1315 | goto loop_cont; | ||
1316 | } | ||
1317 | /* fill the .bss area */ | ||
1318 | dlthis->myio->fillmem(dlthis->myio, | ||
1319 | TADDR_TO_HOST(lptr->load_addr), | ||
1320 | lptr, TADDR_TO_HOST(lptr->size), | ||
1321 | DLOAD_FILL_BSS); | ||
1322 | goto loop_cont; | ||
1323 | } | ||
1324 | /* if DS_DOWNLOAD_MASK */ | ||
1325 | /* If not loading, but BSS, zero initialize */ | ||
1326 | if (!dload_check_type(sptr, DLOAD_BSS)) | ||
1327 | goto loop_cont; | ||
1328 | |||
1329 | if (!(dlthis->myoptions & DLOAD_INITBSS)) | ||
1330 | goto loop_cont; | ||
1331 | |||
1332 | if (curr_sect >= dlthis->allocated_secn_count) | ||
1333 | lptr = (struct ldr_section_info *)sptr; | ||
1334 | |||
1335 | if (cinit_processed) { | ||
1336 | /*Don't clear BSS after load-time initialization */ | ||
1337 | DL_ERROR("Zero-initialization at " FMT_UI32 | ||
1338 | " attempted after " | ||
1339 | "load-time initialization!", lptr->load_addr); | ||
1340 | goto loop_cont; | ||
1341 | } | ||
1342 | /* fill the .bss area */ | ||
1343 | dlthis->myio->fillmem(dlthis->myio, | ||
1344 | TADDR_TO_HOST(lptr->load_addr), lptr, | ||
1345 | TADDR_TO_HOST(lptr->size), | ||
1346 | DLOAD_FILL_BSS); | ||
1347 | loop_cont: | ||
1348 | sptr += 1; | ||
1349 | lptr += 1; | ||
1350 | } /* load sections */ | ||
1351 | |||
1352 | /* Finalize any trampolines that were created during the load */ | ||
1353 | if (dload_tramp_finalize(dlthis) == 0) { | ||
1354 | DL_ERROR("Finalization of auto-trampolines (size = " FMT_UI32 | ||
1355 | ") failed", dlthis->tramp.tramp_sect_next_addr); | ||
1356 | } | ||
1357 | free_ibuf: | ||
1358 | kfree(ibuf); | ||
1359 | return; | ||
1360 | } /* dload_data */ | ||
1361 | |||
1362 | /************************************************************************* | ||
1363 | * Procedure dload_reorder | ||
1364 | * | ||
1365 | * Parameters: | ||
1366 | * data 32-bit aligned pointer to data to be byte-swapped | ||
1367 | * dsiz size of the data to be reordered in sizeof() units. | ||
1368 | * map 32-bit map defining how to reorder the data. Value | ||
1369 | * must be REORDER_MAP() of some permutation | ||
1370 | * of 0x00 01 02 03 | ||
1371 | * | ||
1372 | * Effect: | ||
1373 | * Re-arranges the bytes in each word according to the map specified. | ||
1374 | * | ||
1375 | *********************************************************************** */ | ||
1376 | /* mask for byte shift count */ | ||
1377 | #define SHIFT_COUNT_MASK (3 << LOG_BITS_PER_BYTE) | ||
1378 | |||
1379 | void dload_reorder(void *data, int dsiz, unsigned int map) | ||
1380 | { | ||
1381 | register u32 tmp, tmap, datv; | ||
1382 | u32 *dp = (u32 *) data; | ||
1383 | |||
1384 | map <<= LOG_BITS_PER_BYTE; /* align map with SHIFT_COUNT_MASK */ | ||
1385 | do { | ||
1386 | tmp = 0; | ||
1387 | datv = *dp; | ||
1388 | tmap = map; | ||
1389 | do { | ||
1390 | tmp |= (datv & BYTE_MASK) << (tmap & SHIFT_COUNT_MASK); | ||
1391 | tmap >>= BITS_PER_BYTE; | ||
1392 | } while (datv >>= BITS_PER_BYTE); | ||
1393 | *dp++ = tmp; | ||
1394 | } while ((dsiz -= sizeof(u32)) > 0); | ||
1395 | } /* dload_reorder */ | ||
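/*
 * Editor's note: a stand-alone host-side sketch of the shuffling arithmetic
 * above.  As the shift arithmetic implies, the low two bits of byte i of the
 * map choose the destination byte position for source byte i, so the
 * illustrative map 0x00010203 (bytes 03 02 01 00, LSB first) reverses the
 * bytes of each word and 0x03020100 leaves them alone.  The REORDER_MAP()
 * and BYTE_RESHUFFLE_VALUE macros that produce the real map values are
 * defined outside this file and are not reproduced here; the sketch also
 * assumes 8-bit bytes (LOG_BITS_PER_BYTE == 3).
 */
#include <inttypes.h>
#include <stdio.h>

static uint32_t reorder_word(uint32_t datv, uint32_t map)
{
	uint32_t tmp = 0;
	uint32_t tmap = map << 3;		/* LOG_BITS_PER_BYTE == 3 */

	do {
		tmp |= (datv & 0xff) << (tmap & (3 << 3));
		tmap >>= 8;
	} while (datv >>= 8);
	return tmp;
}

int main(void)
{
	/* Prints "11223344": the bytes of 0x44332211, reversed. */
	printf("%08" PRIx32 "\n", reorder_word(0x44332211u, 0x00010203u));
	return 0;
}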
1396 | |||
1397 | /************************************************************************* | ||
1398 | * Procedure dload_checksum | ||
1399 | * | ||
1400 | * Parameters: | ||
1401 | * data 32-bit aligned pointer to data to be checksummed | ||
1402 | * siz size of the data to be checksummed in sizeof() units. | ||
1403 | * | ||
1404 | * Effect: | ||
1405 | * Returns a checksum of the specified block | ||
1406 | * | ||
1407 | *********************************************************************** */ | ||
1408 | u32 dload_checksum(void *data, unsigned siz) | ||
1409 | { | ||
1410 | u32 sum; | ||
1411 | u32 *dp; | ||
1412 | int left; | ||
1413 | |||
1414 | sum = 0; | ||
1415 | dp = (u32 *) data; | ||
1416 | for (left = siz; left > 0; left -= sizeof(u32)) | ||
1417 | sum += *dp++; | ||
1418 | return sum; | ||
1419 | } /* dload_checksum */ | ||
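/*
 * Editor's note: every verification site in this file tests the bitwise
 * complement of this sum -- either ~dload_checksum(block, size) must be zero
 * (the block carries its own checksum word) or ~sum must match a value kept
 * in the verify record.  Both express the same invariant: data words plus
 * checksum word add up to 0xffffffff.  A sketch (not part of the driver,
 * reusing this file's u32 type) of how a producer could derive such a word:
 */
static u32 example_make_checksum_word(const u32 *words, unsigned int nwords)
{
	u32 sum = 0;
	unsigned int i;

	for (i = 0; i < nwords; i++)
		sum += words[i];
	/* Appending ~sum makes the sum over data-plus-checksum all-ones. */
	return ~sum;
}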
1420 | |||
1421 | #if HOST_ENDIANNESS | ||
1422 | /************************************************************************* | ||
1423 | * Procedure dload_reverse_checksum | ||
1424 | * | ||
1425 | * Parameters: | ||
1426 | * data 32-bit aligned pointer to data to be checksummed | ||
1427 | * siz size of the data to be checksummed in sizeof() units. | ||
1428 | * | ||
1429 | * Effect: | ||
1430 | * Returns a checksum of the specified block, which is assumed to be bytes | ||
1431 | * in big-endian order. | ||
1432 | * | ||
1433 | * Notes: | ||
1434 | * In a big-endian host, things like the string table are stored as bytes | ||
1435 | * in host order. But dllcreate always checksums in little-endian order. | ||
1436 | * It is most efficient to just handle the difference a word at a time. | ||
1437 | * | ||
1438 | ********************************************************************** */ | ||
1439 | u32 dload_reverse_checksum(void *data, unsigned siz) | ||
1440 | { | ||
1441 | u32 sum, temp; | ||
1442 | u32 *dp; | ||
1443 | int left; | ||
1444 | |||
1445 | sum = 0; | ||
1446 | dp = (u32 *) data; | ||
1447 | |||
1448 | for (left = siz; left > 0; left -= sizeof(u32)) { | ||
1449 | temp = *dp++; | ||
1450 | sum += temp << BITS_PER_BYTE * 3; | ||
1451 | sum += temp >> BITS_PER_BYTE * 3; | ||
1452 | sum += (temp >> BITS_PER_BYTE) & (BYTE_MASK << BITS_PER_BYTE); | ||
1453 | sum += (temp & (BYTE_MASK << BITS_PER_BYTE)) << BITS_PER_BYTE; | ||
1454 | } | ||
1455 | |||
1456 | return sum; | ||
1457 | } /* dload_reverse_checksum */ | ||
1458 | |||
1459 | #if (TARGET_AU_BITS > 8) && (TARGET_AU_BITS < 32) | ||
1460 | u32 dload_reverse_checksum16(void *data, unsigned siz) | ||
1461 | { | ||
1462 | uint_fast32_t sum, temp; | ||
1463 | u32 *dp; | ||
1464 | int left; | ||
1465 | |||
1466 | sum = 0; | ||
1467 | dp = (u32 *) data; | ||
1468 | |||
1469 | for (left = siz; left > 0; left -= sizeof(u32)) { | ||
1470 | temp = *dp++; | ||
1471 | sum += temp << BITS_PER_BYTE * 2; | ||
1472 | sum += temp >> BITS_PER_BYTE * 2; | ||
1473 | } | ||
1474 | |||
1475 | return sum; | ||
1476 | } /* dload_reverse_checksum16 */ | ||
1477 | #endif | ||
1478 | #endif | ||
1479 | |||
1480 | /************************************************************************* | ||
1481 | * Procedure swap_words | ||
1482 | * | ||
1483 | * Parameters: | ||
1484 | * data 32-bit aligned pointer to data to be swapped | ||
1485 | * siz size of the data to be swapped. | ||
1486 | * bitmap Bit map of how to swap each 32-bit word; 1 => 2 shorts, | ||
1487 | * 0 => 1 long | ||
1488 | * | ||
1489 | * Effect: | ||
1490 | * Swaps the specified data according to the specified map | ||
1491 | * | ||
1492 | *********************************************************************** */ | ||
1493 | static void swap_words(void *data, unsigned siz, unsigned bitmap) | ||
1494 | { | ||
1495 | register int i; | ||
1496 | #if TARGET_AU_BITS < 16 | ||
1497 | register u16 *sp; | ||
1498 | #endif | ||
1499 | register u32 *lp; | ||
1500 | |||
1501 | siz /= sizeof(u16); | ||
1502 | |||
1503 | #if TARGET_AU_BITS < 16 | ||
1504 | /* pass 1: do all the bytes */ | ||
1505 | i = siz; | ||
1506 | sp = (u16 *) data; | ||
1507 | do { | ||
1508 | register u16 tmp; | ||
1509 | |||
1510 | tmp = *sp; | ||
1511 | *sp++ = SWAP16BY8(tmp); | ||
1512 | } while ((i -= 1) > 0); | ||
1513 | #endif | ||
1514 | |||
1515 | #if TARGET_AU_BITS < 32 | ||
1516 | /* pass 2: fixup the 32-bit words */ | ||
1517 | i = siz >> 1; | ||
1518 | lp = (u32 *) data; | ||
1519 | do { | ||
1520 | if ((bitmap & 1) == 0) { | ||
1521 | register u32 tmp; | ||
1522 | tmp = *lp; | ||
1523 | *lp = SWAP32BY16(tmp); | ||
1524 | } | ||
1525 | lp += 1; | ||
1526 | bitmap >>= 1; | ||
1527 | } while ((i -= 1) > 0); | ||
1528 | #endif | ||
1529 | } /* swap_words */ | ||
1530 | |||
1531 | /************************************************************************* | ||
1532 | * Procedure copy_tgt_strings | ||
1533 | * | ||
1534 | * Parameters: | ||
1535 | * dstp Destination address. Assumed to be 32-bit aligned | ||
1536 | * srcp Source address. Assumed to be 32-bit aligned | ||
1537 | * charcount Number of characters to copy. | ||
1538 | * | ||
1539 | * Effect: | ||
1540 | * Copies strings from the source (which is in usual .dof file order on | ||
1541 | * the loading processor) to the destination buffer (which should be in proper | ||
1542 | * target addressable unit order). Makes sure the last string in the | ||
1543 | * buffer is NULL terminated (for safety). | ||
1544 | * Returns the first unused destination address. | ||
1545 | *********************************************************************** */ | ||
1546 | static char *copy_tgt_strings(void *dstp, void *srcp, unsigned charcount) | ||
1547 | { | ||
1548 | register tgt_au_t *src = (tgt_au_t *) srcp; | ||
1549 | register tgt_au_t *dst = (tgt_au_t *) dstp; | ||
1550 | register int cnt = charcount; | ||
1551 | |||
1552 | do { | ||
1553 | #if TARGET_AU_BITS <= BITS_PER_AU | ||
1554 | /* byte-swapping issues may exist for strings on target */ | ||
1555 | *dst++ = *src++; | ||
1556 | #else | ||
1557 | *dst++ = *src++; | ||
1558 | #endif | ||
1559 | } while ((cnt -= (sizeof(tgt_au_t) * BITS_PER_AU / BITS_PER_BYTE)) > 0); | ||
1560 | /* make sure that the string table has a null terminator */ | ||
1561 | #if (BITS_PER_AU == BITS_PER_BYTE) && (TARGET_AU_BITS == BITS_PER_BYTE) | ||
1562 | dst[-1] = 0; | ||
1563 | #else | ||
1564 | /* little endian */ | ||
1565 | dst[-1] &= (1 << (BITS_PER_AU - BITS_PER_BYTE)) - 1; | ||
1566 | #endif | ||
1567 | return (char *)dst; | ||
1568 | } /* copy_tgt_strings */ | ||
1569 | |||
1570 | /************************************************************************* | ||
1571 | * Procedure init_module_handle | ||
1572 | * | ||
1573 | * Parameters: | ||
1574 | * none | ||
1575 | * | ||
1576 | * Effect: | ||
1577 | * Initializes the module handle we use to enable unloading, and installs | ||
1578 | * the debug information required by the target. | ||
1579 | * | ||
1580 | * Notes: | ||
1581 | * The handle returned from dynamic_load_module needs to encapsulate all the | ||
1582 | * allocations done for the module, and enable them plus the module's symbols to | ||
1583 | * be deallocated. | ||
1584 | * | ||
1585 | *********************************************************************** */ | ||
1586 | #ifndef _BIG_ENDIAN | ||
1587 | static const struct ldr_section_info dllview_info_init = { ".dllview", 0, 0, | ||
1588 | (ldr_addr)-1, DBG_LIST_PAGE, DLOAD_DATA, 0 | ||
1589 | }; | ||
1590 | #else | ||
1591 | static const struct ldr_section_info dllview_info_init = { ".dllview", 0, 0, | ||
1592 | (ldr_addr)-1, DLOAD_DATA, DBG_LIST_PAGE, 0 | ||
1593 | }; | ||
1594 | #endif | ||
1595 | static void init_module_handle(struct dload_state *dlthis) | ||
1596 | { | ||
1597 | struct my_handle *hndl; | ||
1598 | u16 curr_sect; | ||
1599 | struct ldr_section_info *asecs; | ||
1600 | struct dll_module *dbmod; | ||
1601 | struct dll_sect *dbsec; | ||
1602 | struct dbg_mirror_root *mlist; | ||
1603 | register char *cp; | ||
1604 | struct modules_header mhdr; | ||
1605 | struct ldr_section_info dllview_info; | ||
1606 | struct dynload_symbol *debug_mirror_sym; | ||
1607 | |||
1608 | hndl = dlthis->myhandle; | ||
1609 | if (!hndl) | ||
1610 | return; /* must be errors detected, so forget it */ | ||
1611 | |||
1612 | /* Store the section count */ | ||
1613 | hndl->secn_count = dlthis->allocated_secn_count; | ||
1614 | |||
1615 | /* If a trampoline section was created, add it in */ | ||
1616 | if (dlthis->tramp.tramp_sect_next_addr != 0) | ||
1617 | hndl->secn_count += 1; | ||
1618 | |||
1619 | hndl->secn_count = hndl->secn_count << 1; | ||
1620 | |||
1622 | #ifndef TARGET_ENDIANNESS | ||
1623 | if (dlthis->big_e_target) | ||
1624 | hndl->secn_count += 1; /* flag for big-endian */ | ||
1625 | #endif | ||
1626 | if (dlthis->dload_errcount) | ||
1627 | return; /* abandon if errors detected */ | ||
1628 | /* Locate the symbol that names the header for the CCS debug list | ||
1629 | of modules. If not found, we just don't generate the debug record. | ||
1630 | If found, we create our modules list. We make sure to create the | ||
1631 | loader_dllview_root even if there is no relocation info to record, | ||
1632 | just to try to put both symbols in the same symbol table and | ||
1633 | module. */ | ||
1634 | debug_mirror_sym = dlthis->mysym->find_matching_symbol(dlthis->mysym, | ||
1635 | loader_dllview_root); | ||
1636 | if (!debug_mirror_sym) { | ||
1637 | struct dynload_symbol *dlmodsym; | ||
1638 | struct dbg_mirror_root *mlst; | ||
1639 | |||
1640 | /* our root symbol is not yet present; | ||
1641 | check if we have DLModules defined */ | ||
1642 | dlmodsym = dlthis->mysym->find_matching_symbol(dlthis->mysym, | ||
1643 | LINKER_MODULES_HEADER); | ||
1644 | if (!dlmodsym) | ||
1645 | return; /* no DLModules list so no debug info */ | ||
1646 | /* if we have DLModules defined, construct our header */ | ||
1647 | mlst = (struct dbg_mirror_root *) | ||
1648 | dlthis->mysym->dload_allocate(dlthis->mysym, | ||
1649 | sizeof(struct | ||
1650 | dbg_mirror_root)); | ||
1651 | if (!mlst) { | ||
1652 | DL_ERROR(err_alloc, sizeof(struct dbg_mirror_root)); | ||
1653 | return; | ||
1654 | } | ||
1655 | mlst->next = NULL; | ||
1656 | mlst->changes = 0; | ||
1657 | mlst->refcount = 0; | ||
1658 | mlst->dbthis = TDATA_TO_TADDR(dlmodsym->value); | ||
1659 | /* add our root symbol */ | ||
1660 | debug_mirror_sym = dlthis->mysym->add_to_symbol_table | ||
1661 | (dlthis->mysym, loader_dllview_root, | ||
1662 | (unsigned)dlthis->myhandle); | ||
1663 | if (!debug_mirror_sym) { | ||
1664 | /* failed, recover memory */ | ||
1665 | dlthis->mysym->dload_deallocate(dlthis->mysym, mlst); | ||
1666 | return; | ||
1667 | } | ||
1668 | debug_mirror_sym->value = (u32) mlst; | ||
1669 | } | ||
1670 | /* First create the DLLview record and stuff it into the buffer. | ||
1671 | Then write it to the DSP. Record pertinent locations in our hndl, | ||
1672 | and add it to the per-processor list of handles with debug info. */ | ||
1673 | #ifndef DEBUG_HEADER_IN_LOADER | ||
1674 | mlist = (struct dbg_mirror_root *)debug_mirror_sym->value; | ||
1675 | if (!mlist) | ||
1676 | return; | ||
1677 | #else | ||
1678 | mlist = (struct dbg_mirror_root *)&debug_list_header; | ||
1679 | #endif | ||
1680 | hndl->dm.root = mlist; /* set pointer to root into our handle */ | ||
1681 | if (!dlthis->allocated_secn_count) | ||
1682 | return; /* no load addresses to be recorded */ | ||
1683 | /* reuse temporary symbol storage */ | ||
1684 | dbmod = (struct dll_module *)dlthis->local_symtab; | ||
1685 | /* Create the DLLview record in the memory we retain for our handle */ | ||
1686 | dbmod->num_sects = dlthis->allocated_secn_count; | ||
1687 | dbmod->timestamp = dlthis->verify.dv_timdat; | ||
1688 | dbmod->version = INIT_VERSION; | ||
1689 | dbmod->verification = VERIFICATION; | ||
1690 | asecs = dlthis->ldr_sections; | ||
1691 | dbsec = dbmod->sects; | ||
1692 | for (curr_sect = dlthis->allocated_secn_count; | ||
1693 | curr_sect > 0; curr_sect -= 1) { | ||
1694 | dbsec->sect_load_adr = asecs->load_addr; | ||
1695 | dbsec->sect_run_adr = asecs->run_addr; | ||
1696 | dbsec += 1; | ||
1697 | asecs += 1; | ||
1698 | } | ||
1699 | |||
1700 | /* If a trampoline section was created go ahead and add its info */ | ||
1701 | if (dlthis->tramp.tramp_sect_next_addr != 0) { | ||
1702 | dbmod->num_sects++; | ||
1703 | dbsec->sect_load_adr = asecs->load_addr; | ||
1704 | dbsec->sect_run_adr = asecs->run_addr; | ||
1705 | dbsec++; | ||
1706 | asecs++; | ||
1707 | } | ||
1708 | |||
1709 | /* now cram in the names */ | ||
1710 | cp = copy_tgt_strings(dbsec, dlthis->str_head, | ||
1711 | dlthis->debug_string_size); | ||
1712 | |||
1713 | /* If a trampoline section was created, add its name so DLLView | ||
1714 | * can show the user the section info. */ | ||
1715 | if (dlthis->tramp.tramp_sect_next_addr != 0) { | ||
1716 | cp = copy_tgt_strings(cp, | ||
1717 | dlthis->tramp.final_string_table, | ||
1718 | strlen(dlthis->tramp.final_string_table) + | ||
1719 | 1); | ||
1720 | } | ||
1721 | |||
1722 | /* round off the size of the debug record, and remember same */ | ||
1723 | hndl->dm.dbsiz = HOST_TO_TDATA_ROUND(cp - (char *)dbmod); | ||
1724 | *cp = 0; /* strictly to make our test harness happy */ | ||
1725 | dllview_info = dllview_info_init; | ||
1726 | dllview_info.size = TDATA_TO_TADDR(hndl->dm.dbsiz); | ||
1727 | /* Initialize memory context to default heap */ | ||
1728 | dllview_info.context = 0; | ||
1729 | hndl->dm.context = 0; | ||
1730 | /* fill in next pointer and size */ | ||
1731 | if (mlist->next) { | ||
1732 | dbmod->next_module = TADDR_TO_TDATA(mlist->next->dm.dbthis); | ||
1733 | dbmod->next_module_size = mlist->next->dm.dbsiz; | ||
1734 | } else { | ||
1735 | dbmod->next_module_size = 0; | ||
1736 | dbmod->next_module = 0; | ||
1737 | } | ||
1738 | /* allocate memory for on-DSP DLLview debug record */ | ||
1739 | if (!dlthis->myalloc) | ||
1740 | return; | ||
1741 | if (!dlthis->myalloc->dload_allocate(dlthis->myalloc, &dllview_info, | ||
1742 | HOST_TO_TADDR(sizeof(u32)))) { | ||
1743 | return; | ||
1744 | } | ||
1745 | /* Store load address of .dllview section */ | ||
1746 | hndl->dm.dbthis = dllview_info.load_addr; | ||
1747 | /* Store memory context (segid) in which .dllview section | ||
1748 | * was allocated */ | ||
1749 | hndl->dm.context = dllview_info.context; | ||
1750 | mlist->refcount += 1; | ||
1751 | /* swap bytes in the entire debug record, but not the string table */ | ||
1752 | if (TARGET_ENDIANNESS_DIFFERS(TARGET_BIG_ENDIAN)) { | ||
1753 | swap_words(dbmod, (char *)dbsec - (char *)dbmod, | ||
1754 | DLL_MODULE_BITMAP); | ||
1755 | } | ||
1756 | /* Update the DLLview list on the DSP write new record */ | ||
1757 | if (!dlthis->myio->writemem(dlthis->myio, dbmod, | ||
1758 | dllview_info.load_addr, &dllview_info, | ||
1759 | TADDR_TO_HOST(dllview_info.size))) { | ||
1760 | return; | ||
1761 | } | ||
1762 | /* write new header */ | ||
1763 | mhdr.first_module_size = hndl->dm.dbsiz; | ||
1764 | mhdr.first_module = TADDR_TO_TDATA(dllview_info.load_addr); | ||
1765 | /* swap bytes in the module header, if needed */ | ||
1766 | if (TARGET_ENDIANNESS_DIFFERS(TARGET_BIG_ENDIAN)) { | ||
1767 | swap_words(&mhdr, sizeof(struct modules_header) - sizeof(u16), | ||
1768 | MODULES_HEADER_BITMAP); | ||
1769 | } | ||
1770 | dllview_info = dllview_info_init; | ||
1771 | if (!dlthis->myio->writemem(dlthis->myio, &mhdr, mlist->dbthis, | ||
1772 | &dllview_info, | ||
1773 | sizeof(struct modules_header) - | ||
1774 | sizeof(u16))) { | ||
1775 | return; | ||
1776 | } | ||
1777 | /* Add the module handle to this processor's list | ||
1778 | of handles with debug info */ | ||
1779 | hndl->dm.next = mlist->next; | ||
1780 | if (hndl->dm.next) | ||
1781 | hndl->dm.next->dm.prev = hndl; | ||
1782 | hndl->dm.prev = (struct my_handle *)mlist; | ||
1783 | mlist->next = hndl; /* insert after root */ | ||
1784 | } /* init_module_handle */ | ||
1785 | |||
1786 | /************************************************************************* | ||
1787 | * Procedure dynamic_unload_module | ||
1788 | * | ||
1789 | * Parameters: | ||
1790 | * mhandle A module handle from dynamic_load_module | ||
1791 | * syms Host-side symbol table and malloc/free functions | ||
1792 | * alloc Target-side memory allocation | ||
1793 | * | ||
1794 | * Effect: | ||
1795 | * The module specified by mhandle is unloaded. Unloading causes all | ||
1796 | * target memory to be deallocated, all symbols defined by the module to | ||
1797 | * be purged, and any host-side storage used by the dynamic loader for | ||
1798 | * this module to be released. | ||
1799 | * | ||
1800 | * Returns: | ||
1801 | * Zero for success. On error, the number of errors detected is returned. | ||
1802 | * Individual errors are reported using syms->error_report(). | ||
1803 | *********************************************************************** */ | ||
1804 | int dynamic_unload_module(void *mhandle, | ||
1805 | struct dynamic_loader_sym *syms, | ||
1806 | struct dynamic_loader_allocate *alloc, | ||
1807 | struct dynamic_loader_initialize *init) | ||
1808 | { | ||
1809 | s16 curr_sect; | ||
1810 | struct ldr_section_info *asecs; | ||
1811 | struct my_handle *hndl; | ||
1812 | struct dbg_mirror_root *root; | ||
1813 | unsigned errcount = 0; | ||
1814 | struct ldr_section_info dllview_info = dllview_info_init; | ||
1815 | struct modules_header mhdr; | ||
1816 | |||
1817 | hndl = (struct my_handle *)mhandle; | ||
1818 | if (!hndl) | ||
1819 | return 0; /* if handle is null, nothing to do */ | ||
1820 | /* Clear out the module symbols. | ||
1821 | * Note that if this is the module that defined MODULES_HEADER | ||
1822 | * (the head of the target debug list), | ||
1823 | * then this operation will blow away that symbol. | ||
1824 | * It will therefore be impossible for subsequent | ||
1825 | * operations to add entries to this un-referenceable list. */ | ||
1826 | if (!syms) | ||
1827 | return 1; | ||
1828 | syms->purge_symbol_table(syms, (unsigned)hndl); | ||
1829 | /* Deallocate target memory for sections | ||
1830 | * NOTE: The trampoline section, if created, gets deleted here, too */ | ||
1831 | |||
1832 | asecs = hndl->secns; | ||
1833 | if (alloc) | ||
1834 | for (curr_sect = (hndl->secn_count >> 1); curr_sect > 0; | ||
1835 | curr_sect -= 1) { | ||
1836 | asecs->name = NULL; | ||
1837 | alloc->dload_deallocate(alloc, asecs++); | ||
1838 | } | ||
1839 | root = hndl->dm.root; | ||
1840 | if (!root) { | ||
1841 | /* no debug list contains this module */ | ||
1842 | goto func_end; | ||
1843 | } | ||
1844 | if (!hndl->dm.dbthis) { /* no target-side dllview record to clean up */ | ||
1845 | goto loop_end; | ||
1846 | } | ||
1847 | /* Retrieve memory context in which .dllview was allocated */ | ||
1848 | dllview_info.context = hndl->dm.context; | ||
1849 | if (hndl->dm.prev == hndl) | ||
1850 | goto exitunltgt; | ||
1851 | |||
1852 | /* target-side dllview record is in list */ | ||
1853 | /* dequeue this record from our GPP-side mirror list */ | ||
1854 | hndl->dm.prev->dm.next = hndl->dm.next; | ||
1855 | if (hndl->dm.next) | ||
1856 | hndl->dm.next->dm.prev = hndl->dm.prev; | ||
1857 | /* Update next_module of previous entry in target list. | ||
1858 | * We are using mhdr here as a surrogate for either a | ||
1859 | * struct modules_header or a dll_module */ | ||
1860 | if (hndl->dm.next) { | ||
1861 | mhdr.first_module = TADDR_TO_TDATA(hndl->dm.next->dm.dbthis); | ||
1862 | mhdr.first_module_size = hndl->dm.next->dm.dbsiz; | ||
1863 | } else { | ||
1864 | mhdr.first_module = 0; | ||
1865 | mhdr.first_module_size = 0; | ||
1866 | } | ||
1867 | if (!init) | ||
1868 | goto exitunltgt; | ||
1869 | |||
1870 | if (!init->connect(init)) { | ||
1871 | dload_syms_error(syms, iconnect); | ||
1872 | errcount += 1; | ||
1873 | goto exitunltgt; | ||
1874 | } | ||
1875 | /* swap bytes in the module header, if needed */ | ||
1876 | if (TARGET_ENDIANNESS_DIFFERS(hndl->secn_count & 0x1)) { | ||
1877 | swap_words(&mhdr, sizeof(struct modules_header) - sizeof(u16), | ||
1878 | MODULES_HEADER_BITMAP); | ||
1879 | } | ||
1880 | if (!init->writemem(init, &mhdr, hndl->dm.prev->dm.dbthis, | ||
1881 | &dllview_info, sizeof(struct modules_header) - | ||
1882 | sizeof(mhdr.update_flag))) { | ||
1883 | dload_syms_error(syms, dlvwrite); | ||
1884 | errcount += 1; | ||
1885 | } | ||
1886 | /* update change counter */ | ||
1887 | root->changes += 1; | ||
1888 | if (!init->writemem(init, &(root->changes), | ||
1889 | root->dbthis + HOST_TO_TADDR | ||
1890 | (sizeof(mhdr.first_module) + | ||
1891 | sizeof(mhdr.first_module_size)), | ||
1892 | &dllview_info, sizeof(mhdr.update_flag))) { | ||
1893 | dload_syms_error(syms, dlvwrite); | ||
1894 | errcount += 1; | ||
1895 | } | ||
1896 | init->release(init); | ||
1897 | exitunltgt: | ||
1898 | /* release target storage */ | ||
1899 | dllview_info.size = TDATA_TO_TADDR(hndl->dm.dbsiz); | ||
1900 | dllview_info.load_addr = hndl->dm.dbthis; | ||
1901 | if (alloc) | ||
1902 | alloc->dload_deallocate(alloc, &dllview_info); | ||
1903 | root->refcount -= 1; | ||
1904 | /* reached directly when no target-side dllview record exists */ | ||
1905 | loop_end: | ||
1906 | #ifndef DEBUG_HEADER_IN_LOADER | ||
1907 | if (root->refcount <= 0) { | ||
1908 | /* if all references gone, blow off the header */ | ||
1909 | /* our root symbol may be gone due to the Purge above, | ||
1910 | * but if not, do not destroy the root */ | ||
1911 | if (syms->find_matching_symbol | ||
1912 | (syms, loader_dllview_root) == NULL) | ||
1913 | syms->dload_deallocate(syms, root); | ||
1914 | } | ||
1915 | #endif | ||
1916 | func_end: | ||
1917 | /* reached directly when no debug list contains this module */ | ||
1918 | syms->dload_deallocate(syms, mhandle); /* release our storage */ | ||
1919 | return errcount; | ||
1920 | } /* dynamic_unload_module */ | ||
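For orientation, a minimal sketch of how a caller might drive this unload path. The names in the sketch are placeholders for the handle returned by dynamic_load_module and the interface objects that were passed to it; the printk-style reporting is illustrative only and assumes <linux/printk.h>:

static void example_unload(void *mhandle,
			   struct dynamic_loader_sym *syms,
			   struct dynamic_loader_allocate *alloc,
			   struct dynamic_loader_initialize *init)
{
	/* dynamic_unload_module returns the number of errors detected */
	int errs = dynamic_unload_module(mhandle, syms, alloc, init);

	if (errs)
		pr_warn("dynamic_unload_module: %d error(s)\n", errs);
}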
1921 | |||
1922 | #if BITS_PER_AU > BITS_PER_BYTE | ||
1923 | /************************************************************************* | ||
1924 | * Procedure unpack_name | ||
1925 | * | ||
1926 | * Parameters: | ||
1927 | * soffset Byte offset into the string table | ||
1928 | * | ||
1929 | * Effect: | ||
1930 | * Returns a pointer to the string specified by the offset supplied, or | ||
1931 | * NULL for error. | ||
1932 | * | ||
1933 | *********************************************************************** */ | ||
1934 | static char *unpack_name(struct dload_state *dlthis, u32 soffset) | ||
1935 | { | ||
1936 | u8 tmp, *src; | ||
1937 | char *dst; | ||
1938 | |||
1939 | if (soffset >= dlthis->dfile_hdr.df_strtab_size) { | ||
1940 | dload_error(dlthis, "Bad string table offset " FMT_UI32, | ||
1941 | soffset); | ||
1942 | return NULL; | ||
1943 | } | ||
1944 | src = (uint_least8_t *) dlthis->str_head + | ||
1945 | (soffset >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)); | ||
1946 | dst = dlthis->str_temp; | ||
1947 | if (soffset & 1) | ||
1948 | *dst++ = *src++; /* only 1 character in first word */ | ||
1949 | do { | ||
1950 | tmp = *src++; | ||
1951 | *dst = (tmp >> BITS_PER_BYTE); | ||
1952 | if (!(*dst++)) | ||
1953 | break; | ||
1954 | } while ((*dst++ = tmp & BYTE_MASK)); | ||
1955 | dlthis->temp_len = dst - dlthis->str_temp; | ||
1956 | /* squirrel away length including terminating null */ | ||
1957 | return dlthis->str_temp; | ||
1958 | } /* unpack_name */ | ||
1959 | #endif | ||
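unpack_name above undoes the two-characters-per-addressable-unit packing used when BITS_PER_AU is 16 (the earlier character of each pair sits in the high byte). A standalone, host-side illustration of the same convention, not part of the driver:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* ".text" packed two characters per 16-bit word, high byte first */
	const uint16_t packed[] = { ('.' << 8) | 't', ('e' << 8) | 'x',
				    ('t' << 8) | 0 };
	char out[8], *dst = out;
	size_t i;

	for (i = 0; i < sizeof(packed) / sizeof(packed[0]); i++) {
		*dst++ = packed[i] >> 8;	/* earlier character */
		if (!dst[-1])
			break;
		*dst++ = packed[i] & 0xff;	/* later character */
		if (!dst[-1])
			break;
	}
	printf("%s\n", out);	/* prints ".text" */
	return 0;
}
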
diff --git a/drivers/staging/tidspbridge/dynload/dload_internal.h b/drivers/staging/tidspbridge/dynload/dload_internal.h deleted file mode 100644 index b9d079b96190..000000000000 --- a/drivers/staging/tidspbridge/dynload/dload_internal.h +++ /dev/null | |||
@@ -1,344 +0,0 @@ | |||
1 | /* | ||
2 | * dload_internal.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef _DLOAD_INTERNAL_ | ||
18 | #define _DLOAD_INTERNAL_ | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | |||
22 | /* | ||
23 | * Internal state definitions for the dynamic loader | ||
24 | */ | ||
25 | |||
26 | /* type used for relocation intermediate results */ | ||
27 | typedef s32 rvalue; | ||
28 | |||
29 | /* unsigned version of same; must have at least as many bits */ | ||
30 | typedef u32 urvalue; | ||
31 | |||
32 | /* | ||
33 | * Dynamic loader configuration constants | ||
34 | */ | ||
35 | /* error issued if input has more sections than this limit */ | ||
36 | #define REASONABLE_SECTION_LIMIT 100 | ||
37 | |||
38 | /* (Addressable unit) value used to clear BSS section */ | ||
39 | #define DLOAD_FILL_BSS 0 | ||
40 | |||
41 | /* | ||
42 | * Reorder maps explained (?) | ||
43 | * | ||
44 | * The doff file format defines a 32-bit pattern used to determine the | ||
45 | * byte order of an image being read. That value is | ||
46 | * BYTE_RESHUFFLE_VALUE == 0x00010203 | ||
47 | * For purposes of the reorder routine, we would rather have the all-is-OK | ||
48 | * for 32-bits pattern be 0x03020100. This first macro makes the | ||
49 | * translation from doff file header value to MAP value: */ | ||
50 | #define REORDER_MAP(rawmap) ((rawmap) ^ 0x3030303) | ||
51 | /* This translation is made in dload_headers. Thereafter, the all-is-OK | ||
52 | * value for the maps stored in dlthis is REORDER_MAP(BYTE_RESHUFFLE_VALUE). | ||
53 | * But sadly, not all bits of the doff file are 32-bit integers. | ||
54 | * The notable exceptions are strings and image bits. | ||
55 | * Strings obey host byte order: */ | ||
56 | #if defined(_BIG_ENDIAN) | ||
57 | #define HOST_BYTE_ORDER(cookedmap) ((cookedmap) ^ 0x3030303) | ||
58 | #else | ||
59 | #define HOST_BYTE_ORDER(cookedmap) (cookedmap) | ||
60 | #endif | ||
61 | /* Target bits consist of target AUs (could be bytes, or 16-bits, | ||
62 | * or 32-bits) stored as an array in host order. A target order | ||
63 | * map is defined by: */ | ||
64 | #if !defined(_BIG_ENDIAN) || TARGET_AU_BITS > 16 | ||
65 | #define TARGET_ORDER(cookedmap) (cookedmap) | ||
66 | #elif TARGET_AU_BITS > 8 | ||
67 | #define TARGET_ORDER(cookedmap) ((cookedmap) ^ 0x2020202) | ||
68 | #else | ||
69 | #define TARGET_ORDER(cookedmap) ((cookedmap) ^ 0x3030303) | ||
70 | #endif | ||
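A quick host-side check of the map arithmetic described above; the two macros are repeated locally so the snippet stands alone and is illustrative only:

#include <stdio.h>

#define BYTE_RESHUFFLE_VALUE	0x00010203
#define REORDER_MAP(rawmap)	((rawmap) ^ 0x3030303)

int main(void)
{
	/* prints 0x03020100, the "all is OK" cooked value mentioned above */
	printf("%#010x\n", REORDER_MAP(BYTE_RESHUFFLE_VALUE));
	return 0;
}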
71 | |||
72 | /* forward declaration for handle returned by dynamic loader */ | ||
73 | struct my_handle; | ||
74 | |||
75 | /* | ||
76 | * a list of module handles, which mirrors the debug list on the target | ||
77 | */ | ||
78 | struct dbg_mirror_root { | ||
79 | /* must be same as dbg_mirror_list; __DLModules address on target */ | ||
80 | u32 dbthis; | ||
81 | struct my_handle *next; /* must be same as dbg_mirror_list */ | ||
82 | u16 changes; /* change counter */ | ||
83 | u16 refcount; /* number of modules referencing this root */ | ||
84 | }; | ||
85 | |||
86 | struct dbg_mirror_list { | ||
87 | u32 dbthis; | ||
88 | struct my_handle *next, *prev; | ||
89 | struct dbg_mirror_root *root; | ||
90 | u16 dbsiz; | ||
91 | u32 context; /* Save context for .dllview memory allocation */ | ||
92 | }; | ||
93 | |||
94 | #define VARIABLE_SIZE 1 | ||
95 | /* | ||
96 | * the structure we actually return as an opaque module handle | ||
97 | */ | ||
98 | struct my_handle { | ||
99 | struct dbg_mirror_list dm; /* !!! must be first !!! */ | ||
100 | /* sections following << 1, LSB is set for big-endian target */ | ||
101 | u16 secn_count; | ||
102 | struct ldr_section_info secns[VARIABLE_SIZE]; | ||
103 | }; | ||
104 | #define MY_HANDLE_SIZE (sizeof(struct my_handle) -\ | ||
105 | sizeof(struct ldr_section_info)) | ||
106 | /* real size of my_handle */ | ||
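Because secns is a one-element placeholder array, the real handle must be allocated with room for every section. A hedged sketch of the sizing, assuming nsecs is the number of sections to be recorded and mysym is the host-side dynamic_loader_sym interface (the actual allocation site lives elsewhere in dload.c and may differ):

static struct my_handle *alloc_handle(struct dynamic_loader_sym *mysym,
				      unsigned nsecs)
{
	unsigned bytes = MY_HANDLE_SIZE +
			 nsecs * sizeof(struct ldr_section_info);

	return (struct my_handle *)mysym->dload_allocate(mysym, bytes);
}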
107 | |||
108 | /* | ||
109 | * reduced symbol structure used for symbols during relocation | ||
110 | */ | ||
111 | struct local_symbol { | ||
112 | s32 value; /* Relocated symbol value */ | ||
113 | s32 delta; /* Original value in input file */ | ||
114 | s16 secnn; /* section number */ | ||
115 | s16 sclass; /* symbol class */ | ||
116 | }; | ||
117 | |||
118 | /* | ||
119 | * Trampoline data structures | ||
120 | */ | ||
121 | #define TRAMP_NO_GEN_AVAIL 65535 | ||
122 | #define TRAMP_SYM_PREFIX "__$dbTR__" | ||
123 | #define TRAMP_SECT_NAME ".dbTR" | ||
124 | /* MUST MATCH THE LENGTH ABOVE!! */ | ||
125 | #define TRAMP_SYM_PREFIX_LEN 9 | ||
126 | /* Includes NULL termination */ | ||
127 | #define TRAMP_SYM_HEX_ASCII_LEN 9 | ||
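TRAMP_SYM_PREFIX_LEN counts the nine prefix characters and TRAMP_SYM_HEX_ASCII_LEN counts eight hex digits plus the terminating NUL, so a generated symbol name needs 18 characters of storage. A hedged sketch of composing such a name (what the hex digits encode in the real loader is an assumption here):

static void make_tramp_sym_name(char *name, u32 value)
{
	/* 'name' needs TRAMP_SYM_PREFIX_LEN + TRAMP_SYM_HEX_ASCII_LEN bytes,
	 * e.g. "__$dbTR__0000002a" for value == 0x2a */
	snprintf(name, TRAMP_SYM_PREFIX_LEN + TRAMP_SYM_HEX_ASCII_LEN,
		 "%s%08x", TRAMP_SYM_PREFIX, value);
}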
128 | |||
129 | #define GET_CONTAINER(ptr, type, field) ((type *)((unsigned long)ptr -\ | ||
130 | (unsigned long)(&((type *)0)->field))) | ||
131 | #ifndef FIELD_OFFSET | ||
132 | #define FIELD_OFFSET(type, field) ((unsigned long)(&((type *)0)->field)) | ||
133 | #endif | ||
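GET_CONTAINER is the same idea as the kernel's container_of(): given a pointer to an embedded member, recover the enclosing structure by subtracting the member's offset. An illustration using the my_handle layout defined above:

static struct my_handle *handle_from_mirror(struct dbg_mirror_list *dm)
{
	/* subtracts the byte offset of 'dm' within struct my_handle */
	return GET_CONTAINER(dm, struct my_handle, dm);
}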
134 | |||
135 | /* | ||
136 | The trampoline code for the target is located in a table called | ||
137 | "tramp_gen_info" with is indexed by looking up the index in the table | ||
138 | "tramp_map". The tramp_map index is acquired using the target | ||
139 | HASH_FUNC on the relocation type that caused the trampoline. Each | ||
140 | trampoline code table entry MUST follow this format: | ||
141 | |||
142 | |----------------------------------------------| | ||
143 | | tramp_gen_code_hdr | | ||
144 | |----------------------------------------------| | ||
145 | | Trampoline image code | | ||
146 | | (the raw instruction code for the target) | | ||
147 | |----------------------------------------------| | ||
148 | | Relocation entries for the image code | | ||
149 | |----------------------------------------------| | ||
150 | |||
151 | This is very similar to how image data is laid out in the DOFF file | ||
152 | itself. | ||
153 | */ | ||
154 | struct tramp_gen_code_hdr { | ||
155 | u32 tramp_code_size; /* in BYTES */ | ||
156 | u32 num_relos; | ||
157 | u32 relo_offset; /* in BYTES */ | ||
158 | }; | ||
159 | |||
160 | struct tramp_img_pkt { | ||
161 | struct tramp_img_pkt *next; /* MUST BE FIRST */ | ||
162 | u32 base; | ||
163 | struct tramp_gen_code_hdr hdr; | ||
164 | u8 payload[VARIABLE_SIZE]; | ||
165 | }; | ||
166 | |||
167 | struct tramp_img_dup_relo { | ||
168 | struct tramp_img_dup_relo *next; | ||
169 | struct reloc_record_t relo; | ||
170 | }; | ||
171 | |||
172 | struct tramp_img_dup_pkt { | ||
173 | struct tramp_img_dup_pkt *next; /* MUST BE FIRST */ | ||
174 | s16 secnn; | ||
175 | u32 offset; | ||
176 | struct image_packet_t img_pkt; | ||
177 | struct tramp_img_dup_relo *relo_chain; | ||
178 | |||
179 | /* PAYLOAD OF IMG PKT FOLLOWS */ | ||
180 | }; | ||
181 | |||
182 | struct tramp_sym { | ||
183 | struct tramp_sym *next; /* MUST BE FIRST */ | ||
184 | u32 index; | ||
185 | u32 str_index; | ||
186 | struct local_symbol sym_info; | ||
187 | }; | ||
188 | |||
189 | struct tramp_string { | ||
190 | struct tramp_string *next; /* MUST BE FIRST */ | ||
191 | u32 index; | ||
192 | char str[VARIABLE_SIZE]; /* NULL terminated */ | ||
193 | }; | ||
194 | |||
195 | struct tramp_info { | ||
196 | u32 tramp_sect_next_addr; | ||
197 | struct ldr_section_info sect_info; | ||
198 | |||
199 | struct tramp_sym *symbol_head; | ||
200 | struct tramp_sym *symbol_tail; | ||
201 | u32 tramp_sym_next_index; | ||
202 | struct local_symbol *final_sym_table; | ||
203 | |||
204 | struct tramp_string *string_head; | ||
205 | struct tramp_string *string_tail; | ||
206 | u32 tramp_string_next_index; | ||
207 | u32 tramp_string_size; | ||
208 | char *final_string_table; | ||
209 | |||
210 | struct tramp_img_pkt *tramp_pkts; | ||
211 | struct tramp_img_dup_pkt *dup_pkts; | ||
212 | }; | ||
213 | |||
214 | /* | ||
215 | * States of the .cinit state machine | ||
216 | */ | ||
217 | enum cinit_mode { | ||
218 | CI_COUNT = 0, /* expecting a count */ | ||
219 | CI_ADDRESS, /* expecting an address */ | ||
220 | #if CINIT_ALIGN < CINIT_ADDRESS /* handle case of partial address field */ | ||
221 | CI_PARTADDRESS, /* have only part of the address */ | ||
222 | #endif | ||
223 | CI_COPY, /* in the middle of copying data */ | ||
224 | CI_DONE /* end of .cinit table */ | ||
225 | }; | ||
226 | |||
227 | /* | ||
228 | * The internal state of the dynamic loader, which is passed around as | ||
229 | * an object | ||
230 | */ | ||
231 | struct dload_state { | ||
232 | struct dynamic_loader_stream *strm; /* The module input stream */ | ||
233 | struct dynamic_loader_sym *mysym; /* Symbols for this session */ | ||
234 | /* target memory allocator */ | ||
235 | struct dynamic_loader_allocate *myalloc; | ||
236 | struct dynamic_loader_initialize *myio; /* target memory initializer */ | ||
237 | unsigned myoptions; /* Options parameter dynamic_load_module */ | ||
238 | |||
239 | char *str_head; /* Pointer to string table */ | ||
240 | #if BITS_PER_AU > BITS_PER_BYTE | ||
241 | char *str_temp; /* Pointer to temporary buffer for strings */ | ||
242 | /* big enough to hold longest string */ | ||
243 | unsigned temp_len; /* length of last temporary string */ | ||
244 | char *xstrings; /* Pointer to buffer for expanded */ | ||
245 | /* strings for sec names */ | ||
246 | #endif | ||
247 | /* Total size of strings for DLLView section names */ | ||
248 | unsigned debug_string_size; | ||
249 | /* Pointer to parallel section info for allocated sections only */ | ||
250 | struct doff_scnhdr_t *sect_hdrs; /* Pointer to section table */ | ||
251 | struct ldr_section_info *ldr_sections; | ||
252 | #if TMS32060 | ||
253 | /* The address of the start of the .bss section */ | ||
254 | ldr_addr bss_run_base; | ||
255 | #endif | ||
256 | struct local_symbol *local_symtab; /* Relocation symbol table */ | ||
257 | |||
258 | /* pointer to DL section info for the section being relocated */ | ||
259 | struct ldr_section_info *image_secn; | ||
260 | /* change in run address for current section during relocation */ | ||
261 | ldr_addr delta_runaddr; | ||
262 | ldr_addr image_offset; /* offset of current packet in section */ | ||
263 | enum cinit_mode cinit_state; /* current state of cload_cinit() */ | ||
264 | int cinit_count; /* the current count */ | ||
265 | ldr_addr cinit_addr; /* the current address */ | ||
266 | s16 cinit_page; /* the current page */ | ||
267 | /* Handle to be returned by dynamic_load_module */ | ||
268 | struct my_handle *myhandle; | ||
269 | unsigned dload_errcount; /* Total # of errors reported so far */ | ||
270 | /* Number of target sections that require allocation and relocation */ | ||
271 | unsigned allocated_secn_count; | ||
272 | #ifndef TARGET_ENDIANNESS | ||
273 | int big_e_target; /* Target data in big-endian format */ | ||
274 | #endif | ||
275 | /* map for reordering bytes, 0 if not needed */ | ||
276 | u32 reorder_map; | ||
277 | struct doff_filehdr_t dfile_hdr; /* DOFF file header structure */ | ||
278 | struct doff_verify_rec_t verify; /* Verify record */ | ||
279 | |||
280 | struct tramp_info tramp; /* Trampoline data, if needed */ | ||
281 | |||
282 | int relstkidx; /* index into relocation value stack */ | ||
283 | /* relocation value stack used in relexp.c */ | ||
284 | rvalue relstk[STATIC_EXPR_STK_SIZE]; | ||
285 | |||
286 | }; | ||
287 | |||
288 | #ifdef TARGET_ENDIANNESS | ||
289 | #define TARGET_BIG_ENDIAN TARGET_ENDIANNESS | ||
290 | #else | ||
291 | #define TARGET_BIG_ENDIAN (dlthis->big_e_target) | ||
292 | #endif | ||
293 | |||
294 | /* | ||
295 | * Exports from cload.c to rest of the world | ||
296 | */ | ||
297 | extern void dload_error(struct dload_state *dlthis, const char *errtxt, ...); | ||
298 | extern void dload_syms_error(struct dynamic_loader_sym *syms, | ||
299 | const char *errtxt, ...); | ||
300 | extern void dload_headers(struct dload_state *dlthis); | ||
301 | extern void dload_strings(struct dload_state *dlthis, bool sec_names_only); | ||
302 | extern void dload_sections(struct dload_state *dlthis); | ||
303 | extern void dload_reorder(void *data, int dsiz, u32 map); | ||
304 | extern u32 dload_checksum(void *data, unsigned siz); | ||
305 | |||
306 | #if HOST_ENDIANNESS | ||
307 | extern uint32_t dload_reverse_checksum(void *data, unsigned siz); | ||
308 | #if (TARGET_AU_BITS > 8) && (TARGET_AU_BITS < 32) | ||
309 | extern uint32_t dload_reverse_checksum16(void *data, unsigned siz); | ||
310 | #endif | ||
311 | #endif | ||
312 | |||
313 | /* | ||
314 | * exported by reloc.c | ||
315 | */ | ||
316 | extern void dload_relocate(struct dload_state *dlthis, tgt_au_t *data, | ||
317 | struct reloc_record_t *rp, bool *tramps_generated, | ||
318 | bool second_pass); | ||
319 | |||
320 | extern rvalue dload_unpack(struct dload_state *dlthis, tgt_au_t *data, | ||
321 | int fieldsz, int offset, unsigned sgn); | ||
322 | |||
323 | extern int dload_repack(struct dload_state *dlthis, rvalue val, tgt_au_t *data, | ||
324 | int fieldsz, int offset, unsigned sgn); | ||
325 | |||
326 | /* | ||
327 | * exported by tramp.c | ||
328 | */ | ||
329 | extern bool dload_tramp_avail(struct dload_state *dlthis, | ||
330 | struct reloc_record_t *rp); | ||
331 | |||
332 | int dload_tramp_generate(struct dload_state *dlthis, s16 secnn, | ||
333 | u32 image_offset, struct image_packet_t *ipacket, | ||
334 | struct reloc_record_t *rp); | ||
335 | |||
336 | extern int dload_tramp_pkt_udpate(struct dload_state *dlthis, | ||
337 | s16 secnn, u32 image_offset, | ||
338 | struct image_packet_t *ipacket); | ||
339 | |||
340 | extern int dload_tramp_finalize(struct dload_state *dlthis); | ||
341 | |||
342 | extern void dload_tramp_cleanup(struct dload_state *dlthis); | ||
343 | |||
344 | #endif /* _DLOAD_INTERNAL_ */ | ||
diff --git a/drivers/staging/tidspbridge/dynload/doff.h b/drivers/staging/tidspbridge/dynload/doff.h deleted file mode 100644 index a7c3145746ee..000000000000 --- a/drivers/staging/tidspbridge/dynload/doff.h +++ /dev/null | |||
@@ -1,354 +0,0 @@ | |||
1 | /* | ||
2 | * doff.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Structures & definitions used for dynamically loaded modules file format. | ||
7 | * This format is a reformatted version of COFF. It optimizes the layout for | ||
8 | * the dynamic loader. | ||
9 | * | ||
10 | * .dof files, when viewed as a sequence of 32-bit integers, look the same | ||
11 | * on big-endian and little-endian machines. | ||
12 | * | ||
13 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
14 | * | ||
15 | * This package is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License version 2 as | ||
17 | * published by the Free Software Foundation. | ||
18 | * | ||
19 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
20 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | */ | ||
23 | |||
24 | #ifndef _DOFF_H | ||
25 | #define _DOFF_H | ||
26 | |||
27 | |||
28 | #define BYTE_RESHUFFLE_VALUE 0x00010203 | ||
29 | |||
30 | /* DOFF file header containing fields categorizing the remainder of the file */ | ||
31 | struct doff_filehdr_t { | ||
32 | |||
33 | /* string table size, including filename, in bytes */ | ||
34 | u32 df_strtab_size; | ||
35 | |||
36 | /* entry point if one exists */ | ||
37 | u32 df_entrypt; | ||
38 | |||
39 | /* identifies byte ordering of file; | ||
40 | * always set to BYTE_RESHUFFLE_VALUE */ | ||
41 | u32 df_byte_reshuffle; | ||
42 | |||
43 | /* Size of the string table up to and including the last section name */ | ||
44 | /* Size includes the name of the COFF file also */ | ||
45 | u32 df_scn_name_size; | ||
46 | |||
47 | #ifndef _BIG_ENDIAN | ||
48 | /* number of symbols */ | ||
49 | u16 df_no_syms; | ||
50 | |||
51 | /* length in bytes of the longest string, including terminating NULL */ | ||
52 | /* excludes the name of the file */ | ||
53 | u16 df_max_str_len; | ||
54 | |||
55 | /* total number of sections including no-load ones */ | ||
56 | u16 df_no_scns; | ||
57 | |||
58 | /* number of sections containing target code allocated or downloaded */ | ||
59 | u16 df_target_scns; | ||
60 | |||
61 | /* unique id for dll file format & version */ | ||
62 | u16 df_doff_version; | ||
63 | |||
64 | /* identifies ISA */ | ||
65 | u16 df_target_id; | ||
66 | |||
67 | /* useful file flags */ | ||
68 | u16 df_flags; | ||
69 | |||
70 | /* section reference for entry point, N_UNDEF for none, */ | ||
71 | /* N_ABS for absolute address */ | ||
72 | s16 df_entry_secn; | ||
73 | #else | ||
74 | /* length of the longest string, including terminating NULL */ | ||
75 | u16 df_max_str_len; | ||
76 | |||
77 | /* number of symbols */ | ||
78 | u16 df_no_syms; | ||
79 | |||
80 | /* number of sections containing target code allocated or downloaded */ | ||
81 | u16 df_target_scns; | ||
82 | |||
83 | /* total number of sections including no-load ones */ | ||
84 | u16 df_no_scns; | ||
85 | |||
86 | /* identifies ISA */ | ||
87 | u16 df_target_id; | ||
88 | |||
89 | /* unique id for dll file format & version */ | ||
90 | u16 df_doff_version; | ||
91 | |||
92 | /* section reference for entry point, N_UNDEF for none, */ | ||
93 | /* N_ABS for absolute address */ | ||
94 | s16 df_entry_secn; | ||
95 | |||
96 | /* useful file flags */ | ||
97 | u16 df_flags; | ||
98 | #endif | ||
99 | /* checksum for file header record */ | ||
100 | u32 df_checksum; | ||
101 | |||
102 | }; | ||
103 | |||
104 | /* flags in the df_flags field */ | ||
105 | #define DF_LITTLE 0x100 | ||
106 | #define DF_BIG 0x200 | ||
107 | #define DF_BYTE_ORDER (DF_LITTLE | DF_BIG) | ||
108 | |||
109 | /* Supported processors */ | ||
110 | #define TMS470_ID 0x97 | ||
111 | #define LEAD_ID 0x98 | ||
112 | #define TMS32060_ID 0x99 | ||
113 | #define LEAD3_ID 0x9c | ||
114 | |||
115 | /* Primary processor for loading */ | ||
116 | #if TMS32060 | ||
117 | #define TARGET_ID TMS32060_ID | ||
118 | #endif | ||
119 | |||
120 | /* Verification record containing values used to test integrity of the bits */ | ||
121 | struct doff_verify_rec_t { | ||
122 | |||
123 | /* time and date stamp */ | ||
124 | u32 dv_timdat; | ||
125 | |||
126 | /* checksum for all section records */ | ||
127 | u32 dv_scn_rec_checksum; | ||
128 | |||
129 | /* checksum for string table */ | ||
130 | u32 dv_str_tab_checksum; | ||
131 | |||
132 | /* checksum for symbol table */ | ||
133 | u32 dv_sym_tab_checksum; | ||
134 | |||
135 | /* checksum for verification record */ | ||
136 | u32 dv_verify_rec_checksum; | ||
137 | |||
138 | }; | ||
139 | |||
140 | /* String table is an array of null-terminated strings. The first entry is | ||
141 | * the filename, which is added by DLLcreate. No new structure definitions | ||
142 | * are required. | ||
143 | */ | ||
144 | |||
145 | /* Section Records including information on the corresponding image packets */ | ||
146 | /* | ||
147 | * !!WARNING!! | ||
148 | * | ||
149 | * This structure is expected to match in form ldr_section_info in | ||
150 | * dynamic_loader.h | ||
151 | */ | ||
152 | |||
153 | struct doff_scnhdr_t { | ||
154 | |||
155 | s32 ds_offset; /* offset into string table of name */ | ||
156 | s32 ds_paddr; /* RUN address, in target AU */ | ||
157 | s32 ds_vaddr; /* LOAD address, in target AU */ | ||
158 | s32 ds_size; /* section size, in target AU */ | ||
159 | #ifndef _BIG_ENDIAN | ||
160 | u16 ds_page; /* memory page id */ | ||
161 | u16 ds_flags; /* section flags */ | ||
162 | #else | ||
163 | u16 ds_flags; /* section flags */ | ||
164 | u16 ds_page; /* memory page id */ | ||
165 | #endif | ||
166 | u32 ds_first_pkt_offset; | ||
167 | /* Absolute byte offset into the file */ | ||
168 | /* where the first image record resides */ | ||
169 | |||
170 | s32 ds_nipacks; /* number of image packets */ | ||
171 | |||
172 | }; | ||
173 | |||
174 | /* Symbol table entry */ | ||
175 | struct doff_syment_t { | ||
176 | |||
177 | s32 dn_offset; /* offset into string table of name */ | ||
178 | s32 dn_value; /* value of symbol */ | ||
179 | #ifndef _BIG_ENDIAN | ||
180 | s16 dn_scnum; /* section number */ | ||
181 | s16 dn_sclass; /* storage class */ | ||
182 | #else | ||
183 | s16 dn_sclass; /* storage class */ | ||
184 | s16 dn_scnum; /* section number, 1-based */ | ||
185 | #endif | ||
186 | |||
187 | }; | ||
188 | |||
189 | /* special values for dn_scnum */ | ||
190 | #define DN_UNDEF 0 /* undefined symbol */ | ||
191 | #define DN_ABS (-1) /* value of symbol is absolute */ | ||
192 | /* special values for dn_sclass */ | ||
193 | #define DN_EXT 2 | ||
194 | #define DN_STATLAB 20 | ||
195 | #define DN_EXTLAB 21 | ||
196 | |||
197 | /* Default value of image bits in packet */ | ||
198 | /* Configurable by user on the command line */ | ||
199 | #define IMAGE_PACKET_SIZE 1024 | ||
200 | |||
201 | /* An image packet contains a chunk of data from a section along with */ | ||
202 | /* information necessary for its processing. */ | ||
203 | struct image_packet_t { | ||
204 | |||
205 | s32 num_relocs; /* number of relocations for */ | ||
206 | /* this packet */ | ||
207 | |||
208 | s32 packet_size; /* number of bytes in array */ | ||
209 | /* "bits" occupied by */ | ||
210 | /* valid data. Could be */ | ||
211 | /* < IMAGE_PACKET_SIZE to */ | ||
212 | /* prevent splitting a */ | ||
213 | /* relocation across packets. */ | ||
214 | /* Last packet of a section */ | ||
215 | /* will most likely contain */ | ||
216 | /* < IMAGE_PACKET_SIZE bytes */ | ||
217 | /* of valid data */ | ||
218 | |||
219 | s32 img_chksum; /* Checksum for image packet */ | ||
220 | /* and the corresponding */ | ||
221 | /* relocation records */ | ||
222 | |||
223 | u8 *img_data; /* Actual data in section */ | ||
224 | |||
225 | }; | ||
226 | |||
227 | /* The relocation structure definition matches the COFF version. Offsets */ | ||
228 | /* however are relative to the image packet base not the section base. */ | ||
229 | struct reloc_record_t { | ||
230 | |||
231 | s32 vaddr; | ||
232 | |||
233 | /* expressed in target AUs */ | ||
234 | |||
235 | union { | ||
236 | struct { | ||
237 | #ifndef _BIG_ENDIAN | ||
238 | u8 _offset; /* bit offset of rel fld */ | ||
239 | u8 _fieldsz; /* size of rel fld */ | ||
240 | u8 _wordsz; /* # bytes containing rel fld */ | ||
241 | u8 _dum1; | ||
242 | u16 _dum2; | ||
243 | u16 _type; | ||
244 | #else | ||
245 | unsigned _dum1:8; | ||
246 | unsigned _wordsz:8; /* # bytes containing rel fld */ | ||
247 | unsigned _fieldsz:8; /* size of rel fld */ | ||
248 | unsigned _offset:8; /* bit offset of rel fld */ | ||
249 | u16 _type; | ||
250 | u16 _dum2; | ||
251 | #endif | ||
252 | } _r_field; | ||
253 | |||
254 | struct { | ||
255 | u32 _spc; /* image packet relative PC */ | ||
256 | #ifndef _BIG_ENDIAN | ||
257 | u16 _dum; | ||
258 | u16 _type; /* relocation type */ | ||
259 | #else | ||
260 | u16 _type; /* relocation type */ | ||
261 | u16 _dum; | ||
262 | #endif | ||
263 | } _r_spc; | ||
264 | |||
265 | struct { | ||
266 | u32 _uval; /* constant value */ | ||
267 | #ifndef _BIG_ENDIAN | ||
268 | u16 _dum; | ||
269 | u16 _type; /* relocation type */ | ||
270 | #else | ||
271 | u16 _type; /* relocation type */ | ||
272 | u16 _dum; | ||
273 | #endif | ||
274 | } _r_uval; | ||
275 | |||
276 | struct { | ||
277 | s32 _symndx; /* 32-bit sym tbl index */ | ||
278 | #ifndef _BIG_ENDIAN | ||
279 | u16 _disp; /* extra addr encode data */ | ||
280 | u16 _type; /* relocation type */ | ||
281 | #else | ||
282 | u16 _type; /* relocation type */ | ||
283 | u16 _disp; /* extra addr encode data */ | ||
284 | #endif | ||
285 | } _r_sym; | ||
286 | } _u_reloc; | ||
287 | |||
288 | }; | ||
289 | |||
290 | /* abbreviations for convenience */ | ||
291 | #ifndef TYPE | ||
292 | #define TYPE _u_reloc._r_sym._type | ||
293 | #define UVAL _u_reloc._r_uval._uval | ||
294 | #define SYMNDX _u_reloc._r_sym._symndx | ||
295 | #define OFFSET _u_reloc._r_field._offset | ||
296 | #define FIELDSZ _u_reloc._r_field._fieldsz | ||
297 | #define WORDSZ _u_reloc._r_field._wordsz | ||
298 | #define R_DISP _u_reloc._r_sym._disp | ||
299 | #endif | ||
300 | |||
301 | /**************************************************************************** */ | ||
302 | /* */ | ||
303 | /* Important DOFF macros used for file processing */ | ||
304 | /* */ | ||
305 | /**************************************************************************** */ | ||
306 | |||
307 | /* DOFF Versions */ | ||
308 | #define DOFF0 0 | ||
309 | |||
310 | /* Return the address/size >= to addr that is at a 32-bit boundary */ | ||
311 | /* This assumes that a byte is 8 bits */ | ||
312 | #define DOFF_ALIGN(addr) (((addr) + 3) & ~3UL) | ||
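For example, DOFF_ALIGN(5) == 8, DOFF_ALIGN(8) == 8 and DOFF_ALIGN(13) == 16, assuming 8-bit bytes as the comment above states.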
313 | |||
314 | /**************************************************************************** */ | ||
315 | /* */ | ||
316 | /* The DOFF section header flags field is laid out as follows: */ | ||
317 | /* */ | ||
318 | /* Bits 0-3 : Section Type */ | ||
319 | /* Bit 4 : Set when section requires target memory to be allocated by DL */ | ||
320 | /* Bit 5 : Set when section requires downloading */ | ||
321 | /* Bits 8-11: Alignment, same as COFF */ | ||
322 | /* */ | ||
323 | /**************************************************************************** */ | ||
324 | |||
325 | /* Enum for DOFF section types (bits 0-3 of flag): See dynamic_loader.h */ | ||
326 | #define DS_SECTION_TYPE_MASK 0xF | ||
327 | /* DS_ALLOCATE indicates whether a section needs space on the target */ | ||
328 | #define DS_ALLOCATE_MASK 0x10 | ||
329 | /* DS_DOWNLOAD indicates that the loader needs to copy bits */ | ||
330 | #define DS_DOWNLOAD_MASK 0x20 | ||
331 | /* Section alignment requirement in AUs */ | ||
332 | #define DS_ALIGNMENT_SHIFT 8 | ||
333 | |||
334 | static inline bool dload_check_type(struct doff_scnhdr_t *sptr, u32 flag) | ||
335 | { | ||
336 | return (sptr->ds_flags & DS_SECTION_TYPE_MASK) == flag; | ||
337 | } | ||
338 | static inline bool ds_needs_allocation(struct doff_scnhdr_t *sptr) | ||
339 | { | ||
340 | return sptr->ds_flags & DS_ALLOCATE_MASK; | ||
341 | } | ||
342 | |||
343 | static inline bool ds_needs_download(struct doff_scnhdr_t *sptr) | ||
344 | { | ||
345 | return sptr->ds_flags & DS_DOWNLOAD_MASK; | ||
346 | } | ||
347 | |||
348 | static inline int ds_alignment(u16 ds_flags) | ||
349 | { | ||
350 | return 1 << ((ds_flags >> DS_ALIGNMENT_SHIFT) & DS_SECTION_TYPE_MASK); | ||
351 | } | ||
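As a worked example of the flag layout described above: a ds_flags value of 0x0230 decodes to section type 0, both DS_ALLOCATE and DS_DOWNLOAD set, and ds_alignment(0x0230) == (1 << 2) == 4 addressable units.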
352 | |||
353 | |||
354 | #endif /* _DOFF_H */ | ||
diff --git a/drivers/staging/tidspbridge/dynload/getsection.c b/drivers/staging/tidspbridge/dynload/getsection.c deleted file mode 100644 index e0b37714dd65..000000000000 --- a/drivers/staging/tidspbridge/dynload/getsection.c +++ /dev/null | |||
@@ -1,407 +0,0 @@ | |||
1 | /* | ||
2 | * getsection.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #include <dspbridge/getsection.h> | ||
18 | #include "header.h" | ||
19 | |||
20 | /* | ||
21 | * Error strings | ||
22 | */ | ||
23 | static const char readstrm[] = { "Error reading %s from input stream" }; | ||
24 | static const char seek[] = { "Set file position to %d failed" }; | ||
25 | static const char isiz[] = { "Bad image packet size %d" }; | ||
26 | static const char err_checksum[] = { "Checksum failed on %s" }; | ||
27 | |||
28 | static const char err_reloc[] = { "dload_get_section unable to read " | ||
29 | "sections containing relocation entries" | ||
30 | }; | ||
31 | |||
32 | #if BITS_PER_AU > BITS_PER_BYTE | ||
33 | static const char err_alloc[] = { "Syms->dload_allocate( %d ) failed" }; | ||
34 | static const char stbl[] = { "Bad string table offset " FMT_UI32 }; | ||
35 | #endif | ||
36 | |||
37 | /************************************************************** */ | ||
38 | /********************* SUPPORT FUNCTIONS ********************** */ | ||
39 | /************************************************************** */ | ||
40 | |||
41 | #if BITS_PER_AU > BITS_PER_BYTE | ||
42 | /************************************************************************** | ||
43 | * Procedure unpack_sec_name | ||
44 | * | ||
45 | * Parameters: | ||
46 | * dlthis Handle from dload_module_open for this module | ||
47 | * soffset Byte offset into the string table | ||
48 | * dst Place to store the expanded string | ||
49 | * | ||
50 | * Effect: | ||
51 | * Stores a string from the string table into the destination, expanding | ||
52 | * it in the process. Returns a pointer just past the end of the stored | ||
53 | * string on success, or NULL on failure. | ||
54 | * | ||
55 | ************************************************************************ */ | ||
56 | static char *unpack_sec_name(struct dload_state *dlthis, u32 soffset, char *dst) | ||
57 | { | ||
58 | u8 tmp, *src; | ||
59 | |||
60 | if (soffset >= dlthis->dfile_hdr.df_scn_name_size) { | ||
61 | dload_error(dlthis, stbl, soffset); | ||
62 | return NULL; | ||
63 | } | ||
64 | src = (u8 *) dlthis->str_head + | ||
65 | (soffset >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)); | ||
66 | if (soffset & 1) | ||
67 | *dst++ = *src++; /* only 1 character in first word */ | ||
68 | do { | ||
69 | tmp = *src++; | ||
70 | *dst = (tmp >> BITS_PER_BYTE); | ||
71 | if (!(*dst++)) | ||
72 | break; | ||
73 | } while ((*dst++ = tmp & BYTE_MASK)); | ||
74 | |||
75 | return dst; | ||
76 | } | ||
77 | |||
78 | /************************************************************************** | ||
79 | * Procedure expand_sec_names | ||
80 | * | ||
81 | * Parameters: | ||
82 | * dlthis Handle from dload_module_open for this module | ||
83 | * | ||
84 | * Effect: | ||
85 | * Allocates a buffer, unpacks and copies strings from string table into it. | ||
86 | * Stores a pointer to the buffer into a state variable. | ||
87 | ************************************************************************* */ | ||
88 | static void expand_sec_names(struct dload_state *dlthis) | ||
89 | { | ||
90 | char *xstrings, *curr, *next; | ||
91 | u32 xsize; | ||
92 | u16 sec; | ||
93 | struct ldr_section_info *shp; | ||
94 | /* assume worst-case size requirement */ | ||
95 | xsize = dlthis->dfile_hdr.df_max_str_len * dlthis->dfile_hdr.df_no_scns; | ||
96 | xstrings = (char *)dlthis->mysym->dload_allocate(dlthis->mysym, xsize); | ||
97 | if (xstrings == NULL) { | ||
98 | dload_error(dlthis, err_alloc, xsize); | ||
99 | return; | ||
100 | } | ||
101 | dlthis->xstrings = xstrings; | ||
102 | /* For each sec, copy and expand its name */ | ||
103 | curr = xstrings; | ||
104 | for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) { | ||
105 | shp = (struct ldr_section_info *)&dlthis->sect_hdrs[sec]; | ||
106 | next = unpack_sec_name(dlthis, *(u32 *) &shp->name, curr); | ||
107 | if (next == NULL) | ||
108 | break; /* error */ | ||
109 | shp->name = curr; | ||
110 | curr = next; | ||
111 | } | ||
112 | } | ||
113 | |||
114 | #endif | ||
115 | |||
116 | /************************************************************** */ | ||
117 | /********************* EXPORTED FUNCTIONS ********************* */ | ||
118 | /************************************************************** */ | ||
119 | |||
120 | /************************************************************************** | ||
121 | * Procedure dload_module_open | ||
122 | * | ||
123 | * Parameters: | ||
124 | * module The input stream that supplies the module image | ||
125 | * syms Host-side malloc/free and error reporting functions. | ||
126 | * Other methods are unused. | ||
127 | * | ||
128 | * Effect: | ||
129 | * Reads header information from a dynamic loader module using the | ||
130 | specified | ||
131 | * stream object, and returns a handle for the module information. This | ||
132 | * handle may be used in subsequent query calls to obtain information | ||
133 | * contained in the module. | ||
134 | * | ||
135 | * Returns: | ||
136 | * NULL if an error is encountered, otherwise a module handle for use | ||
137 | * in subsequent operations. | ||
138 | ************************************************************************* */ | ||
139 | void *dload_module_open(struct dynamic_loader_stream *module, | ||
140 | struct dynamic_loader_sym *syms) | ||
141 | { | ||
142 | struct dload_state *dlthis; /* internal state for this call */ | ||
143 | unsigned *dp, sz; | ||
144 | u32 sec_start; | ||
145 | #if BITS_PER_AU <= BITS_PER_BYTE | ||
146 | u16 sec; | ||
147 | #endif | ||
148 | |||
149 | /* Check that mandatory arguments are present */ | ||
150 | if (!module || !syms) { | ||
151 | if (syms != NULL) | ||
152 | dload_syms_error(syms, "Required parameter is NULL"); | ||
153 | |||
154 | return NULL; | ||
155 | } | ||
156 | |||
157 | dlthis = (struct dload_state *) | ||
158 | syms->dload_allocate(syms, sizeof(struct dload_state)); | ||
159 | if (!dlthis) { | ||
160 | /* not enough storage */ | ||
161 | dload_syms_error(syms, "Can't allocate module info"); | ||
162 | return NULL; | ||
163 | } | ||
164 | |||
165 | /* clear our internal state */ | ||
166 | dp = (unsigned *)dlthis; | ||
167 | for (sz = sizeof(struct dload_state) / sizeof(unsigned); | ||
168 | sz > 0; sz -= 1) | ||
169 | *dp++ = 0; | ||
170 | |||
171 | dlthis->strm = module; | ||
172 | dlthis->mysym = syms; | ||
173 | |||
174 | /* read in the doff image and store in our state variable */ | ||
175 | dload_headers(dlthis); | ||
176 | |||
177 | if (!dlthis->dload_errcount) | ||
178 | dload_strings(dlthis, true); | ||
179 | |||
180 | /* skip ahead past the unread portion of the string table */ | ||
181 | sec_start = sizeof(struct doff_filehdr_t) + | ||
182 | sizeof(struct doff_verify_rec_t) + | ||
183 | BYTE_TO_HOST(DOFF_ALIGN(dlthis->dfile_hdr.df_strtab_size)); | ||
184 | |||
185 | if (dlthis->strm->set_file_posn(dlthis->strm, sec_start) != 0) { | ||
186 | dload_error(dlthis, seek, sec_start); | ||
187 | return NULL; | ||
188 | } | ||
189 | |||
190 | if (!dlthis->dload_errcount) | ||
191 | dload_sections(dlthis); | ||
192 | |||
193 | if (dlthis->dload_errcount) { | ||
194 | dload_module_close(dlthis); /* errors, blow off our state */ | ||
195 | dlthis = NULL; | ||
196 | return NULL; | ||
197 | } | ||
198 | #if BITS_PER_AU > BITS_PER_BYTE | ||
199 | /* Expand all section names from the string table into the */ | ||
200 | /* state variable, and convert section names from a relative */ | ||
201 | /* string table offset to a pointers to the expanded string. */ | ||
202 | expand_sec_names(dlthis); | ||
203 | #else | ||
204 | /* Convert section names from a relative string table offset */ | ||
205 | /* to a pointer into the string table. */ | ||
206 | for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) { | ||
207 | struct ldr_section_info *shp = | ||
208 | (struct ldr_section_info *)&dlthis->sect_hdrs[sec]; | ||
209 | shp->name = dlthis->str_head + *(u32 *) &shp->name; | ||
210 | } | ||
211 | #endif | ||
212 | |||
213 | return dlthis; | ||
214 | } | ||
215 | |||
216 | /*************************************************************************** | ||
217 | * Procedure dload_get_section_info | ||
218 | * | ||
219 | * Parameters: | ||
220 | * minfo Handle from dload_module_open for this module | ||
221 | * section_name Pointer to the string name of the section desired | ||
222 | * section_info Address of a section info structure pointer to be | ||
223 | * initialized | ||
224 | * | ||
225 | * Effect: | ||
226 | * Finds the specified section in the module information, and initializes | ||
227 | * the provided struct ldr_section_info pointer. | ||
228 | * | ||
229 | * Returns: | ||
230 | * true for success, false for section not found | ||
231 | ************************************************************************* */ | ||
232 | int dload_get_section_info(void *minfo, const char *section_name, | ||
233 | const struct ldr_section_info **const section_info) | ||
234 | { | ||
235 | struct dload_state *dlthis; | ||
236 | struct ldr_section_info *shp; | ||
237 | u16 sec; | ||
238 | |||
239 | dlthis = (struct dload_state *)minfo; | ||
240 | if (!dlthis) | ||
241 | return false; | ||
242 | |||
243 | for (sec = 0; sec < dlthis->dfile_hdr.df_no_scns; sec++) { | ||
244 | shp = (struct ldr_section_info *)&dlthis->sect_hdrs[sec]; | ||
245 | if (strcmp(section_name, shp->name) == 0) { | ||
246 | *section_info = shp; | ||
247 | return true; | ||
248 | } | ||
249 | } | ||
250 | |||
251 | return false; | ||
252 | } | ||
253 | |||
254 | #define IPH_SIZE (sizeof(struct image_packet_t) - sizeof(u32)) | ||
255 | |||
256 | /************************************************************************** | ||
257 | * Procedure dload_get_section | ||
258 | * | ||
259 | * Parameters: | ||
260 | * minfo Handle from dload_module_open for this module | ||
261 | * section_info Pointer to a section info structure for the desired | ||
262 | * section | ||
263 | * section_data Buffer to contain the section initialized data | ||
264 | * | ||
265 | * Effect: | ||
266 | * Copies the initialized data for the specified section into the | ||
267 | * supplied buffer. | ||
268 | * | ||
269 | * Returns: | ||
270 | * true for success, false for section not found | ||
271 | ************************************************************************* */ | ||
272 | int dload_get_section(void *minfo, | ||
273 | const struct ldr_section_info *section_info, | ||
274 | void *section_data) | ||
275 | { | ||
276 | struct dload_state *dlthis; | ||
277 | u32 pos; | ||
278 | struct doff_scnhdr_t *sptr = NULL; | ||
279 | s32 nip; | ||
280 | struct image_packet_t ipacket; | ||
281 | s32 ipsize; | ||
282 | u32 checks; | ||
283 | s8 *dest = (s8 *) section_data; | ||
284 | |||
285 | dlthis = (struct dload_state *)minfo; | ||
286 | if (!dlthis) | ||
287 | return false; | ||
288 | sptr = (struct doff_scnhdr_t *)section_info; | ||
289 | if (sptr == NULL) | ||
290 | return false; | ||
291 | |||
292 | /* skip ahead to the start of the first packet */ | ||
293 | pos = BYTE_TO_HOST(DOFF_ALIGN((u32) sptr->ds_first_pkt_offset)); | ||
294 | if (dlthis->strm->set_file_posn(dlthis->strm, pos) != 0) { | ||
295 | dload_error(dlthis, seek, pos); | ||
296 | return false; | ||
297 | } | ||
298 | |||
299 | nip = sptr->ds_nipacks; | ||
300 | while ((nip -= 1) >= 0) { /* for each packet */ | ||
301 | /* get the fixed header bits */ | ||
302 | if (dlthis->strm->read_buffer(dlthis->strm, &ipacket, | ||
303 | IPH_SIZE) != IPH_SIZE) { | ||
304 | dload_error(dlthis, readstrm, "image packet"); | ||
305 | return false; | ||
306 | } | ||
307 | /* reorder the header if need be */ | ||
308 | if (dlthis->reorder_map) | ||
309 | dload_reorder(&ipacket, IPH_SIZE, dlthis->reorder_map); | ||
310 | |||
311 | /* Now read the packet image bits. Note: round the size up to | ||
312 | * the next multiple of 4 bytes; this is what checksum | ||
313 | * routines want. */ | ||
314 | ipsize = BYTE_TO_HOST(DOFF_ALIGN(ipacket.packet_size)); | ||
315 | if (ipsize > BYTE_TO_HOST(IMAGE_PACKET_SIZE)) { | ||
316 | dload_error(dlthis, isiz, ipsize); | ||
317 | return false; | ||
318 | } | ||
319 | if (dlthis->strm->read_buffer | ||
320 | (dlthis->strm, dest, ipsize) != ipsize) { | ||
321 | dload_error(dlthis, readstrm, "image packet"); | ||
322 | return false; | ||
323 | } | ||
324 | /* reorder the bytes if need be */ | ||
325 | #if !defined(_BIG_ENDIAN) || (TARGET_AU_BITS > 16) | ||
326 | if (dlthis->reorder_map) | ||
327 | dload_reorder(dest, ipsize, dlthis->reorder_map); | ||
328 | |||
329 | checks = dload_checksum(dest, ipsize); | ||
330 | #else | ||
331 | if (dlthis->dfile_hdr.df_byte_reshuffle != | ||
332 | TARGET_ORDER(REORDER_MAP(BYTE_RESHUFFLE_VALUE))) { | ||
333 | /* put image bytes in big-endian order, not PC order */ | ||
334 | dload_reorder(dest, ipsize, | ||
335 | TARGET_ORDER(dlthis-> | ||
336 | dfile_hdr.df_byte_reshuffle)); | ||
337 | } | ||
338 | #if TARGET_AU_BITS > 8 | ||
339 | checks = dload_reverse_checksum16(dest, ipsize); | ||
340 | #else | ||
341 | checks = dload_reverse_checksum(dest, ipsize); | ||
342 | #endif | ||
343 | #endif | ||
344 | checks += dload_checksum(&ipacket, IPH_SIZE); | ||
345 | |||
346 | /* NYI: unable to handle relocation entries here. Reloc | ||
347 | * entries referring to fields that span the packet boundaries | ||
348 | * may result in packets of sizes that are not multiple of | ||
349 | * 4 bytes. Our checksum implementation works on 32-bit words | ||
350 | * only. */ | ||
351 | if (ipacket.num_relocs != 0) { | ||
352 | dload_error(dlthis, err_reloc, ipsize); | ||
353 | return false; | ||
354 | } | ||
355 | |||
356 | if (~checks) { | ||
357 | dload_error(dlthis, err_checksum, "image packet"); | ||
358 | return false; | ||
359 | } | ||
360 | |||
361 | /*Advance destination ptr by the size of the just-read packet */ | ||
362 | dest += ipsize; | ||
363 | } | ||
364 | |||
365 | return true; | ||
366 | } | ||
367 | |||
368 | /*************************************************************************** | ||
369 | * Procedure dload_module_close | ||
370 | * | ||
371 | * Parameters: | ||
372 | * minfo Handle from dload_module_open for this module | ||
373 | * | ||
374 | * Effect: | ||
375 | * Releases any storage associated with the module handle. On return, | ||
376 | * the module handle is invalid. | ||
377 | * | ||
378 | * Returns: | ||
379 | * Nothing; this function has a void return type. Errors elsewhere in | ||
380 | * the loader are reported using syms->error_report(), where syms was | ||
381 | * an argument to dload_module_open. | ||
382 | ************************************************************************* */ | ||
383 | void dload_module_close(void *minfo) | ||
384 | { | ||
385 | struct dload_state *dlthis; | ||
386 | |||
387 | dlthis = (struct dload_state *)minfo; | ||
388 | if (!dlthis) | ||
389 | return; | ||
390 | |||
391 | if (dlthis->str_head) | ||
392 | dlthis->mysym->dload_deallocate(dlthis->mysym, | ||
393 | dlthis->str_head); | ||
394 | |||
395 | if (dlthis->sect_hdrs) | ||
396 | dlthis->mysym->dload_deallocate(dlthis->mysym, | ||
397 | dlthis->sect_hdrs); | ||
398 | |||
399 | #if BITS_PER_AU > BITS_PER_BYTE | ||
400 | if (dlthis->xstrings) | ||
401 | dlthis->mysym->dload_deallocate(dlthis->mysym, | ||
402 | dlthis->xstrings); | ||
403 | |||
404 | #endif | ||
405 | |||
406 | dlthis->mysym->dload_deallocate(dlthis->mysym, dlthis); | ||
407 | } | ||
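Taken together, the exported functions in this file form a small query API. A hedged usage sketch: my_stream and my_syms stand in for caller-supplied dynamic_loader_stream and dynamic_loader_sym implementations, ".text" is only an example section name, and sizing the host buffer as TADDR_TO_HOST(info->size) is an assumption about the units involved:

static void example_query(struct dynamic_loader_stream *my_stream,
			  struct dynamic_loader_sym *my_syms)
{
	const struct ldr_section_info *info;
	void *mod, *buf;

	mod = dload_module_open(my_stream, my_syms);
	if (!mod)
		return;

	if (dload_get_section_info(mod, ".text", &info)) {
		buf = my_syms->dload_allocate(my_syms,
					      TADDR_TO_HOST(info->size));
		if (buf) {
			if (dload_get_section(mod, info, buf)) {
				/* buf now holds the section's image bits */
			}
			my_syms->dload_deallocate(my_syms, buf);
		}
	}
	dload_module_close(mod);
}
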
diff --git a/drivers/staging/tidspbridge/dynload/header.h b/drivers/staging/tidspbridge/dynload/header.h deleted file mode 100644 index 5b50a15a343e..000000000000 --- a/drivers/staging/tidspbridge/dynload/header.h +++ /dev/null | |||
@@ -1,49 +0,0 @@ | |||
1 | /* | ||
2 | * header.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #include <linux/string.h> | ||
18 | #define DL_STRCMP strcmp | ||
19 | |||
20 | /* maximum parenthesis nesting in relocation stack expressions */ | ||
21 | #define STATIC_EXPR_STK_SIZE 10 | ||
22 | |||
23 | #include <linux/types.h> | ||
24 | |||
25 | #include "doff.h" | ||
26 | #include <dspbridge/dynamic_loader.h> | ||
27 | #include "params.h" | ||
28 | #include "dload_internal.h" | ||
29 | #include "reloc_table.h" | ||
30 | |||
31 | /* | ||
32 | * Plausibility limits | ||
33 | * | ||
34 | * These limits are imposed upon the input DOFF file as a check for validity. | ||
35 | * They are hard limits, in that the load will fail if they are exceeded. | ||
36 | * The numbers selected are arbitrary, in that the loader implementation does | ||
37 | * not require these limits. | ||
38 | */ | ||
39 | |||
40 | /* maximum number of bytes in string table */ | ||
41 | #define MAX_REASONABLE_STRINGTAB (0x100000) | ||
42 | /* maximum number of code,data,etc. sections */ | ||
43 | #define MAX_REASONABLE_SECTIONS (200) | ||
44 | /* maximum number of linker symbols */ | ||
45 | #define MAX_REASONABLE_SYMBOLS (100000) | ||
46 | |||
47 | /* shift count to align F_BIG with DLOAD_LITTLE */ | ||
48 | #define ALIGN_COFF_ENDIANNESS 7 | ||
49 | #define ENDIANNESS_MASK (DF_BYTE_ORDER >> ALIGN_COFF_ENDIANNESS) | ||
diff --git a/drivers/staging/tidspbridge/dynload/module_list.h b/drivers/staging/tidspbridge/dynload/module_list.h deleted file mode 100644 index a216bb131a40..000000000000 --- a/drivers/staging/tidspbridge/dynload/module_list.h +++ /dev/null | |||
@@ -1,159 +0,0 @@ | |||
1 | /* | ||
2 | * dspbridge/mpu_driver/src/dynload/module_list.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | /* | ||
18 | * This C header file gives the layout of the data structure created by the | ||
19 | * dynamic loader to describe the set of modules loaded into the DSP. | ||
20 | * | ||
21 | * Linked List Structure: | ||
22 | * ---------------------- | ||
23 | * The data structure defined here is a singly-linked list. The list | ||
24 | * represents the set of modules which are currently loaded in the DSP memory. | ||
25 | * The first entry in the list is a header record which contains a flag | ||
26 | * representing the state of the list. The rest of the entries in the list | ||
27 | * are module records. | ||
28 | * | ||
29 | * Global symbol _DLModules designates the first record in the list (i.e. the | ||
30 | * header record). This symbol must be defined in any program that wishes to | ||
31 | * use DLLview plug-in. | ||
32 | * | ||
33 | * String Representation: | ||
34 | * ---------------------- | ||
35 | * The string names of the module and its sections are stored in a block of | ||
36 | * memory which follows the module record itself. The strings are ordered: | ||
37 | * module name first, followed by section names in order from the first | ||
38 | * section to the last. String names are tightly packed arrays of 8-bit | ||
39 | * characters (two characters per 16-bit word on the C55x). Strings are | ||
40 | * zero-byte-terminated. | ||
41 | * | ||
42 | * Creating and updating the list: | ||
43 | * ------------------------------- | ||
44 | * Upon loading a new module into the DSP memory the dynamic loader inserts a | ||
45 | * new module record as the first module record in the list. The fields of | ||
46 | * this module record are initialized to reflect the properties of the module. | ||
47 | * The dynamic loader does NOT increment the flag/counter in the list's header | ||
48 | * record. | ||
49 | * | ||
50 | * Upon unloading a module from the DSP memory the dynamic loader removes the | ||
51 | * module's record from this list. The dynamic loader also increments the | ||
52 | * flag/counter in the list's header record to indicate that the list has been | ||
53 | * changed. | ||
54 | */ | ||
55 | |||
56 | #ifndef _MODULE_LIST_H_ | ||
57 | #define _MODULE_LIST_H_ | ||
58 | |||
59 | #include <linux/types.h> | ||
60 | |||
61 | /* Global pointer to the modules_header structure */ | ||
62 | #define MODULES_HEADER "_DLModules" | ||
63 | #define MODULES_HEADER_NO_UNDERSCORE "DLModules" | ||
64 | |||
65 | /* Initial version number */ | ||
66 | #define INIT_VERSION 1 | ||
67 | |||
68 | /* Verification number -- to be recorded in each module record */ | ||
69 | #define VERIFICATION 0x79 | ||
70 | |||
71 | /* forward declarations */ | ||
72 | struct dll_module; | ||
73 | struct dll_sect; | ||
74 | |||
75 | /* the first entry in the list is the modules_header record; | ||
76 | * its address is contained in the global _DLModules pointer */ | ||
77 | struct modules_header { | ||
78 | |||
79 | /* | ||
80 | * Address of the first dll_module record in the list or NULL. | ||
81 | * Note: for C55x this is a word address (C55x data is | ||
82 | * word-addressable) | ||
83 | */ | ||
84 | u32 first_module; | ||
85 | |||
86 | /* Combined storage size (in target addressable units) of the | ||
87 | * dll_module record which follows this header record, or zero | ||
88 | * if the list is empty. This size includes the module's string table. | ||
89 | * Note: for C55x the unit is a 16-bit word */ | ||
90 | u16 first_module_size; | ||
91 | |||
92 | /* Counter is incremented whenever a module record is removed from | ||
93 | * the list */ | ||
94 | u16 update_flag; | ||
95 | |||
96 | }; | ||
97 | |||
98 | /* for each 32-bits in above structure, a bitmap, LSB first, whose bits are: | ||
99 | * 0 => a 32-bit value, 1 => 2 16-bit values */ | ||
100 | /* swapping bitmap for type modules_header */ | ||
101 | #define MODULES_HEADER_BITMAP 0x2 | ||
102 | |||
103 | /* information recorded about each section in a module */ | ||
104 | struct dll_sect { | ||
105 | |||
106 | /* Load-time address of the section. | ||
107 | * Note: for C55x this is a byte address for program sections, and | ||
108 | * a word address for data sections. C55x program memory is | ||
109 | * byte-addressable, while data memory is word-addressable. */ | ||
110 | u32 sect_load_adr; | ||
111 | |||
112 | /* Run-time address of the section. | ||
113 | * Note 1: for C55x this is a byte address for program sections, and | ||
114 | * a word address for data sections. | ||
115 | * Note 2: for C55x two most significant bits of this field indicate | ||
116 | * the section type: '00' for a code section, '11' for a data section | ||
117 | * (C55 addresses are really only 24-bits wide). */ | ||
118 | u32 sect_run_adr; | ||
119 | |||
120 | }; | ||
121 | |||
122 | /* the rest of the entries in the list are module records */ | ||
123 | struct dll_module { | ||
124 | |||
125 | /* Address of the next dll_module record in the list, or 0 if this is | ||
126 | * the last record in the list. | ||
127 | * Note: for C55x this is a word address (C55x data is | ||
128 | * word-addressable) */ | ||
129 | u32 next_module; | ||
130 | |||
131 | /* Combined storage size (in target addressable units) of the | ||
132 | * dll_module record which follows this one, or zero if this is the | ||
133 | * last record in the list. This size includes the module's string | ||
134 | * table. | ||
135 | * Note: for C55x the unit is a 16-bit word. */ | ||
136 | u16 next_module_size; | ||
137 | |||
138 | /* version number of the tooling; set to INIT_VERSION for Phase 1 */ | ||
139 | u16 version; | ||
140 | |||
141 | /* the verification word; set to VERIFICATION */ | ||
142 | u16 verification; | ||
143 | |||
144 | /* Number of sections in the sects array */ | ||
145 | u16 num_sects; | ||
146 | |||
147 | /* Module's "unique" id; copy of the timestamp from the host | ||
148 | * COFF file */ | ||
149 | u32 timestamp; | ||
150 | |||
151 | /* Array of num_sects elements of the module's section records */ | ||
152 | struct dll_sect sects[1]; | ||
153 | }; | ||
154 | |||
155 | /* for each 32 bits in above structure, a bitmap, LSB first, whose bits are: | ||
156 | * 0 => a 32-bit value, 1 => 2 16-bit values */ | ||
157 | #define DLL_MODULE_BITMAP 0x6 /* swapping bitmap for type dll_module */ | ||
158 | |||
159 | #endif /* _MODULE_LIST_H_ */ | ||
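For illustration only (this is not part of the removed driver): given the layout above, a host-side tool in the spirit of the DLLview plug-in could walk the module list as sketched below. The snippet simulates DSP data memory with a little-endian byte array and assumes the natural byte offsets of the structures with no padding; a real plug-in would resolve _DLModules through the symbol table and honour the target's own addressing rules (word addresses on C55x).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fake snapshot of DSP data memory: little-endian, byte-addressable. */
static uint8_t dsp_mem[64];

static uint32_t rd32(uint32_t a) { uint32_t v; memcpy(&v, &dsp_mem[a], 4); return v; }
static uint16_t rd16(uint32_t a) { uint16_t v; memcpy(&v, &dsp_mem[a], 2); return v; }
static void wr32(uint32_t a, uint32_t v) { memcpy(&dsp_mem[a], &v, 4); }
static void wr16(uint32_t a, uint16_t v) { memcpy(&dsp_mem[a], &v, 2); }

int main(void)
{
	/* modules_header at 0: first_module, first_module_size, update_flag */
	wr32(0, 16);
	wr16(4, 24);
	wr16(6, 0);
	/* one dll_module record at address 16 (last in the list) */
	wr32(16, 0);            /* next_module: 0 terminates the list */
	wr16(20, 0);            /* next_module_size                   */
	wr16(22, 1);            /* version = INIT_VERSION             */
	wr16(24, 0x79);         /* verification = VERIFICATION        */
	wr16(26, 2);            /* num_sects                          */
	wr32(28, 0x4c00beef);   /* timestamp from the host COFF file  */

	/* Walk the singly-linked list the way a DLLview-style tool would. */
	for (uint32_t m = rd32(0); m != 0; m = rd32(m))
		printf("module @%u: %u sections, timestamp 0x%08x\n",
		       (unsigned)m, (unsigned)rd16(m + 10), (unsigned)rd32(m + 12));
	return 0;
}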
diff --git a/drivers/staging/tidspbridge/dynload/params.h b/drivers/staging/tidspbridge/dynload/params.h deleted file mode 100644 index d797fcd3b662..000000000000 --- a/drivers/staging/tidspbridge/dynload/params.h +++ /dev/null | |||
@@ -1,226 +0,0 @@ | |||
1 | /* | ||
2 | * params.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * This file defines host and target properties for all machines | ||
7 | * supported by the dynamic loader. To be precise: | ||
8 | * | ||
9 | * host: the machine on which the dynamic loader runs | ||
10 | * target: the machine that the dynamic loader is loading | ||
11 | * | ||
12 | * Host and target may or may not be the same, depending upon the particular | ||
13 | * use. | ||
14 | * | ||
15 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
16 | * | ||
17 | * This package is free software; you can redistribute it and/or modify | ||
18 | * it under the terms of the GNU General Public License version 2 as | ||
19 | * published by the Free Software Foundation. | ||
20 | * | ||
21 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
22 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
23 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
24 | */ | ||
25 | |||
26 | /****************************************************************************** | ||
27 | * | ||
28 | * Host Properties | ||
29 | * | ||
30 | **************************************************************************** */ | ||
31 | |||
32 | #define BITS_PER_BYTE 8 /* bits in the standard PC/SUN byte */ | ||
33 | #define LOG_BITS_PER_BYTE 3 /* log base 2 of same */ | ||
34 | #define BYTE_MASK ((1U<<BITS_PER_BYTE)-1) | ||
35 | |||
36 | #if defined(__TMS320C55X__) || defined(_TMS320C5XX) | ||
37 | #define BITS_PER_AU 16 | ||
38 | #define LOG_BITS_PER_AU 4 | ||
39 | /* use this print string in error messages for uint32_t */ | ||
40 | #define FMT_UI32 "0x%lx" | ||
41 | #define FMT8_UI32 "%08lx" /* same but no 0x, fixed width field */ | ||
42 | #else | ||
43 | /* bits in the smallest addressable data storage unit */ | ||
44 | #define BITS_PER_AU 8 | ||
45 | /* log base 2 of the same; useful for shift counts */ | ||
46 | #define LOG_BITS_PER_AU 3 | ||
47 | #define FMT_UI32 "0x%x" | ||
48 | #define FMT8_UI32 "%08x" | ||
49 | #endif | ||
50 | |||
51 | /* generic fastest method for swapping bytes and shorts */ | ||
52 | #define SWAP32BY16(zz) (((zz) << 16) | ((zz) >> 16)) | ||
53 | #define SWAP16BY8(zz) (((zz) << 8) | ((zz) >> 8)) | ||
54 | |||
55 | /* !! don't be tempted to insert type definitions here; use <stdint.h> !! */ | ||
56 | |||
57 | /****************************************************************************** | ||
58 | * | ||
59 | * Target Properties | ||
60 | * | ||
61 | **************************************************************************** */ | ||
62 | |||
63 | /*-------------------------------------------------------------------------- */ | ||
64 | /* TMS320C6x Target Specific Parameters (byte-addressable) */ | ||
65 | /*-------------------------------------------------------------------------- */ | ||
66 | #if TMS32060 | ||
67 | #define MEMORG 0x0L /* Size of configured memory */ | ||
68 | #define MEMSIZE 0x0L /* (full address space) */ | ||
69 | |||
70 | #define CINIT_ALIGN 8 /* alignment of cinit record in TDATA AUs */ | ||
71 | #define CINIT_COUNT 4 /* width of count field in TDATA AUs */ | ||
72 | #define CINIT_ADDRESS 4 /* width of address field in TDATA AUs */ | ||
73 | #define CINIT_PAGE_BITS 0 /* Number of LSBs of address that | ||
74 | * are page number */ | ||
75 | |||
76 | #define LENIENT_SIGNED_RELEXPS 0 /* DOES SIGNED ALLOW MAX UNSIGNED */ | ||
77 | |||
78 | #undef TARGET_ENDIANNESS /* may be big or little endian */ | ||
79 | |||
80 | /* align a target address to a word boundary */ | ||
81 | #define TARGET_WORD_ALIGN(zz) (((zz) + 0x3) & -0x4) | ||
82 | #endif | ||
83 | |||
84 | /*-------------------------------------------------------------------------- | ||
85 | * | ||
86 | * DEFAULT SETTINGS and DERIVED PROPERTIES | ||
87 | * | ||
88 | * This section establishes defaults for values not specified above | ||
89 | *-------------------------------------------------------------------------- */ | ||
90 | #ifndef TARGET_AU_BITS | ||
91 | #define TARGET_AU_BITS 8 /* width of the target addressable unit */ | ||
92 | #define LOG_TARGET_AU_BITS 3 /* log2 of same */ | ||
93 | #endif | ||
94 | |||
95 | #ifndef CINIT_DEFAULT_PAGE | ||
96 | #define CINIT_DEFAULT_PAGE 0 /* default .cinit page number */ | ||
97 | #endif | ||
98 | |||
99 | #ifndef DATA_RUN2LOAD | ||
100 | #define DATA_RUN2LOAD(zz) (zz) /* translate data run address to load address */ | ||
101 | #endif | ||
102 | |||
103 | #ifndef DBG_LIST_PAGE | ||
104 | #define DBG_LIST_PAGE 0 /* page number for .dllview section */ | ||
105 | #endif | ||
106 | |||
107 | #ifndef TARGET_WORD_ALIGN | ||
108 | /* align a target address to a word boundary */ | ||
109 | #define TARGET_WORD_ALIGN(zz) (zz) | ||
110 | #endif | ||
111 | |||
112 | #ifndef TDATA_TO_TADDR | ||
113 | #define TDATA_TO_TADDR(zz) (zz) /* target data address to target AU address */ | ||
114 | #define TADDR_TO_TDATA(zz) (zz) /* target AU address to target data address */ | ||
115 | #define TDATA_AU_BITS TARGET_AU_BITS /* bits per data AU */ | ||
116 | #define LOG_TDATA_AU_BITS LOG_TARGET_AU_BITS | ||
117 | #endif | ||
118 | |||
119 | /* | ||
120 | * | ||
121 | * Useful properties and conversions derived from the above | ||
122 | * | ||
123 | */ | ||
124 | |||
125 | /* | ||
126 | * Conversions between host and target addresses | ||
127 | */ | ||
128 | #if LOG_BITS_PER_AU == LOG_TARGET_AU_BITS | ||
129 | /* translate target addressable unit to host address */ | ||
130 | #define TADDR_TO_HOST(x) (x) | ||
131 | /* translate host address to target addressable unit */ | ||
132 | #define HOST_TO_TADDR(x) (x) | ||
133 | #elif LOG_BITS_PER_AU > LOG_TARGET_AU_BITS | ||
134 | #define TADDR_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU-LOG_TARGET_AU_BITS)) | ||
135 | #define HOST_TO_TADDR(x) ((x) << (LOG_BITS_PER_AU-LOG_TARGET_AU_BITS)) | ||
136 | #else | ||
137 | #define TADDR_TO_HOST(x) ((x) << (LOG_TARGET_AU_BITS-LOG_BITS_PER_AU)) | ||
138 | #define HOST_TO_TADDR(x) ((x) >> (LOG_TARGET_AU_BITS-LOG_BITS_PER_AU)) | ||
139 | #endif | ||
140 | |||
141 | #if LOG_BITS_PER_AU == LOG_TDATA_AU_BITS | ||
142 | /* translate target addressable unit to host address */ | ||
143 | #define TDATA_TO_HOST(x) (x) | ||
144 | /* translate host address to target addressable unit */ | ||
145 | #define HOST_TO_TDATA(x) (x) | ||
146 | /* translate host address to target addressable unit, round up */ | ||
147 | #define HOST_TO_TDATA_ROUND(x) (x) | ||
148 | /* byte offset to host offset, rounded up for TDATA size */ | ||
149 | #define BYTE_TO_HOST_TDATA_ROUND(x) BYTE_TO_HOST_ROUND(x) | ||
150 | #elif LOG_BITS_PER_AU > LOG_TDATA_AU_BITS | ||
151 | #define TDATA_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS)) | ||
152 | #define HOST_TO_TDATA(x) ((x) << (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS)) | ||
153 | #define HOST_TO_TDATA_ROUND(x) ((x) << (LOG_BITS_PER_AU-LOG_TDATA_AU_BITS)) | ||
154 | #define BYTE_TO_HOST_TDATA_ROUND(x) BYTE_TO_HOST_ROUND(x) | ||
155 | #else | ||
156 | #define TDATA_TO_HOST(x) ((x) << (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU)) | ||
157 | #define HOST_TO_TDATA(x) ((x) >> (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU)) | ||
158 | #define HOST_TO_TDATA_ROUND(x) (((x) +\ | ||
159 | (1<<(LOG_TDATA_AU_BITS-LOG_BITS_PER_AU))-1) >>\ | ||
160 | (LOG_TDATA_AU_BITS-LOG_BITS_PER_AU)) | ||
161 | #define BYTE_TO_HOST_TDATA_ROUND(x) (BYTE_TO_HOST((x) +\ | ||
162 | (1<<(LOG_TDATA_AU_BITS-LOG_BITS_PER_BYTE))-1) &\ | ||
163 | -(TDATA_AU_BITS/BITS_PER_AU)) | ||
164 | #endif | ||
165 | |||
166 | /* | ||
167 | * Input in DOFF format is always expressed in bytes, regardless of the loading | ||
168 | * host, so we wind up converting from bytes to target and host units even when | ||
169 | * the host is not a byte machine. | ||
170 | */ | ||
171 | #if LOG_BITS_PER_AU == LOG_BITS_PER_BYTE | ||
172 | #define BYTE_TO_HOST(x) (x) | ||
173 | #define BYTE_TO_HOST_ROUND(x) (x) | ||
174 | #define HOST_TO_BYTE(x) (x) | ||
175 | #elif LOG_BITS_PER_AU >= LOG_BITS_PER_BYTE | ||
176 | #define BYTE_TO_HOST(x) ((x) >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)) | ||
177 | #define BYTE_TO_HOST_ROUND(x) ((x + (BITS_PER_AU/BITS_PER_BYTE-1)) >>\ | ||
178 | (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)) | ||
179 | #define HOST_TO_BYTE(x) ((x) << (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE)) | ||
180 | #else | ||
181 | /* let's not try to deal with sub-8-bit byte machines */ | ||
182 | #endif | ||
183 | |||
184 | #if LOG_TARGET_AU_BITS == LOG_BITS_PER_BYTE | ||
185 | /* translate target addressable unit to byte address */ | ||
186 | #define TADDR_TO_BYTE(x) (x) | ||
187 | /* translate byte address to target addressable unit */ | ||
188 | #define BYTE_TO_TADDR(x) (x) | ||
189 | #elif LOG_TARGET_AU_BITS > LOG_BITS_PER_BYTE | ||
190 | #define TADDR_TO_BYTE(x) ((x) << (LOG_TARGET_AU_BITS-LOG_BITS_PER_BYTE)) | ||
191 | #define BYTE_TO_TADDR(x) ((x) >> (LOG_TARGET_AU_BITS-LOG_BITS_PER_BYTE)) | ||
192 | #else | ||
193 | /* let's not try to deal with sub-8-bit byte machines */ | ||
194 | #endif | ||
195 | |||
196 | #ifdef _BIG_ENDIAN | ||
197 | #define HOST_ENDIANNESS 1 | ||
198 | #else | ||
199 | #define HOST_ENDIANNESS 0 | ||
200 | #endif | ||
201 | |||
202 | #ifdef TARGET_ENDIANNESS | ||
203 | #define TARGET_ENDIANNESS_DIFFERS(rtend) (HOST_ENDIANNESS^TARGET_ENDIANNESS) | ||
204 | #elif HOST_ENDIANNESS | ||
205 | #define TARGET_ENDIANNESS_DIFFERS(rtend) (!(rtend)) | ||
206 | #else | ||
207 | #define TARGET_ENDIANNESS_DIFFERS(rtend) (rtend) | ||
208 | #endif | ||
209 | |||
210 | /* the unit in which we process target image data */ | ||
211 | #if TARGET_AU_BITS <= 8 | ||
212 | typedef u8 tgt_au_t; | ||
213 | #elif TARGET_AU_BITS <= 16 | ||
214 | typedef u16 tgt_au_t; | ||
215 | #else | ||
216 | typedef u32 tgt_au_t; | ||
217 | #endif | ||
218 | |||
219 | /* size of that unit */ | ||
220 | #if TARGET_AU_BITS < BITS_PER_AU | ||
221 | #define TGTAU_BITS BITS_PER_AU | ||
222 | #define LOG_TGTAU_BITS LOG_BITS_PER_AU | ||
223 | #else | ||
224 | #define TGTAU_BITS TARGET_AU_BITS | ||
225 | #define LOG_TGTAU_BITS LOG_TARGET_AU_BITS | ||
226 | #endif | ||
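As a quick sanity check of the byte/addressable-unit conversions above, the standalone snippet below repeats the relevant macro bodies for a hypothetical 16-bit-AU host (the C55x case: BITS_PER_AU = 16, LOG_BITS_PER_AU = 4). The macro bodies are copied from this header; the driver never builds them this way, so treat it purely as a worked example.

#include <stdio.h>

#define BITS_PER_BYTE     8
#define LOG_BITS_PER_BYTE 3
#define BITS_PER_AU       16   /* 16-bit addressable unit */
#define LOG_BITS_PER_AU   4

#define BYTE_TO_HOST(x)       ((x) >> (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE))
#define BYTE_TO_HOST_ROUND(x) (((x) + (BITS_PER_AU / BITS_PER_BYTE - 1)) >> \
                               (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE))
#define HOST_TO_BYTE(x)       ((x) << (LOG_BITS_PER_AU - LOG_BITS_PER_BYTE))

int main(void)
{
	printf("%d\n", BYTE_TO_HOST(10));       /* 5: ten bytes fill five words    */
	printf("%d\n", BYTE_TO_HOST(11));       /* 5: truncating conversion        */
	printf("%d\n", BYTE_TO_HOST_ROUND(11)); /* 6: rounded up to whole words    */
	printf("%d\n", HOST_TO_BYTE(6));        /* 12: six words span twelve bytes */
	return 0;
}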
diff --git a/drivers/staging/tidspbridge/dynload/reloc.c b/drivers/staging/tidspbridge/dynload/reloc.c deleted file mode 100644 index bb422b693290..000000000000 --- a/drivers/staging/tidspbridge/dynload/reloc.c +++ /dev/null | |||
@@ -1,486 +0,0 @@ | |||
1 | /* | ||
2 | * reloc.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #include "header.h" | ||
18 | |||
19 | #if TMS32060 | ||
20 | /* the magic symbol for the start of BSS */ | ||
21 | static const char bsssymbol[] = { ".bss" }; | ||
22 | #endif | ||
23 | |||
24 | #if TMS32060 | ||
25 | #include "reloc_table_c6000.c" | ||
26 | #endif | ||
27 | |||
28 | #if TMS32060 | ||
29 | /* From coff.h - ignore these relocation operations */ | ||
30 | #define R_C60ALIGN 0x76 /* C60: Alignment info for compressor */ | ||
31 | #define R_C60FPHEAD 0x77 /* C60: Explicit assembly directive */ | ||
32 | #define R_C60NOCMP 0x100 /* C60: Don't compress this code scn */ | ||
33 | #endif | ||
34 | |||
35 | /************************************************************************** | ||
36 | * Procedure dload_unpack | ||
37 | * | ||
38 | * Parameters: | ||
39 | * data pointer to storage unit containing lowest host address of | ||
40 | * image data | ||
41 | * fieldsz Size of bit field, 0 < fieldsz <= sizeof(rvalue)*BITS_PER_AU | ||
42 | * offset Offset from LSB, 0 <= offset < BITS_PER_AU | ||
43 | * sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY) | ||
44 | * | ||
45 | * Effect: | ||
46 | * Extracts the specified field and returns it. | ||
47 | ************************************************************************* */ | ||
48 | rvalue dload_unpack(struct dload_state *dlthis, tgt_au_t *data, int fieldsz, | ||
49 | int offset, unsigned sgn) | ||
50 | { | ||
51 | register rvalue objval; | ||
52 | register int shift, direction; | ||
53 | register tgt_au_t *dp = data; | ||
54 | |||
55 | fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value */ | ||
56 | /* collect up enough bits to contain the desired field */ | ||
57 | if (TARGET_BIG_ENDIAN) { | ||
58 | dp += (fieldsz + offset) >> LOG_TGTAU_BITS; | ||
59 | direction = -1; | ||
60 | } else | ||
61 | direction = 1; | ||
62 | objval = *dp >> offset; | ||
63 | shift = TGTAU_BITS - offset; | ||
64 | while (shift <= fieldsz) { | ||
65 | dp += direction; | ||
66 | objval += (rvalue) *dp << shift; | ||
67 | shift += TGTAU_BITS; | ||
68 | } | ||
69 | |||
70 | /* sign or zero extend the value appropriately */ | ||
71 | if (sgn == ROP_UNS) | ||
72 | objval &= (2 << fieldsz) - 1; | ||
73 | else { | ||
74 | shift = sizeof(rvalue) * BITS_PER_AU - 1 - fieldsz; | ||
75 | objval = (objval << shift) >> shift; | ||
76 | } | ||
77 | |||
78 | return objval; | ||
79 | |||
80 | } /* dload_unpack */ | ||
81 | |||
82 | /************************************************************************** | ||
83 | * Procedure dload_repack | ||
84 | * | ||
85 | * Parameters: | ||
86 | * val Value to insert | ||
87 | * data Pointer to storage unit containing lowest host address of | ||
88 | * image data | ||
89 | * fieldsz Size of bit field, 0 < fieldsz <= sizeof(rvalue)*BITS_PER_AU | ||
90 | * offset Offset from LSB, 0 <= offset < BITS_PER_AU | ||
91 | * sgn Signedness of the field (ROP_SGN, ROP_UNS, ROP_MAX, ROP_ANY) | ||
92 | * | ||
93 | * Effect: | ||
94 | * Stuffs the specified value in the specified field. Returns 0 for | ||
95 | * success | ||
96 | * or 1 if the value will not fit in the specified field according to the | ||
97 | * specified signedness rule. | ||
98 | ************************************************************************* */ | ||
99 | static const unsigned char ovf_limit[] = { 1, 2, 2 }; | ||
100 | |||
101 | int dload_repack(struct dload_state *dlthis, rvalue val, tgt_au_t *data, | ||
102 | int fieldsz, int offset, unsigned sgn) | ||
103 | { | ||
104 | register urvalue objval, mask; | ||
105 | register int shift, direction; | ||
106 | register tgt_au_t *dp = data; | ||
107 | |||
108 | fieldsz -= 1; /* avoid nastiness with 32-bit shift of 32-bit value */ | ||
109 | /* clip the bits */ | ||
110 | mask = (2UL << fieldsz) - 1; | ||
111 | objval = (val & mask); | ||
112 | /* store the bits through the specified mask */ | ||
113 | if (TARGET_BIG_ENDIAN) { | ||
114 | dp += (fieldsz + offset) >> LOG_TGTAU_BITS; | ||
115 | direction = -1; | ||
116 | } else | ||
117 | direction = 1; | ||
118 | |||
119 | /* insert LSBs */ | ||
120 | *dp = (*dp & ~(mask << offset)) + (objval << offset); | ||
121 | shift = TGTAU_BITS - offset; | ||
122 | /* align mask and objval with AU boundary */ | ||
123 | objval >>= shift; | ||
124 | mask >>= shift; | ||
125 | |||
126 | while (mask) { | ||
127 | dp += direction; | ||
128 | *dp = (*dp & ~mask) + objval; | ||
129 | objval >>= TGTAU_BITS; | ||
130 | mask >>= TGTAU_BITS; | ||
131 | } | ||
132 | |||
133 | /* | ||
134 | * check for overflow | ||
135 | */ | ||
136 | if (sgn) { | ||
137 | unsigned tmp = (val >> fieldsz) + (sgn & 0x1); | ||
138 | |||
139 | if (tmp > ovf_limit[sgn - 1]) | ||
140 | return 1; | ||
141 | } | ||
142 | return 0; | ||
143 | |||
144 | } /* dload_repack */ | ||
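A simplified standalone sketch of the same pack/unpack idea, for a little-endian host with 8-bit storage units and with no sign extension or overflow checking (so it is illustrative only, not the driver's code path): pull a fieldsz-bit value out of an image buffer at a given bit offset, or write one back while preserving the surrounding bits.

#include <stdint.h>
#include <stdio.h>

static uint32_t unpack_field(const uint8_t *data, int fieldsz, int offset)
{
	uint64_t acc = 0;
	int i;

	for (i = 0; i < 8; i++)                 /* gather enough storage units */
		acc |= (uint64_t)data[i] << (8 * i);
	return (uint32_t)((acc >> offset) & ((1ULL << fieldsz) - 1));
}

static void repack_field(uint8_t *data, uint32_t val, int fieldsz, int offset)
{
	uint64_t mask = ((1ULL << fieldsz) - 1) << offset;
	uint64_t acc = 0;
	int i;

	for (i = 0; i < 8; i++)
		acc |= (uint64_t)data[i] << (8 * i);
	acc = (acc & ~mask) | ((uint64_t)val << offset);
	for (i = 0; i < 8; i++)
		data[i] = (uint8_t)(acc >> (8 * i));
}

int main(void)
{
	uint8_t image[8] = { 0x78, 0x56, 0x34, 0x12, 0, 0, 0, 0 };

	printf("0x%x\n", (unsigned)unpack_field(image, 12, 4)); /* 0x567 */
	repack_field(image, 0xabc, 12, 4);
	printf("0x%x\n", (unsigned)unpack_field(image, 12, 4)); /* 0xabc */
	return 0;
}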
145 | |||
146 | /* lookup table for the scaling amount in a C6x instruction */ | ||
147 | #if TMS32060 | ||
148 | #define SCALE_BITS 4 /* there are 4 bits in the scale field */ | ||
149 | #define SCALE_MASK 0x7 /* we really only use the bottom 3 bits */ | ||
150 | static const u8 c60_scale[SCALE_MASK + 1] = { | ||
151 | 1, 0, 0, 0, 1, 1, 2, 2 | ||
152 | }; | ||
153 | #endif | ||
154 | |||
155 | /************************************************************************** | ||
156 | * Procedure dload_relocate | ||
157 | * | ||
158 | * Parameters: | ||
159 | * data Pointer to base of image data | ||
160 | * rp Pointer to relocation operation | ||
161 | * | ||
162 | * Effect: | ||
163 | * Performs the specified relocation operation | ||
164 | ************************************************************************* */ | ||
165 | void dload_relocate(struct dload_state *dlthis, tgt_au_t *data, | ||
166 | struct reloc_record_t *rp, bool *tramps_generated, | ||
167 | bool second_pass) | ||
168 | { | ||
169 | rvalue val, reloc_amt, orig_val = 0; | ||
170 | unsigned int fieldsz = 0; | ||
171 | unsigned int offset = 0; | ||
172 | unsigned int reloc_info = 0; | ||
173 | unsigned int reloc_action = 0; | ||
174 | register int rx = 0; | ||
175 | rvalue *stackp = NULL; | ||
176 | int top; | ||
177 | struct local_symbol *svp = NULL; | ||
178 | #ifdef RFV_SCALE | ||
179 | unsigned int scale = 0; | ||
180 | #endif | ||
181 | struct image_packet_t *img_pkt = NULL; | ||
182 | |||
183 | /* The image packet data struct is only used during first pass | ||
184 | * relocation in the event that a trampoline is needed. 2nd pass | ||
185 | * relocation doesn't guarantee that data is coming from an | ||
186 | * image_packet_t structure. See cload.c, dload_data for how img_data is | ||
187 | * set. If that changes this needs to be updated!!! */ | ||
188 | if (second_pass == false) | ||
189 | img_pkt = (struct image_packet_t *)((u8 *) data - | ||
190 | sizeof(struct | ||
191 | image_packet_t)); | ||
192 | |||
193 | rx = HASH_FUNC(rp->TYPE); | ||
194 | while (rop_map1[rx] != rp->TYPE) { | ||
195 | rx = HASH_L(rop_map2[rx]); | ||
196 | if (rx < 0) { | ||
197 | #if TMS32060 | ||
198 | switch (rp->TYPE) { | ||
199 | case R_C60ALIGN: | ||
200 | case R_C60NOCMP: | ||
201 | case R_C60FPHEAD: | ||
202 | /* Ignore these reloc types and return */ | ||
203 | break; | ||
204 | default: | ||
205 | /* Unknown reloc type, print error and return */ | ||
206 | dload_error(dlthis, "Bad coff operator 0x%x", | ||
207 | rp->TYPE); | ||
208 | } | ||
209 | #else | ||
210 | dload_error(dlthis, "Bad coff operator 0x%x", rp->TYPE); | ||
211 | #endif | ||
212 | return; | ||
213 | } | ||
214 | } | ||
215 | rx = HASH_I(rop_map2[rx]); | ||
216 | if ((rx < (sizeof(rop_action) / sizeof(u16))) | ||
217 | && (rx < (sizeof(rop_info) / sizeof(u16))) && (rx > 0)) { | ||
218 | reloc_action = rop_action[rx]; | ||
219 | reloc_info = rop_info[rx]; | ||
220 | } else { | ||
221 | dload_error(dlthis, "Buffer Overflow - Array Index Out " | ||
222 | "of Bounds"); | ||
223 | } | ||
224 | |||
225 | /* Compute the relocation amount for the referenced symbol, if any */ | ||
226 | reloc_amt = rp->UVAL; | ||
227 | if (RFV_SYM(reloc_info)) { /* relocation uses a symbol reference */ | ||
228 | /* If this is first pass, use the module local symbol table, | ||
229 | * else use the trampoline symbol table. */ | ||
230 | if (second_pass == false) { | ||
231 | if ((u32) rp->SYMNDX < dlthis->dfile_hdr.df_no_syms) { | ||
232 | /* real symbol reference */ | ||
233 | svp = &dlthis->local_symtab[rp->SYMNDX]; | ||
234 | reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ? | ||
235 | svp->delta : svp->value; | ||
236 | } | ||
237 | /* reloc references current section */ | ||
238 | else if (rp->SYMNDX == -1) { | ||
239 | reloc_amt = (RFV_SYM(reloc_info) == ROP_SYMD) ? | ||
240 | dlthis->delta_runaddr : | ||
241 | dlthis->image_secn->run_addr; | ||
242 | } | ||
243 | } | ||
244 | } | ||
245 | /* relocation uses a symbol reference */ | ||
246 | /* Handle stack adjustment */ | ||
247 | val = 0; | ||
248 | top = RFV_STK(reloc_info); | ||
249 | if (top) { | ||
250 | top += dlthis->relstkidx - RSTK_UOP; | ||
251 | if (top >= STATIC_EXPR_STK_SIZE) { | ||
252 | dload_error(dlthis, | ||
253 | "Expression stack overflow in %s at offset " | ||
254 | FMT_UI32, dlthis->image_secn->name, | ||
255 | rp->vaddr + dlthis->image_offset); | ||
256 | return; | ||
257 | } | ||
258 | val = dlthis->relstk[dlthis->relstkidx]; | ||
259 | dlthis->relstkidx = top; | ||
260 | stackp = &dlthis->relstk[top]; | ||
261 | } | ||
262 | /* Derive field position and size, if we need them */ | ||
263 | if (reloc_info & ROP_RW) { /* read or write action in our future */ | ||
264 | fieldsz = RFV_WIDTH(reloc_action); | ||
265 | if (fieldsz) { /* field info from table */ | ||
266 | offset = RFV_POSN(reloc_action); | ||
267 | if (TARGET_BIG_ENDIAN) | ||
268 | /* make sure vaddr is the lowest target | ||
269 | * address containing bits */ | ||
270 | rp->vaddr += RFV_BIGOFF(reloc_info); | ||
271 | } else { /* field info from relocation op */ | ||
272 | fieldsz = rp->FIELDSZ; | ||
273 | offset = rp->OFFSET; | ||
274 | if (TARGET_BIG_ENDIAN) | ||
275 | /* make sure vaddr is the lowest target | ||
276 | address containing bits */ | ||
277 | rp->vaddr += (rp->WORDSZ - offset - fieldsz) | ||
278 | >> LOG_TARGET_AU_BITS; | ||
279 | } | ||
280 | data = (tgt_au_t *) ((char *)data + TADDR_TO_HOST(rp->vaddr)); | ||
281 | /* compute lowest host location of referenced data */ | ||
282 | #if BITS_PER_AU > TARGET_AU_BITS | ||
283 | /* conversion from target address to host address may lose | ||
284 | address bits; add loss to offset */ | ||
285 | if (TARGET_BIG_ENDIAN) { | ||
286 | offset += -((rp->vaddr << LOG_TARGET_AU_BITS) + | ||
287 | offset + fieldsz) & | ||
288 | (BITS_PER_AU - TARGET_AU_BITS); | ||
289 | } else { | ||
290 | offset += (rp->vaddr << LOG_TARGET_AU_BITS) & | ||
291 | (BITS_PER_AU - 1); | ||
292 | } | ||
293 | #endif | ||
294 | #ifdef RFV_SCALE | ||
295 | scale = RFV_SCALE(reloc_info); | ||
296 | #endif | ||
297 | } | ||
298 | /* read the object value from the current image, if so ordered */ | ||
299 | if (reloc_info & ROP_R) { | ||
300 | /* relocation reads current image value */ | ||
301 | val = dload_unpack(dlthis, data, fieldsz, offset, | ||
302 | RFV_SIGN(reloc_info)); | ||
303 | /* Save off the original value in case the relo overflows and | ||
304 | * we can trampoline it. */ | ||
305 | orig_val = val; | ||
306 | |||
307 | #ifdef RFV_SCALE | ||
308 | val <<= scale; | ||
309 | #endif | ||
310 | } | ||
311 | /* perform the necessary arithmetic */ | ||
312 | switch (RFV_ACTION(reloc_action)) { /* relocation actions */ | ||
313 | case RACT_VAL: | ||
314 | break; | ||
315 | case RACT_ASGN: | ||
316 | val = reloc_amt; | ||
317 | break; | ||
318 | case RACT_ADD: | ||
319 | val += reloc_amt; | ||
320 | break; | ||
321 | case RACT_PCR: | ||
322 | /*----------------------------------------------------------- | ||
323 | * Handle special cases of jumping from absolute sections | ||
324 | * (special reloc type) or to absolute destination | ||
325 | * (symndx == -1). In either case, set the appropriate | ||
326 | * relocation amount to 0. | ||
327 | *----------------------------------------------------------- */ | ||
328 | if (rp->SYMNDX == -1) | ||
329 | reloc_amt = 0; | ||
330 | val += reloc_amt - dlthis->delta_runaddr; | ||
331 | break; | ||
332 | case RACT_ADDISP: | ||
333 | val += rp->R_DISP + reloc_amt; | ||
334 | break; | ||
335 | case RACT_ASGPC: | ||
336 | val = dlthis->image_secn->run_addr + reloc_amt; | ||
337 | break; | ||
338 | case RACT_PLUS: | ||
339 | if (stackp != NULL) | ||
340 | val += *stackp; | ||
341 | break; | ||
342 | case RACT_SUB: | ||
343 | if (stackp != NULL) | ||
344 | val = *stackp - val; | ||
345 | break; | ||
346 | case RACT_NEG: | ||
347 | val = -val; | ||
348 | break; | ||
349 | case RACT_MPY: | ||
350 | if (stackp != NULL) | ||
351 | val *= *stackp; | ||
352 | break; | ||
353 | case RACT_DIV: | ||
354 | if (stackp != NULL) | ||
355 | val = *stackp / val; | ||
356 | break; | ||
357 | case RACT_MOD: | ||
358 | if (stackp != NULL) | ||
359 | val = *stackp % val; | ||
360 | break; | ||
361 | case RACT_SR: | ||
362 | if (val >= sizeof(rvalue) * BITS_PER_AU) | ||
363 | val = 0; | ||
364 | else if (stackp != NULL) | ||
365 | val = (urvalue) *stackp >> val; | ||
366 | break; | ||
367 | case RACT_ASR: | ||
368 | if (val >= sizeof(rvalue) * BITS_PER_AU) | ||
369 | val = sizeof(rvalue) * BITS_PER_AU - 1; | ||
370 | else if (stackp != NULL) | ||
371 | val = *stackp >> val; | ||
372 | break; | ||
373 | case RACT_SL: | ||
374 | if (val >= sizeof(rvalue) * BITS_PER_AU) | ||
375 | val = 0; | ||
376 | else if (stackp != NULL) | ||
377 | val = *stackp << val; | ||
378 | break; | ||
379 | case RACT_AND: | ||
380 | if (stackp != NULL) | ||
381 | val &= *stackp; | ||
382 | break; | ||
383 | case RACT_OR: | ||
384 | if (stackp != NULL) | ||
385 | val |= *stackp; | ||
386 | break; | ||
387 | case RACT_XOR: | ||
388 | if (stackp != NULL) | ||
389 | val ^= *stackp; | ||
390 | break; | ||
391 | case RACT_NOT: | ||
392 | val = ~val; | ||
393 | break; | ||
394 | #if TMS32060 | ||
395 | case RACT_C6SECT: | ||
396 | /* actually needed address of secn containing symbol */ | ||
397 | if (svp != NULL) { | ||
398 | if (rp->SYMNDX >= 0) | ||
399 | if (svp->secnn > 0) | ||
400 | reloc_amt = dlthis->ldr_sections | ||
401 | [svp->secnn - 1].run_addr; | ||
402 | } | ||
403 | /* !!! FALL THRU !!! */ | ||
404 | case RACT_C6BASE: | ||
405 | if (dlthis->bss_run_base == 0) { | ||
406 | struct dynload_symbol *symp; | ||
407 | |||
408 | symp = dlthis->mysym->find_matching_symbol | ||
409 | (dlthis->mysym, bsssymbol); | ||
410 | /* lookup value of global BSS base */ | ||
411 | if (symp) | ||
412 | dlthis->bss_run_base = symp->value; | ||
413 | else | ||
414 | dload_error(dlthis, | ||
415 | "Global BSS base referenced in %s " | ||
416 | "offset" FMT_UI32 " but not " | ||
417 | "defined", | ||
418 | dlthis->image_secn->name, | ||
419 | rp->vaddr + dlthis->image_offset); | ||
420 | } | ||
421 | reloc_amt -= dlthis->bss_run_base; | ||
422 | /* !!! FALL THRU !!! */ | ||
423 | case RACT_C6DSPL: | ||
424 | /* scale factor determined by 3 LSBs of field */ | ||
425 | scale = c60_scale[val & SCALE_MASK]; | ||
426 | offset += SCALE_BITS; | ||
427 | fieldsz -= SCALE_BITS; | ||
428 | val >>= SCALE_BITS; /* ignore the scale field hereafter */ | ||
429 | val <<= scale; | ||
430 | val += reloc_amt; /* do the usual relocation */ | ||
431 | if (((1 << scale) - 1) & val) | ||
432 | dload_error(dlthis, | ||
433 | "Unaligned reference in %s offset " | ||
434 | FMT_UI32, dlthis->image_secn->name, | ||
435 | rp->vaddr + dlthis->image_offset); | ||
436 | break; | ||
437 | #endif | ||
438 | } /* relocation actions */ | ||
439 | /* Put back result as required */ | ||
440 | if (reloc_info & ROP_W) { /* relocation writes image value */ | ||
441 | #ifdef RFV_SCALE | ||
442 | val >>= scale; | ||
443 | #endif | ||
444 | if (dload_repack(dlthis, val, data, fieldsz, offset, | ||
445 | RFV_SIGN(reloc_info))) { | ||
446 | /* Check to see if this relo can be trampolined, | ||
447 | * but only in first phase relocation. 2nd phase | ||
448 | * relocation cannot trampoline. */ | ||
449 | if ((second_pass == false) && | ||
450 | (dload_tramp_avail(dlthis, rp) == true)) { | ||
451 | |||
452 | /* Before generating the trampoline, restore | ||
453 | * the value to its original so the 2nd pass | ||
454 | * relo will work. */ | ||
455 | dload_repack(dlthis, orig_val, data, fieldsz, | ||
456 | offset, RFV_SIGN(reloc_info)); | ||
457 | if (!dload_tramp_generate(dlthis, | ||
458 | (dlthis->image_secn - | ||
459 | dlthis->ldr_sections), | ||
460 | dlthis->image_offset, | ||
461 | img_pkt, rp)) { | ||
462 | dload_error(dlthis, | ||
463 | "Failed to " | ||
464 | "generate trampoline for " | ||
465 | "bit overflow"); | ||
466 | dload_error(dlthis, | ||
467 | "Relocation val " FMT_UI32 | ||
468 | " overflows %d bits in %s " | ||
469 | "offset " FMT_UI32, val, | ||
470 | fieldsz, | ||
471 | dlthis->image_secn->name, | ||
472 | dlthis->image_offset + | ||
473 | rp->vaddr); | ||
474 | } else | ||
475 | *tramps_generated = true; | ||
476 | } else { | ||
477 | dload_error(dlthis, "Relocation value " | ||
478 | FMT_UI32 " overflows %d bits in %s" | ||
479 | " offset " FMT_UI32, val, fieldsz, | ||
480 | dlthis->image_secn->name, | ||
481 | dlthis->image_offset + rp->vaddr); | ||
482 | } | ||
483 | } | ||
484 | } else if (top) | ||
485 | *stackp = val; | ||
486 | } /* reloc_value */ | ||
diff --git a/drivers/staging/tidspbridge/dynload/reloc_table.h b/drivers/staging/tidspbridge/dynload/reloc_table.h deleted file mode 100644 index 6aab03d4668d..000000000000 --- a/drivers/staging/tidspbridge/dynload/reloc_table.h +++ /dev/null | |||
@@ -1,102 +0,0 @@ | |||
1 | /* | ||
2 | * reloc_table.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef _RELOC_TABLE_H_ | ||
18 | #define _RELOC_TABLE_H_ | ||
19 | /* | ||
20 | * Table of relocation operator properties | ||
21 | */ | ||
22 | #include <linux/types.h> | ||
23 | |||
24 | /* How does this relocation operation access the program image? */ | ||
25 | #define ROP_N 0 /* does not access image */ | ||
26 | #define ROP_R 1 /* read from image */ | ||
27 | #define ROP_W 2 /* write to image */ | ||
28 | #define ROP_RW 3 /* read from and write to image */ | ||
29 | |||
30 | /* For program image access, what are the overflow rules for the bit field? */ | ||
31 | /* Beware! Procedure repack depends on this encoding */ | ||
32 | #define ROP_ANY 0 /* no overflow ever, just truncate the value */ | ||
33 | #define ROP_SGN 1 /* signed field */ | ||
34 | #define ROP_UNS 2 /* unsigned field */ | ||
35 | #define ROP_MAX 3 /* allow maximum range of either signed or unsigned */ | ||
36 | |||
37 | /* How does the relocation operation use the symbol reference */ | ||
38 | #define ROP_IGN 0 /* no symbol is referenced */ | ||
39 | #define ROP_LIT 0 /* use rp->UVAL literal field */ | ||
40 | #define ROP_SYM 1 /* symbol value is used in relocation */ | ||
41 | #define ROP_SYMD 2 /* delta value vs last link is used */ | ||
42 | |||
43 | /* How does the reloc op use the stack? */ | ||
44 | #define RSTK_N 0 /* Does not use */ | ||
45 | #define RSTK_POP 1 /* Does a POP */ | ||
46 | #define RSTK_UOP 2 /* Unary op, stack position unaffected */ | ||
47 | #define RSTK_PSH 3 /* Does a push */ | ||
48 | |||
49 | /* | ||
50 | * Computational actions performed by the dynamic loader | ||
51 | */ | ||
52 | enum dload_actions { | ||
53 | /* don't alter the current val (from stack or mem fetch) */ | ||
54 | RACT_VAL, | ||
55 | /* set value to reference amount (from symbol reference) */ | ||
56 | RACT_ASGN, | ||
57 | RACT_ADD, /* add reference to value */ | ||
58 | RACT_PCR, /* add reference minus PC delta to value */ | ||
59 | RACT_ADDISP, /* add reference plus R_DISP */ | ||
60 | RACT_ASGPC, /* set value to section addr plus reference */ | ||
61 | |||
62 | RACT_PLUS, /* stack + */ | ||
63 | RACT_SUB, /* stack - */ | ||
64 | RACT_NEG, /* stack unary - */ | ||
65 | |||
66 | RACT_MPY, /* stack * */ | ||
67 | RACT_DIV, /* stack / */ | ||
68 | RACT_MOD, /* stack % */ | ||
69 | |||
70 | RACT_SR, /* stack unsigned >> */ | ||
71 | RACT_ASR, /* stack signed >> */ | ||
72 | RACT_SL, /* stack << */ | ||
73 | RACT_AND, /* stack & */ | ||
74 | RACT_OR, /* stack | */ | ||
75 | RACT_XOR, /* stack ^ */ | ||
76 | RACT_NOT, /* stack ~ */ | ||
77 | RACT_C6SECT, /* for C60 R_SECT op */ | ||
78 | RACT_C6BASE, /* for C60 R_BASE op */ | ||
79 | RACT_C6DSPL, /* for C60 scaled 15-bit displacement */ | ||
80 | RACT_PCR23T /* for ARM Thumb long branch */ | ||
81 | }; | ||
82 | |||
83 | /* | ||
84 | * macros used to extract values | ||
85 | */ | ||
86 | #define RFV_POSN(aaa) ((aaa) & 0xF) | ||
87 | #define RFV_WIDTH(aaa) (((aaa) >> 4) & 0x3F) | ||
88 | #define RFV_ACTION(aaa) ((aaa) >> 10) | ||
89 | |||
90 | #define RFV_SIGN(iii) (((iii) >> 2) & 0x3) | ||
91 | #define RFV_SYM(iii) (((iii) >> 4) & 0x3) | ||
92 | #define RFV_STK(iii) (((iii) >> 6) & 0x3) | ||
93 | #define RFV_ACCS(iii) ((iii) & 0x3) | ||
94 | |||
95 | #if (TMS32060) | ||
96 | #define RFV_SCALE(iii) ((iii) >> 11) | ||
97 | #define RFV_BIGOFF(iii) (((iii) >> 8) & 0x7) | ||
98 | #else | ||
99 | #define RFV_BIGOFF(iii) ((iii) >> 8) | ||
100 | #endif | ||
101 | |||
102 | #endif /* _RELOC_TABLE_H_ */ | ||
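To make the packed encoding concrete, here is an illustrative decode (not driver code) of one generated table entry using the accessor macros above. Entry 2560 (0x0A00) from rop_action[] in reloc_table_c6000.c unpacks to action 2, which the enum above names RACT_ADD, applied to a 32-bit field at bit position 0.

#include <stdio.h>

#define RFV_POSN(aaa)   ((aaa) & 0xF)
#define RFV_WIDTH(aaa)  (((aaa) >> 4) & 0x3F)
#define RFV_ACTION(aaa) ((aaa) >> 10)

int main(void)
{
	unsigned int entry = 2560;      /* 0x0A00, first rop_action[] entry */

	printf("posn=%u width=%u action=%u\n",
	       RFV_POSN(entry), RFV_WIDTH(entry), RFV_ACTION(entry));
	/* prints: posn=0 width=32 action=2 (RACT_ADD) */
	return 0;
}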
diff --git a/drivers/staging/tidspbridge/dynload/reloc_table_c6000.c b/drivers/staging/tidspbridge/dynload/reloc_table_c6000.c deleted file mode 100644 index a28bc0442491..000000000000 --- a/drivers/staging/tidspbridge/dynload/reloc_table_c6000.c +++ /dev/null | |||
@@ -1,257 +0,0 @@ | |||
1 | /* | ||
2 | * reloc_table_c6000.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | /* Tables generated for c6000 */ | ||
18 | |||
19 | #define HASH_FUNC(zz) (((((zz) + 1) * 1845UL) >> 11) & 63) | ||
20 | #define HASH_L(zz) ((zz) >> 8) | ||
21 | #define HASH_I(zz) ((zz) & 0xFF) | ||
22 | |||
23 | static const u16 rop_map1[] = { | ||
24 | 0, | ||
25 | 1, | ||
26 | 2, | ||
27 | 20, | ||
28 | 4, | ||
29 | 5, | ||
30 | 6, | ||
31 | 15, | ||
32 | 80, | ||
33 | 81, | ||
34 | 82, | ||
35 | 83, | ||
36 | 84, | ||
37 | 85, | ||
38 | 86, | ||
39 | 87, | ||
40 | 17, | ||
41 | 18, | ||
42 | 19, | ||
43 | 21, | ||
44 | 16, | ||
45 | 16394, | ||
46 | 16404, | ||
47 | 65535, | ||
48 | 65535, | ||
49 | 65535, | ||
50 | 65535, | ||
51 | 65535, | ||
52 | 65535, | ||
53 | 32, | ||
54 | 65535, | ||
55 | 65535, | ||
56 | 65535, | ||
57 | 65535, | ||
58 | 65535, | ||
59 | 65535, | ||
60 | 40, | ||
61 | 112, | ||
62 | 113, | ||
63 | 65535, | ||
64 | 16384, | ||
65 | 16385, | ||
66 | 16386, | ||
67 | 16387, | ||
68 | 16388, | ||
69 | 16389, | ||
70 | 16390, | ||
71 | 16391, | ||
72 | 16392, | ||
73 | 16393, | ||
74 | 16395, | ||
75 | 16396, | ||
76 | 16397, | ||
77 | 16398, | ||
78 | 16399, | ||
79 | 16400, | ||
80 | 16401, | ||
81 | 16402, | ||
82 | 16403, | ||
83 | 16405, | ||
84 | 16406, | ||
85 | 65535, | ||
86 | 65535, | ||
87 | 65535 | ||
88 | }; | ||
89 | |||
90 | static const s16 rop_map2[] = { | ||
91 | -256, | ||
92 | -255, | ||
93 | -254, | ||
94 | -245, | ||
95 | -253, | ||
96 | -252, | ||
97 | -251, | ||
98 | -250, | ||
99 | -241, | ||
100 | -240, | ||
101 | -239, | ||
102 | -238, | ||
103 | -237, | ||
104 | -236, | ||
105 | 1813, | ||
106 | 5142, | ||
107 | -248, | ||
108 | -247, | ||
109 | 778, | ||
110 | -244, | ||
111 | -249, | ||
112 | -221, | ||
113 | -211, | ||
114 | -1, | ||
115 | -1, | ||
116 | -1, | ||
117 | -1, | ||
118 | -1, | ||
119 | -1, | ||
120 | -243, | ||
121 | -1, | ||
122 | -1, | ||
123 | -1, | ||
124 | -1, | ||
125 | -1, | ||
126 | -1, | ||
127 | -242, | ||
128 | -233, | ||
129 | -232, | ||
130 | -1, | ||
131 | -231, | ||
132 | -230, | ||
133 | -229, | ||
134 | -228, | ||
135 | -227, | ||
136 | -226, | ||
137 | -225, | ||
138 | -224, | ||
139 | -223, | ||
140 | 5410, | ||
141 | -220, | ||
142 | -219, | ||
143 | -218, | ||
144 | -217, | ||
145 | -216, | ||
146 | -215, | ||
147 | -214, | ||
148 | -213, | ||
149 | 5676, | ||
150 | -210, | ||
151 | -209, | ||
152 | -1, | ||
153 | -1, | ||
154 | -1 | ||
155 | }; | ||
156 | |||
157 | static const u16 rop_action[] = { | ||
158 | 2560, | ||
159 | 2304, | ||
160 | 2304, | ||
161 | 2432, | ||
162 | 2432, | ||
163 | 2560, | ||
164 | 2176, | ||
165 | 2304, | ||
166 | 2560, | ||
167 | 3200, | ||
168 | 3328, | ||
169 | 3584, | ||
170 | 3456, | ||
171 | 2304, | ||
172 | 4208, | ||
173 | 20788, | ||
174 | 21812, | ||
175 | 3415, | ||
176 | 3245, | ||
177 | 2311, | ||
178 | 4359, | ||
179 | 19764, | ||
180 | 2311, | ||
181 | 3191, | ||
182 | 3280, | ||
183 | 6656, | ||
184 | 7680, | ||
185 | 8704, | ||
186 | 9728, | ||
187 | 10752, | ||
188 | 11776, | ||
189 | 12800, | ||
190 | 13824, | ||
191 | 14848, | ||
192 | 15872, | ||
193 | 16896, | ||
194 | 17920, | ||
195 | 18944, | ||
196 | 0, | ||
197 | 0, | ||
198 | 0, | ||
199 | 0, | ||
200 | 1536, | ||
201 | 1536, | ||
202 | 1536, | ||
203 | 5632, | ||
204 | 512, | ||
205 | 0 | ||
206 | }; | ||
207 | |||
208 | static const u16 rop_info[] = { | ||
209 | 0, | ||
210 | 35, | ||
211 | 35, | ||
212 | 35, | ||
213 | 35, | ||
214 | 35, | ||
215 | 35, | ||
216 | 35, | ||
217 | 35, | ||
218 | 39, | ||
219 | 39, | ||
220 | 39, | ||
221 | 39, | ||
222 | 35, | ||
223 | 34, | ||
224 | 283, | ||
225 | 299, | ||
226 | 4135, | ||
227 | 4391, | ||
228 | 291, | ||
229 | 33059, | ||
230 | 283, | ||
231 | 295, | ||
232 | 4647, | ||
233 | 4135, | ||
234 | 64, | ||
235 | 64, | ||
236 | 128, | ||
237 | 64, | ||
238 | 64, | ||
239 | 64, | ||
240 | 64, | ||
241 | 64, | ||
242 | 64, | ||
243 | 64, | ||
244 | 64, | ||
245 | 64, | ||
246 | 128, | ||
247 | 201, | ||
248 | 197, | ||
249 | 74, | ||
250 | 70, | ||
251 | 208, | ||
252 | 196, | ||
253 | 200, | ||
254 | 192, | ||
255 | 192, | ||
256 | 66 | ||
257 | }; | ||
diff --git a/drivers/staging/tidspbridge/dynload/tramp.c b/drivers/staging/tidspbridge/dynload/tramp.c deleted file mode 100644 index 5f0431305fbb..000000000000 --- a/drivers/staging/tidspbridge/dynload/tramp.c +++ /dev/null | |||
@@ -1,1143 +0,0 @@ | |||
1 | /* | ||
2 | * tramp.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2009 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #include "header.h" | ||
18 | |||
19 | #if TMS32060 | ||
20 | #include "tramp_table_c6000.c" | ||
21 | #endif | ||
22 | |||
23 | #define MAX_RELOS_PER_PASS 4 | ||
24 | |||
25 | /* | ||
26 | * Function: priv_tramp_sect_tgt_alloc | ||
27 | * Description: Allocate target memory for the trampoline section. The | ||
28 | * target memory size is easily obtained as the next available address. | ||
29 | */ | ||
30 | static int priv_tramp_sect_tgt_alloc(struct dload_state *dlthis) | ||
31 | { | ||
32 | int ret_val = 0; | ||
33 | struct ldr_section_info *sect_info; | ||
34 | |||
35 | /* Populate the trampoline loader section and allocate it on the | ||
36 | * target. The section name is ALWAYS the first string in the final | ||
37 | * string table for trampolines. The trampoline section is always | ||
38 | * 1 beyond the total number of allocated sections. */ | ||
39 | sect_info = &dlthis->ldr_sections[dlthis->allocated_secn_count]; | ||
40 | |||
41 | sect_info->name = dlthis->tramp.final_string_table; | ||
42 | sect_info->size = dlthis->tramp.tramp_sect_next_addr; | ||
43 | sect_info->context = 0; | ||
44 | sect_info->type = | ||
45 | (4 << 8) | DLOAD_TEXT | DS_ALLOCATE_MASK | DS_DOWNLOAD_MASK; | ||
46 | sect_info->page = 0; | ||
47 | sect_info->run_addr = 0; | ||
48 | sect_info->load_addr = 0; | ||
49 | ret_val = dlthis->myalloc->dload_allocate(dlthis->myalloc, | ||
50 | sect_info, | ||
51 | ds_alignment | ||
52 | (sect_info->type)); | ||
53 | |||
54 | if (ret_val == 0) | ||
55 | dload_error(dlthis, "Failed to allocate target memory for" | ||
56 | " trampoline"); | ||
57 | |||
58 | return ret_val; | ||
59 | } | ||
60 | |||
61 | /* | ||
62 | * Function: priv_h2a | ||
63 | * Description: Helper function to convert a hex value to its ASCII | ||
64 | * representation. Used for trampoline symbol name generation. | ||
65 | */ | ||
66 | static u8 priv_h2a(u8 value) | ||
67 | { | ||
68 | if (value > 0xF) | ||
69 | return 0xFF; | ||
70 | |||
71 | if (value <= 9) | ||
72 | value += 0x30; | ||
73 | else | ||
74 | value += 0x37; | ||
75 | |||
76 | return value; | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * Function: priv_tramp_sym_gen_name | ||
81 | * Description: Generate a trampoline symbol name (ASCII) using the value | ||
82 | * of the symbol. This places the new name into the user buffer. | ||
83 | * The name is fixed in length and of the form: __$dbTR__xxxxxxxx | ||
84 | * (where "xxxxxxxx" is the hex value). | ||
85 | */ | ||
86 | static void priv_tramp_sym_gen_name(u32 value, char *dst) | ||
87 | { | ||
88 | u32 i; | ||
89 | char *prefix = TRAMP_SYM_PREFIX; | ||
90 | char *dst_local = dst; | ||
91 | u8 tmp; | ||
92 | |||
93 | /* Clear out the destination, including the ending NULL */ | ||
94 | for (i = 0; i < (TRAMP_SYM_PREFIX_LEN + TRAMP_SYM_HEX_ASCII_LEN); i++) | ||
95 | *(dst_local + i) = 0; | ||
96 | |||
97 | /* Copy the prefix to start */ | ||
98 | for (i = 0; i < strlen(TRAMP_SYM_PREFIX); i++) { | ||
99 | *dst_local = *(prefix + i); | ||
100 | dst_local++; | ||
101 | } | ||
102 | |||
103 | /* Now convert the value passed in to a string equiv of the hex */ | ||
104 | for (i = 0; i < sizeof(value); i++) { | ||
105 | #ifndef _BIG_ENDIAN | ||
106 | tmp = *(((u8 *) &value) + (sizeof(value) - 1) - i); | ||
107 | *dst_local = priv_h2a((tmp & 0xF0) >> 4); | ||
108 | dst_local++; | ||
109 | *dst_local = priv_h2a(tmp & 0x0F); | ||
110 | dst_local++; | ||
111 | #else | ||
112 | tmp = *(((u8 *) &value) + i); | ||
113 | *dst_local = priv_h2a((tmp & 0xF0) >> 4); | ||
114 | dst_local++; | ||
115 | *dst_local = priv_h2a(tmp & 0x0F); | ||
116 | dst_local++; | ||
117 | #endif | ||
118 | } | ||
119 | |||
120 | /* NULL terminate */ | ||
121 | *dst_local = 0; | ||
122 | } | ||
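For reference, the generated name is just the fixed prefix followed by the symbol value in upper-case hex (priv_h2a maps 0xA-0xF to 'A'-'F'). A standalone equivalent, assuming TRAMP_SYM_PREFIX is the "__$dbTR__" string named in the comment above, is simply:

#include <stdio.h>

int main(void)
{
	unsigned int value = 0x0001f2a0;
	char name[32];

	/* same shape as priv_tramp_sym_gen_name(): prefix + 8 hex digits */
	snprintf(name, sizeof(name), "__$dbTR__%08X", value);
	printf("%s\n", name);   /* __$dbTR__0001F2A0 */
	return 0;
}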
123 | |||
124 | /* | ||
125 | * Function: priv_tramp_string_create | ||
126 | * Description: Create a new string specific to the trampoline loading and add | ||
127 | * it to the trampoline string list. This list contains the | ||
128 | * trampoline section name and trampoline point symbols. | ||
129 | */ | ||
130 | static struct tramp_string *priv_tramp_string_create(struct dload_state *dlthis, | ||
131 | u32 str_len, char *str) | ||
132 | { | ||
133 | struct tramp_string *new_string = NULL; | ||
134 | u32 i; | ||
135 | |||
136 | /* Create a new string object with the specified size. */ | ||
137 | new_string = | ||
138 | (struct tramp_string *)dlthis->mysym->dload_allocate(dlthis->mysym, | ||
139 | (sizeof | ||
140 | (struct | ||
141 | tramp_string) | ||
142 | + str_len + | ||
143 | 1)); | ||
144 | if (new_string != NULL) { | ||
145 | /* Clear the string first. This ensures the ending NULL is | ||
146 | * present and the optimizer won't touch it. */ | ||
147 | for (i = 0; i < (sizeof(struct tramp_string) + str_len + 1); | ||
148 | i++) | ||
149 | *((u8 *) new_string + i) = 0; | ||
150 | |||
151 | /* Add this string to our virtual table by assigning it the | ||
152 | * next index and pushing it to the tail of the list. */ | ||
153 | new_string->index = dlthis->tramp.tramp_string_next_index; | ||
154 | dlthis->tramp.tramp_string_next_index++; | ||
155 | dlthis->tramp.tramp_string_size += str_len + 1; | ||
156 | |||
157 | new_string->next = NULL; | ||
158 | if (dlthis->tramp.string_head == NULL) | ||
159 | dlthis->tramp.string_head = new_string; | ||
160 | else | ||
161 | dlthis->tramp.string_tail->next = new_string; | ||
162 | |||
163 | dlthis->tramp.string_tail = new_string; | ||
164 | |||
165 | /* Copy the string over to the new object */ | ||
166 | for (i = 0; i < str_len; i++) | ||
167 | new_string->str[i] = str[i]; | ||
168 | } | ||
169 | |||
170 | return new_string; | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * Function: priv_tramp_string_find | ||
175 | * Description: Walk the trampoline string list and find a match for the | ||
176 | * provided string. If no match is found, NULL is returned. | ||
177 | */ | ||
178 | static struct tramp_string *priv_tramp_string_find(struct dload_state *dlthis, | ||
179 | char *str) | ||
180 | { | ||
181 | struct tramp_string *cur_str = NULL; | ||
182 | struct tramp_string *ret_val = NULL; | ||
183 | u32 i; | ||
184 | u32 str_len = strlen(str); | ||
185 | |||
186 | for (cur_str = dlthis->tramp.string_head; | ||
187 | (ret_val == NULL) && (cur_str != NULL); cur_str = cur_str->next) { | ||
188 | /* If the string lengths aren't equal, don't bother | ||
189 | * comparing */ | ||
190 | if (str_len != strlen(cur_str->str)) | ||
191 | continue; | ||
192 | |||
193 | /* Walk the strings until one of them ends */ | ||
194 | for (i = 0; i < str_len; i++) { | ||
195 | /* If they don't match in the current position then | ||
196 | * break out now, no sense in continuing to look at | ||
197 | * this string. */ | ||
198 | if (str[i] != cur_str->str[i]) | ||
199 | break; | ||
200 | } | ||
201 | |||
202 | if (i == str_len) | ||
203 | ret_val = cur_str; | ||
204 | } | ||
205 | |||
206 | return ret_val; | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * Function: priv_string_tbl_finalize | ||
211 | * Description: Flatten the trampoline string list into a table of NULL | ||
212 | * terminated strings. This is the same string table format | ||
213 | * as used by the COFF/DOFF file. | ||
214 | */ | ||
215 | static int priv_string_tbl_finalize(struct dload_state *dlthis) | ||
216 | { | ||
217 | int ret_val = 0; | ||
218 | struct tramp_string *cur_string; | ||
219 | char *cur_loc; | ||
220 | char *tmp; | ||
221 | |||
222 | /* Allocate enough space for all strings that have been created. The | ||
223 | * table is simply all strings concatenated together with NULL | ||
224 | * endings. */ | ||
225 | dlthis->tramp.final_string_table = | ||
226 | (char *)dlthis->mysym->dload_allocate(dlthis->mysym, | ||
227 | dlthis->tramp. | ||
228 | tramp_string_size); | ||
229 | if (dlthis->tramp.final_string_table != NULL) { | ||
230 | /* We got our buffer, walk the list and release the nodes as | ||
231 | * we go */ | ||
232 | cur_loc = dlthis->tramp.final_string_table; | ||
233 | cur_string = dlthis->tramp.string_head; | ||
234 | while (cur_string != NULL) { | ||
235 | /* Move the head/tail pointers */ | ||
236 | dlthis->tramp.string_head = cur_string->next; | ||
237 | if (dlthis->tramp.string_tail == cur_string) | ||
238 | dlthis->tramp.string_tail = NULL; | ||
239 | |||
240 | /* Copy the string contents */ | ||
241 | for (tmp = cur_string->str; | ||
242 | *tmp != '\0'; tmp++, cur_loc++) | ||
243 | *cur_loc = *tmp; | ||
244 | |||
245 | /* Copy the NULL termination explicitly, since the loop above | ||
246 | * used it as its stop condition and did not copy it. */ | ||
247 | *cur_loc = '\0'; | ||
248 | cur_loc++; | ||
249 | |||
250 | /* Free the string node, we don't need it any more. */ | ||
251 | dlthis->mysym->dload_deallocate(dlthis->mysym, | ||
252 | cur_string); | ||
253 | |||
254 | /* Move our pointer to the next one */ | ||
255 | cur_string = dlthis->tramp.string_head; | ||
256 | } | ||
257 | |||
258 | /* Update our return value to success */ | ||
259 | ret_val = 1; | ||
260 | } else | ||
261 | dload_error(dlthis, "Failed to allocate trampoline " | ||
262 | "string table"); | ||
263 | |||
264 | return ret_val; | ||
265 | } | ||
266 | |||
267 | /* | ||
268 | * Function: priv_tramp_sect_alloc | ||
269 | * Description: Virtually allocate space from the trampoline section. This | ||
270 | * function returns the next offset within the trampoline section | ||
271 | * that is available and advances the next available offset by the | ||
272 | * requested size. NO TARGET ALLOCATION IS DONE AT THIS TIME. | ||
273 | */ | ||
274 | static u32 priv_tramp_sect_alloc(struct dload_state *dlthis, u32 tramp_size) | ||
275 | { | ||
276 | u32 ret_val; | ||
277 | |||
278 | /* If the next available address is 0, this is our first allocation. | ||
279 | * Create a section name string to go into the string table. */ | ||
280 | if (dlthis->tramp.tramp_sect_next_addr == 0) { | ||
281 | dload_syms_error(dlthis->mysym, "*** WARNING *** created " | ||
282 | "dynamic TRAMPOLINE section for module %s", | ||
283 | dlthis->str_head); | ||
284 | } | ||
285 | |||
286 | /* Reserve space for the new trampoline */ | ||
287 | ret_val = dlthis->tramp.tramp_sect_next_addr; | ||
288 | dlthis->tramp.tramp_sect_next_addr += tramp_size; | ||
289 | return ret_val; | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * Function: priv_tramp_sym_create | ||
294 | * Description: Allocate and create a new trampoline specific symbol and add | ||
295 | * it to the trampoline symbol list. These symbols will include | ||
296 | * trampoline points as well as the external symbols they | ||
297 | * reference. | ||
298 | */ | ||
299 | static struct tramp_sym *priv_tramp_sym_create(struct dload_state *dlthis, | ||
300 | u32 str_index, | ||
301 | struct local_symbol *tmp_sym) | ||
302 | { | ||
303 | struct tramp_sym *new_sym = NULL; | ||
304 | u32 i; | ||
305 | |||
306 | /* Allocate new space for the symbol in the symbol table. */ | ||
307 | new_sym = | ||
308 | (struct tramp_sym *)dlthis->mysym->dload_allocate(dlthis->mysym, | ||
309 | sizeof(struct tramp_sym)); | ||
310 | if (new_sym != NULL) { | ||
311 | for (i = 0; i != sizeof(struct tramp_sym); i++) | ||
312 | *((char *)new_sym + i) = 0; | ||
313 | |||
314 | /* Assign this symbol the next symbol index for easier | ||
315 | * reference later during relocation. */ | ||
316 | new_sym->index = dlthis->tramp.tramp_sym_next_index; | ||
317 | dlthis->tramp.tramp_sym_next_index++; | ||
318 | |||
319 | /* Populate the symbol information. At this point any | ||
320 | * trampoline symbols will be the offset location, not the | ||
321 | * final. Copy over the symbol info to start, then be sure to | ||
322 | * get the string index from the trampoline string table. */ | ||
323 | new_sym->sym_info = *tmp_sym; | ||
324 | new_sym->str_index = str_index; | ||
325 | |||
326 | /* Push the new symbol to the tail of the symbol table list */ | ||
327 | new_sym->next = NULL; | ||
328 | if (dlthis->tramp.symbol_head == NULL) | ||
329 | dlthis->tramp.symbol_head = new_sym; | ||
330 | else | ||
331 | dlthis->tramp.symbol_tail->next = new_sym; | ||
332 | |||
333 | dlthis->tramp.symbol_tail = new_sym; | ||
334 | } | ||
335 | |||
336 | return new_sym; | ||
337 | } | ||
338 | |||
339 | /* | ||
340 | * Function: priv_tramp_sym_get | ||
341 | * Description: Search for the symbol with the matching string index (from | ||
342 | * the trampoline string table) and return the trampoline | ||
343 | * symbol object, if found. Otherwise return NULL. | ||
344 | */ | ||
345 | static struct tramp_sym *priv_tramp_sym_get(struct dload_state *dlthis, | ||
346 | u32 string_index) | ||
347 | { | ||
348 | struct tramp_sym *sym_found = NULL; | ||
349 | |||
350 | /* Walk the symbol table list and search vs. the string index */ | ||
351 | for (sym_found = dlthis->tramp.symbol_head; | ||
352 | sym_found != NULL; sym_found = sym_found->next) { | ||
353 | if (sym_found->str_index == string_index) | ||
354 | break; | ||
355 | } | ||
356 | |||
357 | return sym_found; | ||
358 | } | ||
359 | |||
360 | /* | ||
361 | * Function: priv_tramp_sym_find | ||
362 | * Description: Search for a trampoline symbol based on the string name of | ||
363 | * the symbol. Return the symbol object, if found, otherwise | ||
364 | * return NULL. | ||
365 | */ | ||
366 | static struct tramp_sym *priv_tramp_sym_find(struct dload_state *dlthis, | ||
367 | char *string) | ||
368 | { | ||
369 | struct tramp_sym *sym_found = NULL; | ||
370 | struct tramp_string *str_found = NULL; | ||
371 | |||
372 | /* First, search for the string, then search for the sym based on the | ||
373 | string index. */ | ||
374 | str_found = priv_tramp_string_find(dlthis, string); | ||
375 | if (str_found != NULL) | ||
376 | sym_found = priv_tramp_sym_get(dlthis, str_found->index); | ||
377 | |||
378 | return sym_found; | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * Function: priv_tramp_sym_finalize | ||
383 | * Description: Allocate a flat symbol table for the trampoline section, | ||
384 | * put each trampoline symbol into the table, adjust the | ||
385 | * symbol value based on the section address on the target and | ||
386 | * free the trampoline symbol list nodes. | ||
387 | */ | ||
388 | static int priv_tramp_sym_finalize(struct dload_state *dlthis) | ||
389 | { | ||
390 | int ret_val = 0; | ||
391 | struct tramp_sym *cur_sym; | ||
392 | struct ldr_section_info *tramp_sect = | ||
393 | &dlthis->ldr_sections[dlthis->allocated_secn_count]; | ||
394 | struct local_symbol *new_sym; | ||
395 | |||
396 | /* Allocate a table to hold a flattened version of all symbols | ||
397 | * created. */ | ||
398 | dlthis->tramp.final_sym_table = | ||
399 | (struct local_symbol *)dlthis->mysym->dload_allocate(dlthis->mysym, | ||
400 | (sizeof(struct local_symbol) * dlthis->tramp. | ||
401 | tramp_sym_next_index)); | ||
402 | if (dlthis->tramp.final_sym_table != NULL) { | ||
403 | /* Walk the list of all symbols, copy it over to the flattened | ||
404 | * table. After it has been copied, the node can be freed as | ||
405 | * it is no longer needed. */ | ||
406 | new_sym = dlthis->tramp.final_sym_table; | ||
407 | cur_sym = dlthis->tramp.symbol_head; | ||
408 | while (cur_sym != NULL) { | ||
409 | /* Pop it off the list */ | ||
410 | dlthis->tramp.symbol_head = cur_sym->next; | ||
411 | if (cur_sym == dlthis->tramp.symbol_tail) | ||
412 | dlthis->tramp.symbol_tail = NULL; | ||
413 | |||
414 | /* Copy the symbol contents into the flat table */ | ||
415 | *new_sym = cur_sym->sym_info; | ||
416 | |||
417 | /* Now finalize the symbol. If it is in the tramp | ||
418 | * section, we need to adjust for the section start. | ||
419 | * If it is external then we don't need to adjust at | ||
420 | * all. | ||
421 | * NOTE: THIS CODE ASSUMES THAT THE TRAMPOLINE IS | ||
422 | * REFERENCED LIKE A CALL TO AN EXTERNAL SO VALUE AND | ||
423 | * DELTA ARE THE SAME. SEE THE FUNCTION dload_symbols | ||
424 | * WHERE DN_UNDEF IS HANDLED FOR MORE REFERENCE. */ | ||
425 | if (new_sym->secnn < 0) { | ||
426 | new_sym->value += tramp_sect->load_addr; | ||
427 | new_sym->delta = new_sym->value; | ||
428 | } | ||
429 | |||
430 | /* Let go of the symbol node */ | ||
431 | dlthis->mysym->dload_deallocate(dlthis->mysym, cur_sym); | ||
432 | |||
433 | /* Move to the next node */ | ||
434 | cur_sym = dlthis->tramp.symbol_head; | ||
435 | new_sym++; | ||
436 | } | ||
437 | |||
438 | ret_val = 1; | ||
439 | } else | ||
440 | dload_error(dlthis, "Failed to alloc trampoline sym table"); | ||
441 | |||
442 | return ret_val; | ||
443 | } | ||
444 | |||
445 | /* | ||
446 | * Function: priv_tgt_img_gen | ||
447 | * Description: Allocate storage for and copy the target specific image data | ||
448 | * and fix up its relocations for the new external symbol. If | ||
449 | * a trampoline image packet was successfully created it is added | ||
450 | * to the trampoline list. | ||
451 | */ | ||
452 | static int priv_tgt_img_gen(struct dload_state *dlthis, u32 base, | ||
453 | u32 gen_index, struct tramp_sym *new_ext_sym) | ||
454 | { | ||
455 | struct tramp_img_pkt *new_img_pkt = NULL; | ||
456 | u32 i; | ||
457 | u32 pkt_size = tramp_img_pkt_size_get(); | ||
458 | u8 *gen_tbl_entry; | ||
459 | u8 *pkt_data; | ||
460 | struct reloc_record_t *cur_relo; | ||
461 | int ret_val = 0; | ||
462 | |||
463 | /* Allocate a new image packet and set it up. */ | ||
464 | new_img_pkt = | ||
465 | (struct tramp_img_pkt *)dlthis->mysym->dload_allocate(dlthis->mysym, | ||
466 | pkt_size); | ||
467 | if (new_img_pkt != NULL) { | ||
468 | /* Save the base, this is where it goes in the section */ | ||
469 | new_img_pkt->base = base; | ||
470 | |||
471 | /* Copy over the image data and relos from the target table */ | ||
472 | pkt_data = (u8 *) &new_img_pkt->hdr; | ||
473 | gen_tbl_entry = (u8 *) &tramp_gen_info[gen_index]; | ||
474 | for (i = 0; i < pkt_size; i++) { | ||
475 | *pkt_data = *gen_tbl_entry; | ||
476 | pkt_data++; | ||
477 | gen_tbl_entry++; | ||
478 | } | ||
479 | |||
480 | /* Update the relocations to point to the external symbol */ | ||
481 | cur_relo = | ||
482 | (struct reloc_record_t *)((u8 *) &new_img_pkt->hdr + | ||
483 | new_img_pkt->hdr.relo_offset); | ||
484 | for (i = 0; i < new_img_pkt->hdr.num_relos; i++) | ||
485 | cur_relo[i].SYMNDX = new_ext_sym->index; | ||
486 | |||
487 | /* Add it to the trampoline list. */ | ||
488 | new_img_pkt->next = dlthis->tramp.tramp_pkts; | ||
489 | dlthis->tramp.tramp_pkts = new_img_pkt; | ||
490 | |||
491 | ret_val = 1; | ||
492 | } | ||
493 | |||
494 | return ret_val; | ||
495 | } | ||
496 | |||
497 | /* | ||
498 | * Function: priv_pkt_relo | ||
499 | * Description: Take the provided image data and the collection of relocations | ||
500 | * for it and perform the relocations. Note that all relocations | ||
501 | * at this stage are considered SECOND PASS since the original | ||
502 | * image has already been processed in the first pass. This means | ||
503 | * TRAMPOLINES ARE TREATED AS 2ND PASS even though this is really | ||
504 | * the first (and only) relocation that will be performed on them. | ||
505 | */ | ||
506 | static int priv_pkt_relo(struct dload_state *dlthis, tgt_au_t *data, | ||
507 | struct reloc_record_t *rp[], u32 relo_count) | ||
508 | { | ||
509 | int ret_val = 1; | ||
510 | u32 i; | ||
511 | bool tmp; | ||
512 | |||
513 | /* Walk through all of the relos and process them. This function is | ||
514 | * the equivalent of relocate_packet() from cload.c, but specialized | ||
515 | * for trampolines and 2nd phase relocations. */ | ||
516 | for (i = 0; i < relo_count; i++) | ||
517 | dload_relocate(dlthis, data, rp[i], &tmp, true); | ||
518 | |||
519 | return ret_val; | ||
520 | } | ||
521 | |||
522 | /* | ||
523 | * Function: priv_tramp_pkt_finalize | ||
524 | * Description: Walk the list of all trampoline packets and finalize them. | ||
525 | * Each trampoline image packet will be relocated now that the | ||
526 | * trampoline section has been allocated on the target. Once | ||
527 | * all of the relocations are done the trampoline image data | ||
528 | * is written into target memory and the trampoline packet | ||
529 | * is freed: it is no longer needed after this point. | ||
530 | */ | ||
531 | static int priv_tramp_pkt_finalize(struct dload_state *dlthis) | ||
532 | { | ||
533 | int ret_val = 1; | ||
534 | struct tramp_img_pkt *cur_pkt = NULL; | ||
535 | struct reloc_record_t *relos[MAX_RELOS_PER_PASS]; | ||
536 | u32 relos_done; | ||
537 | u32 i; | ||
538 | struct reloc_record_t *cur_relo; | ||
539 | struct ldr_section_info *sect_info = | ||
540 | &dlthis->ldr_sections[dlthis->allocated_secn_count]; | ||
541 | |||
542 | /* Walk the list of trampoline packets and relocate each packet. This | ||
543 | * function is the trampoline equivalent of dload_data() from | ||
544 | * cload.c. */ | ||
545 | cur_pkt = dlthis->tramp.tramp_pkts; | ||
546 | while ((ret_val != 0) && (cur_pkt != NULL)) { | ||
547 | /* Remove the pkt from the list */ | ||
548 | dlthis->tramp.tramp_pkts = cur_pkt->next; | ||
549 | |||
550 | /* Setup section and image offset information for the relo */ | ||
551 | dlthis->image_secn = sect_info; | ||
552 | dlthis->image_offset = cur_pkt->base; | ||
553 | dlthis->delta_runaddr = sect_info->run_addr; | ||
554 | |||
555 | /* Walk through all relos for the packet */ | ||
556 | relos_done = 0; | ||
557 | cur_relo = (struct reloc_record_t *)((u8 *) &cur_pkt->hdr + | ||
558 | cur_pkt->hdr.relo_offset); | ||
559 | while (relos_done < cur_pkt->hdr.num_relos) { | ||
560 | #ifdef ENABLE_TRAMP_DEBUG | ||
561 | dload_syms_error(dlthis->mysym, | ||
562 | "===> Trampoline %x branches to %x", | ||
563 | sect_info->run_addr + | ||
564 | dlthis->image_offset, | ||
565 | dlthis-> | ||
566 | tramp.final_sym_table[cur_relo-> | ||
567 | SYMNDX].value); | ||
568 | #endif | ||
569 | |||
570 | for (i = 0; | ||
571 | ((i < MAX_RELOS_PER_PASS) && | ||
572 | ((i + relos_done) < cur_pkt->hdr.num_relos)); i++) | ||
573 | relos[i] = cur_relo + i; | ||
574 | |||
575 | /* Do the actual relo */ | ||
576 | ret_val = priv_pkt_relo(dlthis, | ||
577 | (tgt_au_t *) &cur_pkt->payload, | ||
578 | relos, i); | ||
579 | if (ret_val == 0) { | ||
580 | dload_error(dlthis, | ||
581 | "Relocation of trampoline pkt at %x" | ||
582 | " failed", cur_pkt->base + | ||
583 | sect_info->run_addr); | ||
584 | break; | ||
585 | } | ||
586 | |||
587 | relos_done += i; | ||
588 | cur_relo += i; | ||
589 | } | ||
590 | |||
591 | /* Make sure we didn't hit a problem */ | ||
592 | if (ret_val != 0) { | ||
593 | /* Relos are done for the packet, write it to the | ||
594 | * target */ | ||
595 | ret_val = dlthis->myio->writemem(dlthis->myio, | ||
596 | &cur_pkt->payload, | ||
597 | sect_info->load_addr + | ||
598 | cur_pkt->base, | ||
599 | sect_info, | ||
600 | BYTE_TO_HOST | ||
601 | (cur_pkt->hdr. | ||
602 | tramp_code_size)); | ||
603 | if (ret_val == 0) { | ||
604 | dload_error(dlthis, | ||
605 | "Write to " FMT_UI32 " failed", | ||
606 | sect_info->load_addr + | ||
607 | cur_pkt->base); | ||
608 | } | ||
609 | |||
610 | /* Done with the pkt, let it go */ | ||
611 | dlthis->mysym->dload_deallocate(dlthis->mysym, cur_pkt); | ||
612 | |||
613 | /* Get the next packet to process */ | ||
614 | cur_pkt = dlthis->tramp.tramp_pkts; | ||
615 | } | ||
616 | } | ||
617 | |||
618 | return ret_val; | ||
619 | } | ||
620 | |||
621 | /* | ||
622 | * Function: priv_dup_pkt_finalize | ||
623 | * Description: Walk the list of duplicate image packets and finalize them. | ||
624 | * Each duplicate packet will be relocated again for the | ||
625 | * relocations that previously failed and have been adjusted | ||
626 | * to point at a trampoline. Once all relocations for a packet | ||
627 | * have been done, write the packet into target memory. The | ||
628 | * duplicate packet and its relocation chain are all freed | ||
629 | * after use here as they are no longer needed after this. | ||
630 | */ | ||
631 | static int priv_dup_pkt_finalize(struct dload_state *dlthis) | ||
632 | { | ||
633 | int ret_val = 1; | ||
634 | struct tramp_img_dup_pkt *cur_pkt; | ||
635 | struct tramp_img_dup_relo *cur_relo; | ||
636 | struct reloc_record_t *relos[MAX_RELOS_PER_PASS]; | ||
637 | struct doff_scnhdr_t *sect_hdr = NULL; | ||
638 | s32 i; | ||
639 | |||
640 | /* Similar to the trampoline pkt finalize, this function walks each dup | ||
641 | * pkt that was generated and performs all relocations that were | ||
642 | * deferred to a 2nd pass. This is the equivalent of dload_data() from | ||
643 | * cload.c, but does not need the additional reorder and checksum | ||
644 | * processing as it has already been done. */ | ||
645 | cur_pkt = dlthis->tramp.dup_pkts; | ||
646 | while ((ret_val != 0) && (cur_pkt != NULL)) { | ||
647 | /* Remove the node from the list, we'll be freeing it | ||
648 | * shortly */ | ||
649 | dlthis->tramp.dup_pkts = cur_pkt->next; | ||
650 | |||
651 | /* Setup the section and image offset for relocation */ | ||
652 | dlthis->image_secn = &dlthis->ldr_sections[cur_pkt->secnn]; | ||
653 | dlthis->image_offset = cur_pkt->offset; | ||
654 | |||
655 | /* In order to get the delta run address, we need to reference | ||
656 | * the original section header. It's a bit ugly, but needed | ||
657 | * for relo. */ | ||
658 | i = (s32) (dlthis->image_secn - dlthis->ldr_sections); | ||
659 | sect_hdr = dlthis->sect_hdrs + i; | ||
660 | dlthis->delta_runaddr = sect_hdr->ds_paddr; | ||
661 | |||
662 | /* Walk all relos in the chain and process each. */ | ||
663 | cur_relo = cur_pkt->relo_chain; | ||
664 | while (cur_relo != NULL) { | ||
665 | /* Process them a chunk at a time to be efficient */ | ||
666 | for (i = 0; (i < MAX_RELOS_PER_PASS) | ||
667 | && (cur_relo != NULL); | ||
668 | i++, cur_relo = cur_relo->next) { | ||
669 | relos[i] = &cur_relo->relo; | ||
670 | cur_pkt->relo_chain = cur_relo->next; | ||
671 | } | ||
672 | |||
673 | /* Do the actual relo */ | ||
674 | ret_val = priv_pkt_relo(dlthis, | ||
675 | cur_pkt->img_pkt.img_data, | ||
676 | relos, i); | ||
677 | if (ret_val == 0) { | ||
678 | dload_error(dlthis, | ||
679 | "Relocation of dup pkt at %x" | ||
680 | " failed", cur_pkt->offset + | ||
681 | dlthis->image_secn->run_addr); | ||
682 | break; | ||
683 | } | ||
684 | |||
685 | /* Release all of these relos, we're done with them */ | ||
686 | while (i > 0) { | ||
687 | dlthis->mysym->dload_deallocate(dlthis->mysym, | ||
688 | GET_CONTAINER | ||
689 | (relos[i - 1], | ||
690 | struct tramp_img_dup_relo, | ||
691 | relo)); | ||
692 | i--; | ||
693 | } | ||
694 | |||
695 | /* DO NOT ADVANCE cur_relo, IT IS ALREADY READY TO | ||
696 | * GO! */ | ||
697 | } | ||
698 | |||
699 | /* Done with all relos. Make sure we didn't have a problem and | ||
700 | * write it out to the target */ | ||
701 | if (ret_val != 0) { | ||
702 | ret_val = dlthis->myio->writemem(dlthis->myio, | ||
703 | cur_pkt->img_pkt. | ||
704 | img_data, | ||
705 | dlthis->image_secn-> | ||
706 | load_addr + | ||
707 | cur_pkt->offset, | ||
708 | dlthis->image_secn, | ||
709 | BYTE_TO_HOST | ||
710 | (cur_pkt->img_pkt. | ||
711 | packet_size)); | ||
712 | if (ret_val == 0) { | ||
713 | dload_error(dlthis, | ||
714 | "Write to " FMT_UI32 " failed", | ||
715 | dlthis->image_secn->load_addr + | ||
716 | cur_pkt->offset); | ||
717 | } | ||
718 | |||
719 | dlthis->mysym->dload_deallocate(dlthis->mysym, cur_pkt); | ||
720 | |||
721 | /* Advance to the next packet */ | ||
722 | cur_pkt = dlthis->tramp.dup_pkts; | ||
723 | } | ||
724 | } | ||
725 | |||
726 | return ret_val; | ||
727 | } | ||
728 | |||
729 | /* | ||
730 | * Function: priv_dup_find | ||
731 | * Description: Walk the list of existing duplicate packets and find a | ||
732 | * match based on the section number and image offset. Return | ||
733 | * the duplicate packet if found, otherwise NULL. | ||
734 | */ | ||
735 | static struct tramp_img_dup_pkt *priv_dup_find(struct dload_state *dlthis, | ||
736 | s16 secnn, u32 image_offset) | ||
737 | { | ||
738 | struct tramp_img_dup_pkt *cur_pkt = NULL; | ||
739 | |||
740 | for (cur_pkt = dlthis->tramp.dup_pkts; | ||
741 | cur_pkt != NULL; cur_pkt = cur_pkt->next) { | ||
742 | if ((cur_pkt->secnn == secnn) && | ||
743 | (cur_pkt->offset == image_offset)) { | ||
744 | /* Found a match, break out */ | ||
745 | break; | ||
746 | } | ||
747 | } | ||
748 | |||
749 | return cur_pkt; | ||
750 | } | ||
751 | |||
752 | /* | ||
753 | * Function: priv_img_pkt_dup | ||
754 | * Description: Duplicate the original image packet. If this is the first | ||
755 | * time this image packet has been seen (based on section number | ||
756 | * and image offset), create a new duplicate packet and add it | ||
757 | * to the dup packet list. If not, just get the existing one and | ||
758 | * update it with the current packet contents (since relocation | ||
759 | * on the packet is still ongoing in the first pass). Create a | ||
760 | * duplicate of the provided relocation, but update it to point | ||
761 | * to the new trampoline symbol. Add the new relocation dup to | ||
762 | * the dup packet's relo chain for 2nd pass relocation later. | ||
763 | */ | ||
764 | static int priv_img_pkt_dup(struct dload_state *dlthis, | ||
765 | s16 secnn, u32 image_offset, | ||
766 | struct image_packet_t *ipacket, | ||
767 | struct reloc_record_t *rp, | ||
768 | struct tramp_sym *new_tramp_sym) | ||
769 | { | ||
770 | struct tramp_img_dup_pkt *dup_pkt = NULL; | ||
771 | u32 new_dup_size; | ||
772 | s32 i; | ||
773 | int ret_val = 0; | ||
774 | struct tramp_img_dup_relo *dup_relo = NULL; | ||
775 | |||
776 | /* Determine if this image packet is already being tracked in the | ||
777 | dup list for other trampolines. */ | ||
778 | dup_pkt = priv_dup_find(dlthis, secnn, image_offset); | ||
779 | |||
780 | if (dup_pkt == NULL) { | ||
781 | /* This image packet does not exist in our tracking, so create | ||
782 | * a new one and add it to the head of the list. */ | ||
783 | new_dup_size = sizeof(struct tramp_img_dup_pkt) + | ||
784 | ipacket->packet_size; | ||
785 | |||
786 | dup_pkt = (struct tramp_img_dup_pkt *) | ||
787 | dlthis->mysym->dload_allocate(dlthis->mysym, new_dup_size); | ||
788 | if (dup_pkt != NULL) { | ||
789 | /* Save off the section and offset information */ | ||
790 | dup_pkt->secnn = secnn; | ||
791 | dup_pkt->offset = image_offset; | ||
792 | dup_pkt->relo_chain = NULL; | ||
793 | |||
794 | /* Copy the original packet content */ | ||
795 | dup_pkt->img_pkt = *ipacket; | ||
796 | dup_pkt->img_pkt.img_data = (u8 *) (dup_pkt + 1); | ||
797 | for (i = 0; i < ipacket->packet_size; i++) | ||
798 | *(dup_pkt->img_pkt.img_data + i) = | ||
799 | *(ipacket->img_data + i); | ||
800 | |||
801 | /* Add the packet to the dup list */ | ||
802 | dup_pkt->next = dlthis->tramp.dup_pkts; | ||
803 | dlthis->tramp.dup_pkts = dup_pkt; | ||
804 | } else | ||
805 | dload_error(dlthis, "Failed to create dup packet!"); | ||
806 | } else { | ||
807 | /* The image packet contents could have changed since | ||
808 | * trampoline detection happens during relocation of the image | ||
809 | * packets. So, we need to update the image packet contents | ||
810 | * before adding relo information. */ | ||
811 | for (i = 0; i < dup_pkt->img_pkt.packet_size; i++) | ||
812 | *(dup_pkt->img_pkt.img_data + i) = | ||
813 | *(ipacket->img_data + i); | ||
814 | } | ||
815 | |||
816 | /* Since the previous code may have allocated a new dup packet for us, | ||
817 | double check that we actually have one. */ | ||
818 | if (dup_pkt != NULL) { | ||
819 | /* Allocate a new node for the relo chain. Each image packet | ||
820 | * can potentially have multiple relocations that cause a | ||
821 | * trampoline to be generated. So, we keep them in a chain, | ||
822 | * order is not important. */ | ||
823 | dup_relo = dlthis->mysym->dload_allocate(dlthis->mysym, | ||
824 | sizeof(struct tramp_img_dup_relo)); | ||
825 | if (dup_relo != NULL) { | ||
826 | /* Copy the relo contents, adjust for the new | ||
827 | * trampoline and add it to the list. */ | ||
828 | dup_relo->relo = *rp; | ||
829 | dup_relo->relo.SYMNDX = new_tramp_sym->index; | ||
830 | |||
831 | dup_relo->next = dup_pkt->relo_chain; | ||
832 | dup_pkt->relo_chain = dup_relo; | ||
833 | |||
834 | /* That's it, we're done. Make sure we update our | ||
835 | * return value to be success since everything finished | ||
836 | * ok */ | ||
837 | ret_val = 1; | ||
838 | } else | ||
839 | dload_error(dlthis, "Unable to alloc dup relo"); | ||
840 | } | ||
841 | |||
842 | return ret_val; | ||
843 | } | ||
844 | |||
845 | /* | ||
846 | * Function: dload_tramp_avail | ||
847 | * Description: Check to see if the target supports a trampoline for this type | ||
848 | * of relocation. Return true if it does, otherwise false. | ||
849 | */ | ||
850 | bool dload_tramp_avail(struct dload_state *dlthis, struct reloc_record_t *rp) | ||
851 | { | ||
852 | bool ret_val = false; | ||
853 | u16 map_index; | ||
854 | u16 gen_index; | ||
855 | |||
856 | /* Check type hash vs. target tramp table */ | ||
857 | map_index = HASH_FUNC(rp->TYPE); | ||
858 | gen_index = tramp_map[map_index]; | ||
859 | if (gen_index != TRAMP_NO_GEN_AVAIL) | ||
860 | ret_val = true; | ||
861 | |||
862 | return ret_val; | ||
863 | } | ||
864 | |||
865 | /* | ||
866 | * Function: dload_tramp_generate | ||
867 | * Description: Create a new trampoline for the provided image packet and | ||
868 | * relocation causing problems. This will create the trampoline | ||
869 | * as well as duplicate/update the image packet and relocation | ||
870 | * causing the problem, which will be relo'd again during | ||
871 | * finalization. | ||
872 | */ | ||
873 | int dload_tramp_generate(struct dload_state *dlthis, s16 secnn, | ||
874 | u32 image_offset, struct image_packet_t *ipacket, | ||
875 | struct reloc_record_t *rp) | ||
876 | { | ||
877 | u16 map_index; | ||
878 | u16 gen_index; | ||
879 | int ret_val = 1; | ||
880 | char tramp_sym_str[TRAMP_SYM_PREFIX_LEN + TRAMP_SYM_HEX_ASCII_LEN]; | ||
881 | struct local_symbol *ref_sym; | ||
882 | struct tramp_sym *new_tramp_sym; | ||
883 | struct tramp_sym *new_ext_sym; | ||
884 | struct tramp_string *new_tramp_str; | ||
885 | u32 new_tramp_base; | ||
886 | struct local_symbol tmp_sym; | ||
887 | struct local_symbol ext_tmp_sym; | ||
888 | |||
889 | /* Hash the relo type to get our generator information */ | ||
890 | map_index = HASH_FUNC(rp->TYPE); | ||
891 | gen_index = tramp_map[map_index]; | ||
892 | if (gen_index != TRAMP_NO_GEN_AVAIL) { | ||
893 | /* If this is the first trampoline, create the section name in | ||
894 | * our string table for debug help later. */ | ||
895 | if (dlthis->tramp.string_head == NULL) { | ||
896 | priv_tramp_string_create(dlthis, | ||
897 | strlen(TRAMP_SECT_NAME), | ||
898 | TRAMP_SECT_NAME); | ||
899 | } | ||
900 | #ifdef ENABLE_TRAMP_DEBUG | ||
901 | dload_syms_error(dlthis->mysym, | ||
902 | "Trampoline at img loc %x, references %x", | ||
903 | dlthis->ldr_sections[secnn].run_addr + | ||
904 | image_offset + rp->vaddr, | ||
905 | dlthis->local_symtab[rp->SYMNDX].value); | ||
906 | #endif | ||
907 | |||
908 | /* Generate the trampoline string, check if already defined. | ||
909 | * If the relo symbol index is -1, it means we need the section | ||
910 | * info for relo later. To do this we'll dummy up a symbol | ||
911 | * with the section delta and run addresses. */ | ||
912 | if (rp->SYMNDX == -1) { | ||
913 | ext_tmp_sym.value = | ||
914 | dlthis->ldr_sections[secnn].run_addr; | ||
915 | ext_tmp_sym.delta = dlthis->sect_hdrs[secnn].ds_paddr; | ||
916 | ref_sym = &ext_tmp_sym; | ||
917 | } else | ||
918 | ref_sym = &(dlthis->local_symtab[rp->SYMNDX]); | ||
919 | |||
920 | priv_tramp_sym_gen_name(ref_sym->value, tramp_sym_str); | ||
921 | new_tramp_sym = priv_tramp_sym_find(dlthis, tramp_sym_str); | ||
922 | if (new_tramp_sym == NULL) { | ||
923 | /* If tramp string not defined, create it and a new | ||
924 | * string, and symbol for it as well as the original | ||
925 | * symbol which caused the trampoline. */ | ||
926 | new_tramp_str = priv_tramp_string_create(dlthis, | ||
927 | strlen | ||
928 | (tramp_sym_str), | ||
929 | tramp_sym_str); | ||
930 | if (new_tramp_str == NULL) { | ||
931 | dload_error(dlthis, "Failed to create new " | ||
932 | "trampoline string\n"); | ||
933 | ret_val = 0; | ||
934 | } else { | ||
935 | /* Allocate tramp section space for the new | ||
936 | * tramp from the target */ | ||
937 | new_tramp_base = priv_tramp_sect_alloc(dlthis, | ||
938 | tramp_size_get()); | ||
939 | |||
940 | /* We have a string, create the new symbol and | ||
941 | * duplicate the external. */ | ||
942 | tmp_sym.value = new_tramp_base; | ||
943 | tmp_sym.delta = 0; | ||
944 | tmp_sym.secnn = -1; | ||
945 | tmp_sym.sclass = 0; | ||
946 | new_tramp_sym = priv_tramp_sym_create(dlthis, | ||
947 | new_tramp_str-> | ||
948 | index, | ||
949 | &tmp_sym); | ||
950 | |||
951 | new_ext_sym = priv_tramp_sym_create(dlthis, -1, | ||
952 | ref_sym); | ||
953 | |||
954 | if ((new_tramp_sym != NULL) && | ||
955 | (new_ext_sym != NULL)) { | ||
956 | /* Call the image generator to get the | ||
957 | * new image data and fix up its | ||
958 | * relocations for the external | ||
959 | * symbol. */ | ||
960 | ret_val = priv_tgt_img_gen(dlthis, | ||
961 | new_tramp_base, | ||
962 | gen_index, | ||
963 | new_ext_sym); | ||
964 | |||
965 | /* Add generated image data to tramp | ||
966 | * image list */ | ||
967 | if (ret_val != 1) { | ||
968 | dload_error(dlthis, "Failed to " | ||
969 | "create img pkt for" | ||
970 | " trampoline\n"); | ||
971 | } | ||
972 | } else { | ||
973 | dload_error(dlthis, "Failed to create " | ||
974 | "new tramp syms " | ||
975 | "(%8.8X, %8.8X)\n", | ||
976 | new_tramp_sym, new_ext_sym); | ||
977 | ret_val = 0; | ||
978 | } | ||
979 | } | ||
980 | } | ||
981 | |||
982 | /* Duplicate the image data and relo record that caused the | ||
983 | * tramp, including updating the relo data to point to the tramp | ||
984 | * symbol. */ | ||
985 | if (ret_val == 1) { | ||
986 | ret_val = priv_img_pkt_dup(dlthis, secnn, image_offset, | ||
987 | ipacket, rp, new_tramp_sym); | ||
988 | if (ret_val != 1) { | ||
989 | dload_error(dlthis, "Failed to create dup of " | ||
990 | "original img pkt\n"); | ||
991 | } | ||
992 | } | ||
993 | } | ||
994 | |||
995 | return ret_val; | ||
996 | } | ||
997 | |||
998 | /* | ||
999 | * Function: dload_tramp_pkt_update | ||
1000 | * Description: Update the duplicate copy of this image packet, which the | ||
1001 | * trampoline layer is already tracking. This call is critical | ||
1002 | * to make if trampolines were generated anywhere within the | ||
1003 | * packet and first pass relo continued on the remainder. The | ||
1004 | * trampoline layer needs the updated image data so that, when 2nd | ||
1005 | * pass relo is done during finalize, the image packet can be | ||
1006 | * written to the target with all relocations applied. | ||
1007 | */ | ||
1008 | int dload_tramp_pkt_udpate(struct dload_state *dlthis, s16 secnn, | ||
1009 | u32 image_offset, struct image_packet_t *ipacket) | ||
1010 | { | ||
1011 | struct tramp_img_dup_pkt *dup_pkt = NULL; | ||
1012 | s32 i; | ||
1013 | int ret_val = 0; | ||
1014 | |||
1015 | /* Find the image packet in question, the caller needs us to update it | ||
1016 | since a trampoline was previously generated. */ | ||
1017 | dup_pkt = priv_dup_find(dlthis, secnn, image_offset); | ||
1018 | if (dup_pkt != NULL) { | ||
1019 | for (i = 0; i < dup_pkt->img_pkt.packet_size; i++) | ||
1020 | *(dup_pkt->img_pkt.img_data + i) = | ||
1021 | *(ipacket->img_data + i); | ||
1022 | |||
1023 | ret_val = 1; | ||
1024 | } else { | ||
1025 | dload_error(dlthis, | ||
1026 | "Unable to find existing DUP pkt for %x, offset %x", | ||
1027 | secnn, image_offset); | ||
1028 | |||
1029 | } | ||
1030 | |||
1031 | return ret_val; | ||
1032 | } | ||
1033 | |||
1034 | /* | ||
1035 | * Function: dload_tramp_finalize | ||
1036 | * Description: If any trampolines were created, finalize everything on the | ||
1037 | * target by allocating the trampoline section on the target, | ||
1038 | * finalizing the trampoline symbols, finalizing the trampoline | ||
1039 | * packets (write the new section to target memory) and finalize | ||
1040 | * the duplicate packets by doing 2nd pass relo over them. | ||
1041 | */ | ||
1042 | int dload_tramp_finalize(struct dload_state *dlthis) | ||
1043 | { | ||
1044 | int ret_val = 1; | ||
1045 | |||
1046 | if (dlthis->tramp.tramp_sect_next_addr != 0) { | ||
1047 | /* Finalize strings into a flat table. This is needed so it | ||
1048 | * can be added to the debug string table later. */ | ||
1049 | ret_val = priv_string_tbl_finalize(dlthis); | ||
1050 | |||
1051 | /* Do target allocation for section BEFORE finalizing | ||
1052 | * symbols. */ | ||
1053 | if (ret_val != 0) | ||
1054 | ret_val = priv_tramp_sect_tgt_alloc(dlthis); | ||
1055 | |||
1056 | /* Finalize symbols with their correct target information and | ||
1057 | * flatten */ | ||
1058 | if (ret_val != 0) | ||
1059 | ret_val = priv_tramp_sym_finalize(dlthis); | ||
1060 | |||
1061 | /* Finalize all trampoline packets. This performs the | ||
1062 | * relocation on the packets as well as writing them to target | ||
1063 | * memory. */ | ||
1064 | if (ret_val != 0) | ||
1065 | ret_val = priv_tramp_pkt_finalize(dlthis); | ||
1066 | |||
1067 | /* Perform a 2nd pass relocation on the dup list. */ | ||
1068 | if (ret_val != 0) | ||
1069 | ret_val = priv_dup_pkt_finalize(dlthis); | ||
1070 | } | ||
1071 | |||
1072 | return ret_val; | ||
1073 | } | ||
1074 | |||
1075 | /* | ||
1076 | * Function: dload_tramp_cleanup | ||
1077 | * Description: Release all temporary resources used in the trampoline layer. | ||
1078 | * Note that the target memory which may have been allocated and | ||
1079 | * written to store the trampolines is NOT RELEASED HERE since it | ||
1080 | * is potentially still in use. It is automatically released | ||
1081 | * when the module is unloaded. | ||
1082 | */ | ||
1083 | void dload_tramp_cleanup(struct dload_state *dlthis) | ||
1084 | { | ||
1085 | struct tramp_info *tramp = &dlthis->tramp; | ||
1086 | struct tramp_sym *cur_sym; | ||
1087 | struct tramp_string *cur_string; | ||
1088 | struct tramp_img_pkt *cur_tramp_pkt; | ||
1089 | struct tramp_img_dup_pkt *cur_dup_pkt; | ||
1090 | struct tramp_img_dup_relo *cur_dup_relo; | ||
1091 | |||
1092 | /* If there were no tramps generated, just return */ | ||
1093 | if (tramp->tramp_sect_next_addr == 0) | ||
1094 | return; | ||
1095 | |||
1096 | /* Destroy all tramp information */ | ||
1097 | for (cur_sym = tramp->symbol_head; | ||
1098 | cur_sym != NULL; cur_sym = tramp->symbol_head) { | ||
1099 | tramp->symbol_head = cur_sym->next; | ||
1100 | if (tramp->symbol_tail == cur_sym) | ||
1101 | tramp->symbol_tail = NULL; | ||
1102 | |||
1103 | dlthis->mysym->dload_deallocate(dlthis->mysym, cur_sym); | ||
1104 | } | ||
1105 | |||
1106 | if (tramp->final_sym_table != NULL) | ||
1107 | dlthis->mysym->dload_deallocate(dlthis->mysym, | ||
1108 | tramp->final_sym_table); | ||
1109 | |||
1110 | for (cur_string = tramp->string_head; | ||
1111 | cur_string != NULL; cur_string = tramp->string_head) { | ||
1112 | tramp->string_head = cur_string->next; | ||
1113 | if (tramp->string_tail == cur_string) | ||
1114 | tramp->string_tail = NULL; | ||
1115 | |||
1116 | dlthis->mysym->dload_deallocate(dlthis->mysym, cur_string); | ||
1117 | } | ||
1118 | |||
1119 | if (tramp->final_string_table != NULL) | ||
1120 | dlthis->mysym->dload_deallocate(dlthis->mysym, | ||
1121 | tramp->final_string_table); | ||
1122 | |||
1123 | for (cur_tramp_pkt = tramp->tramp_pkts; | ||
1124 | cur_tramp_pkt != NULL; cur_tramp_pkt = tramp->tramp_pkts) { | ||
1125 | tramp->tramp_pkts = cur_tramp_pkt->next; | ||
1126 | dlthis->mysym->dload_deallocate(dlthis->mysym, cur_tramp_pkt); | ||
1127 | } | ||
1128 | |||
1129 | for (cur_dup_pkt = tramp->dup_pkts; | ||
1130 | cur_dup_pkt != NULL; cur_dup_pkt = tramp->dup_pkts) { | ||
1131 | tramp->dup_pkts = cur_dup_pkt->next; | ||
1132 | |||
1133 | for (cur_dup_relo = cur_dup_pkt->relo_chain; | ||
1134 | cur_dup_relo != NULL; | ||
1135 | cur_dup_relo = cur_dup_pkt->relo_chain) { | ||
1136 | cur_dup_pkt->relo_chain = cur_dup_relo->next; | ||
1137 | dlthis->mysym->dload_deallocate(dlthis->mysym, | ||
1138 | cur_dup_relo); | ||
1139 | } | ||
1140 | |||
1141 | dlthis->mysym->dload_deallocate(dlthis->mysym, cur_dup_pkt); | ||
1142 | } | ||
1143 | } | ||
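The symbol handling removed above follows a two-stage pattern: priv_tramp_sym_create() pushes each new trampoline symbol onto a singly linked list and assigns it the next sequential index, and priv_tramp_sym_finalize() later flattens that list into a contiguous array so relocations can address symbols by index in constant time. Below is a minimal user-space sketch of the same pattern; the sym_node type, helper names and sample values are illustrative rather than the driver's own, and for brevity it adjusts every value by the load address, whereas the driver only adjusts symbols that live in the trampoline section.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct tramp_sym: just an index and a value. */
struct sym_node {
	unsigned int index;		/* sequential index handed out at creation */
	unsigned int value;		/* e.g. offset within the trampoline section */
	struct sym_node *next;
};

static struct sym_node *head, *tail;
static unsigned int next_index;

/* Equivalent of priv_tramp_sym_create(): allocate, index, append to tail. */
static struct sym_node *sym_create(unsigned int value)
{
	struct sym_node *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->index = next_index++;
	s->value = value;
	if (!head)
		head = s;
	else
		tail->next = s;
	tail = s;
	return s;
}

/* Equivalent of priv_tramp_sym_finalize(): flatten the list into an array,
 * adjust each value by the section load address, and free the list nodes. */
static unsigned int *sym_finalize(unsigned int load_addr, unsigned int count)
{
	unsigned int *table = malloc(count * sizeof(*table));
	struct sym_node *cur;

	if (!table)
		return NULL;
	while ((cur = head) != NULL) {
		head = cur->next;
		table[cur->index] = cur->value + load_addr;
		free(cur);
	}
	tail = NULL;
	return table;
}

int main(void)
{
	unsigned int *table;

	sym_create(0x00);	/* first trampoline at section offset 0 */
	sym_create(0x20);	/* second trampoline at offset 0x20 */
	table = sym_finalize(0x8000, next_index);
	if (table)
		printf("sym[1] resolves to 0x%x\n", table[1]);	/* 0x8020 */
	free(table);
	return 0;
}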
diff --git a/drivers/staging/tidspbridge/dynload/tramp_table_c6000.c b/drivers/staging/tidspbridge/dynload/tramp_table_c6000.c deleted file mode 100644 index 09cc64f213c0..000000000000 --- a/drivers/staging/tidspbridge/dynload/tramp_table_c6000.c +++ /dev/null | |||
@@ -1,164 +0,0 @@ | |||
1 | /* | ||
2 | * tramp_table_c6000.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #include "dload_internal.h" | ||
18 | |||
19 | /* These are defined in coff.h, but may not be available on all platforms | ||
20 | so we'll go ahead and define them here. */ | ||
21 | #ifndef R_C60LO16 | ||
22 | #define R_C60LO16 0x54 /* C60: MVK Low Half Register */ | ||
23 | #define R_C60HI16 0x55 /* C60: MVKH/MVKLH High Half Register */ | ||
24 | #endif | ||
25 | |||
26 | #define C6X_TRAMP_WORD_COUNT 8 | ||
27 | #define C6X_TRAMP_MAX_RELOS 8 | ||
28 | |||
29 | /* THIS HASH FUNCTION MUST MATCH THE ONE reloc_table_c6000.c */ | ||
30 | #define HASH_FUNC(zz) (((((zz) + 1) * 1845UL) >> 11) & 63) | ||
31 | |||
32 | /* THIS MUST MATCH reloc_record_t FOR A SYMBOL BASED RELO */ | ||
33 | struct c6000_relo_record { | ||
34 | s32 vaddr; | ||
35 | s32 symndx; | ||
36 | #ifndef _BIG_ENDIAN | ||
37 | u16 disp; | ||
38 | u16 type; | ||
39 | #else | ||
40 | u16 type; | ||
41 | u16 disp; | ||
42 | #endif | ||
43 | }; | ||
44 | |||
45 | struct c6000_gen_code { | ||
46 | struct tramp_gen_code_hdr hdr; | ||
47 | u32 tramp_instrs[C6X_TRAMP_WORD_COUNT]; | ||
48 | struct c6000_relo_record relos[C6X_TRAMP_MAX_RELOS]; | ||
49 | }; | ||
50 | |||
51 | /* Hash mapping for relos that can cause trampolines. */ | ||
52 | static const u16 tramp_map[] = { | ||
53 | 65535, | ||
54 | 65535, | ||
55 | 65535, | ||
56 | 65535, | ||
57 | 65535, | ||
58 | 65535, | ||
59 | 65535, | ||
60 | 65535, | ||
61 | 65535, | ||
62 | 65535, | ||
63 | 0, | ||
64 | 65535, | ||
65 | 65535, | ||
66 | 65535, | ||
67 | 65535, | ||
68 | 65535, | ||
69 | 65535, | ||
70 | 65535, | ||
71 | 65535, | ||
72 | 65535, | ||
73 | 65535, | ||
74 | 65535, | ||
75 | 65535, | ||
76 | 65535, | ||
77 | 65535, | ||
78 | 65535, | ||
79 | 65535, | ||
80 | 65535, | ||
81 | 65535, | ||
82 | 65535, | ||
83 | 65535, | ||
84 | 65535, | ||
85 | 65535, | ||
86 | 65535, | ||
87 | 65535, | ||
88 | 65535, | ||
89 | 65535, | ||
90 | 65535, | ||
91 | 65535, | ||
92 | 65535, | ||
93 | 65535, | ||
94 | 65535, | ||
95 | 65535, | ||
96 | 65535, | ||
97 | 65535, | ||
98 | 65535, | ||
99 | 65535, | ||
100 | 65535, | ||
101 | 65535, | ||
102 | 65535, | ||
103 | 65535, | ||
104 | 65535, | ||
105 | 65535, | ||
106 | 65535, | ||
107 | 65535, | ||
108 | 65535, | ||
109 | 65535, | ||
110 | 65535, | ||
111 | 65535, | ||
112 | 65535, | ||
113 | 65535, | ||
114 | 65535, | ||
115 | 65535, | ||
116 | 65535 | ||
117 | }; | ||
118 | |||
119 | static const struct c6000_gen_code tramp_gen_info[] = { | ||
120 | /* Tramp caused by R_C60PCR21 */ | ||
121 | { | ||
122 | /* Header - 8 instructions, 2 relos */ | ||
123 | { | ||
124 | sizeof(u32) * C6X_TRAMP_WORD_COUNT, | ||
125 | 2, | ||
126 | FIELD_OFFSET(struct c6000_gen_code, relos) | ||
127 | }, | ||
128 | |||
129 | /* Trampoline instructions */ | ||
130 | { | ||
131 | 0x053C54F7, /* STW.D2T2 B10, *sp--[2] */ | ||
132 | 0x0500002A, /* || MVK.S2 <blank>, B10 */ | ||
133 | 0x0500006A, /* MVKH.S2 <blank>, B10 */ | ||
134 | 0x00280362, /* B.S2 B10 */ | ||
135 | 0x053C52E6, /* LDW.D2T2 *++sp[2], B10 */ | ||
136 | 0x00006000, /* NOP 4 */ | ||
137 | 0x00000000, /* NOP */ | ||
138 | 0x00000000 /* NOP */ | ||
139 | }, | ||
140 | |||
141 | /* Relocations */ | ||
142 | { | ||
143 | {4, 0, 0, R_C60LO16}, | ||
144 | {8, 0, 0, R_C60HI16}, | ||
145 | {0, 0, 0, 0x0000}, | ||
146 | {0, 0, 0, 0x0000}, | ||
147 | {0, 0, 0, 0x0000}, | ||
148 | {0, 0, 0, 0x0000}, | ||
149 | {0, 0, 0, 0x0000}, | ||
150 | {0, 0, 0, 0x0000} | ||
151 | } | ||
152 | } | ||
153 | }; | ||
154 | |||
155 | /* TARGET SPECIFIC FUNCTIONS THAT MUST BE DEFINED */ | ||
156 | static u32 tramp_size_get(void) | ||
157 | { | ||
158 | return sizeof(u32) * C6X_TRAMP_WORD_COUNT; | ||
159 | } | ||
160 | |||
161 | static u32 tramp_img_pkt_size_get(void) | ||
162 | { | ||
163 | return sizeof(struct c6000_gen_code); | ||
164 | } | ||
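dload_tramp_avail() and dload_tramp_generate() in tramp.c above consult this table by hashing the relocation type with HASH_FUNC and treating 65535 (the driver's TRAMP_NO_GEN_AVAIL) as "no generator available"; slot 10 is the only populated slot and selects tramp_gen_info[0], the R_C60PCR21 trampoline. The stand-alone sketch below reuses the hash and a trimmed copy of the map; the 8-bit scan range is only for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same hash as reloc_table_c6000.c / tramp_table_c6000.c above. */
#define HASH_FUNC(zz)		(((((zz) + 1) * 1845UL) >> 11) & 63)
#define TRAMP_NO_GEN_AVAIL	65535

/* Trimmed copy of tramp_map: only slot 10 carries a generator index (0),
 * which corresponds to the single R_C60PCR21 entry in tramp_gen_info. */
static uint16_t tramp_map[64];

/* Mirrors dload_tramp_avail(): a trampoline exists for this relocation
 * type if its hash slot names a generator. */
static bool tramp_avail(uint16_t relo_type)
{
	return tramp_map[HASH_FUNC(relo_type)] != TRAMP_NO_GEN_AVAIL;
}

int main(void)
{
	unsigned int type;

	for (type = 0; type < 64; type++)
		tramp_map[type] = TRAMP_NO_GEN_AVAIL;
	tramp_map[10] = 0;

	/* Print every 8-bit relocation type this map would offer a
	 * trampoline for (hash collisions included, which is why the
	 * generator entry must still match the relocation semantics). */
	for (type = 0; type < 256; type++)
		if (tramp_avail((uint16_t)type))
			printf("type 0x%02x -> generator %u\n",
			       type, tramp_map[HASH_FUNC(type)]);
	return 0;
}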
diff --git a/drivers/staging/tidspbridge/gen/gh.c b/drivers/staging/tidspbridge/gen/gh.c deleted file mode 100644 index 936470cb608e..000000000000 --- a/drivers/staging/tidspbridge/gen/gh.c +++ /dev/null | |||
@@ -1,141 +0,0 @@ | |||
1 | /* | ||
2 | * gh.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #include <linux/err.h> | ||
18 | #include <linux/hashtable.h> | ||
19 | #include <linux/slab.h> | ||
20 | |||
21 | struct gh_node { | ||
22 | struct hlist_node hl; | ||
23 | u8 data[0]; | ||
24 | }; | ||
25 | |||
26 | #define GH_HASH_ORDER 8 | ||
27 | |||
28 | struct gh_t_hash_tab { | ||
29 | u32 val_size; | ||
30 | DECLARE_HASHTABLE(hash_table, GH_HASH_ORDER); | ||
31 | u32 (*hash)(const void *key); | ||
32 | bool (*match)(const void *key, const void *value); | ||
33 | void (*delete)(void *key); | ||
34 | }; | ||
35 | |||
36 | /* | ||
37 | * ======== gh_create ======== | ||
38 | */ | ||
39 | |||
40 | struct gh_t_hash_tab *gh_create(u32 val_size, u32 (*hash)(const void *), | ||
41 | bool (*match)(const void *, const void *), | ||
42 | void (*delete)(void *)) | ||
43 | { | ||
44 | struct gh_t_hash_tab *hash_tab; | ||
45 | |||
46 | hash_tab = kzalloc(sizeof(struct gh_t_hash_tab), GFP_KERNEL); | ||
47 | if (!hash_tab) | ||
48 | return ERR_PTR(-ENOMEM); | ||
49 | |||
50 | hash_init(hash_tab->hash_table); | ||
51 | |||
52 | hash_tab->val_size = val_size; | ||
53 | hash_tab->hash = hash; | ||
54 | hash_tab->match = match; | ||
55 | hash_tab->delete = delete; | ||
56 | |||
57 | return hash_tab; | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * ======== gh_delete ======== | ||
62 | */ | ||
63 | void gh_delete(struct gh_t_hash_tab *hash_tab) | ||
64 | { | ||
65 | struct gh_node *n; | ||
66 | struct hlist_node *tmp; | ||
67 | u32 i; | ||
68 | |||
69 | if (hash_tab) { | ||
70 | hash_for_each_safe(hash_tab->hash_table, i, tmp, n, hl) { | ||
71 | hash_del(&n->hl); | ||
72 | if (hash_tab->delete) | ||
73 | hash_tab->delete(n->data); | ||
74 | kfree(n); | ||
75 | } | ||
76 | |||
77 | kfree(hash_tab); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | /* | ||
82 | * ======== gh_find ======== | ||
83 | */ | ||
84 | |||
85 | void *gh_find(struct gh_t_hash_tab *hash_tab, const void *key) | ||
86 | { | ||
87 | struct gh_node *n; | ||
88 | u32 key_hash = hash_tab->hash(key); | ||
89 | |||
90 | hash_for_each_possible(hash_tab->hash_table, n, hl, key_hash) { | ||
91 | if (hash_tab->match(key, n->data)) | ||
92 | return n->data; | ||
93 | } | ||
94 | |||
95 | return ERR_PTR(-ENODATA); | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * ======== gh_insert ======== | ||
100 | */ | ||
101 | |||
102 | void *gh_insert(struct gh_t_hash_tab *hash_tab, const void *key, | ||
103 | const void *value) | ||
104 | { | ||
105 | struct gh_node *n; | ||
106 | |||
107 | n = kmalloc(sizeof(struct gh_node) + hash_tab->val_size, | ||
108 | GFP_KERNEL); | ||
109 | |||
110 | if (!n) | ||
111 | return ERR_PTR(-ENOMEM); | ||
112 | |||
113 | INIT_HLIST_NODE(&n->hl); | ||
114 | hash_add(hash_tab->hash_table, &n->hl, hash_tab->hash(key)); | ||
115 | memcpy(n->data, value, hash_tab->val_size); | ||
116 | |||
117 | return n->data; | ||
118 | } | ||
119 | |||
120 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
121 | /** | ||
122 | * gh_iterate() - This function goes through all the elements in the hash table | ||
123 | * looking for the dsp symbols. | ||
124 | * @hash_tab: Hash table | ||
125 | * @callback: pointer to callback function | ||
126 | * @user_data: User data, contains the find_symbol_context pointer | ||
127 | * | ||
128 | */ | ||
129 | void gh_iterate(struct gh_t_hash_tab *hash_tab, | ||
130 | void (*callback)(void *, void *), void *user_data) | ||
131 | { | ||
132 | struct gh_node *n; | ||
133 | u32 i; | ||
134 | |||
135 | if (!hash_tab) | ||
136 | return; | ||
137 | |||
138 | hash_for_each(hash_tab->hash_table, i, n, hl) | ||
139 | callback(&n->data, user_data); | ||
140 | } | ||
141 | #endif | ||
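A sketch of how a caller might use this generic hash table, assuming the prototypes declared in the removed dspbridge/gh.h header match the definitions above; the example_sym value type, the callbacks and the module boilerplate are illustrative only, not code from the driver.

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/err.h>

#include <dspbridge/gh.h>	/* declares gh_create() etc.; path assumed */

/* Illustrative value type: a name/address pair, similar in spirit to the
 * DSP symbol records the dynamic loader keeps per hash node. */
struct example_sym {
	char name[32];
	u32 addr;
};

static u32 example_hash(const void *key)
{
	const char *name = key;
	u32 hash = 0;

	while (*name)
		hash = hash * 31 + *name++;
	return hash;
}

static bool example_match(const void *key, const void *value)
{
	const struct example_sym *sym = value;

	return !strcmp(key, sym->name);
}

static int __init gh_example_init(void)
{
	struct gh_t_hash_tab *tab;
	struct example_sym sym = { .name = "_main", .addr = 0x80001000 };
	struct example_sym *found;

	/* No delete callback needed: the value owns no extra memory. */
	tab = gh_create(sizeof(sym), example_hash, example_match, NULL);
	if (IS_ERR(tab))
		return PTR_ERR(tab);

	/* gh_insert() copies val_size bytes of the value into the node. */
	if (IS_ERR(gh_insert(tab, sym.name, &sym))) {
		gh_delete(tab);
		return -ENOMEM;
	}

	found = gh_find(tab, "_main");
	if (!IS_ERR(found))
		pr_info("found %s at 0x%x\n", found->name, found->addr);

	gh_delete(tab);
	return 0;
}
module_init(gh_example_init);

static void __exit gh_example_exit(void)
{
}
module_exit(gh_example_exit);

MODULE_LICENSE("GPL");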
diff --git a/drivers/staging/tidspbridge/hw/EasiGlobal.h b/drivers/staging/tidspbridge/hw/EasiGlobal.h deleted file mode 100644 index e48d7f67c60a..000000000000 --- a/drivers/staging/tidspbridge/hw/EasiGlobal.h +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | /* | ||
2 | * EasiGlobal.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef _EASIGLOBAL_H | ||
18 | #define _EASIGLOBAL_H | ||
19 | #include <linux/types.h> | ||
20 | |||
21 | /* | ||
22 | * DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE | ||
23 | * | ||
24 | * DESCRIPTION: Defines used to describe register types for EASI-checker tests. | ||
25 | */ | ||
26 | |||
27 | #define READ_ONLY 1 | ||
28 | #define WRITE_ONLY 2 | ||
29 | #define READ_WRITE 3 | ||
30 | |||
31 | /* | ||
32 | * MACRO: _DEBUG_LEVEL1_EASI | ||
33 | * | ||
34 | * DESCRIPTION: A MACRO which can be used to indicate that a particular beach | ||
35 | * register access function was called. | ||
36 | * | ||
38 | * NOTE: We currently don't use this functionality. | ||
38 | */ | ||
39 | #define _DEBUG_LEVEL1_EASI(easi_num) ((void)0) | ||
40 | |||
41 | #endif /* _EASIGLOBAL_H */ | ||
diff --git a/drivers/staging/tidspbridge/hw/MMUAccInt.h b/drivers/staging/tidspbridge/hw/MMUAccInt.h deleted file mode 100644 index 1cefca321d71..000000000000 --- a/drivers/staging/tidspbridge/hw/MMUAccInt.h +++ /dev/null | |||
@@ -1,76 +0,0 @@ | |||
1 | /* | ||
2 | * MMUAccInt.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef _MMU_ACC_INT_H | ||
18 | #define _MMU_ACC_INT_H | ||
19 | |||
20 | /* Mappings of level 1 EASI function numbers to function names */ | ||
21 | |||
22 | #define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3) | ||
23 | #define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32 (MMU_BASE_EASIL1 + 17) | ||
24 | #define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32 (MMU_BASE_EASIL1 + 39) | ||
25 | #define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 51) | ||
26 | #define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102) | ||
27 | #define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103) | ||
28 | #define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156) | ||
29 | #define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174) | ||
30 | #define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 180) | ||
31 | #define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 190) | ||
32 | #define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32 (MMU_BASE_EASIL1 + 194) | ||
33 | #define EASIL1_MMUMMU_TTB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 198) | ||
34 | #define EASIL1_MMUMMU_LOCK_READ_REGISTER32 (MMU_BASE_EASIL1 + 203) | ||
35 | #define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 204) | ||
36 | #define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32 (MMU_BASE_EASIL1 + 205) | ||
37 | #define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209) | ||
38 | #define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211) | ||
39 | #define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32 (MMU_BASE_EASIL1 + 212) | ||
40 | #define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32 (MMU_BASE_EASIL1 + 213) | ||
41 | #define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 214) | ||
42 | #define EASIL1_MMUMMU_CAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 226) | ||
43 | #define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268) | ||
44 | #define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 322) | ||
45 | |||
46 | /* Register offset address definitions */ | ||
47 | #define MMU_MMU_SYSCONFIG_OFFSET 0x10 | ||
48 | #define MMU_MMU_IRQSTATUS_OFFSET 0x18 | ||
49 | #define MMU_MMU_IRQENABLE_OFFSET 0x1c | ||
50 | #define MMU_MMU_WALKING_ST_OFFSET 0x40 | ||
51 | #define MMU_MMU_CNTL_OFFSET 0x44 | ||
52 | #define MMU_MMU_FAULT_AD_OFFSET 0x48 | ||
53 | #define MMU_MMU_TTB_OFFSET 0x4c | ||
54 | #define MMU_MMU_LOCK_OFFSET 0x50 | ||
55 | #define MMU_MMU_LD_TLB_OFFSET 0x54 | ||
56 | #define MMU_MMU_CAM_OFFSET 0x58 | ||
57 | #define MMU_MMU_RAM_OFFSET 0x5c | ||
58 | #define MMU_MMU_GFLUSH_OFFSET 0x60 | ||
59 | #define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64 | ||
60 | /* Bitfield mask and offset declarations */ | ||
61 | #define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK 0x18 | ||
62 | #define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET 3 | ||
63 | #define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK 0x1 | ||
64 | #define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET 0 | ||
65 | #define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1 | ||
66 | #define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET 0 | ||
67 | #define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4 | ||
68 | #define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2 | ||
69 | #define MMU_MMU_CNTL_MMU_ENABLE_MASK 0x2 | ||
70 | #define MMU_MMU_CNTL_MMU_ENABLE_OFFSET 1 | ||
71 | #define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00 | ||
72 | #define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10 | ||
73 | #define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0 | ||
74 | #define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4 | ||
75 | |||
76 | #endif /* _MMU_ACC_INT_H */ | ||
diff --git a/drivers/staging/tidspbridge/hw/MMURegAcM.h b/drivers/staging/tidspbridge/hw/MMURegAcM.h deleted file mode 100644 index ab1a16da731c..000000000000 --- a/drivers/staging/tidspbridge/hw/MMURegAcM.h +++ /dev/null | |||
@@ -1,225 +0,0 @@ | |||
1 | /* | ||
2 | * MMURegAcM.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef _MMU_REG_ACM_H | ||
18 | #define _MMU_REG_ACM_H | ||
19 | |||
20 | #include <linux/io.h> | ||
21 | #include <EasiGlobal.h> | ||
22 | |||
23 | #include "MMUAccInt.h" | ||
24 | |||
25 | #if defined(USE_LEVEL_1_MACROS) | ||
26 | |||
27 | #define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\ | ||
28 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\ | ||
29 | __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET)) | ||
30 | |||
31 | #define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\ | ||
32 | {\ | ||
33 | const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\ | ||
34 | register u32 data = __raw_readl((base_address)+offset);\ | ||
35 | register u32 new_value = (value);\ | ||
36 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\ | ||
37 | data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\ | ||
38 | new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\ | ||
39 | new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\ | ||
40 | new_value |= data;\ | ||
41 | __raw_writel(new_value, base_address+offset);\ | ||
42 | } | ||
43 | |||
44 | #define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\ | ||
45 | {\ | ||
46 | const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\ | ||
47 | register u32 data = __raw_readl((base_address)+offset);\ | ||
48 | register u32 new_value = (value);\ | ||
49 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\ | ||
50 | data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\ | ||
51 | new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\ | ||
52 | new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\ | ||
53 | new_value |= data;\ | ||
54 | __raw_writel(new_value, base_address+offset);\ | ||
55 | } | ||
56 | |||
57 | #define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\ | ||
58 | (_DEBUG_LEVEL1_EASI(easil1_mmummu_irqstatus_read_register32),\ | ||
59 | __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET)) | ||
60 | |||
61 | #define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\ | ||
62 | {\ | ||
63 | const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\ | ||
64 | register u32 new_value = (value);\ | ||
65 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\ | ||
66 | __raw_writel(new_value, (base_address)+offset);\ | ||
67 | } | ||
68 | |||
69 | #define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\ | ||
70 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\ | ||
71 | __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET)) | ||
72 | |||
73 | #define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\ | ||
74 | {\ | ||
75 | const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\ | ||
76 | register u32 new_value = (value);\ | ||
77 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\ | ||
78 | __raw_writel(new_value, (base_address)+offset);\ | ||
79 | } | ||
80 | |||
81 | #define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\ | ||
82 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\ | ||
83 | (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\ | ||
84 | & MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\ | ||
85 | MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET)) | ||
86 | |||
87 | #define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\ | ||
88 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\ | ||
89 | (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\ | ||
90 | MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\ | ||
91 | MMU_MMU_CNTL_TWL_ENABLE_OFFSET)) | ||
92 | |||
93 | #define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\ | ||
94 | {\ | ||
95 | const u32 offset = MMU_MMU_CNTL_OFFSET;\ | ||
96 | register u32 data = __raw_readl((base_address)+offset);\ | ||
97 | register u32 new_value = (value);\ | ||
98 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\ | ||
99 | data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\ | ||
100 | new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\ | ||
101 | new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\ | ||
102 | new_value |= data;\ | ||
103 | __raw_writel(new_value, base_address+offset);\ | ||
104 | } | ||
105 | |||
106 | #define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\ | ||
107 | {\ | ||
108 | const u32 offset = MMU_MMU_CNTL_OFFSET;\ | ||
109 | register u32 data = __raw_readl((base_address)+offset);\ | ||
110 | register u32 new_value = (value);\ | ||
111 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\ | ||
112 | data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\ | ||
113 | new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\ | ||
114 | new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\ | ||
115 | new_value |= data;\ | ||
116 | __raw_writel(new_value, base_address+offset);\ | ||
117 | } | ||
118 | |||
119 | #define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\ | ||
120 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\ | ||
121 | __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET)) | ||
122 | |||
123 | #define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\ | ||
124 | {\ | ||
125 | const u32 offset = MMU_MMU_TTB_OFFSET;\ | ||
126 | register u32 new_value = (value);\ | ||
127 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\ | ||
128 | __raw_writel(new_value, (base_address)+offset);\ | ||
129 | } | ||
130 | |||
131 | #define MMUMMU_LOCK_READ_REGISTER32(base_address)\ | ||
132 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\ | ||
133 | __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET)) | ||
134 | |||
135 | #define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\ | ||
136 | {\ | ||
137 | const u32 offset = MMU_MMU_LOCK_OFFSET;\ | ||
138 | register u32 new_value = (value);\ | ||
139 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\ | ||
140 | __raw_writel(new_value, (base_address)+offset);\ | ||
141 | } | ||
142 | |||
143 | #define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\ | ||
144 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\ | ||
145 | (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\ | ||
146 | MMU_MMU_LOCK_BASE_VALUE_MASK) >>\ | ||
147 | MMU_MMU_LOCK_BASE_VALUE_OFFSET)) | ||
148 | |||
149 | #define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\ | ||
150 | {\ | ||
151 | const u32 offset = MMU_MMU_LOCK_OFFSET;\ | ||
152 | register u32 data = __raw_readl((base_address)+offset);\ | ||
153 | register u32 new_value = (value);\ | ||
154 | _DEBUG_LEVEL1_EASI(easil1_mmummu_lock_base_value_write32);\ | ||
155 | data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\ | ||
156 | new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\ | ||
157 | new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\ | ||
158 | new_value |= data;\ | ||
159 | __raw_writel(new_value, base_address+offset);\ | ||
160 | } | ||
161 | |||
162 | #define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\ | ||
163 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\ | ||
164 | (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\ | ||
165 | MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\ | ||
166 | MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET)) | ||
167 | |||
168 | #define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\ | ||
169 | {\ | ||
170 | const u32 offset = MMU_MMU_LOCK_OFFSET;\ | ||
171 | register u32 data = __raw_readl((base_address)+offset);\ | ||
172 | register u32 new_value = (value);\ | ||
173 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\ | ||
174 | data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\ | ||
175 | new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\ | ||
176 | new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\ | ||
177 | new_value |= data;\ | ||
178 | __raw_writel(new_value, base_address+offset);\ | ||
179 | } | ||
180 | |||
181 | #define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\ | ||
182 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\ | ||
183 | (((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\ | ||
184 | (((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\ | ||
185 | MMU_MMU_LOCK_CURRENT_VICTIM_MASK))) | ||
186 | |||
187 | #define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\ | ||
188 | (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\ | ||
189 | __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET)) | ||
190 | |||
191 | #define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\ | ||
192 | {\ | ||
193 | const u32 offset = MMU_MMU_LD_TLB_OFFSET;\ | ||
194 | register u32 new_value = (value);\ | ||
195 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\ | ||
196 | __raw_writel(new_value, (base_address)+offset);\ | ||
197 | } | ||
198 | |||
199 | #define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\ | ||
200 | {\ | ||
201 | const u32 offset = MMU_MMU_CAM_OFFSET;\ | ||
202 | register u32 new_value = (value);\ | ||
203 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\ | ||
204 | __raw_writel(new_value, (base_address)+offset);\ | ||
205 | } | ||
206 | |||
207 | #define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\ | ||
208 | {\ | ||
209 | const u32 offset = MMU_MMU_RAM_OFFSET;\ | ||
210 | register u32 new_value = (value);\ | ||
211 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\ | ||
212 | __raw_writel(new_value, (base_address)+offset);\ | ||
213 | } | ||
214 | |||
215 | #define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\ | ||
216 | {\ | ||
217 | const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\ | ||
218 | register u32 new_value = (value);\ | ||
219 | _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\ | ||
220 | __raw_writel(new_value, (base_address)+offset);\ | ||
221 | } | ||
222 | |||
223 | #endif /* USE_LEVEL_1_MACROS */ | ||
224 | |||
225 | #endif /* _MMU_REG_ACM_H */ | ||
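Each *_WRITE32 macro above that touches a bitfield performs the same read-modify-write sequence: read the current register word, clear the field's mask, shift and mask the new value into place, OR the preserved bits back in, and write the result. The user-space sketch below shows that sequence on a plain word, using the TWL_ENABLE field constants from MMUAccInt.h; the kernel macros operate on an ioremapped base via __raw_readl()/__raw_writel() instead, and the function name here is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Field layout taken from MMUAccInt.h: MMU_CNTL.TWL_ENABLE is bit 2. */
#define MMU_MMU_CNTL_TWL_ENABLE_MASK	0x4
#define MMU_MMU_CNTL_TWL_ENABLE_OFFSET	2

/* Read-modify-write one bitfield while preserving the other bits of the
 * word; this mirrors the body of MMUMMU_CNTLTWL_ENABLE_WRITE32(). */
static void field_write32(volatile uint32_t *reg, uint32_t mask,
			  uint32_t offset, uint32_t value)
{
	uint32_t data = *reg;

	data &= ~mask;
	data |= (value << offset) & mask;
	*reg = data;
}

int main(void)
{
	/* Stand-in for the ioremapped MMU_CNTL register. */
	volatile uint32_t mmu_cntl = 0x2;	/* MMU enabled, TWL disabled */

	field_write32(&mmu_cntl, MMU_MMU_CNTL_TWL_ENABLE_MASK,
		      MMU_MMU_CNTL_TWL_ENABLE_OFFSET, 1);
	printf("MMU_CNTL = 0x%x\n", mmu_cntl);	/* prints 0x6 */
	return 0;
}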
diff --git a/drivers/staging/tidspbridge/hw/hw_defs.h b/drivers/staging/tidspbridge/hw/hw_defs.h deleted file mode 100644 index d5266d4c163f..000000000000 --- a/drivers/staging/tidspbridge/hw/hw_defs.h +++ /dev/null | |||
@@ -1,58 +0,0 @@ | |||
1 | /* | ||
2 | * hw_defs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global HW definitions | ||
7 | * | ||
8 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef _HW_DEFS_H | ||
20 | #define _HW_DEFS_H | ||
21 | |||
22 | /* Page size */ | ||
23 | #define HW_PAGE_SIZE4KB 0x1000 | ||
24 | #define HW_PAGE_SIZE64KB 0x10000 | ||
25 | #define HW_PAGE_SIZE1MB 0x100000 | ||
26 | #define HW_PAGE_SIZE16MB 0x1000000 | ||
27 | |||
28 | /* hw_status: return type for HW API */ | ||
29 | typedef long hw_status; | ||
30 | |||
31 | /* Macro used to set and clear any bit */ | ||
32 | #define HW_CLEAR 0 | ||
33 | #define HW_SET 1 | ||
34 | |||
35 | /* hw_endianism_t: Enumerated Type used to specify the endianism | ||
36 | * Do NOT change these values. They are used as bit fields. */ | ||
37 | enum hw_endianism_t { | ||
38 | HW_LITTLE_ENDIAN, | ||
39 | HW_BIG_ENDIAN | ||
40 | }; | ||
41 | |||
42 | /* hw_element_size_t: Enumerated Type used to specify the element size | ||
43 | * Do NOT change these values. They are used as bit fields. */ | ||
44 | enum hw_element_size_t { | ||
45 | HW_ELEM_SIZE8BIT, | ||
46 | HW_ELEM_SIZE16BIT, | ||
47 | HW_ELEM_SIZE32BIT, | ||
48 | HW_ELEM_SIZE64BIT | ||
49 | }; | ||
50 | |||
51 | /* hw_idle_mode_t: Enumerated Type used to specify Idle modes */ | ||
52 | enum hw_idle_mode_t { | ||
53 | HW_FORCE_IDLE, | ||
54 | HW_NO_IDLE, | ||
55 | HW_SMART_IDLE | ||
56 | }; | ||
57 | |||
58 | #endif /* _HW_DEFS_H */ | ||
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c deleted file mode 100644 index 50244a474178..000000000000 --- a/drivers/staging/tidspbridge/hw/hw_mmu.c +++ /dev/null | |||
@@ -1,487 +0,0 @@ | |||
1 | /* | ||
2 | * hw_mmu.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * API definitions to setup MMU TLB and PTE | ||
7 | * | ||
8 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/io.h> | ||
20 | #include "MMURegAcM.h" | ||
21 | #include <hw_defs.h> | ||
22 | #include <hw_mmu.h> | ||
23 | #include <linux/types.h> | ||
24 | #include <linux/err.h> | ||
25 | |||
26 | #define MMU_BASE_VAL_MASK 0xFC00 | ||
27 | #define MMU_PAGE_MAX 3 | ||
28 | #define MMU_ELEMENTSIZE_MAX 3 | ||
29 | #define MMU_ADDR_MASK 0xFFFFF000 | ||
30 | #define MMU_TTB_MASK 0xFFFFC000 | ||
31 | #define MMU_SECTION_ADDR_MASK 0xFFF00000 | ||
32 | #define MMU_SSECTION_ADDR_MASK 0xFF000000 | ||
33 | #define MMU_PAGE_TABLE_MASK 0xFFFFFC00 | ||
34 | #define MMU_LARGE_PAGE_MASK 0xFFFF0000 | ||
35 | #define MMU_SMALL_PAGE_MASK 0xFFFFF000 | ||
36 | |||
37 | #define MMU_LOAD_TLB 0x00000001 | ||
38 | #define MMU_GFLUSH 0x60 | ||
39 | |||
40 | /* | ||
41 | * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size(SLSS) | ||
42 | */ | ||
43 | enum hw_mmu_page_size_t { | ||
44 | HW_MMU_SECTION, | ||
45 | HW_MMU_LARGE_PAGE, | ||
46 | HW_MMU_SMALL_PAGE, | ||
47 | HW_MMU_SUPERSECTION | ||
48 | }; | ||
49 | |||
50 | /* | ||
51 | * FUNCTION : mmu_set_cam_entry | ||
52 | * | ||
53 | * INPUTS: | ||
54 | * | ||
55 | * Identifier : base_address | ||
56 | * Type : void __iomem * | ||
57 | * Description : Base Address of instance of MMU module | ||
58 | * | ||
59 | * Identifier : page_sz | ||
60 | * Type : const u32 | ||
61 | * Description : It indicates the page size | ||
62 | * | ||
63 | * Identifier : preserved_bit | ||
64 | * Type : const u32 | ||
65 | * Description : Indicates whether the TLB entry is a | ||
66 | * preserved entry | ||
67 | * | ||
68 | * Identifier : valid_bit | ||
69 | * Type : const u32 | ||
70 | * Description : Indicates whether the TLB entry is a valid entry | ||
71 | * | ||
72 | * | ||
73 | * Identifier : virtual_addr_tag | ||
74 | * Type : const u32 | ||
75 | * Description : virtual Address | ||
76 | * | ||
77 | * RETURNS: | ||
78 | * | ||
79 | * Type : hw_status | ||
80 | * Description : 0 -- No errors occurred | ||
81 | * RET_BAD_NULL_PARAM -- A Pointer Parameter | ||
82 | * was set to NULL | ||
83 | * RET_PARAM_OUT_OF_RANGE -- Input Parameter out | ||
84 | * of Range | ||
85 | * | ||
86 | * PURPOSE: : Set MMU_CAM reg | ||
87 | * | ||
88 | * METHOD: : Check the Input parameters and set the CAM entry. | ||
89 | */ | ||
90 | static hw_status mmu_set_cam_entry(void __iomem *base_address, | ||
91 | const u32 page_sz, | ||
92 | const u32 preserved_bit, | ||
93 | const u32 valid_bit, | ||
94 | const u32 virtual_addr_tag); | ||
95 | |||
96 | /* | ||
97 | * FUNCTION : mmu_set_ram_entry | ||
98 | * | ||
99 | * INPUTS: | ||
100 | * | ||
101 | * Identifier : base_address | ||
102 | * Type : void __iomem * | ||
103 | * Description : Base Address of instance of MMU module | ||
104 | * | ||
105 | * Identifier : physical_addr | ||
106 | * Type : const u32 | ||
107 | * Description : Physical Address to which the corresponding | ||
108 | * virtual Address should point | ||
109 | * | ||
110 | * Identifier : endianism | ||
111 | * Type : hw_endianism_t | ||
112 | * Description : endianism for the given page | ||
113 | * | ||
114 | * Identifier : element_size | ||
115 | * Type : hw_element_size_t | ||
116 | * Description : The element size ( 8,16, 32 or 64 bit) | ||
117 | * | ||
118 | * Identifier : mixed_size | ||
119 | * Type : hw_mmu_mixed_size_t | ||
120 | * Description : Element Size to follow CPU or TLB | ||
121 | * | ||
122 | * RETURNS: | ||
123 | * | ||
124 | * Type : hw_status | ||
125 | * Description : 0 -- No errors occurred | ||
126 | * RET_BAD_NULL_PARAM -- A Pointer Parameter | ||
127 | * was set to NULL | ||
128 | * RET_PARAM_OUT_OF_RANGE -- Input Parameter | ||
129 | * out of Range | ||
130 | * | ||
131 | * PURPOSE: : Set MMU_RAM reg | ||
132 | * | ||
133 | * METHOD: : Check the Input parameters and set the RAM entry. | ||
134 | */ | ||
135 | static hw_status mmu_set_ram_entry(void __iomem *base_address, | ||
136 | const u32 physical_addr, | ||
137 | enum hw_endianism_t endianism, | ||
138 | enum hw_element_size_t element_size, | ||
139 | enum hw_mmu_mixed_size_t mixed_size); | ||
140 | |||
141 | /* HW FUNCTIONS */ | ||
142 | |||
143 | hw_status hw_mmu_enable(void __iomem *base_address) | ||
144 | { | ||
145 | hw_status status = 0; | ||
146 | |||
147 | MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET); | ||
148 | |||
149 | return status; | ||
150 | } | ||
151 | |||
152 | hw_status hw_mmu_disable(void __iomem *base_address) | ||
153 | { | ||
154 | hw_status status = 0; | ||
155 | |||
156 | MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR); | ||
157 | |||
158 | return status; | ||
159 | } | ||
160 | |||
161 | hw_status hw_mmu_num_locked_set(void __iomem *base_address, | ||
162 | u32 num_locked_entries) | ||
163 | { | ||
164 | hw_status status = 0; | ||
165 | |||
166 | MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries); | ||
167 | |||
168 | return status; | ||
169 | } | ||
170 | |||
171 | hw_status hw_mmu_victim_num_set(void __iomem *base_address, | ||
172 | u32 victim_entry_num) | ||
173 | { | ||
174 | hw_status status = 0; | ||
175 | |||
176 | MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num); | ||
177 | |||
178 | return status; | ||
179 | } | ||
180 | |||
181 | hw_status hw_mmu_event_ack(void __iomem *base_address, u32 irq_mask) | ||
182 | { | ||
183 | hw_status status = 0; | ||
184 | |||
185 | MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask); | ||
186 | |||
187 | return status; | ||
188 | } | ||
189 | |||
190 | hw_status hw_mmu_event_disable(void __iomem *base_address, u32 irq_mask) | ||
191 | { | ||
192 | hw_status status = 0; | ||
193 | u32 irq_reg; | ||
194 | |||
195 | irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address); | ||
196 | |||
197 | MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask); | ||
198 | |||
199 | return status; | ||
200 | } | ||
201 | |||
202 | hw_status hw_mmu_event_enable(void __iomem *base_address, u32 irq_mask) | ||
203 | { | ||
204 | hw_status status = 0; | ||
205 | u32 irq_reg; | ||
206 | |||
207 | irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address); | ||
208 | |||
209 | MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask); | ||
210 | |||
211 | return status; | ||
212 | } | ||
213 | |||
214 | hw_status hw_mmu_event_status(void __iomem *base_address, u32 *irq_mask) | ||
215 | { | ||
216 | hw_status status = 0; | ||
217 | |||
218 | *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address); | ||
219 | |||
220 | return status; | ||
221 | } | ||
222 | |||
223 | hw_status hw_mmu_fault_addr_read(void __iomem *base_address, u32 *addr) | ||
224 | { | ||
225 | hw_status status = 0; | ||
226 | |||
227 | /* read values from register */ | ||
228 | *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address); | ||
229 | |||
230 | return status; | ||
231 | } | ||
232 | |||
233 | hw_status hw_mmu_ttb_set(void __iomem *base_address, u32 ttb_phys_addr) | ||
234 | { | ||
235 | hw_status status = 0; | ||
236 | u32 load_ttb; | ||
237 | |||
238 | load_ttb = ttb_phys_addr & ~0x7FUL; | ||
239 | /* write values to register */ | ||
240 | MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb); | ||
241 | |||
242 | return status; | ||
243 | } | ||
244 | |||
245 | hw_status hw_mmu_twl_enable(void __iomem *base_address) | ||
246 | { | ||
247 | hw_status status = 0; | ||
248 | |||
249 | MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET); | ||
250 | |||
251 | return status; | ||
252 | } | ||
253 | |||
254 | hw_status hw_mmu_twl_disable(void __iomem *base_address) | ||
255 | { | ||
256 | hw_status status = 0; | ||
257 | |||
258 | MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR); | ||
259 | |||
260 | return status; | ||
261 | } | ||
262 | |||
263 | hw_status hw_mmu_tlb_add(void __iomem *base_address, | ||
264 | u32 physical_addr, | ||
265 | u32 virtual_addr, | ||
266 | u32 page_sz, | ||
267 | u32 entry_num, | ||
268 | struct hw_mmu_map_attrs_t *map_attrs, | ||
269 | s8 preserved_bit, s8 valid_bit) | ||
270 | { | ||
271 | hw_status status = 0; | ||
272 | u32 lock_reg; | ||
273 | u32 virtual_addr_tag; | ||
274 | enum hw_mmu_page_size_t mmu_pg_size; | ||
275 | |||
276 | /* Check the input parameters */ | ||
277 | switch (page_sz) { | ||
278 | case HW_PAGE_SIZE4KB: | ||
279 | mmu_pg_size = HW_MMU_SMALL_PAGE; | ||
280 | break; | ||
281 | |||
282 | case HW_PAGE_SIZE64KB: | ||
283 | mmu_pg_size = HW_MMU_LARGE_PAGE; | ||
284 | break; | ||
285 | |||
286 | case HW_PAGE_SIZE1MB: | ||
287 | mmu_pg_size = HW_MMU_SECTION; | ||
288 | break; | ||
289 | |||
290 | case HW_PAGE_SIZE16MB: | ||
291 | mmu_pg_size = HW_MMU_SUPERSECTION; | ||
292 | break; | ||
293 | |||
294 | default: | ||
295 | return -EINVAL; | ||
296 | } | ||
297 | |||
298 | lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address); | ||
299 | |||
300 | /* Generate the 20-bit tag from virtual address */ | ||
301 | virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12); | ||
302 | |||
303 | /* Write the fields in the CAM Entry Register */ | ||
304 | mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit, | ||
305 | virtual_addr_tag); | ||
306 | |||
307 | /* Write the different fields of the RAM Entry Register */ | ||
308 | /* endianism of the page, element size of the page (8, 16, 32, 64 bit) */ | ||
309 | mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism, | ||
310 | map_attrs->element_size, map_attrs->mixed_size); | ||
311 | |||
312 | /* Update the MMU Lock Register */ | ||
313 | /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */ | ||
314 | MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num); | ||
315 | |||
316 | /* Enable loading of an entry in TLB by writing 1 | ||
317 | into LD_TLB_REG register */ | ||
318 | MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB); | ||
319 | |||
320 | MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg); | ||
321 | |||
322 | return status; | ||
323 | } | ||
324 | |||
325 | hw_status hw_mmu_pte_set(const u32 pg_tbl_va, | ||
326 | u32 physical_addr, | ||
327 | u32 virtual_addr, | ||
328 | u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs) | ||
329 | { | ||
330 | hw_status status = 0; | ||
331 | u32 pte_addr, pte_val; | ||
332 | s32 num_entries = 1; | ||
333 | |||
334 | switch (page_sz) { | ||
335 | case HW_PAGE_SIZE4KB: | ||
336 | pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, | ||
337 | virtual_addr & | ||
338 | MMU_SMALL_PAGE_MASK); | ||
339 | pte_val = | ||
340 | ((physical_addr & MMU_SMALL_PAGE_MASK) | | ||
341 | (map_attrs->endianism << 9) | (map_attrs-> | ||
342 | element_size << 4) | | ||
343 | (map_attrs->mixed_size << 11) | 2); | ||
344 | break; | ||
345 | |||
346 | case HW_PAGE_SIZE64KB: | ||
347 | num_entries = 16; | ||
348 | pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, | ||
349 | virtual_addr & | ||
350 | MMU_LARGE_PAGE_MASK); | ||
351 | pte_val = | ||
352 | ((physical_addr & MMU_LARGE_PAGE_MASK) | | ||
353 | (map_attrs->endianism << 9) | (map_attrs-> | ||
354 | element_size << 4) | | ||
355 | (map_attrs->mixed_size << 11) | 1); | ||
356 | break; | ||
357 | |||
358 | case HW_PAGE_SIZE1MB: | ||
359 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
360 | virtual_addr & | ||
361 | MMU_SECTION_ADDR_MASK); | ||
362 | pte_val = | ||
363 | ((((physical_addr & MMU_SECTION_ADDR_MASK) | | ||
364 | (map_attrs->endianism << 15) | (map_attrs-> | ||
365 | element_size << 10) | | ||
366 | (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2); | ||
367 | break; | ||
368 | |||
369 | case HW_PAGE_SIZE16MB: | ||
370 | num_entries = 16; | ||
371 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
372 | virtual_addr & | ||
373 | MMU_SSECTION_ADDR_MASK); | ||
374 | pte_val = | ||
375 | (((physical_addr & MMU_SSECTION_ADDR_MASK) | | ||
376 | (map_attrs->endianism << 15) | (map_attrs-> | ||
377 | element_size << 10) | | ||
378 | (map_attrs->mixed_size << 17) | ||
379 | ) | 0x40000 | 0x2); | ||
380 | break; | ||
381 | |||
382 | case HW_MMU_COARSE_PAGE_SIZE: | ||
383 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
384 | virtual_addr & | ||
385 | MMU_SECTION_ADDR_MASK); | ||
386 | pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1; | ||
387 | break; | ||
388 | |||
389 | default: | ||
390 | return -EINVAL; | ||
391 | } | ||
392 | |||
393 | while (--num_entries >= 0) | ||
394 | ((u32 *) pte_addr)[num_entries] = pte_val; | ||
395 | |||
396 | return status; | ||
397 | } | ||
398 | |||
399 | hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size) | ||
400 | { | ||
401 | hw_status status = 0; | ||
402 | u32 pte_addr; | ||
403 | s32 num_entries = 1; | ||
404 | |||
405 | switch (page_size) { | ||
406 | case HW_PAGE_SIZE4KB: | ||
407 | pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, | ||
408 | virtual_addr & | ||
409 | MMU_SMALL_PAGE_MASK); | ||
410 | break; | ||
411 | |||
412 | case HW_PAGE_SIZE64KB: | ||
413 | num_entries = 16; | ||
414 | pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, | ||
415 | virtual_addr & | ||
416 | MMU_LARGE_PAGE_MASK); | ||
417 | break; | ||
418 | |||
419 | case HW_PAGE_SIZE1MB: | ||
420 | case HW_MMU_COARSE_PAGE_SIZE: | ||
421 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
422 | virtual_addr & | ||
423 | MMU_SECTION_ADDR_MASK); | ||
424 | break; | ||
425 | |||
426 | case HW_PAGE_SIZE16MB: | ||
427 | num_entries = 16; | ||
428 | pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, | ||
429 | virtual_addr & | ||
430 | MMU_SSECTION_ADDR_MASK); | ||
431 | break; | ||
432 | |||
433 | default: | ||
434 | return -EINVAL; | ||
435 | } | ||
436 | |||
437 | while (--num_entries >= 0) | ||
438 | ((u32 *) pte_addr)[num_entries] = 0; | ||
439 | |||
440 | return status; | ||
441 | } | ||
442 | |||
443 | /* mmu_set_cam_entry */ | ||
444 | static hw_status mmu_set_cam_entry(void __iomem *base_address, | ||
445 | const u32 page_sz, | ||
446 | const u32 preserved_bit, | ||
447 | const u32 valid_bit, | ||
448 | const u32 virtual_addr_tag) | ||
449 | { | ||
450 | hw_status status = 0; | ||
451 | u32 mmu_cam_reg; | ||
452 | |||
453 | mmu_cam_reg = (virtual_addr_tag << 12); | ||
454 | mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) | | ||
455 | (preserved_bit << 3); | ||
456 | |||
457 | /* write values to register */ | ||
458 | MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg); | ||
459 | |||
460 | return status; | ||
461 | } | ||
462 | |||
463 | /* mmu_set_ram_entry */ | ||
464 | static hw_status mmu_set_ram_entry(void __iomem *base_address, | ||
465 | const u32 physical_addr, | ||
466 | enum hw_endianism_t endianism, | ||
467 | enum hw_element_size_t element_size, | ||
468 | enum hw_mmu_mixed_size_t mixed_size) | ||
469 | { | ||
470 | hw_status status = 0; | ||
471 | u32 mmu_ram_reg; | ||
472 | |||
473 | mmu_ram_reg = (physical_addr & MMU_ADDR_MASK); | ||
474 | mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) | | ||
475 | (mixed_size << 6)); | ||
476 | |||
477 | /* write values to register */ | ||
478 | MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg); | ||
479 | |||
480 | return status; | ||
481 | |||
482 | } | ||
483 | |||
484 | void hw_mmu_tlb_flush_all(void __iomem *base) | ||
485 | { | ||
486 | __raw_writel(1, base + MMU_GFLUSH); | ||
487 | } | ||
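mmu_set_cam_entry() above packs the 20-bit virtual-address tag into bits [31:12] of MMU_CAM, the preserved and valid flags into bits 3 and 2, and the page-size code into bits [1:0]. A worked example, assuming the file-local MMU_ADDR_MASK and hw_mmu_page_size_t definitions above; the address 0x20001000 and the function name are purely illustrative, not taken from the deleted sources:

/* Hypothetical example: CAM value for a valid, non-preserved 4 KB page. */
static u32 example_cam_value(void)
{
        u32 va  = 0x20001000;                   /* illustrative DSP virtual address */
        u32 tag = (va & MMU_ADDR_MASK) >> 12;   /* 0x20001 */

        return (tag << 12) |                    /* VA tag in bits [31:12] */
               (1 << 2) |                       /* valid bit set */
               (0 << 3) |                       /* preserved bit clear */
               HW_MMU_SMALL_PAGE;               /* 4 KB page-size code (2) */
        /* result: 0x20001006, the value mmu_set_cam_entry() would write */
}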
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h deleted file mode 100644 index 1c50bb36edfe..000000000000 --- a/drivers/staging/tidspbridge/hw/hw_mmu.h +++ /dev/null | |||
@@ -1,160 +0,0 @@ | |||
1 | /* | ||
2 | * hw_mmu.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * MMU types and API declarations | ||
7 | * | ||
8 | * Copyright (C) 2007 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef _HW_MMU_H | ||
20 | #define _HW_MMU_H | ||
21 | |||
22 | #include <linux/types.h> | ||
23 | |||
24 | /* Bitmasks for interrupt sources */ | ||
25 | #define HW_MMU_TRANSLATION_FAULT 0x2 | ||
26 | #define HW_MMU_ALL_INTERRUPTS 0x1F | ||
27 | |||
28 | #define HW_MMU_COARSE_PAGE_SIZE 0x400 | ||
29 | |||
30 | /* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow | ||
31 | CPU/TLB Element size */ | ||
32 | enum hw_mmu_mixed_size_t { | ||
33 | HW_MMU_TLBES, | ||
34 | HW_MMU_CPUES | ||
35 | }; | ||
36 | |||
37 | /* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */ | ||
38 | struct hw_mmu_map_attrs_t { | ||
39 | enum hw_endianism_t endianism; | ||
40 | enum hw_element_size_t element_size; | ||
41 | enum hw_mmu_mixed_size_t mixed_size; | ||
42 | bool donotlockmpupage; | ||
43 | }; | ||
44 | |||
45 | extern hw_status hw_mmu_enable(void __iomem *base_address); | ||
46 | |||
47 | extern hw_status hw_mmu_disable(void __iomem *base_address); | ||
48 | |||
49 | extern hw_status hw_mmu_num_locked_set(void __iomem *base_address, | ||
50 | u32 num_locked_entries); | ||
51 | |||
52 | extern hw_status hw_mmu_victim_num_set(void __iomem *base_address, | ||
53 | u32 victim_entry_num); | ||
54 | |||
55 | /* For MMU faults */ | ||
56 | extern hw_status hw_mmu_event_ack(void __iomem *base_address, | ||
57 | u32 irq_mask); | ||
58 | |||
59 | extern hw_status hw_mmu_event_disable(void __iomem *base_address, | ||
60 | u32 irq_mask); | ||
61 | |||
62 | extern hw_status hw_mmu_event_enable(void __iomem *base_address, | ||
63 | u32 irq_mask); | ||
64 | |||
65 | extern hw_status hw_mmu_event_status(void __iomem *base_address, | ||
66 | u32 *irq_mask); | ||
67 | |||
68 | extern hw_status hw_mmu_fault_addr_read(void __iomem *base_address, | ||
69 | u32 *addr); | ||
70 | |||
71 | /* Set the TT base address */ | ||
72 | extern hw_status hw_mmu_ttb_set(void __iomem *base_address, | ||
73 | u32 ttb_phys_addr); | ||
74 | |||
75 | extern hw_status hw_mmu_twl_enable(void __iomem *base_address); | ||
76 | |||
77 | extern hw_status hw_mmu_twl_disable(void __iomem *base_address); | ||
78 | |||
79 | extern hw_status hw_mmu_tlb_add(void __iomem *base_address, | ||
80 | u32 physical_addr, | ||
81 | u32 virtual_addr, | ||
82 | u32 page_sz, | ||
83 | u32 entry_num, | ||
84 | struct hw_mmu_map_attrs_t *map_attrs, | ||
85 | s8 preserved_bit, s8 valid_bit); | ||
86 | |||
87 | /* For PTEs */ | ||
88 | extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va, | ||
89 | u32 physical_addr, | ||
90 | u32 virtual_addr, | ||
91 | u32 page_sz, | ||
92 | struct hw_mmu_map_attrs_t *map_attrs); | ||
93 | |||
94 | extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, | ||
95 | u32 virtual_addr, u32 page_size); | ||
96 | |||
97 | void hw_mmu_tlb_flush_all(void __iomem *base); | ||
98 | |||
99 | static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va) | ||
100 | { | ||
101 | u32 pte_addr; | ||
102 | u32 va31_to20; | ||
103 | |||
104 | va31_to20 = va >> (20 - 2); /* (va >> 20) << 2, folded into one shift */ | ||
105 | va31_to20 &= 0xFFFFFFFCUL; | ||
106 | pte_addr = l1_base + va31_to20; | ||
107 | |||
108 | return pte_addr; | ||
109 | } | ||
110 | |||
111 | static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va) | ||
112 | { | ||
113 | u32 pte_addr; | ||
114 | |||
115 | pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC); | ||
116 | |||
117 | return pte_addr; | ||
118 | } | ||
119 | |||
120 | static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val) | ||
121 | { | ||
122 | u32 pte_coarse; | ||
123 | |||
124 | pte_coarse = pte_val & 0xFFFFFC00; | ||
125 | |||
126 | return pte_coarse; | ||
127 | } | ||
128 | |||
129 | static inline u32 hw_mmu_pte_size_l1(u32 pte_val) | ||
130 | { | ||
131 | u32 pte_size = 0; | ||
132 | |||
133 | if ((pte_val & 0x3) == 0x1) { | ||
134 | /* Points to L2 PT */ | ||
135 | pte_size = HW_MMU_COARSE_PAGE_SIZE; | ||
136 | } | ||
137 | |||
138 | if ((pte_val & 0x3) == 0x2) { | ||
139 | if (pte_val & (1 << 18)) | ||
140 | pte_size = HW_PAGE_SIZE16MB; | ||
141 | else | ||
142 | pte_size = HW_PAGE_SIZE1MB; | ||
143 | } | ||
144 | |||
145 | return pte_size; | ||
146 | } | ||
147 | |||
148 | static inline u32 hw_mmu_pte_size_l2(u32 pte_val) | ||
149 | { | ||
150 | u32 pte_size = 0; | ||
151 | |||
152 | if (pte_val & 0x2) | ||
153 | pte_size = HW_PAGE_SIZE4KB; | ||
154 | else if (pte_val & 0x1) | ||
155 | pte_size = HW_PAGE_SIZE64KB; | ||
156 | |||
157 | return pte_size; | ||
158 | } | ||
159 | |||
160 | #endif /* _HW_MMU_H */ | ||
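The inline helpers at the end of hw_mmu.h fold the entry-size scaling into their shifts, which makes the arithmetic easy to misread. A worked example of hw_mmu_pte_addr_l1(), assuming an L1 table mapped at kernel virtual address l1_base and the illustrative DSP virtual address 0x20100000:

/* Hypothetical walk-through of hw_mmu_pte_addr_l1(l1_base, 0x20100000):
 *   va >> (20 - 2)   == 0x804   (L1 index 0x201 pre-scaled by 4 bytes/entry)
 *   & 0xFFFFFFFCUL   == 0x804   (force 4-byte alignment)
 *   result           == l1_base + 0x804
 * i.e. entry va >> 20 == 0x201 of the first-level table, each entry being
 * one 32-bit descriptor. */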
diff --git a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h b/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h deleted file mode 100644 index cc95a18f1db9..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/_chnl_sm.h +++ /dev/null | |||
@@ -1,177 +0,0 @@ | |||
1 | /* | ||
2 | * _chnl_sm.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Private header file defining channel manager and channel objects for | ||
7 | * a shared memory channel driver. | ||
8 | * | ||
9 | * Shared between the modules implementing the shared memory channel class | ||
10 | * library. | ||
11 | * | ||
12 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
13 | * | ||
14 | * This package is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License version 2 as | ||
16 | * published by the Free Software Foundation. | ||
17 | * | ||
18 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
19 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
20 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
21 | */ | ||
22 | |||
23 | #ifndef _CHNL_SM_ | ||
24 | #define _CHNL_SM_ | ||
25 | |||
26 | #include <dspbridge/dspapi.h> | ||
27 | #include <dspbridge/dspdefs.h> | ||
28 | |||
29 | #include <linux/list.h> | ||
30 | #include <dspbridge/ntfy.h> | ||
31 | |||
32 | /* | ||
33 | * These target side symbols define the beginning and ending addresses | ||
34 | * of the shared memory buffer. They are defined in the *cfg.cmd file by | ||
35 | * cdb code. | ||
36 | */ | ||
37 | #define CHNL_SHARED_BUFFER_BASE_SYM "_SHM_BEG" | ||
38 | #define CHNL_SHARED_BUFFER_LIMIT_SYM "_SHM_END" | ||
39 | #define BRIDGEINIT_BIOSGPTIMER "_BRIDGEINIT_BIOSGPTIMER" | ||
40 | #define BRIDGEINIT_LOADMON_GPTIMER "_BRIDGEINIT_LOADMON_GPTIMER" | ||
41 | |||
42 | #ifndef _CHNL_WORDSIZE | ||
43 | #define _CHNL_WORDSIZE 4 /* default _CHNL_WORDSIZE is 4 bytes/word */ | ||
44 | #endif | ||
45 | |||
46 | #define MAXOPPS 16 | ||
47 | |||
48 | /* Shared memory config options */ | ||
49 | #define SHM_CURROPP 0 /* Set current OPP in shm */ | ||
50 | #define SHM_OPPINFO 1 /* Set dsp voltage and freq table values */ | ||
51 | #define SHM_GETOPP 2 /* Get opp requested by DSP */ | ||
52 | |||
53 | struct opp_table_entry { | ||
54 | u32 voltage; | ||
55 | u32 frequency; | ||
56 | u32 min_freq; | ||
57 | u32 max_freq; | ||
58 | }; | ||
59 | |||
60 | struct opp_struct { | ||
61 | u32 curr_opp_pt; | ||
62 | u32 num_opp_pts; | ||
63 | struct opp_table_entry opp_point[MAXOPPS]; | ||
64 | }; | ||
65 | |||
66 | /* Request to MPU */ | ||
67 | struct opp_rqst_struct { | ||
68 | u32 rqst_dsp_freq; | ||
69 | u32 rqst_opp_pt; | ||
70 | }; | ||
71 | |||
72 | /* Info to MPU */ | ||
73 | struct load_mon_struct { | ||
74 | u32 curr_dsp_load; | ||
75 | u32 curr_dsp_freq; | ||
76 | u32 pred_dsp_load; | ||
77 | u32 pred_dsp_freq; | ||
78 | }; | ||
79 | |||
80 | /* Structure shared between DSP and PC for communication. */ | ||
81 | struct shm { | ||
82 | u32 dsp_free_mask; /* Written by DSP, read by PC. */ | ||
83 | u32 host_free_mask; /* Written by PC, read by DSP */ | ||
84 | |||
85 | u32 input_full; /* Input channel has unread data. */ | ||
86 | u32 input_id; /* Channel for which input is available. */ | ||
87 | u32 input_size; /* Size of data block (in DSP words). */ | ||
88 | |||
89 | u32 output_full; /* Output channel has unread data. */ | ||
90 | u32 output_id; /* Channel for which output is available. */ | ||
91 | u32 output_size; /* Size of data block (in DSP words). */ | ||
92 | |||
93 | u32 arg; /* Arg for Issue/Reclaim (23 bits for 55x). */ | ||
94 | u32 resvd; /* Keep structure size even for 32-bit DSPs */ | ||
95 | |||
96 | /* Operating Point structure */ | ||
97 | struct opp_struct opp_table_struct; | ||
98 | /* Operating Point Request structure */ | ||
99 | struct opp_rqst_struct opp_request; | ||
100 | /* load monitor information structure */ | ||
101 | struct load_mon_struct load_mon_info; | ||
102 | /* Flag for WDT enable/disable F/I clocks */ | ||
103 | u32 wdt_setclocks; | ||
104 | u32 wdt_overflow; /* WDT overflow time */ | ||
105 | char dummy[176]; /* padding to 256 byte boundary */ | ||
106 | u32 shm_dbg_var[64]; /* shared memory debug variables */ | ||
107 | }; | ||
108 | |||
109 | /* Channel Manager: only one created per board: */ | ||
110 | struct chnl_mgr { | ||
111 | /* Function interface to Bridge driver */ | ||
112 | struct bridge_drv_interface *intf_fxns; | ||
113 | struct io_mgr *iomgr; /* IO manager */ | ||
114 | /* Device this board represents */ | ||
115 | struct dev_object *dev_obj; | ||
116 | |||
117 | /* These fields initialized in bridge_chnl_create(): */ | ||
118 | u32 output_mask; /* Host output channels w/ full buffers */ | ||
119 | u32 last_output; /* Last output channel fired from DPC */ | ||
120 | /* Critical section object handle */ | ||
121 | spinlock_t chnl_mgr_lock; | ||
122 | u32 word_size; /* Size in bytes of DSP word */ | ||
123 | u8 max_channels; /* Total number of channels */ | ||
124 | u8 open_channels; /* Total number of open channels */ | ||
125 | struct chnl_object **channels; /* Array of channels */ | ||
126 | u8 type; /* Type of channel class library */ | ||
127 | /* If no shm syms, return for CHNL_Open */ | ||
128 | int chnl_open_status; | ||
129 | }; | ||
130 | |||
131 | /* | ||
132 | * Channel: up to CHNL_MAXCHANNELS per board, or, if DSP-DMA is supported, | ||
133 | * up to CHNL_MAXCHANNELS + CHNL_MAXDDMACHNLS per board. | ||
134 | */ | ||
135 | struct chnl_object { | ||
136 | /* Pointer back to channel manager */ | ||
137 | struct chnl_mgr *chnl_mgr_obj; | ||
138 | u32 chnl_id; /* Channel id */ | ||
139 | u8 state; /* Current channel state */ | ||
140 | s8 chnl_mode; /* Chnl mode and attributes */ | ||
141 | /* Chnl I/O completion event (user mode) */ | ||
142 | void *user_event; | ||
143 | /* Abstract synchronization object */ | ||
144 | struct sync_object *sync_event; | ||
145 | u32 process; /* Process which created this channel */ | ||
146 | u32 cb_arg; /* Argument to use with callback */ | ||
147 | struct list_head io_requests; /* List of IOR's to driver */ | ||
148 | s32 cio_cs; /* Number of IOC's in queue */ | ||
149 | s32 cio_reqs; /* Number of IORequests in queue */ | ||
150 | s32 chnl_packets; /* Initial number of free Irps */ | ||
151 | /* List of IOC's from driver */ | ||
152 | struct list_head io_completions; | ||
153 | struct list_head free_packets_list; /* List of free Irps */ | ||
154 | struct ntfy_object *ntfy_obj; | ||
155 | u32 bytes_moved; /* Total number of bytes transferred */ | ||
156 | |||
157 | /* For DSP-DMA */ | ||
158 | |||
159 | /* Type of chnl transport:CHNL_[PCPY][DDMA] */ | ||
160 | u32 chnl_type; | ||
161 | }; | ||
162 | |||
163 | /* I/O Request/completion packet: */ | ||
164 | struct chnl_irp { | ||
165 | struct list_head link; /* Link to next CHIRP in queue. */ | ||
166 | /* Buffer to be filled/emptied. (User) */ | ||
167 | u8 *host_user_buf; | ||
168 | /* Buffer to be filled/emptied. (System) */ | ||
169 | u8 *host_sys_buf; | ||
170 | u32 arg; /* Issue/Reclaim argument. */ | ||
171 | u32 dsp_tx_addr; /* Transfer address on DSP side. */ | ||
172 | u32 byte_size; /* Bytes transferred. */ | ||
173 | u32 buf_size; /* Actual buffer size when allocated. */ | ||
174 | u32 status; /* Status of IO completion. */ | ||
175 | }; | ||
176 | |||
177 | #endif /* _CHNL_SM_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/brddefs.h b/drivers/staging/tidspbridge/include/dspbridge/brddefs.h deleted file mode 100644 index 725d7b37414c..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/brddefs.h +++ /dev/null | |||
@@ -1,37 +0,0 @@ | |||
1 | /* | ||
2 | * brddefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global BRD constants and types, shared between DSP API and Bridge driver. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef BRDDEFS_ | ||
20 | #define BRDDEFS_ | ||
21 | |||
22 | /* platform status values */ | ||
23 | #define BRD_STOPPED 0x0 /* No Monitor Loaded, Not running. */ | ||
24 | #define BRD_IDLE 0x1 /* Monitor Loaded, but suspended. */ | ||
25 | #define BRD_RUNNING 0x2 /* Monitor loaded, and executing. */ | ||
26 | #define BRD_UNKNOWN 0x3 /* Board state is indeterminate. */ | ||
27 | #define BRD_LOADED 0x5 | ||
28 | #define BRD_SLEEP_TRANSITION 0x6 /* Sleep transition in progress */ | ||
29 | #define BRD_HIBERNATION 0x7 /* MPU initiated hibernation */ | ||
30 | #define BRD_RETENTION 0x8 /* Retention mode */ | ||
31 | #define BRD_DSP_HIBERNATION 0x9 /* DSP initiated hibernation */ | ||
32 | #define BRD_ERROR 0xA /* Board state is Error */ | ||
33 | |||
34 | /* BRD Object */ | ||
35 | struct brd_object; | ||
36 | |||
37 | #endif /* BRDDEFS_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h deleted file mode 100644 index b32c75673ab4..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h +++ /dev/null | |||
@@ -1,61 +0,0 @@ | |||
1 | /* | ||
2 | * cfgdefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global CFG constants and types, shared between DSP API and Bridge driver. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef CFGDEFS_ | ||
20 | #define CFGDEFS_ | ||
21 | |||
22 | /* Host Resources: */ | ||
23 | #define CFG_MAXMEMREGISTERS 9 | ||
24 | |||
25 | /* IRQ flag */ | ||
26 | #define CFG_IRQSHARED 0x01 /* IRQ can be shared */ | ||
27 | |||
28 | /* A platform-related device handle: */ | ||
29 | struct cfg_devnode; | ||
30 | |||
31 | /* | ||
32 | * Host resource structure. | ||
33 | */ | ||
34 | struct cfg_hostres { | ||
35 | u32 num_mem_windows; /* Set to default */ | ||
36 | /* This is the base.memory */ | ||
37 | u32 mem_base[CFG_MAXMEMREGISTERS]; /* shm virtual address */ | ||
38 | u32 mem_length[CFG_MAXMEMREGISTERS]; /* Length of the Base */ | ||
39 | u32 mem_phys[CFG_MAXMEMREGISTERS]; /* shm Physical address */ | ||
40 | u8 birq_registers; /* IRQ Number */ | ||
41 | u8 birq_attrib; /* IRQ Attribute */ | ||
42 | u32 offset_for_monitor; /* The Shared memory starts from | ||
43 | * mem_base + this offset */ | ||
44 | /* | ||
45 | * Info needed by NODE for allocating channels to communicate with RMS: | ||
46 | * chnl_offset: Offset of RMS channels. Lower channels are | ||
47 | * reserved. | ||
48 | * chnl_buf_size: Size of channel buffer to send to RMS | ||
49 | * num_chnls: Total number of channels | ||
50 | * (including reserved). | ||
51 | */ | ||
52 | u32 chnl_offset; | ||
53 | u32 chnl_buf_size; | ||
54 | u32 num_chnls; | ||
55 | void __iomem *per_base; | ||
56 | void __iomem *per_pm_base; | ||
57 | void __iomem *core_pm_base; | ||
58 | void __iomem *dmmu_base; | ||
59 | }; | ||
60 | |||
61 | #endif /* CFGDEFS_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnl.h b/drivers/staging/tidspbridge/include/dspbridge/chnl.h deleted file mode 100644 index 9b018b1f9bf3..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/chnl.h +++ /dev/null | |||
@@ -1,80 +0,0 @@ | |||
1 | /* | ||
2 | * chnl.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP API channel interface: multiplexes data streams through the single | ||
7 | * physical link managed by a Bridge driver. | ||
8 | * | ||
9 | * See DSP API chnl.h for more details. | ||
10 | * | ||
11 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
12 | * | ||
13 | * This package is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License version 2 as | ||
15 | * published by the Free Software Foundation. | ||
16 | * | ||
17 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
18 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
19 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
20 | */ | ||
21 | |||
22 | #ifndef CHNL_ | ||
23 | #define CHNL_ | ||
24 | |||
25 | #include <dspbridge/chnlpriv.h> | ||
26 | |||
27 | /* | ||
28 | * ======== chnl_create ======== | ||
29 | * Purpose: | ||
30 | * Create a channel manager object, responsible for opening new channels | ||
31 | * and closing old ones for a given board. | ||
32 | * Parameters: | ||
33 | * channel_mgr: Location to store a channel manager object on output. | ||
34 | * hdev_obj: Handle to a device object. | ||
35 | * mgr_attrts: Channel manager attributes. | ||
36 | * mgr_attrts->max_channels: Max channels | ||
37 | * mgr_attrts->birq: Channel's I/O IRQ number. | ||
38 | * mgr_attrts->irq_shared: TRUE if the IRQ is shareable. | ||
39 | * mgr_attrts->word_size: DSP Word size in equivalent PC bytes.. | ||
40 | * Returns: | ||
41 | * 0: Success; | ||
42 | * -EFAULT: hdev_obj is invalid. | ||
43 | * -EINVAL: max_channels is 0. | ||
44 | * Invalid DSP word size (must be > 0). | ||
45 | * Invalid base address for DSP communications. | ||
46 | * -ENOMEM: Insufficient memory for requested resources. | ||
47 | * -EIO: Unable to plug channel ISR for configured IRQ. | ||
48 | * -ECHRNG: This manager cannot handle this many channels. | ||
49 | * -EEXIST: Channel manager already exists for this device. | ||
50 | * Requires: | ||
51 | * channel_mgr != NULL. | ||
52 | * mgr_attrts != NULL. | ||
53 | * Ensures: | ||
54 | * 0: Subsequent calls to chnl_create() for the same | ||
55 | * board without an intervening call to | ||
56 | * chnl_destroy() will fail. | ||
57 | */ | ||
58 | extern int chnl_create(struct chnl_mgr **channel_mgr, | ||
59 | struct dev_object *hdev_obj, | ||
60 | const struct chnl_mgrattrs *mgr_attrts); | ||
61 | |||
62 | /* | ||
63 | * ======== chnl_destroy ======== | ||
64 | * Purpose: | ||
65 | * Close all open channels, and destroy the channel manager. | ||
66 | * Parameters: | ||
67 | * hchnl_mgr: Channel manager object. | ||
68 | * Returns: | ||
69 | * 0: Success. | ||
70 | * -EFAULT: hchnl_mgr was invalid. | ||
71 | * Requires: | ||
72 | * Ensures: | ||
73 | * 0: Cancels I/O on each open channel. | ||
74 | * Closes each open channel. | ||
75 | * chnl_create may subsequently be called for the | ||
76 | * same board. | ||
77 | */ | ||
78 | extern int chnl_destroy(struct chnl_mgr *hchnl_mgr); | ||
79 | |||
80 | #endif /* CHNL_ */ | ||
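A hypothetical usage sketch of the interface documented above (not taken from the deleted sources): creating a channel manager, using it, and tearing it down. hdev_obj is assumed to be a valid device object obtained elsewhere in the bridge, and example_open_chnl_mgr is an illustrative name:

#include <dspbridge/chnl.h>

static int example_open_chnl_mgr(struct dev_object *hdev_obj)
{
        struct chnl_mgrattrs attrs = {
                .max_channels = CHNL_MAXCHANNELS,       /* limit from chnlpriv.h */
                .word_size = 2,                         /* DSP word size in bytes */
        };
        struct chnl_mgr *mgr;
        int status;

        status = chnl_create(&mgr, hdev_obj, &attrs);
        if (status)
                return status;  /* -EFAULT, -EINVAL, -ENOMEM, -EIO, ... */

        /* ... open channels and stream data here ... */

        return chnl_destroy(mgr);
}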
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnldefs.h b/drivers/staging/tidspbridge/include/dspbridge/chnldefs.h deleted file mode 100644 index cb67c309b6ca..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/chnldefs.h +++ /dev/null | |||
@@ -1,63 +0,0 @@ | |||
1 | /* | ||
2 | * chnldefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * System-wide channel objects and constants. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef CHNLDEFS_ | ||
20 | #define CHNLDEFS_ | ||
21 | |||
22 | /* Channel id option. */ | ||
23 | #define CHNL_PICKFREE (~0UL) /* Let manager pick a free channel. */ | ||
24 | |||
25 | /* Channel modes */ | ||
26 | #define CHNL_MODETODSP 0 /* Data streaming to the DSP. */ | ||
27 | #define CHNL_MODEFROMDSP 1 /* Data streaming from the DSP. */ | ||
28 | |||
29 | /* GetIOCompletion flags */ | ||
30 | #define CHNL_IOCINFINITE 0xffffffff /* Wait forever for IO completion. */ | ||
31 | #define CHNL_IOCNOWAIT 0x0 /* Dequeue an IOC, if available. */ | ||
32 | |||
33 | /* IO Completion Record status: */ | ||
34 | #define CHNL_IOCSTATCOMPLETE 0x0000 /* IO Completed. */ | ||
35 | #define CHNL_IOCSTATCANCEL 0x0002 /* IO was cancelled */ | ||
36 | #define CHNL_IOCSTATTIMEOUT 0x0008 /* Wait for IOC timed out. */ | ||
37 | #define CHNL_IOCSTATEOS 0x8000 /* End Of Stream reached. */ | ||
38 | |||
39 | /* Macros for checking I/O Completion status: */ | ||
40 | #define CHNL_IS_IO_COMPLETE(ioc) (!(ioc.status & ~CHNL_IOCSTATEOS)) | ||
41 | #define CHNL_IS_IO_CANCELLED(ioc) (ioc.status & CHNL_IOCSTATCANCEL) | ||
42 | #define CHNL_IS_TIMED_OUT(ioc) (ioc.status & CHNL_IOCSTATTIMEOUT) | ||
43 | |||
44 | /* Channel attributes: */ | ||
45 | struct chnl_attr { | ||
46 | u32 uio_reqs; /* Max # of preallocated I/O requests. */ | ||
47 | void *event_obj; /* User supplied auto-reset event object. */ | ||
48 | char *str_event_name; /* Ptr to name of user event object. */ | ||
49 | void *reserved1; /* Reserved for future use. */ | ||
50 | u32 reserved2; /* Reserved for future use. */ | ||
51 | |||
52 | }; | ||
53 | |||
54 | /* I/O completion record: */ | ||
55 | struct chnl_ioc { | ||
56 | void *buf; /* Buffer to be filled/emptied. */ | ||
57 | u32 byte_size; /* Bytes transferred. */ | ||
58 | u32 buf_size; /* Actual buffer size in bytes */ | ||
59 | u32 status; /* Status of IO completion. */ | ||
60 | u32 arg; /* User argument associated with buf. */ | ||
61 | }; | ||
62 | |||
63 | #endif /* CHNLDEFS_ */ | ||
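A hypothetical sketch (not from the deleted sources) of how the completion-status macros above are meant to be applied to a reclaimed I/O completion record; example_report_ioc is an illustrative name:

#include <linux/printk.h>
#include <dspbridge/chnldefs.h>

static void example_report_ioc(struct chnl_ioc ioc)
{
        if (CHNL_IS_IO_COMPLETE(ioc))           /* only the EOS bit may remain */
                pr_debug("transferred %u of %u bytes\n",
                         ioc.byte_size, ioc.buf_size);
        else if (CHNL_IS_TIMED_OUT(ioc))
                pr_debug("wait for I/O completion timed out\n");
        else if (CHNL_IS_IO_CANCELLED(ioc))
                pr_debug("I/O was cancelled\n");
}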
diff --git a/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h b/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h deleted file mode 100644 index 4114c79e2466..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/chnlpriv.h +++ /dev/null | |||
@@ -1,85 +0,0 @@ | |||
1 | /* | ||
2 | * chnlpriv.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Private channel header shared between DSPSYS, DSPAPI and | ||
7 | * Bridge driver modules. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef CHNLPRIV_ | ||
21 | #define CHNLPRIV_ | ||
22 | |||
23 | #include <dspbridge/chnldefs.h> | ||
24 | #include <dspbridge/devdefs.h> | ||
25 | #include <dspbridge/sync.h> | ||
26 | |||
27 | /* Channel manager limits: */ | ||
28 | #define CHNL_MAXCHANNELS 32 /* Max channels available per transport */ | ||
29 | |||
30 | /* | ||
31 | * Transport channel Id definitions (must match dsp-side): | ||
32 | * | ||
33 | * For CHNL_MAXCHANNELS = 16: | ||
34 | * | ||
35 | * ChnlIds: | ||
36 | * 0-15 (PCPY) - transport 0 | ||
37 | * 16-31 (DDMA) - transport 1 | ||
38 | * 32-47 (ZCPY) - transport 2 | ||
39 | */ | ||
40 | #define CHNL_PCPY 0 /* Proc-copy transport 0 */ | ||
41 | |||
42 | /* Higher level channel states: */ | ||
43 | #define CHNL_STATEREADY 0 /* Channel ready for I/O. */ | ||
44 | #define CHNL_STATECANCEL 1 /* I/O was cancelled. */ | ||
45 | #define CHNL_STATEEOS 2 /* End Of Stream reached. */ | ||
46 | |||
47 | /* Macros for checking mode: */ | ||
48 | #define CHNL_IS_INPUT(mode) (mode & CHNL_MODEFROMDSP) | ||
49 | #define CHNL_IS_OUTPUT(mode) (!CHNL_IS_INPUT(mode)) | ||
50 | |||
51 | /* Types of channel class libraries: */ | ||
52 | #define CHNL_TYPESM 1 /* Shared memory driver. */ | ||
53 | |||
54 | /* Channel info. */ | ||
55 | struct chnl_info { | ||
56 | struct chnl_mgr *chnl_mgr; /* Owning channel manager. */ | ||
57 | u32 cnhl_id; /* Channel ID. */ | ||
58 | void *event_obj; /* Channel I/O completion event. */ | ||
59 | /*Abstraction of I/O completion event. */ | ||
60 | struct sync_object *sync_event; | ||
61 | s8 mode; /* Channel mode. */ | ||
62 | u8 state; /* Current channel state. */ | ||
63 | u32 bytes_tx; /* Total bytes transferred. */ | ||
64 | u32 cio_cs; /* Number of IOCs in queue. */ | ||
65 | u32 cio_reqs; /* Number of IO Requests in queue. */ | ||
66 | u32 process; /* Process owning this channel. */ | ||
67 | }; | ||
68 | |||
69 | /* Channel manager info: */ | ||
70 | struct chnl_mgrinfo { | ||
71 | u8 type; /* Type of channel class library. */ | ||
72 | /* Channel handle, given the channel id. */ | ||
73 | struct chnl_object *chnl_obj; | ||
74 | u8 open_channels; /* Number of open channels. */ | ||
75 | u8 max_channels; /* total # of chnls supported */ | ||
76 | }; | ||
77 | |||
78 | /* Channel Manager Attrs: */ | ||
79 | struct chnl_mgrattrs { | ||
80 | /* Max number of channels this manager can use. */ | ||
81 | u8 max_channels; | ||
82 | u32 word_size; /* DSP Word size. */ | ||
83 | }; | ||
84 | |||
85 | #endif /* CHNLPRIV_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/clk.h b/drivers/staging/tidspbridge/include/dspbridge/clk.h deleted file mode 100644 index 685341c50693..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/clk.h +++ /dev/null | |||
@@ -1,101 +0,0 @@ | |||
1 | /* | ||
2 | * clk.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Provides Clock functions. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef _CLK_H | ||
20 | #define _CLK_H | ||
21 | |||
22 | enum dsp_clk_id { | ||
23 | DSP_CLK_IVA2 = 0, | ||
24 | DSP_CLK_GPT5, | ||
25 | DSP_CLK_GPT6, | ||
26 | DSP_CLK_GPT7, | ||
27 | DSP_CLK_GPT8, | ||
28 | DSP_CLK_WDT3, | ||
29 | DSP_CLK_MCBSP1, | ||
30 | DSP_CLK_MCBSP2, | ||
31 | DSP_CLK_MCBSP3, | ||
32 | DSP_CLK_MCBSP4, | ||
33 | DSP_CLK_MCBSP5, | ||
34 | DSP_CLK_SSI, | ||
35 | DSP_CLK_NOT_DEFINED | ||
36 | }; | ||
37 | |||
38 | /* | ||
39 | * ======== dsp_clk_exit ======== | ||
40 | * Purpose: | ||
41 | * Discontinue usage of module; free resources when reference count | ||
42 | * reaches 0. | ||
43 | * Parameters: | ||
44 | * Returns: | ||
45 | * Requires: | ||
46 | * CLK initialized. | ||
47 | * Ensures: | ||
48 | * Resources used by module are freed when cRef reaches zero. | ||
49 | */ | ||
50 | extern void dsp_clk_exit(void); | ||
51 | |||
52 | /* | ||
53 | * ======== dsp_clk_init ======== | ||
54 | * Purpose: | ||
55 | * Initializes private state of CLK module. | ||
56 | * Parameters: | ||
57 | * Returns: | ||
58 | * TRUE if initialized; FALSE if error occurred. | ||
59 | * Requires: | ||
60 | * Ensures: | ||
61 | * CLK initialized. | ||
62 | */ | ||
63 | extern void dsp_clk_init(void); | ||
64 | |||
65 | void dsp_gpt_wait_overflow(short int clk_id, unsigned int load); | ||
66 | |||
67 | /* | ||
68 | * ======== dsp_clk_enable ======== | ||
69 | * Purpose: | ||
70 | * Enables the clock requested. | ||
71 | * Parameters: | ||
72 | * Returns: | ||
73 | * 0: Success. | ||
74 | * -EPERM: Error occurred while enabling the clock. | ||
75 | * Requires: | ||
76 | * Ensures: | ||
77 | */ | ||
78 | extern int dsp_clk_enable(enum dsp_clk_id clk_id); | ||
79 | |||
80 | u32 dsp_clock_enable_all(u32 dsp_per_clocks); | ||
81 | |||
82 | /* | ||
83 | * ======== dsp_clk_disable ======== | ||
84 | * Purpose: | ||
85 | * Disables the clock requested. | ||
86 | * Parameters: | ||
87 | * Returns: | ||
88 | * 0: Success. | ||
89 | * -EPERM: Error occurred while disabling the clock. | ||
90 | * Requires: | ||
91 | * Ensures: | ||
92 | */ | ||
93 | extern int dsp_clk_disable(enum dsp_clk_id clk_id); | ||
94 | |||
95 | extern u32 dsp_clk_get_iva2_rate(void); | ||
96 | |||
97 | u32 dsp_clock_disable_all(u32 dsp_per_clocks); | ||
98 | |||
99 | extern void ssi_clk_prepare(bool FLAG); | ||
100 | |||
101 | #endif /* _CLK_H */ | ||
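A hypothetical sketch (not from the deleted sources) of gating one of the DSP-side GP timers through the clock interface above; example_pulse_gpt5 is an illustrative name:

#include <dspbridge/clk.h>

static int example_pulse_gpt5(void)
{
        int ret = dsp_clk_enable(DSP_CLK_GPT5);

        if (ret)
                return ret;     /* -EPERM per the notes above */

        /* ... program and use the timer while its functional clock runs ... */

        return dsp_clk_disable(DSP_CLK_GPT5);
}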
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmm.h b/drivers/staging/tidspbridge/include/dspbridge/cmm.h deleted file mode 100644 index 2adf9ecdf07f..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/cmm.h +++ /dev/null | |||
@@ -1,337 +0,0 @@ | |||
1 | /* | ||
2 | * cmm.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * The Communication Memory Management(CMM) module provides shared memory | ||
7 | * management services for DSP/BIOS Bridge data streaming and messaging. | ||
8 | * Multiple shared memory segments can be registered with CMM. Memory is | ||
9 | * coelesced back to the appropriate pool when a buffer is freed. | ||
10 | * | ||
11 | * The CMM_Xlator[xxx] functions are used for node messaging and data | ||
12 | * streaming address translation to perform zero-copy inter-processor | ||
13 | * data transfer(GPP<->DSP). A "translator" object is created for a node or | ||
14 | * stream object that contains per thread virtual address information. This | ||
15 | * translator info is used at runtime to perform SM address translation | ||
16 | * to/from the DSP address space. | ||
17 | * | ||
18 | * Notes: | ||
19 | * cmm_xlator_alloc_buf - Used by Node and Stream modules for SM address | ||
20 | * translation. | ||
21 | * | ||
22 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
23 | * | ||
24 | * This package is free software; you can redistribute it and/or modify | ||
25 | * it under the terms of the GNU General Public License version 2 as | ||
26 | * published by the Free Software Foundation. | ||
27 | * | ||
28 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
29 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
30 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
31 | */ | ||
32 | |||
33 | #ifndef CMM_ | ||
34 | #define CMM_ | ||
35 | |||
36 | #include <dspbridge/devdefs.h> | ||
37 | |||
38 | #include <dspbridge/cmmdefs.h> | ||
39 | #include <dspbridge/host_os.h> | ||
40 | |||
41 | /* | ||
42 | * ======== cmm_calloc_buf ======== | ||
43 | * Purpose: | ||
44 | * Allocate memory buffers that can be used for data streaming or | ||
45 | * messaging. | ||
46 | * Parameters: | ||
47 | * hcmm_mgr: Cmm Mgr handle. | ||
48 | * usize: Number of bytes to allocate. | ||
49 | * pattr: Attributes of memory to allocate. | ||
50 | * pp_buf_va: Address of where to place VA. | ||
51 | * Returns: | ||
52 | * Pointer to a zero'd block of SM memory; | ||
53 | * NULL if memory couldn't be allocated, | ||
54 | * or if byte_size == 0, | ||
55 | * Requires: | ||
56 | * Valid hcmm_mgr. | ||
57 | * CMM initialized. | ||
58 | * Ensures: | ||
59 | * The returned pointer, if not NULL, points to a valid memory block of | ||
60 | * the size requested. | ||
61 | * | ||
62 | */ | ||
63 | extern void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, | ||
64 | u32 usize, struct cmm_attrs *pattrs, | ||
65 | void **pp_buf_va); | ||
66 | |||
67 | /* | ||
68 | * ======== cmm_create ======== | ||
69 | * Purpose: | ||
70 | * Create a communication memory manager object. | ||
71 | * Parameters: | ||
72 | * ph_cmm_mgr: Location to store a communication manager handle on | ||
73 | * output. | ||
74 | * hdev_obj: Handle to a device object. | ||
75 | * mgr_attrts: Comm mem manager attributes. | ||
76 | * Returns: | ||
77 | * 0: Success; | ||
78 | * -ENOMEM: Insufficient memory for requested resources. | ||
79 | * -EPERM: Failed to initialize critical sect sync object. | ||
80 | * | ||
81 | * Requires: | ||
82 | * ph_cmm_mgr != NULL. | ||
83 | * mgr_attrts->min_block_size >= 4 bytes. | ||
84 | * Ensures: | ||
85 | * | ||
86 | */ | ||
87 | extern int cmm_create(struct cmm_object **ph_cmm_mgr, | ||
88 | struct dev_object *hdev_obj, | ||
89 | const struct cmm_mgrattrs *mgr_attrts); | ||
90 | |||
91 | /* | ||
92 | * ======== cmm_destroy ======== | ||
93 | * Purpose: | ||
94 | * Destroy the communication memory manager object. | ||
95 | * Parameters: | ||
96 | * hcmm_mgr: Cmm Mgr handle. | ||
97 | * force: Force deallocation of all cmm memory immediately if set TRUE. | ||
98 | * If FALSE, and outstanding allocations will return -EPERM | ||
99 | * status. | ||
100 | * Returns: | ||
101 | * 0: CMM object & resources deleted. | ||
102 | * -EPERM: Unable to free CMM object due to outstanding allocation. | ||
103 | * -EFAULT: Unable to free CMM due to bad handle. | ||
104 | * Requires: | ||
105 | * CMM is initialized. | ||
106 | * hcmm_mgr != NULL. | ||
107 | * Ensures: | ||
108 | * Memory resources used by Cmm Mgr are freed. | ||
109 | */ | ||
110 | extern int cmm_destroy(struct cmm_object *hcmm_mgr, bool force); | ||
111 | |||
112 | /* | ||
113 | * ======== cmm_free_buf ======== | ||
114 | * Purpose: | ||
115 | * Free the given buffer. | ||
116 | * Parameters: | ||
117 | * hcmm_mgr: Cmm Mgr handle. | ||
118 | * pbuf: Pointer to memory allocated by cmm_calloc_buf(). | ||
119 | * ul_seg_id: SM segment Id used in CMM_Calloc() attrs. | ||
120 | * Set to 0 to use default segment. | ||
121 | * Returns: | ||
122 | * 0 | ||
123 | * -EPERM | ||
124 | * Requires: | ||
125 | * CMM initialized. | ||
126 | * buf_pa != NULL | ||
127 | * Ensures: | ||
128 | * | ||
129 | */ | ||
130 | extern int cmm_free_buf(struct cmm_object *hcmm_mgr, | ||
131 | void *buf_pa, u32 ul_seg_id); | ||
132 | |||
133 | /* | ||
134 | * ======== cmm_get_handle ======== | ||
135 | * Purpose: | ||
136 | * Return the handle to the cmm mgr for the given device obj. | ||
137 | * Parameters: | ||
138 | * hprocessor: Handle to a Processor. | ||
139 | * ph_cmm_mgr: Location to store the shared memory mgr handle on | ||
140 | * output. | ||
141 | * | ||
142 | * Returns: | ||
143 | * 0: Cmm Mgr opaque handle returned. | ||
144 | * -EFAULT: Invalid handle. | ||
145 | * Requires: | ||
146 | * ph_cmm_mgr != NULL | ||
147 | * hdev_obj != NULL | ||
148 | * Ensures: | ||
149 | */ | ||
150 | extern int cmm_get_handle(void *hprocessor, | ||
151 | struct cmm_object **ph_cmm_mgr); | ||
152 | |||
153 | /* | ||
154 | * ======== cmm_get_info ======== | ||
155 | * Purpose: | ||
156 | * Return the current SM and VM utilization information. | ||
157 | * Parameters: | ||
158 | * hcmm_mgr: Handle to a Cmm Mgr. | ||
159 | * cmm_info_obj: Location to store the Cmm information on output. | ||
160 | * | ||
161 | * Returns: | ||
162 | * 0: Success. | ||
163 | * -EFAULT: Invalid handle. | ||
164 | * -EINVAL Invalid input argument. | ||
165 | * Requires: | ||
166 | * Ensures: | ||
167 | * | ||
168 | */ | ||
169 | extern int cmm_get_info(struct cmm_object *hcmm_mgr, | ||
170 | struct cmm_info *cmm_info_obj); | ||
171 | |||
172 | /* | ||
173 | * ======== cmm_register_gppsm_seg ======== | ||
174 | * Purpose: | ||
175 | * Register a block of SM with the CMM. | ||
176 | * Parameters: | ||
177 | * hcmm_mgr: Handle to a Cmm Mgr. | ||
178 | * lpGPPBasePA: GPP Base Physical address. | ||
179 | * ul_size: Size in GPP bytes. | ||
180 | * dsp_addr_offset GPP PA to DSP PA Offset. | ||
181 | * c_factor: Add offset if CMM_ADDTODSPPA, sub if CMM_SUBFROMDSPPA. | ||
182 | * dw_dsp_base: DSP virtual base byte address. | ||
183 | * ul_dsp_size: Size of DSP segment in bytes. | ||
184 | * sgmt_id: Address to store segment Id. | ||
185 | * | ||
186 | * Returns: | ||
187 | * 0: Success. | ||
188 | * -EFAULT: Invalid hcmm_mgr handle. | ||
189 | * -EINVAL: Invalid input argument. | ||
190 | * -EPERM: Unable to register. | ||
191 | * - On success *sgmt_id is a valid SM segment ID. | ||
192 | * Requires: | ||
193 | * ul_size > 0 | ||
194 | * sgmt_id != NULL | ||
195 | * dw_gpp_base_pa != 0 | ||
196 | * c_factor = CMM_ADDTODSPPA || c_factor = CMM_SUBFROMDSPPA | ||
197 | * Ensures: | ||
198 | * | ||
199 | */ | ||
200 | extern int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr, | ||
201 | unsigned int dw_gpp_base_pa, | ||
202 | u32 ul_size, | ||
203 | u32 dsp_addr_offset, | ||
204 | s8 c_factor, | ||
205 | unsigned int dw_dsp_base, | ||
206 | u32 ul_dsp_size, | ||
207 | u32 *sgmt_id, u32 gpp_base_va); | ||
208 | |||
209 | /* | ||
210 | * ======== cmm_un_register_gppsm_seg ======== | ||
211 | * Purpose: | ||
212 | * Unregister the given memory segment that was previously registered | ||
213 | * by cmm_register_gppsm_seg. | ||
214 | * Parameters: | ||
215 | * hcmm_mgr: Handle to a Cmm Mgr. | ||
216 | * ul_seg_id Segment identifier returned by cmm_register_gppsm_seg. | ||
217 | * Returns: | ||
218 | * 0: Success. | ||
219 | * -EFAULT: Invalid handle. | ||
220 | * -EINVAL: Invalid ul_seg_id. | ||
221 | * -EPERM: Unable to unregister for unknown reason. | ||
222 | * Requires: | ||
223 | * Ensures: | ||
224 | * | ||
225 | */ | ||
226 | extern int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr, | ||
227 | u32 ul_seg_id); | ||
228 | |||
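As a hedged illustration of the register/unregister pairing above, assuming a valid hcmm_mgr (all addresses and sizes below are made-up example values, not taken from any board configuration):

	u32 seg_id;

	if (!cmm_register_gppsm_seg(hcmm_mgr,
				    0x87000000,        /* dw_gpp_base_pa */
				    0x100000,          /* ul_size: 1 MB */
				    0x10000000,        /* dsp_addr_offset */
				    CMM_ADDTODSPPA,    /* c_factor */
				    0x11000000,        /* dw_dsp_base */
				    0x100000,          /* ul_dsp_size */
				    &seg_id,
				    0))                /* gpp_base_va */
		cmm_un_register_gppsm_seg(hcmm_mgr, seg_id);
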
229 | /* | ||
230 | * ======== cmm_xlator_alloc_buf ======== | ||
231 | * Purpose: | ||
232 | * Allocate the specified SM buffer and create a local memory descriptor. | ||
233 | * Place the descriptor on the translator's HaQ (Host Alloc'd Queue). | ||
234 | * Parameters: | ||
235 | * xlator: Handle to a Xlator object. | ||
236 | * va_buf: Virtual address ptr(client context) | ||
237 | * pa_size: Size of SM memory to allocate. | ||
238 | * Returns: | ||
239 | * Ptr to valid physical address(Pa) of pa_size bytes, NULL if failed. | ||
240 | * Requires: | ||
241 | * va_buf != 0. | ||
242 | * pa_size != 0. | ||
243 | * Ensures: | ||
244 | * | ||
245 | */ | ||
246 | extern void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, | ||
247 | void *va_buf, u32 pa_size); | ||
248 | |||
249 | /* | ||
250 | * ======== cmm_xlator_create ======== | ||
251 | * Purpose: | ||
252 | * Create a translator(xlator) object used for process specific Va<->Pa | ||
253 | * address translation. Node messaging and streams use this to perform | ||
254 | * inter-processor(GPP<->DSP) zero-copy data transfer. | ||
255 | * Parameters: | ||
256 | * xlator: Address to place handle to a new Xlator handle. | ||
257 | * hcmm_mgr: Handle to Cmm Mgr associated with this translator. | ||
258 | * xlator_attrs: Translator attributes used for the client NODE or STREAM. | ||
259 | * Returns: | ||
260 | * 0: Success. | ||
261 | * -EINVAL: Bad input Attrs. | ||
262 | * -ENOMEM: Insufficient memory(local) for requested resources. | ||
263 | * Requires: | ||
264 | * xlator != NULL | ||
265 | * hcmm_mgr != NULL | ||
266 | * xlator_attrs != NULL | ||
267 | * Ensures: | ||
268 | * | ||
269 | */ | ||
270 | extern int cmm_xlator_create(struct cmm_xlatorobject **xlator, | ||
271 | struct cmm_object *hcmm_mgr, | ||
272 | struct cmm_xlatorattrs *xlator_attrs); | ||
273 | |||
274 | /* | ||
275 | * ======== cmm_xlator_free_buf ======== | ||
276 | * Purpose: | ||
277 | * Free SM buffer and descriptor. | ||
278 | * Does not free client process VM. | ||
279 | * Parameters: | ||
280 | * xlator: handle to translator. | ||
281 | * buf_va: Virtual address of the SM buffer (PA) to free. | ||
282 | * Returns: | ||
283 | * 0: Success. | ||
284 | * -EFAULT: Bad translator handle. | ||
285 | * Requires: | ||
286 | * Ensures: | ||
287 | * | ||
288 | */ | ||
289 | extern int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, | ||
290 | void *buf_va); | ||
291 | |||
292 | /* | ||
293 | * ======== cmm_xlator_info ======== | ||
294 | * Purpose: | ||
295 | * Set/Get process specific "translator" address info. | ||
296 | * This is used to perform fast virtual address translation | ||
297 | * for shared memory buffers between the GPP and DSP. | ||
298 | * Parameters: | ||
299 | * xlator: handle to translator. | ||
300 | * paddr: Virtual base address of segment. | ||
301 | * ul_size: Size in bytes. | ||
302 | * segm_id: Segment identifier of SM segment(s) | ||
303 | * set_info: Set xlator fields if TRUE, else return base address. | ||
304 | * Returns: | ||
305 | * 0: Success. | ||
306 | * -EFAULT: Bad translator handle. | ||
307 | * Requires: | ||
308 | * (paddr != NULL) | ||
309 | * (ul_size > 0) | ||
310 | * Ensures: | ||
311 | * | ||
312 | */ | ||
313 | extern int cmm_xlator_info(struct cmm_xlatorobject *xlator, | ||
314 | u8 **paddr, | ||
315 | u32 ul_size, u32 segm_id, bool set_info); | ||
316 | |||
317 | /* | ||
318 | * ======== cmm_xlator_translate ======== | ||
319 | * Purpose: | ||
320 | * Perform address translation VA<->PA for the specified stream or | ||
321 | * message shared memory buffer. | ||
322 | * Parameters: | ||
323 | * xlator: handle to translator. | ||
324 | * paddr: Address of buffer to translate. | ||
325 | * xtype: Type of address xlation: CMM_PA2VA or CMM_VA2PA. | ||
326 | * Returns: | ||
327 | * Valid address on success, else NULL. | ||
328 | * Requires: | ||
329 | * paddr != NULL | ||
330 | * (xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA) | ||
331 | * Ensures: | ||
332 | * | ||
333 | */ | ||
334 | extern void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, | ||
335 | void *paddr, enum cmm_xlatetype xtype); | ||
336 | |||
337 | #endif /* CMM_ */ | ||
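Taken together, the translator calls above follow a create/alloc/translate/free pattern. A rough sketch, assuming a valid hcmm_mgr and a client-reserved VM base, with illustrative attribute values and the final translator teardown omitted:

	struct cmm_xlatorobject *xlator;
	struct cmm_xlatorattrs xlator_attrs = {
		.seg_id = 1,
		.dsp_bufs = 4,
		.dsp_buf_size = 0x1000,
		.vm_base = client_vm_base,	/* VM reserved by the client (assumed) */
		.vm_size = 4 * 0x1000,
	};
	void *va = NULL, *pa;

	if (!cmm_xlator_create(&xlator, hcmm_mgr, &xlator_attrs)) {
		pa = cmm_xlator_alloc_buf(xlator, &va, 0x1000);
		if (pa) {
			/* map the returned PA back to a client VA, then free */
			va = cmm_xlator_translate(xlator, pa, CMM_PA2VA);
			if (va)
				cmm_xlator_free_buf(xlator, va);
		}
	}
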
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h deleted file mode 100644 index a264fa69a4fc..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/cmmdefs.h +++ /dev/null | |||
@@ -1,104 +0,0 @@ | |||
1 | /* | ||
2 | * cmmdefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global MEM constants and types. | ||
7 | * | ||
8 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef CMMDEFS_ | ||
20 | #define CMMDEFS_ | ||
21 | |||
22 | |||
23 | /* Cmm attributes used in cmm_create() */ | ||
24 | struct cmm_mgrattrs { | ||
25 | /* Minimum SM allocation; default 32 bytes. */ | ||
26 | u32 min_block_size; | ||
27 | }; | ||
28 | |||
29 | /* Attributes for CMM_AllocBuf() & CMM_AllocDesc() */ | ||
30 | struct cmm_attrs { | ||
31 | u32 seg_id; /* 1,2... are SM segments. 0 is not. */ | ||
32 | u32 alignment; /* 0,1,2,4....min_block_size */ | ||
33 | }; | ||
34 | |||
35 | /* | ||
36 | * DSPPa to GPPPa Conversion Factor. | ||
37 | * | ||
38 | * For typical platforms: | ||
39 | * converted Address = PaDSP + ( c_factor * addressToConvert). | ||
40 | */ | ||
41 | #define CMM_SUBFROMDSPPA -1 | ||
42 | #define CMM_ADDTODSPPA 1 | ||
43 | |||
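Read concretely with made-up numbers: if a segment was registered with dsp_addr_offset = 0x10000000 and c_factor = CMM_ADDTODSPPA, a GPP PA of 0x87000000 corresponds to a DSP PA of 0x97000000 (the offset is added); with CMM_SUBFROMDSPPA the same offset would be subtracted instead.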
44 | #define CMM_ALLSEGMENTS 0xFFFFFF /* All SegIds */ | ||
45 | #define CMM_MAXGPPSEGS 1 /* Maximum # of SM segs */ | ||
46 | |||
47 | /* | ||
48 | * SMSEGs are SM segments the DSP allocates from. | ||
49 | * | ||
50 | * This info is used by the GPP to xlate DSP allocated PAs. | ||
51 | */ | ||
52 | |||
53 | struct cmm_seginfo { | ||
54 | u32 seg_base_pa; /* Start Phys address of SM segment */ | ||
55 | /* Total size in bytes of segment: DSP+GPP */ | ||
56 | u32 total_seg_size; | ||
57 | u32 gpp_base_pa; /* Start Phys addr of Gpp SM seg */ | ||
58 | u32 gpp_size; /* Size of Gpp SM seg in bytes */ | ||
59 | u32 dsp_base_va; /* DSP virt base byte address */ | ||
60 | u32 dsp_size; /* DSP seg size in bytes */ | ||
61 | /* # of current GPP allocations from this segment */ | ||
62 | u32 in_use_cnt; | ||
63 | u32 seg_base_va; /* Start Virt address of SM seg */ | ||
64 | |||
65 | }; | ||
66 | |||
67 | /* CMM useful information */ | ||
68 | struct cmm_info { | ||
69 | /* # of SM segments registered with this Cmm. */ | ||
70 | u32 num_gppsm_segs; | ||
71 | /* Total # of allocations outstanding for CMM */ | ||
72 | u32 total_in_use_cnt; | ||
73 | /* Min SM block size allocation from cmm_create() */ | ||
74 | u32 min_block_size; | ||
75 | /* Info per registered SM segment. */ | ||
76 | struct cmm_seginfo seg_info[CMM_MAXGPPSEGS]; | ||
77 | }; | ||
78 | |||
79 | /* XlatorCreate attributes */ | ||
80 | struct cmm_xlatorattrs { | ||
81 | u32 seg_id; /* segment Id used for SM allocations */ | ||
82 | u32 dsp_bufs; /* # of DSP-side bufs */ | ||
83 | u32 dsp_buf_size; /* size of DSP-side bufs in GPP bytes */ | ||
84 | /* Vm base address alloc'd in client process context */ | ||
85 | void *vm_base; | ||
86 | /* vm_size must be >= (dwMaxNumBufs * dwMaxSize) */ | ||
87 | u32 vm_size; | ||
88 | }; | ||
89 | |||
90 | /* | ||
91 | * Cmm translation types. Use to map SM addresses to process context. | ||
92 | */ | ||
93 | enum cmm_xlatetype { | ||
94 | CMM_VA2PA = 0, /* Virtual to GPP physical address xlation */ | ||
95 | CMM_PA2VA = 1, /* GPP Physical to virtual */ | ||
96 | CMM_VA2DSPPA = 2, /* Va to DSP Pa */ | ||
97 | CMM_PA2DSPPA = 3, /* GPP Pa to DSP Pa */ | ||
98 | CMM_DSPPA2PA = 4, /* DSP Pa to GPP Pa */ | ||
99 | }; | ||
100 | |||
101 | struct cmm_object; | ||
102 | struct cmm_xlatorobject; | ||
103 | |||
104 | #endif /* CMMDEFS_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cod.h b/drivers/staging/tidspbridge/include/dspbridge/cod.h deleted file mode 100644 index ba2005d02422..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/cod.h +++ /dev/null | |||
@@ -1,329 +0,0 @@ | |||
1 | /* | ||
2 | * cod.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Code management module for DSPs. This module provides an | ||
7 | * interface for loading both static and dynamic code objects onto DSP | ||
8 | * systems. | ||
9 | * | ||
10 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
11 | * | ||
12 | * This package is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | * | ||
16 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
17 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
18 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
19 | */ | ||
20 | |||
21 | #ifndef COD_ | ||
22 | #define COD_ | ||
23 | |||
24 | #include <dspbridge/dblldefs.h> | ||
25 | |||
26 | #define COD_MAXPATHLENGTH 255 | ||
27 | #define COD_TRACEBEG "SYS_PUTCBEG" | ||
28 | #define COD_TRACEEND "SYS_PUTCEND" | ||
29 | #define COD_TRACECURPOS "BRIDGE_SYS_PUTC_current" | ||
30 | |||
31 | #define COD_NOLOAD DBLL_NOLOAD | ||
32 | #define COD_SYMB DBLL_SYMB | ||
33 | |||
34 | /* COD code manager handle */ | ||
35 | struct cod_manager; | ||
36 | |||
37 | /* COD library handle */ | ||
38 | struct cod_libraryobj; | ||
39 | |||
40 | /* | ||
41 | * Function prototypes for writing memory to a DSP system, allocating | ||
42 | * and freeing DSP memory. | ||
43 | */ | ||
44 | typedef u32(*cod_writefxn) (void *priv_ref, u32 dsp_add, | ||
45 | void *pbuf, u32 ul_num_bytes, u32 mem_space); | ||
46 | |||
47 | /* | ||
48 | * ======== cod_close ======== | ||
49 | * Purpose: | ||
50 | * Close a library opened with cod_open(). | ||
51 | * Parameters: | ||
52 | * lib - Library handle returned by cod_open(). | ||
53 | * Returns: | ||
54 | * None. | ||
55 | * Requires: | ||
56 | * COD module initialized. | ||
57 | * valid lib. | ||
58 | * Ensures: | ||
59 | * | ||
60 | */ | ||
61 | extern void cod_close(struct cod_libraryobj *lib); | ||
62 | |||
63 | /* | ||
64 | * ======== cod_create ======== | ||
65 | * Purpose: | ||
66 | * Create an object to manage code on a DSP system. This object can be | ||
67 | * used to load an initial program image with arguments that can later | ||
68 | * be expanded with dynamically loaded object files. | ||
69 | * Symbol table information is managed by this object and can be retrieved | ||
70 | * using the cod_get_sym_value() function. | ||
71 | * Parameters: | ||
72 | * mgr: location to store the created manager object | ||
73 | * str_zl_file: ZL DLL filename, of length < COD_MAXPATHLENGTH. | ||
74 | * Returns: | ||
75 | * 0: Success. | ||
76 | * -ESPIPE: ZL_Create failed. | ||
77 | * -ENOSYS: attrs was not NULL. We don't yet support | ||
78 | * non default values of attrs. | ||
79 | * Requires: | ||
80 | * COD module initialized. | ||
81 | * str_zl_file != NULL | ||
82 | * Ensures: | ||
83 | */ | ||
84 | extern int cod_create(struct cod_manager **mgr, | ||
85 | char *str_zl_file); | ||
86 | |||
87 | /* | ||
88 | * ======== cod_delete ======== | ||
89 | * Purpose: | ||
90 | * Delete a code manager object. | ||
91 | * Parameters: | ||
92 | * cod_mgr_obj: handle of manager to be deleted | ||
93 | * Returns: | ||
94 | * None. | ||
95 | * Requires: | ||
96 | * COD module initialized. | ||
97 | * valid cod_mgr_obj. | ||
98 | * Ensures: | ||
99 | */ | ||
100 | extern void cod_delete(struct cod_manager *cod_mgr_obj); | ||
101 | |||
102 | /* | ||
103 | * ======== cod_get_base_lib ======== | ||
104 | * Purpose: | ||
105 | * Get handle to the base image DBL library. | ||
106 | * Parameters: | ||
107 | * cod_mgr_obj: handle of the code manager | ||
108 | * plib: location to store library handle on output. | ||
109 | * Returns: | ||
110 | * 0: Success. | ||
111 | * Requires: | ||
112 | * COD module initialized. | ||
113 | * valid cod_mgr_obj. | ||
114 | * plib != NULL. | ||
115 | * Ensures: | ||
116 | */ | ||
117 | extern int cod_get_base_lib(struct cod_manager *cod_mgr_obj, | ||
118 | struct dbll_library_obj **plib); | ||
119 | |||
120 | /* | ||
121 | * ======== cod_get_base_name ======== | ||
122 | * Purpose: | ||
123 | * Get the name of the base image DBL library. | ||
124 | * Parameters: | ||
125 | * cod_mgr_obj: handle of the code manager | ||
126 | * sz_name: location to store library name on output. | ||
127 | * usize: size of name buffer. | ||
128 | * Returns: | ||
129 | * 0: Success. | ||
130 | * -EPERM: Buffer too small. | ||
131 | * Requires: | ||
132 | * COD module initialized. | ||
133 | * valid cod_mgr_obj. | ||
134 | * sz_name != NULL. | ||
135 | * Ensures: | ||
136 | */ | ||
137 | extern int cod_get_base_name(struct cod_manager *cod_mgr_obj, | ||
138 | char *sz_name, u32 usize); | ||
139 | |||
140 | /* | ||
141 | * ======== cod_get_entry ======== | ||
142 | * Purpose: | ||
143 | * Retrieve the entry point of a loaded DSP program image | ||
144 | * Parameters: | ||
145 | * cod_mgr_obj: handle of the code manager | ||
146 | * entry_pt: pointer to location for entry point | ||
147 | * Returns: | ||
148 | * 0: Success. | ||
149 | * Requires: | ||
150 | * COD module initialized. | ||
151 | * valid cod_mgr_obj. | ||
152 | * entry_pt != NULL. | ||
153 | * Ensures: | ||
154 | */ | ||
155 | extern int cod_get_entry(struct cod_manager *cod_mgr_obj, | ||
156 | u32 *entry_pt); | ||
157 | |||
158 | /* | ||
159 | * ======== cod_get_loader ======== | ||
160 | * Purpose: | ||
161 | * Get handle to the DBL loader. | ||
162 | * Parameters: | ||
163 | * cod_mgr_obj: handle of the code manager | ||
164 | * loader: location to store loader handle on output. | ||
165 | * Returns: | ||
166 | * 0: Success. | ||
167 | * Requires: | ||
168 | * COD module initialized. | ||
169 | * valid cod_mgr_obj. | ||
170 | * loader != NULL. | ||
171 | * Ensures: | ||
172 | */ | ||
173 | extern int cod_get_loader(struct cod_manager *cod_mgr_obj, | ||
174 | struct dbll_tar_obj **loader); | ||
175 | |||
176 | /* | ||
177 | * ======== cod_get_section ======== | ||
178 | * Purpose: | ||
179 | * Retrieve the starting address and length of a section in the COFF file | ||
180 | * given the section name. | ||
181 | * Parameters: | ||
182 | * lib: Library handle returned from cod_open(). | ||
183 | * str_sect: name of the section, with or without leading "." | ||
184 | * addr: Location to store address. | ||
185 | * len: Location to store length. | ||
186 | * Returns: | ||
187 | * 0: Success | ||
188 | * -ESPIPE: Symbols could not be found or have not been loaded onto | ||
189 | * the board. | ||
190 | * Requires: | ||
191 | * COD module initialized. | ||
192 | * valid cod_mgr_obj. | ||
193 | * str_sect != NULL; | ||
194 | * addr != NULL; | ||
195 | * len != NULL; | ||
196 | * Ensures: | ||
197 | * 0: *addr and *len contain the address and length of the | ||
198 | * section. | ||
199 | * else: *addr == 0 and *len == 0; | ||
200 | * | ||
201 | */ | ||
202 | extern int cod_get_section(struct cod_libraryobj *lib, | ||
203 | char *str_sect, | ||
204 | u32 *addr, u32 *len); | ||
205 | |||
206 | /* | ||
207 | * ======== cod_get_sym_value ======== | ||
208 | * Purpose: | ||
209 | * Retrieve the value for the specified symbol. The symbol is first | ||
210 | * searched for literally and then, if not found, searched for as a | ||
211 | * C symbol. | ||
212 | * Parameters: | ||
213 | * cod_mgr_obj: handle of the code manager | ||
214 | * str_sym: name of the symbol | ||
215 | * pul_value: location to store the symbol value on output | ||
216 | * Returns: | ||
217 | * 0: Success. | ||
218 | * -ESPIPE: Symbols could not be found or have not been loaded onto | ||
219 | * the board. | ||
220 | * Requires: | ||
221 | * COD module initialized. | ||
222 | * Valid cod_mgr_obj. | ||
223 | * str_sym != NULL. | ||
224 | * pul_value != NULL. | ||
225 | * Ensures: | ||
226 | */ | ||
227 | extern int cod_get_sym_value(struct cod_manager *cod_mgr_obj, | ||
228 | char *str_sym, u32 * pul_value); | ||
229 | |||
230 | /* | ||
231 | * ======== cod_load_base ======== | ||
232 | * Purpose: | ||
233 | * Load the initial program image, optionally with command-line arguments, | ||
234 | * on the DSP system managed by the supplied handle. The program to be | ||
235 | * loaded must be the first element of the args array and must be a fully | ||
236 | * qualified pathname. | ||
237 | * Parameters: | ||
238 | * hmgr: manager to load the code with | ||
239 | * num_argc: number of arguments in the args array | ||
240 | * args: array of strings for arguments to DSP program | ||
241 | * write_fxn: board-specific function to write data to DSP system | ||
242 | * arb: arbitrary pointer to be passed as first arg to write_fxn | ||
243 | * envp: array of environment strings for DSP exec. | ||
244 | * Returns: | ||
245 | * 0: Success. | ||
246 | * -EBADF: Failed to open target code. | ||
247 | * Requires: | ||
248 | * COD module initialized. | ||
249 | * hmgr is valid. | ||
250 | * num_argc > 0. | ||
251 | * args != NULL. | ||
252 | * args[0] != NULL. | ||
253 | * pfn_write != NULL. | ||
254 | * Ensures: | ||
255 | */ | ||
256 | extern int cod_load_base(struct cod_manager *cod_mgr_obj, | ||
257 | u32 num_argc, char *args[], | ||
258 | cod_writefxn pfn_write, void *arb, | ||
259 | char *envp[]); | ||
260 | |||
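A hedged sketch of a caller driving the base-image load; the write callback body, the device-context pointer and the image path are placeholders rather than the driver's actual wiring:

	static u32 my_write(void *priv_ref, u32 dsp_add, void *pbuf,
			    u32 ul_num_bytes, u32 mem_space)
	{
		/* copy ul_num_bytes from pbuf into DSP memory at dsp_add */
		return ul_num_bytes;
	}

	static int load_image(void *dev_ctx)	/* hypothetical helper */
	{
		struct cod_manager *cod_mgr;
		char *argv[] = { "/lib/dsp/baseimage.dof" };	/* example path */
		int status;

		status = cod_create(&cod_mgr, argv[0]);
		if (!status)
			status = cod_load_base(cod_mgr, 1, argv, my_write,
					       dev_ctx, NULL);
		return status;
	}
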
261 | /* | ||
262 | * ======== cod_open ======== | ||
263 | * Purpose: | ||
264 | * Open a library for reading sections. Does not load or set the base. | ||
265 | * Parameters: | ||
266 | * hmgr: manager to load the code with | ||
267 | * sz_coff_path: Coff file to open. | ||
268 | * flags: COD_NOLOAD (don't load symbols) or COD_SYMB (load | ||
269 | * symbols). | ||
270 | * lib_obj: Handle returned that can be used in calls to cod_close | ||
271 | * and cod_get_section. | ||
272 | * Returns: | ||
273 | * 0: Success. | ||
274 | * -EBADF: Failed to open target code. | ||
275 | * Requires: | ||
276 | * COD module initialized. | ||
277 | * hmgr is valid. | ||
278 | * flags == COD_NOLOAD || flags == COD_SYMB. | ||
279 | * sz_coff_path != NULL. | ||
280 | * Ensures: | ||
281 | */ | ||
282 | extern int cod_open(struct cod_manager *hmgr, | ||
283 | char *sz_coff_path, | ||
284 | u32 flags, struct cod_libraryobj **lib_obj); | ||
285 | |||
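For illustration, opening a library just to inspect one section might look roughly like this (cod_mgr is an existing code manager handle and the file name is a placeholder):

	struct cod_libraryobj *lib;
	u32 addr = 0, len = 0;

	if (!cod_open(cod_mgr, "/lib/dsp/mynode.dll64P", COD_SYMB, &lib)) {
		if (!cod_get_section(lib, ".dcd_register", &addr, &len))
			pr_debug("section at 0x%x, %u bytes\n", addr, len);
		cod_close(lib);
	}
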
286 | /* | ||
287 | * ======== cod_open_base ======== | ||
288 | * Purpose: | ||
289 | * Open base image for reading sections. Does not load the base. | ||
290 | * Parameters: | ||
291 | * hmgr: manager to load the code with | ||
292 | * sz_coff_path: Coff file to open. | ||
293 | * flags: Specifies whether to load symbols. | ||
294 | * Returns: | ||
295 | * 0: Success. | ||
296 | * -EBADF: Failed to open target code. | ||
297 | * Requires: | ||
298 | * COD module initialized. | ||
299 | * hmgr is valid. | ||
300 | * sz_coff_path != NULL. | ||
301 | * Ensures: | ||
302 | */ | ||
303 | extern int cod_open_base(struct cod_manager *hmgr, char *sz_coff_path, | ||
304 | dbll_flags flags); | ||
305 | |||
306 | /* | ||
307 | * ======== cod_read_section ======== | ||
308 | * Purpose: | ||
309 | * Retrieve the content of a code section given the section name. | ||
310 | * Parameters: | ||
311 | * cod_mgr_obj - manager in which to search for the symbol | ||
312 | * str_sect - name of the section, with or without leading "." | ||
313 | * str_content - buffer to store content of the section. | ||
314 | * Returns: | ||
315 | * 0: on success, error code on failure | ||
316 | * -ESPIPE: Symbols have not been loaded onto the board. | ||
317 | * Requires: | ||
318 | * COD module initialized. | ||
319 | * valid cod_mgr_obj. | ||
320 | * str_sect != NULL; | ||
321 | * str_content != NULL; | ||
322 | * Ensures: | ||
323 | * 0: *str_content stores the content of the named section. | ||
324 | */ | ||
325 | extern int cod_read_section(struct cod_libraryobj *lib, | ||
326 | char *str_sect, | ||
327 | char *str_content, u32 content_size); | ||
328 | |||
329 | #endif /* COD_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbdcd.h b/drivers/staging/tidspbridge/include/dspbridge/dbdcd.h deleted file mode 100644 index 7cc3e12686e8..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dbdcd.h +++ /dev/null | |||
@@ -1,358 +0,0 @@ | |||
1 | /* | ||
2 | * dbdcd.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Defines the DSP/BIOS Bridge Configuration Database (DCD) API. | ||
7 | * | ||
8 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef DBDCD_ | ||
20 | #define DBDCD_ | ||
21 | |||
22 | #include <dspbridge/dbdcddef.h> | ||
23 | #include <dspbridge/host_os.h> | ||
24 | #include <dspbridge/nldrdefs.h> | ||
25 | |||
26 | /* | ||
27 | * ======== dcd_auto_register ======== | ||
28 | * Purpose: | ||
29 | * This function automatically registers DCD objects specified in a | ||
30 | * special COFF section called ".dcd_register" | ||
31 | * Parameters: | ||
32 | * hdcd_mgr: A DCD manager handle. | ||
33 | * sz_coff_path: Pointer to name of COFF file containing DCD | ||
34 | * objects to be registered. | ||
35 | * Returns: | ||
36 | * 0: Success. | ||
37 | * -EACCES: Unable to find auto-registration/read/load section. | ||
38 | * -EFAULT: Invalid DCD_HMANAGER handle. | ||
39 | * Requires: | ||
40 | * DCD initialized. | ||
41 | * Ensures: | ||
42 | * Note: | ||
43 | * Due to the DCD database construction, it is essential for a DCD-enabled | ||
44 | * COFF file to contain the right COFF sections, especially | ||
45 | * ".dcd_register", which is used for auto registration. | ||
46 | */ | ||
47 | extern int dcd_auto_register(struct dcd_manager *hdcd_mgr, | ||
48 | char *sz_coff_path); | ||
49 | |||
50 | /* | ||
51 | * ======== dcd_auto_unregister ======== | ||
52 | * Purpose: | ||
53 | * This function automatically unregisters DCD objects specified in a | ||
54 | * special COFF section called ".dcd_register" | ||
55 | * Parameters: | ||
56 | * hdcd_mgr: A DCD manager handle. | ||
57 | * sz_coff_path: Pointer to name of COFF file containing | ||
58 | * DCD objects to be unregistered. | ||
59 | * Returns: | ||
60 | * 0: Success. | ||
61 | * -EACCES: Unable to find auto-registration/read/load section. | ||
62 | * -EFAULT: Invalid DCD_HMANAGER handle. | ||
63 | * Requires: | ||
64 | * DCD initialized. | ||
65 | * Ensures: | ||
66 | * Note: | ||
67 | * Due to the DCD database construction, it is essential for a DCD-enabled | ||
68 | * COFF file to contain the right COFF sections, especially | ||
69 | * ".dcd_register", which is used for auto unregistration. | ||
70 | */ | ||
71 | extern int dcd_auto_unregister(struct dcd_manager *hdcd_mgr, | ||
72 | char *sz_coff_path); | ||
73 | |||
74 | /* | ||
75 | * ======== dcd_create_manager ======== | ||
76 | * Purpose: | ||
77 | * This function creates a DCD module manager. | ||
78 | * Parameters: | ||
79 | * sz_zl_dll_name: Pointer to a DLL name string. | ||
80 | * dcd_mgr: A pointer to a DCD manager handle. | ||
81 | * Returns: | ||
82 | * 0: Success. | ||
83 | * -ENOMEM: Unable to allocate memory for DCD manager handle. | ||
84 | * -EPERM: General failure. | ||
85 | * Requires: | ||
86 | * DCD initialized. | ||
87 | * sz_zl_dll_name is non-NULL. | ||
88 | * dcd_mgr is non-NULL. | ||
89 | * Ensures: | ||
90 | * A DCD manager handle is created. | ||
91 | */ | ||
92 | extern int dcd_create_manager(char *sz_zl_dll_name, | ||
93 | struct dcd_manager **dcd_mgr); | ||
94 | |||
95 | /* | ||
96 | * ======== dcd_destroy_manager ======== | ||
97 | * Purpose: | ||
98 | * This function destroys a DCD module manager. | ||
99 | * Parameters: | ||
100 | * hdcd_mgr: A DCD manager handle. | ||
101 | * Returns: | ||
102 | * 0: Success. | ||
103 | * -EFAULT: Invalid DCD manager handle. | ||
104 | * Requires: | ||
105 | * DCD initialized. | ||
106 | * Ensures: | ||
107 | */ | ||
108 | extern int dcd_destroy_manager(struct dcd_manager *hdcd_mgr); | ||
109 | |||
110 | /* | ||
111 | * ======== dcd_enumerate_object ======== | ||
112 | * Purpose: | ||
113 | * This function enumerates currently visible DSP/BIOS Bridge objects | ||
114 | * and returns the UUID and type of each enumerated object. | ||
115 | * Parameters: | ||
116 | * index: The object enumeration index. | ||
117 | * obj_type: Type of object to enumerate. | ||
118 | * uuid_obj: Pointer to a dsp_uuid object. | ||
119 | * Returns: | ||
120 | * 0: Success. | ||
121 | * -EPERM: Unable to enumerate through the DCD database. | ||
122 | * ENODATA: Enumeration completed. This is not an error code. | ||
123 | * Requires: | ||
124 | * DCD initialized. | ||
125 | * uuid_obj is a valid pointer. | ||
126 | * Ensures: | ||
127 | * Details: | ||
128 | * This function can be used in conjunction with dcd_get_object_def to | ||
129 | * retrieve object properties. | ||
130 | */ | ||
131 | extern int dcd_enumerate_object(s32 index, | ||
132 | enum dsp_dcdobjtype obj_type, | ||
133 | struct dsp_uuid *uuid_obj); | ||
134 | |||
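A sketch of the intended enumeration pattern (node objects are chosen arbitrarily here); the positive ENODATA return simply ends the walk:

	struct dsp_uuid uuid;
	s32 i = 0;

	while (!dcd_enumerate_object(i++, DSP_DCDNODETYPE, &uuid)) {
		/* uuid identifies one registered node; its properties can
		 * then be fetched with dcd_get_object_def() */
	}
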
135 | /* | ||
136 | * ======== dcd_exit ======== | ||
137 | * Purpose: | ||
138 | * This function cleans up the DCD module. | ||
139 | * Parameters: | ||
140 | * Returns: | ||
141 | * Requires: | ||
142 | * DCD initialized. | ||
143 | * Ensures: | ||
144 | */ | ||
145 | extern void dcd_exit(void); | ||
146 | |||
147 | /* | ||
148 | * ======== dcd_get_dep_libs ======== | ||
149 | * Purpose: | ||
150 | * Given the uuid of a library and size of array of uuids, this function | ||
151 | * fills the array with the uuids of all dependent libraries of the input | ||
152 | * library. | ||
153 | * Parameters: | ||
154 | * hdcd_mgr: A DCD manager handle. | ||
155 | * uuid_obj: Pointer to a dsp_uuid for a library. | ||
156 | * num_libs: Size of uuid array (number of library uuids). | ||
157 | * dep_lib_uuids: Array of dependent library uuids to be filled in. | ||
158 | * prstnt_dep_libs: Array indicating if corresponding lib is persistent. | ||
159 | * phase: phase to obtain correct input library | ||
160 | * Returns: | ||
161 | * 0: Success. | ||
162 | * -ENOMEM: Memory allocation failure. | ||
163 | * -EACCES: Failure to read section containing library info. | ||
164 | * -EPERM: General failure. | ||
165 | * Requires: | ||
166 | * DCD initialized. | ||
167 | * Valid hdcd_mgr. | ||
168 | * uuid_obj != NULL | ||
169 | * dep_lib_uuids != NULL. | ||
170 | * Ensures: | ||
171 | */ | ||
172 | extern int dcd_get_dep_libs(struct dcd_manager *hdcd_mgr, | ||
173 | struct dsp_uuid *uuid_obj, | ||
174 | u16 num_libs, | ||
175 | struct dsp_uuid *dep_lib_uuids, | ||
176 | bool *prstnt_dep_libs, | ||
177 | enum nldr_phase phase); | ||
178 | |||
179 | /* | ||
180 | * ======== dcd_get_num_dep_libs ======== | ||
181 | * Purpose: | ||
182 | * Given the uuid of a library, determine its number of dependent | ||
183 | * libraries. | ||
184 | * Parameters: | ||
185 | * hdcd_mgr: A DCD manager handle. | ||
186 | * uuid_obj: Pointer to a dsp_uuid for a library. | ||
187 | * num_libs: Size of uuid array (number of library uuids). | ||
188 | * num_pers_libs: number of persistent dependent libraries. | ||
189 | * phase: Phase to obtain correct input library | ||
190 | * Returns: | ||
191 | * 0: Success. | ||
192 | * -ENOMEM: Memory allocation failure. | ||
193 | * -EACCES: Failure to read section containing library info. | ||
194 | * -EPERM: General failure. | ||
195 | * Requires: | ||
196 | * DCD initialized. | ||
197 | * Valid hdcd_mgr. | ||
198 | * uuid_obj != NULL | ||
199 | * num_libs != NULL. | ||
200 | * Ensures: | ||
201 | */ | ||
202 | extern int dcd_get_num_dep_libs(struct dcd_manager *hdcd_mgr, | ||
203 | struct dsp_uuid *uuid_obj, | ||
204 | u16 *num_libs, | ||
205 | u16 *num_pers_libs, | ||
206 | enum nldr_phase phase); | ||
207 | |||
208 | /* | ||
209 | * ======== dcd_get_library_name ======== | ||
210 | * Purpose: | ||
211 | * This function returns the name of a (dynamic) library for a given | ||
212 | * UUID. | ||
213 | * Parameters: | ||
214 | * hdcd_mgr: A DCD manager handle. | ||
215 | * uuid_obj: Pointer to a dsp_uuid that represents a unique DSP/BIOS | ||
216 | * Bridge object. | ||
217 | * str_lib_name: Buffer to hold library name. | ||
218 | * buff_size: Contains buffer size. Set to string size on output. | ||
219 | * phase: Which phase to load | ||
220 | * phase_split: Are phases in multiple libraries | ||
221 | * Returns: | ||
222 | * 0: Success. | ||
223 | * -EPERM: General failure. | ||
224 | * Requires: | ||
225 | * DCD initialized. | ||
226 | * Valid hdcd_mgr. | ||
227 | * str_lib_name != NULL. | ||
228 | * uuid_obj != NULL | ||
229 | * buff_size != NULL. | ||
230 | * Ensures: | ||
231 | */ | ||
232 | extern int dcd_get_library_name(struct dcd_manager *hdcd_mgr, | ||
233 | struct dsp_uuid *uuid_obj, | ||
234 | char *str_lib_name, | ||
235 | u32 *buff_size, | ||
236 | enum nldr_phase phase, | ||
237 | bool *phase_split); | ||
238 | |||
239 | /* | ||
240 | * ======== dcd_get_object_def ======== | ||
241 | * Purpose: | ||
242 | * This function returns the properties/attributes of a DSP/BIOS Bridge | ||
243 | * object. | ||
244 | * Parameters: | ||
245 | * hdcd_mgr: A DCD manager handle. | ||
246 | * uuid_obj: Pointer to a dsp_uuid that represents a unique | ||
247 | * DSP/BIOS Bridge object. | ||
248 | * obj_type: The type of DSP/BIOS Bridge object to be | ||
249 | * referenced (node, processor, etc). | ||
250 | * obj_def: Pointer to an object definition structure. A | ||
251 | * union of various possible DCD object types. | ||
252 | * Returns: | ||
253 | * 0: Success. | ||
254 | * -EACCES: Unable to access/read/parse/load content of object code | ||
255 | * section. | ||
256 | * -EPERM: General failure. | ||
257 | * -EFAULT: Invalid DCD_HMANAGER handle. | ||
258 | * Requires: | ||
259 | * DCD initialized. | ||
260 | * obj_uuid is non-NULL. | ||
261 | * obj_def is non-NULL. | ||
262 | * Ensures: | ||
263 | */ | ||
264 | extern int dcd_get_object_def(struct dcd_manager *hdcd_mgr, | ||
265 | struct dsp_uuid *obj_uuid, | ||
266 | enum dsp_dcdobjtype obj_type, | ||
267 | struct dcd_genericobj *obj_def); | ||
268 | |||
269 | /* | ||
270 | * ======== dcd_get_objects ======== | ||
271 | * Purpose: | ||
272 | * This function finds all DCD objects specified in a special | ||
273 | * COFF section called ".dcd_register", and for each object, | ||
274 | * calls a "register" function. The "register" function may perform | ||
275 | * various actions, such as 1) register nodes in the node database, 2) | ||
276 | * unregister nodes from the node database, and 3) add overlay nodes. | ||
277 | * Parameters: | ||
278 | * hdcd_mgr: A DCD manager handle. | ||
279 | * sz_coff_path: Pointer to name of COFF file containing DCD | ||
280 | * objects. | ||
281 | * register_fxn: Callback fxn to be applied on each located | ||
282 | * DCD object. | ||
283 | * handle: Handle to pass to callback. | ||
284 | * Returns: | ||
285 | * 0: Success. | ||
286 | * -EACCES: Unable to access/read/parse/load content of object code | ||
287 | * section. | ||
288 | * -EFAULT: Invalid DCD_HMANAGER handle. | ||
289 | * Requires: | ||
290 | * DCD initialized. | ||
291 | * Ensures: | ||
292 | * Note: | ||
293 | * Due to the DCD database construction, it is essential for a DCD-enabled | ||
294 | * COFF file to contain the right COFF sections, especially | ||
295 | * ".dcd_register", which is used for auto registration. | ||
296 | */ | ||
297 | extern int dcd_get_objects(struct dcd_manager *hdcd_mgr, | ||
298 | char *sz_coff_path, | ||
299 | dcd_registerfxn register_fxn, void *handle); | ||
300 | |||
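The register_fxn argument has the dcd_registerfxn signature from dbdcddef.h; a hypothetical callback and call might look like this (hdcd_mgr, the COFF path and the my_list cookie are all placeholders):

	static int my_register_cb(struct dsp_uuid *uuid_obj,
				  enum dsp_dcdobjtype obj_type,
				  void *handle)
	{
		/* e.g. record (uuid_obj, obj_type) in a list owned by handle */
		return 0;
	}

	/* then, e.g.: */
	dcd_get_objects(hdcd_mgr, "/lib/dsp/baseimage.dof",
			my_register_cb, my_list);
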
301 | /* | ||
302 | * ======== dcd_init ======== | ||
303 | * Purpose: | ||
304 | * This function initializes DCD. | ||
305 | * Parameters: | ||
306 | * Returns: | ||
307 | * FALSE: Initialization failed. | ||
308 | * TRUE: Initialization succeeded. | ||
309 | * Requires: | ||
310 | * Ensures: | ||
311 | * DCD initialized. | ||
312 | */ | ||
313 | extern bool dcd_init(void); | ||
314 | |||
315 | /* | ||
316 | * ======== dcd_register_object ======== | ||
317 | * Purpose: | ||
318 | * This function registers a DSP/BIOS Bridge object in the DCD database. | ||
319 | * Parameters: | ||
320 | * uuid_obj: Pointer to a dsp_uuid that identifies a DSP/BIOS | ||
321 | * Bridge object. | ||
322 | * obj_type: Type of object. | ||
323 | * psz_path_name: Path to the object's COFF file. | ||
324 | * Returns: | ||
325 | * 0: Success. | ||
326 | * -EPERM: Failed to register object. | ||
327 | * Requires: | ||
328 | * DCD initialized. | ||
329 | * uuid_obj and psz_path_name are non-NULL values. | ||
330 | * obj_type is a valid type value. | ||
331 | * Ensures: | ||
332 | */ | ||
333 | extern int dcd_register_object(struct dsp_uuid *uuid_obj, | ||
334 | enum dsp_dcdobjtype obj_type, | ||
335 | char *psz_path_name); | ||
336 | |||
337 | /* | ||
338 | * ======== dcd_unregister_object ======== | ||
339 | * Purpose: | ||
340 | * This function de-registers a valid DSP/BIOS Bridge object from the DCD | ||
341 | * database. | ||
342 | * Parameters: | ||
343 | * uuid_obj: Pointer to a dsp_uuid that identifies a DSP/BIOS Bridge | ||
344 | * object. | ||
345 | * obj_type: Type of object. | ||
346 | * Returns: | ||
347 | * 0: Success. | ||
348 | * -EPERM: Unable to de-register the specified object. | ||
349 | * Requires: | ||
350 | * DCD initialized. | ||
351 | * uuid_obj is a non-NULL value. | ||
352 | * obj_type is a valid type value. | ||
353 | * Ensures: | ||
354 | */ | ||
355 | extern int dcd_unregister_object(struct dsp_uuid *uuid_obj, | ||
356 | enum dsp_dcdobjtype obj_type); | ||
357 | |||
358 | #endif /* DBDCD_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h b/drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h deleted file mode 100644 index bc201b329033..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dbdcddef.h +++ /dev/null | |||
@@ -1,78 +0,0 @@ | |||
1 | /* | ||
2 | * dbdcddef.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DCD (DSP/BIOS Bridge Configuration Database) constants and types. | ||
7 | * | ||
8 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef DBDCDDEF_ | ||
20 | #define DBDCDDEF_ | ||
21 | |||
22 | #include <dspbridge/dbdefs.h> | ||
23 | #include <dspbridge/mgrpriv.h> /* for mgr_processorextinfo */ | ||
24 | |||
25 | /* | ||
26 | * The following defines are critical elements for the DCD module: | ||
27 | * | ||
28 | * - DCD_REGKEY enables DCD functions to locate registered DCD objects. | ||
29 | * - DCD_REGISTER_SECTION identifies the COFF section where the UUID of | ||
30 | * registered DCD objects are stored. | ||
31 | */ | ||
32 | #define DCD_REGKEY "Software\\TexasInstruments\\DspBridge\\DCD" | ||
33 | #define DCD_REGISTER_SECTION ".dcd_register" | ||
34 | |||
35 | #define DCD_MAXPATHLENGTH 255 | ||
36 | |||
37 | /* DCD Manager Object */ | ||
38 | struct dcd_manager; | ||
39 | |||
40 | struct dcd_key_elem { | ||
41 | struct list_head link; /* Make it linked to a list */ | ||
42 | char name[DCD_MAXPATHLENGTH]; /* Name of a given value entry */ | ||
43 | char *path; /* Pointer to the actual data */ | ||
44 | }; | ||
45 | |||
46 | /* DCD Node Properties */ | ||
47 | struct dcd_nodeprops { | ||
48 | struct dsp_ndbprops ndb_props; | ||
49 | u32 msg_segid; | ||
50 | u32 msg_notify_type; | ||
51 | char *str_create_phase_fxn; | ||
52 | char *str_delete_phase_fxn; | ||
53 | char *str_execute_phase_fxn; | ||
54 | char *str_i_alg_name; | ||
55 | |||
56 | /* Dynamic load properties */ | ||
57 | u16 load_type; /* Static, dynamic, overlay */ | ||
58 | u32 data_mem_seg_mask; /* Data memory requirements */ | ||
59 | u32 code_mem_seg_mask; /* Code memory requirements */ | ||
60 | }; | ||
61 | |||
62 | /* DCD Generic Object Type */ | ||
63 | struct dcd_genericobj { | ||
64 | union dcd_obj { | ||
65 | struct dcd_nodeprops node_obj; /* node object. */ | ||
66 | /* processor object. */ | ||
67 | struct dsp_processorinfo proc_info; | ||
68 | /* extended proc object (private) */ | ||
69 | struct mgr_processorextinfo ext_proc_obj; | ||
70 | } obj_data; | ||
71 | }; | ||
72 | |||
73 | /* DCD Internal Callback Type */ | ||
74 | typedef int(*dcd_registerfxn) (struct dsp_uuid *uuid_obj, | ||
75 | enum dsp_dcdobjtype obj_type, | ||
76 | void *handle); | ||
77 | |||
78 | #endif /* DBDCDDEF_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dbdefs.h deleted file mode 100644 index c8f464505efc..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dbdefs.h +++ /dev/null | |||
@@ -1,488 +0,0 @@ | |||
1 | /* | ||
2 | * dbdefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global definitions and constants for DSP/BIOS Bridge. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef DBDEFS_ | ||
20 | #define DBDEFS_ | ||
21 | |||
22 | #include <linux/types.h> | ||
23 | |||
24 | #include <dspbridge/rms_sh.h> /* Types shared between GPP and DSP */ | ||
25 | |||
26 | #define PG_SIZE4K 4096 | ||
27 | #define PG_MASK(pg_size) (~((pg_size)-1)) | ||
28 | #define PG_ALIGN_LOW(addr, pg_size) ((addr) & PG_MASK(pg_size)) | ||
29 | #define PG_ALIGN_HIGH(addr, pg_size) (((addr)+(pg_size)-1) & PG_MASK(pg_size)) | ||
30 | |||
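A quick check of the alignment helpers with PG_SIZE4K:

	PG_ALIGN_LOW(0x12345, PG_SIZE4K)   /* == 0x12000, round down */
	PG_ALIGN_HIGH(0x12345, PG_SIZE4K)  /* == 0x13000, round up   */
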
31 | /* API return value and calling convention */ | ||
32 | #define DBAPI int | ||
33 | |||
34 | /* Maximum length of node name, used in dsp_ndbprops */ | ||
35 | #define DSP_MAXNAMELEN 32 | ||
36 | |||
37 | /* notify_type values for the RegisterNotify() functions. */ | ||
38 | #define DSP_SIGNALEVENT 0x00000001 | ||
39 | |||
40 | /* Types of events for processors */ | ||
41 | #define DSP_PROCESSORSTATECHANGE 0x00000001 | ||
42 | #define DSP_PROCESSORATTACH 0x00000002 | ||
43 | #define DSP_PROCESSORDETACH 0x00000004 | ||
44 | #define DSP_PROCESSORRESTART 0x00000008 | ||
45 | |||
46 | /* DSP exception events (DSP/BIOS and DSP MMU fault) */ | ||
47 | #define DSP_MMUFAULT 0x00000010 | ||
48 | #define DSP_SYSERROR 0x00000020 | ||
49 | #define DSP_EXCEPTIONABORT 0x00000300 | ||
50 | #define DSP_PWRERROR 0x00000080 | ||
51 | #define DSP_WDTOVERFLOW 0x00000040 | ||
52 | |||
53 | /* IVA exception events (IVA MMU fault) */ | ||
54 | #define IVA_MMUFAULT 0x00000040 | ||
55 | /* Types of events for nodes */ | ||
56 | #define DSP_NODESTATECHANGE 0x00000100 | ||
57 | #define DSP_NODEMESSAGEREADY 0x00000200 | ||
58 | |||
59 | /* Types of events for streams */ | ||
60 | #define DSP_STREAMDONE 0x00001000 | ||
61 | #define DSP_STREAMIOCOMPLETION 0x00002000 | ||
62 | |||
63 | /* Handle definition representing the GPP node in DSPNode_Connect() calls */ | ||
64 | #define DSP_HGPPNODE 0xFFFFFFFF | ||
65 | |||
66 | /* Node directions used in DSPNode_Connect() */ | ||
67 | #define DSP_TONODE 1 | ||
68 | #define DSP_FROMNODE 2 | ||
69 | |||
70 | /* Define Node Minimum and Maximum Priorities */ | ||
71 | #define DSP_NODE_MIN_PRIORITY 1 | ||
72 | #define DSP_NODE_MAX_PRIORITY 15 | ||
73 | |||
74 | /* msg_ctrl contains SM buffer description */ | ||
75 | #define DSP_RMSBUFDESC RMS_BUFDESC | ||
76 | |||
77 | /* Processor ID numbers */ | ||
78 | #define DSP_UNIT 0 | ||
79 | #define IVA_UNIT 1 | ||
80 | |||
81 | #define DSPWORD unsigned char | ||
82 | #define DSPWORDSIZE sizeof(DSPWORD) | ||
83 | |||
84 | #define MAX_PROFILES 16 | ||
85 | |||
86 | /* DSP chip type */ | ||
87 | #define DSPTYPE64 0x99 | ||
88 | |||
89 | /* Handy Macros */ | ||
90 | #define VALID_PROC_EVENT (DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH | \ | ||
91 | DSP_PROCESSORDETACH | DSP_PROCESSORRESTART | DSP_NODESTATECHANGE | \ | ||
92 | DSP_STREAMDONE | DSP_STREAMIOCOMPLETION | DSP_MMUFAULT | \ | ||
93 | DSP_SYSERROR | DSP_WDTOVERFLOW | DSP_PWRERROR) | ||
94 | |||
95 | static inline bool is_valid_proc_event(u32 x) | ||
96 | { | ||
97 | return (x == 0 || (x & VALID_PROC_EVENT && !(x & ~VALID_PROC_EVENT))); | ||
98 | } | ||
99 | |||
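So, for example:

	is_valid_proc_event(0)                            /* true */
	is_valid_proc_event(DSP_MMUFAULT | DSP_SYSERROR)  /* true */
	is_valid_proc_event(DSP_NODEMESSAGEREADY)         /* false: node event, not in the mask */
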
100 | /* The Node UUID structure */ | ||
101 | struct dsp_uuid { | ||
102 | u32 data1; | ||
103 | u16 data2; | ||
104 | u16 data3; | ||
105 | u8 data4; | ||
106 | u8 data5; | ||
107 | u8 data6[6]; | ||
108 | }; | ||
109 | |||
110 | /* DCD types */ | ||
111 | enum dsp_dcdobjtype { | ||
112 | DSP_DCDNODETYPE, | ||
113 | DSP_DCDPROCESSORTYPE, | ||
114 | DSP_DCDLIBRARYTYPE, | ||
115 | DSP_DCDCREATELIBTYPE, | ||
116 | DSP_DCDEXECUTELIBTYPE, | ||
117 | DSP_DCDDELETELIBTYPE, | ||
118 | /* DSP_DCDMAXOBJTYPE is meant to be the last DCD object type */ | ||
119 | DSP_DCDMAXOBJTYPE | ||
120 | }; | ||
121 | |||
122 | /* Processor states */ | ||
123 | enum dsp_procstate { | ||
124 | PROC_STOPPED, | ||
125 | PROC_LOADED, | ||
126 | PROC_RUNNING, | ||
127 | PROC_ERROR | ||
128 | }; | ||
129 | |||
130 | /* | ||
131 | * Node types: Message node, task node, xDAIS socket node, and | ||
132 | * device node. _NODE_GPP is used when defining a stream connection | ||
133 | * between a task or socket node and the GPP. | ||
134 | * | ||
135 | */ | ||
136 | enum node_type { | ||
137 | NODE_DEVICE, | ||
138 | NODE_TASK, | ||
139 | NODE_DAISSOCKET, | ||
140 | NODE_MESSAGE, | ||
141 | NODE_GPP | ||
142 | }; | ||
143 | |||
144 | /* | ||
145 | * ======== node_state ======== | ||
146 | * Internal node states. | ||
147 | */ | ||
148 | enum node_state { | ||
149 | NODE_ALLOCATED, | ||
150 | NODE_CREATED, | ||
151 | NODE_RUNNING, | ||
152 | NODE_PAUSED, | ||
153 | NODE_DONE, | ||
154 | NODE_CREATING, | ||
155 | NODE_STARTING, | ||
156 | NODE_PAUSING, | ||
157 | NODE_TERMINATING, | ||
158 | NODE_DELETING, | ||
159 | }; | ||
160 | |||
161 | /* Stream states */ | ||
162 | enum dsp_streamstate { | ||
163 | STREAM_IDLE, | ||
164 | STREAM_READY, | ||
165 | STREAM_PENDING, | ||
166 | STREAM_DONE | ||
167 | }; | ||
168 | |||
169 | /* Stream connect types */ | ||
170 | enum dsp_connecttype { | ||
171 | CONNECTTYPE_NODEOUTPUT, | ||
172 | CONNECTTYPE_GPPOUTPUT, | ||
173 | CONNECTTYPE_NODEINPUT, | ||
174 | CONNECTTYPE_GPPINPUT | ||
175 | }; | ||
176 | |||
177 | /* Stream mode types */ | ||
178 | enum dsp_strmmode { | ||
179 | STRMMODE_PROCCOPY, /* Processor(s) copy stream data payloads */ | ||
180 | STRMMODE_ZEROCOPY, /* Strm buffer ptrs swapped no data copied */ | ||
181 | STRMMODE_LDMA, /* Local DMA : OMAP's System-DMA device */ | ||
182 | STRMMODE_RDMA /* Remote DMA: OMAP's DSP-DMA device */ | ||
183 | }; | ||
184 | |||
185 | /* Resource Types */ | ||
186 | enum dsp_resourceinfotype { | ||
187 | DSP_RESOURCE_DYNDARAM = 0, | ||
188 | DSP_RESOURCE_DYNSARAM, | ||
189 | DSP_RESOURCE_DYNEXTERNAL, | ||
190 | DSP_RESOURCE_DYNSRAM, | ||
191 | DSP_RESOURCE_PROCLOAD | ||
192 | }; | ||
193 | |||
194 | /* Memory Segment Types */ | ||
195 | enum dsp_memtype { | ||
196 | DSP_DYNDARAM = 0, | ||
197 | DSP_DYNSARAM, | ||
198 | DSP_DYNEXTERNAL, | ||
199 | DSP_DYNSRAM | ||
200 | }; | ||
201 | |||
202 | /* Memory Flush Types */ | ||
203 | enum dsp_flushtype { | ||
204 | PROC_INVALIDATE_MEM = 0, | ||
205 | PROC_WRITEBACK_MEM, | ||
206 | PROC_WRITEBACK_INVALIDATE_MEM, | ||
207 | }; | ||
208 | |||
209 | /* Memory Segment Status Values */ | ||
210 | struct dsp_memstat { | ||
211 | u32 size; | ||
212 | u32 total_free_size; | ||
213 | u32 len_max_free_block; | ||
214 | u32 num_free_blocks; | ||
215 | u32 num_alloc_blocks; | ||
216 | }; | ||
217 | |||
218 | /* Processor Load information Values */ | ||
219 | struct dsp_procloadstat { | ||
220 | u32 curr_load; | ||
221 | u32 predicted_load; | ||
222 | u32 curr_dsp_freq; | ||
223 | u32 predicted_freq; | ||
224 | }; | ||
225 | |||
226 | /* Attributes for STRM connections between nodes */ | ||
227 | struct dsp_strmattr { | ||
228 | u32 seg_id; /* Memory segment on DSP to allocate buffers */ | ||
229 | u32 buf_size; /* Buffer size (DSP words) */ | ||
230 | u32 num_bufs; /* Number of buffers */ | ||
231 | u32 buf_alignment; /* Buffer alignment */ | ||
232 | u32 timeout; /* Timeout for blocking STRM calls */ | ||
233 | enum dsp_strmmode strm_mode; /* mode of stream when opened */ | ||
234 | /* DMA chnl id if dsp_strmmode is LDMA or RDMA */ | ||
235 | u32 dma_chnl_id; | ||
236 | u32 dma_priority; /* DMA channel priority 0=lowest, >0=high */ | ||
237 | }; | ||
238 | |||
239 | /* The dsp_cbdata structure */ | ||
240 | struct dsp_cbdata { | ||
241 | u32 cb_data; | ||
242 | u8 node_data[1]; | ||
243 | }; | ||
244 | |||
245 | /* The dsp_msg structure */ | ||
246 | struct dsp_msg { | ||
247 | u32 cmd; | ||
248 | u32 arg1; | ||
249 | u32 arg2; | ||
250 | }; | ||
251 | |||
252 | /* The dsp_resourcereqmts structure for node's resource requirements */ | ||
253 | struct dsp_resourcereqmts { | ||
254 | u32 cb_struct; | ||
255 | u32 static_data_size; | ||
256 | u32 global_data_size; | ||
257 | u32 program_mem_size; | ||
258 | u32 wc_execution_time; | ||
259 | u32 wc_period; | ||
260 | u32 wc_deadline; | ||
261 | u32 avg_exection_time; | ||
262 | u32 minimum_period; | ||
263 | }; | ||
264 | |||
265 | /* | ||
266 | * The dsp_streamconnect structure describes a stream connection | ||
267 | * between two nodes, or between a node and the GPP | ||
268 | */ | ||
269 | struct dsp_streamconnect { | ||
270 | u32 cb_struct; | ||
271 | enum dsp_connecttype connect_type; | ||
272 | u32 this_node_stream_index; | ||
273 | void *connected_node; | ||
274 | struct dsp_uuid ui_connected_node_id; | ||
275 | u32 connected_node_stream_index; | ||
276 | }; | ||
277 | |||
278 | struct dsp_nodeprofs { | ||
279 | u32 heap_size; | ||
280 | }; | ||
281 | |||
282 | /* The dsp_ndbprops structure reports the attributes of a node */ | ||
283 | struct dsp_ndbprops { | ||
284 | u32 cb_struct; | ||
285 | struct dsp_uuid ui_node_id; | ||
286 | char ac_name[DSP_MAXNAMELEN]; | ||
287 | enum node_type ntype; | ||
288 | u32 cache_on_gpp; | ||
289 | struct dsp_resourcereqmts dsp_resource_reqmts; | ||
290 | s32 prio; | ||
291 | u32 stack_size; | ||
292 | u32 sys_stack_size; | ||
293 | u32 stack_seg; | ||
294 | u32 message_depth; | ||
295 | u32 num_input_streams; | ||
296 | u32 num_output_streams; | ||
297 | u32 timeout; | ||
298 | u32 count_profiles; /* Number of supported profiles */ | ||
299 | /* Array of profiles */ | ||
300 | struct dsp_nodeprofs node_profiles[MAX_PROFILES]; | ||
301 | u32 stack_seg_name; /* Stack Segment Name */ | ||
302 | }; | ||
303 | |||
304 | /* The dsp_nodeattrin structure describes the attributes of a | ||
305 | * node client */ | ||
306 | struct dsp_nodeattrin { | ||
307 | u32 cb_struct; | ||
308 | s32 prio; | ||
309 | u32 timeout; | ||
310 | u32 profile_id; | ||
311 | /* Reserved, for Bridge Internal use only */ | ||
312 | u32 heap_size; | ||
313 | void *pgpp_virt_addr; /* Reserved, for Bridge Internal use only */ | ||
314 | }; | ||
315 | |||
316 | /* The dsp_nodeinfo structure is used to retrieve information | ||
317 | * about a node */ | ||
318 | struct dsp_nodeinfo { | ||
319 | u32 cb_struct; | ||
320 | struct dsp_ndbprops nb_node_database_props; | ||
321 | u32 execution_priority; | ||
322 | enum node_state ns_execution_state; | ||
323 | void *device_owner; | ||
324 | u32 number_streams; | ||
325 | struct dsp_streamconnect sc_stream_connection[16]; | ||
326 | u32 node_env; | ||
327 | }; | ||
328 | |||
329 | /* The dsp_nodeattr structure describes the attributes of a node */ | ||
330 | struct dsp_nodeattr { | ||
331 | u32 cb_struct; | ||
332 | struct dsp_nodeattrin in_node_attr_in; | ||
333 | u32 node_attr_inputs; | ||
334 | u32 node_attr_outputs; | ||
335 | struct dsp_nodeinfo node_info; | ||
336 | }; | ||
337 | |||
338 | /* | ||
339 | * Notification type: either the name of an opened event, or an event or | ||
340 | * window handle. | ||
341 | */ | ||
342 | struct dsp_notification { | ||
343 | char *name; | ||
344 | void *handle; | ||
345 | }; | ||
346 | |||
347 | /* The dsp_processorattrin structure describes the attributes of a processor */ | ||
348 | struct dsp_processorattrin { | ||
349 | u32 cb_struct; | ||
350 | u32 timeout; | ||
351 | }; | ||
352 | /* | ||
353 | * The dsp_processorinfo structure describes basic capabilities of a | ||
354 | * DSP processor | ||
355 | */ | ||
356 | struct dsp_processorinfo { | ||
357 | u32 cb_struct; | ||
358 | int processor_family; | ||
359 | int processor_type; | ||
360 | u32 clock_rate; | ||
361 | u32 internal_mem_size; | ||
362 | u32 external_mem_size; | ||
363 | u32 processor_id; | ||
364 | int ty_running_rtos; | ||
365 | s32 node_min_priority; | ||
366 | s32 node_max_priority; | ||
367 | }; | ||
368 | |||
369 | /* Error information of last DSP exception signalled to the GPP */ | ||
370 | struct dsp_errorinfo { | ||
371 | u32 err_mask; | ||
372 | u32 val1; | ||
373 | u32 val2; | ||
374 | u32 val3; | ||
375 | }; | ||
376 | |||
377 | /* The dsp_processorstate structure describes the state of a DSP processor */ | ||
378 | struct dsp_processorstate { | ||
379 | u32 cb_struct; | ||
380 | enum dsp_procstate proc_state; | ||
381 | }; | ||
382 | |||
383 | /* | ||
384 | * The dsp_resourceinfo structure is used to retrieve information about a | ||
385 | * processor's resources | ||
386 | */ | ||
387 | struct dsp_resourceinfo { | ||
388 | u32 cb_struct; | ||
389 | enum dsp_resourceinfotype resource_type; | ||
390 | union { | ||
391 | u32 resource; | ||
392 | struct dsp_memstat mem_stat; | ||
393 | struct dsp_procloadstat proc_load_stat; | ||
394 | } result; | ||
395 | }; | ||
396 | |||
397 | /* | ||
398 | * The dsp_streamattrin structure describes the attributes of a stream, | ||
399 | * including segment and alignment of data buffers allocated with | ||
400 | * DSPStream_AllocateBuffers(), if applicable | ||
401 | */ | ||
402 | struct dsp_streamattrin { | ||
403 | u32 cb_struct; | ||
404 | u32 timeout; | ||
405 | u32 segment_id; | ||
406 | u32 buf_alignment; | ||
407 | u32 num_bufs; | ||
408 | enum dsp_strmmode strm_mode; | ||
409 | u32 dma_chnl_id; | ||
410 | u32 dma_priority; | ||
411 | }; | ||
412 | |||
413 | /* The dsp_bufferattr structure describes the attributes of a data buffer */ | ||
414 | struct dsp_bufferattr { | ||
415 | u32 cb_struct; | ||
416 | u32 segment_id; | ||
417 | u32 buf_alignment; | ||
418 | }; | ||
419 | |||
420 | /* | ||
421 | * The dsp_streaminfo structure is used to retrieve information | ||
422 | * about a stream. | ||
423 | */ | ||
424 | struct dsp_streaminfo { | ||
425 | u32 cb_struct; | ||
426 | u32 number_bufs_allowed; | ||
427 | u32 number_bufs_in_stream; | ||
428 | u32 number_bytes; | ||
429 | void *sync_object_handle; | ||
430 | enum dsp_streamstate ss_stream_state; | ||
431 | }; | ||
432 | |||
433 | /* DMM MAP attributes | ||
434 | It is a bit mask with each bit value indicating a specific attribute | ||
435 | bit 0 - GPP address type (user virtual=0, physical=1) | ||
436 | bit 1 - MMU Endianism (Big Endian=1, Little Endian=0) | ||
437 | bit 2 - MMU mixed page attribute (Mixed/ CPUES=1, TLBES =0) | ||
438 | bit 3 - MMU element size = 8bit (valid only for non mixed page entries) | ||
439 | bit 4 - MMU element size = 16bit (valid only for non mixed page entries) | ||
440 | bit 5 - MMU element size = 32bit (valid only for non mixed page entries) | ||
441 | bit 6 - MMU element size = 64bit (valid only for non mixed page entries) | ||
442 | |||
443 | bit 14 - Input (read only) buffer | ||
444 | bit 15 - Output (writeable) buffer | ||
445 | */ | ||
446 | |||
447 | /* Types of mapping attributes */ | ||
448 | |||
449 | /* MPU address is virtual and needs to be translated to physical addr */ | ||
450 | #define DSP_MAPVIRTUALADDR 0x00000000 | ||
451 | #define DSP_MAPPHYSICALADDR 0x00000001 | ||
452 | |||
453 | /* Mapped data is big endian */ | ||
454 | #define DSP_MAPBIGENDIAN 0x00000002 | ||
455 | #define DSP_MAPLITTLEENDIAN 0x00000000 | ||
456 | |||
457 | /* Element size is based on DSP r/w access size */ | ||
458 | #define DSP_MAPMIXEDELEMSIZE 0x00000004 | ||
459 | |||
460 | /* | ||
461 | * Element size for MMU mapping (8, 16, 32, or 64 bit) | ||
462 | * Ignored if DSP_MAPMIXEDELEMSIZE enabled | ||
463 | */ | ||
464 | #define DSP_MAPELEMSIZE8 0x00000008 | ||
465 | #define DSP_MAPELEMSIZE16 0x00000010 | ||
466 | #define DSP_MAPELEMSIZE32 0x00000020 | ||
467 | #define DSP_MAPELEMSIZE64 0x00000040 | ||
468 | |||
469 | #define DSP_MAPVMALLOCADDR 0x00000080 | ||
470 | |||
471 | #define DSP_MAPDONOTLOCK 0x00000100 | ||
472 | |||
473 | #define DSP_MAP_DIR_MASK 0x3FFF | ||
474 | |||
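Combining the flags above, a user-virtual, little-endian mapping with 32-bit elements could be requested as, for example:

	u32 map_attr = DSP_MAPVIRTUALADDR | DSP_MAPLITTLEENDIAN |
		       DSP_MAPELEMSIZE32;
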
475 | #define GEM_CACHE_LINE_SIZE 128 | ||
476 | #define GEM_L1P_PREFETCH_SIZE 128 | ||
477 | |||
478 | /* | ||
479 | * Definitions from dbreg.h | ||
480 | */ | ||
481 | |||
482 | #define DSPPROCTYPE_C64 6410 | ||
483 | #define IVAPROCTYPE_ARM7 470 | ||
484 | |||
485 | /* Max registry path length. Also the max registry value length. */ | ||
486 | #define MAXREGPATHLENGTH 255 | ||
487 | |||
488 | #endif /* DBDEFS_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dbll.h b/drivers/staging/tidspbridge/include/dspbridge/dbll.h deleted file mode 100644 index 46a9e0027ea5..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dbll.h +++ /dev/null | |||
@@ -1,56 +0,0 @@ | |||
1 | /* | ||
2 | * dbll.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge Dynamic load library module interface. Function header | ||
7 | * comments are in the file dblldefs.h. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef DBLL_ | ||
21 | #define DBLL_ | ||
22 | |||
23 | #include <dspbridge/dbdefs.h> | ||
24 | #include <dspbridge/dblldefs.h> | ||
25 | |||
26 | extern bool symbols_reloaded; | ||
27 | |||
28 | extern void dbll_close(struct dbll_library_obj *zl_lib); | ||
29 | extern int dbll_create(struct dbll_tar_obj **target_obj, | ||
30 | struct dbll_attrs *pattrs); | ||
31 | extern void dbll_delete(struct dbll_tar_obj *target); | ||
32 | extern void dbll_exit(void); | ||
33 | extern bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name, | ||
34 | struct dbll_sym_val **sym_val); | ||
35 | extern void dbll_get_attrs(struct dbll_tar_obj *target, | ||
36 | struct dbll_attrs *pattrs); | ||
37 | extern bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name, | ||
38 | struct dbll_sym_val **sym_val); | ||
39 | extern int dbll_get_sect(struct dbll_library_obj *lib, char *name, | ||
40 | u32 *paddr, u32 *psize); | ||
41 | extern bool dbll_init(void); | ||
42 | extern int dbll_load(struct dbll_library_obj *lib, | ||
43 | dbll_flags flags, | ||
44 | struct dbll_attrs *attrs, u32 * entry); | ||
45 | extern int dbll_open(struct dbll_tar_obj *target, char *file, | ||
46 | dbll_flags flags, | ||
47 | struct dbll_library_obj **lib_obj); | ||
48 | extern int dbll_read_sect(struct dbll_library_obj *lib, | ||
49 | char *name, char *buf, u32 size); | ||
50 | extern void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs); | ||
51 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
52 | bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address, | ||
53 | u32 offset_range, u32 *sym_addr_output, char *name_output); | ||
54 | #endif | ||
55 | |||
56 | #endif /* DBLL_ */ | ||
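/*
 * Editor's sketch of the call sequence these declarations imply: initialize
 * the module, create a target, open and load a library, then tear everything
 * down in reverse order.  The attrs contents and the image file name are
 * placeholders, and error handling is trimmed to the essentials.
 */
static void example_dbll_usage(struct dbll_attrs *attrs)
{
	struct dbll_tar_obj *target = NULL;
	struct dbll_library_obj *lib = NULL;
	char image[] = "base_image.dof";	/* hypothetical file name */
	u32 entry;

	if (!dbll_init())
		return;
	if (!dbll_create(&target, attrs) &&
	    !dbll_open(target, image, DBLL_SYMB | DBLL_CODE | DBLL_DATA, &lib) &&
	    !dbll_load(lib, DBLL_CODE | DBLL_DATA | DBLL_SYMB, attrs, &entry)) {
		/* image is loaded; entry holds the DSP program entry point */
		dbll_unload(lib, attrs);
	}
	if (lib)
		dbll_close(lib);
	if (target)
		dbll_delete(target);
	dbll_exit();
}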
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dblldefs.h b/drivers/staging/tidspbridge/include/dspbridge/dblldefs.h deleted file mode 100644 index a19e07809ff6..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dblldefs.h +++ /dev/null | |||
@@ -1,431 +0,0 @@ | |||
1 | /* | ||
2 | * dblldefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef DBLLDEFS_ | ||
18 | #define DBLLDEFS_ | ||
19 | |||
20 | /* | ||
21 | * Bit masks for dbl_flags. | ||
22 | */ | ||
23 | #define DBLL_NOLOAD 0x0 /* Don't load symbols, code, or data */ | ||
24 | #define DBLL_SYMB 0x1 /* load symbols */ | ||
25 | #define DBLL_CODE 0x2 /* load code */ | ||
26 | #define DBLL_DATA 0x4 /* load data */ | ||
27 | #define DBLL_DYNAMIC 0x8 /* dynamic load */ | ||
28 | #define DBLL_BSS 0x20 /* Uninitialized section */ | ||
29 | |||
30 | #define DBLL_MAXPATHLENGTH 255 | ||
31 | |||
32 | /* | ||
33 | * ======== DBLL_Target ======== | ||
34 | * | ||
35 | */ | ||
36 | struct dbll_tar_obj; | ||
37 | |||
38 | /* | ||
39 | * ======== dbll_flags ======== | ||
40 | * Specifies whether to load code, data, or symbols | ||
41 | */ | ||
42 | typedef s32 dbll_flags; | ||
43 | |||
44 | /* | ||
45 | * ======== DBLL_Library ======== | ||
46 | * | ||
47 | */ | ||
48 | struct dbll_library_obj; | ||
49 | |||
50 | /* | ||
51 | * ======== dbll_sect_info ======== | ||
52 | * For collecting info on overlay sections | ||
53 | */ | ||
54 | struct dbll_sect_info { | ||
55 | const char *name; /* name of section */ | ||
56 | u32 sect_run_addr; /* run address of section */ | ||
57 | u32 sect_load_addr; /* load address of section */ | ||
58 | u32 size; /* size of section (target MAUs) */ | ||
59 | dbll_flags type; /* Code, data, or BSS */ | ||
60 | }; | ||
61 | |||
62 | /* | ||
63 | * ======== dbll_sym_val ======== | ||
64 | * (Needed for dynamic load library) | ||
65 | */ | ||
66 | struct dbll_sym_val { | ||
67 | u32 value; | ||
68 | }; | ||
69 | |||
70 | /* | ||
71 | * ======== dbll_alloc_fxn ======== | ||
72 | * Allocate memory function. Allocate or reserve (if reserved == TRUE) | ||
73 | * "size" bytes of memory from segment "space" and return the address in | ||
74 | * *dsp_address (or starting at *dsp_address if reserved == TRUE). Returns 0 on | ||
75 | * success, or an error code on failure. | ||
76 | */ | ||
77 | typedef s32(*dbll_alloc_fxn) (void *hdl, s32 space, u32 size, u32 align, | ||
78 | u32 *dsp_address, s32 seg_id, s32 req, | ||
79 | bool reserved); | ||
80 | |||
81 | /* | ||
82 | * ======== dbll_close_fxn ======== | ||
83 | */ | ||
84 | typedef s32(*dbll_f_close_fxn) (void *); | ||
85 | |||
86 | /* | ||
87 | * ======== dbll_free_fxn ======== | ||
88 | * Free memory function. Free, or unreserve (if reserved == TRUE) "size" | ||
89 | * bytes of memory from segment "space" | ||
90 | */ | ||
91 | typedef bool(*dbll_free_fxn) (void *hdl, u32 addr, s32 space, u32 size, | ||
92 | bool reserved); | ||
93 | |||
94 | /* | ||
95 | * ======== dbll_f_open_fxn ======== | ||
96 | */ | ||
97 | typedef void *(*dbll_f_open_fxn) (const char *, const char *); | ||
98 | |||
99 | /* | ||
100 | * ======== dbll_log_write_fxn ======== | ||
101 | * Function to call when writing data from a section, to log the info. | ||
102 | * Can be NULL if no logging is required. | ||
103 | */ | ||
104 | typedef int(*dbll_log_write_fxn) (void *handle, | ||
105 | struct dbll_sect_info *sect, u32 addr, | ||
106 | u32 bytes); | ||
107 | |||
108 | /* | ||
109 | * ======== dbll_read_fxn ======== | ||
110 | */ | ||
111 | typedef s32(*dbll_read_fxn) (void *, size_t, size_t, void *); | ||
112 | |||
113 | /* | ||
114 | * ======== dbll_seek_fxn ======== | ||
115 | */ | ||
116 | typedef s32(*dbll_seek_fxn) (void *, long, int); | ||
117 | |||
118 | /* | ||
119 | * ======== dbll_sym_lookup ======== | ||
120 | * Symbol lookup function - Find the symbol name and return its value. | ||
121 | * | ||
122 | * Parameters: | ||
123 | * handle - Opaque handle | ||
124 | * parg - Opaque argument. | ||
125 | * name - Name of symbol to lookup. | ||
126 | * sym - Location to store address of symbol structure. | ||
127 | * | ||
128 | * Returns: | ||
129 | * TRUE: Success (symbol was found). | ||
130 | * FALSE: Failed to find symbol. | ||
131 | */ | ||
132 | typedef bool(*dbll_sym_lookup) (void *handle, void *parg, void *rmm_handle, | ||
133 | const char *name, struct dbll_sym_val **sym); | ||
134 | |||
135 | /* | ||
136 | * ======== dbll_tell_fxn ======== | ||
137 | */ | ||
138 | typedef s32(*dbll_tell_fxn) (void *); | ||
139 | |||
140 | /* | ||
141 | * ======== dbll_write_fxn ======== | ||
142 | * Write memory function. Write "n" HOST bytes of memory to segment "mtype" | ||
143 | * starting at address "dsp_address" from the buffer "buf". The buffer is | ||
144 | * formatted as an array of words appropriate for the DSP. | ||
145 | */ | ||
146 | typedef s32(*dbll_write_fxn) (void *hdl, u32 dsp_address, void *buf, | ||
147 | u32 n, s32 mtype); | ||
148 | |||
149 | /* | ||
150 | * ======== dbll_attrs ======== | ||
151 | */ | ||
152 | struct dbll_attrs { | ||
153 | dbll_alloc_fxn alloc; | ||
154 | dbll_free_fxn free; | ||
155 | void *rmm_handle; /* Handle to pass to alloc, free functions */ | ||
156 | dbll_write_fxn write; | ||
157 | void *input_params; /* Handle to pass to write, cinit function */ | ||
158 | bool base_image; | ||
159 | dbll_log_write_fxn log_write; | ||
160 | void *log_write_handle; | ||
161 | |||
162 | /* Symbol matching function and handle to pass to it */ | ||
163 | dbll_sym_lookup sym_lookup; | ||
164 | void *sym_handle; | ||
165 | void *sym_arg; | ||
166 | |||
167 | /* | ||
168 | * These file manipulation functions should be compatible with the | ||
169 | * "C" run time library functions of the same name. | ||
170 | */ | ||
171 | s32 (*fread)(void *ptr, size_t size, size_t count, void *filp); | ||
172 | s32 (*fseek)(void *filp, long offset, int origin); | ||
173 | s32 (*ftell)(void *filp); | ||
174 | s32 (*fclose)(void *filp); | ||
175 | void *(*fopen)(const char *path, const char *mode); | ||
176 | }; | ||
177 | |||
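/*
 * Editor's sketch of wiring the C-runtime-style file hooks above.  The my_*
 * functions are hypothetical wrappers supplied by the caller; they only need
 * to match the prototypes of the members they fill.
 */
extern void *my_fopen(const char *path, const char *mode);
extern s32 my_fread(void *ptr, size_t size, size_t count, void *filp);
extern s32 my_fseek(void *filp, long offset, int origin);
extern s32 my_ftell(void *filp);
extern s32 my_fclose(void *filp);

static struct dbll_attrs example_attrs = {
	.fopen	= my_fopen,
	.fread	= my_fread,
	.fseek	= my_fseek,
	.ftell	= my_ftell,
	.fclose	= my_fclose,
	/* alloc, free, write, sym_lookup, etc. are filled in the same way */
};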
178 | /* | ||
179 | * ======== dbll_close ======== | ||
180 | * Close library opened with dbll_open. | ||
181 | * Parameters: | ||
182 | * lib - Handle returned from dbll_open(). | ||
183 | * Returns: | ||
184 | * Requires: | ||
185 | * DBL initialized. | ||
186 | * Valid lib. | ||
187 | * Ensures: | ||
188 | */ | ||
189 | typedef void (*dbll_close_fxn) (struct dbll_library_obj *library); | ||
190 | |||
191 | /* | ||
192 | * ======== dbll_create ======== | ||
193 | * Create a target object, specifying the alloc, free, and write functions. | ||
194 | * Parameters: | ||
195 | * target_obj - Location to store target handle on output. | ||
196 | * pattrs - Attributes. | ||
197 | * Returns: | ||
198 | * 0: Success. | ||
199 | * -ENOMEM: Memory allocation failed. | ||
200 | * Requires: | ||
201 | * DBL initialized. | ||
202 | * pattrs != NULL. | ||
203 | * target_obj != NULL; | ||
204 | * Ensures: | ||
205 | * Success: *target_obj != NULL. | ||
206 | * Failure: *target_obj == NULL. | ||
207 | */ | ||
208 | typedef int(*dbll_create_fxn) (struct dbll_tar_obj **target_obj, | ||
209 | struct dbll_attrs *attrs); | ||
210 | |||
211 | /* | ||
212 | * ======== dbll_delete ======== | ||
213 | * Delete target object and free resources for any loaded libraries. | ||
214 | * Parameters: | ||
215 | * target - Handle returned from DBLL_Create(). | ||
216 | * Returns: | ||
217 | * Requires: | ||
218 | * DBL initialized. | ||
219 | * Valid target. | ||
220 | * Ensures: | ||
221 | */ | ||
222 | typedef void (*dbll_delete_fxn) (struct dbll_tar_obj *target); | ||
223 | |||
224 | /* | ||
225 | * ======== dbll_exit ======== | ||
226 | * Discontinue use of DBL module. | ||
227 | * Parameters: | ||
228 | * Returns: | ||
229 | * Requires: | ||
230 | * refs > 0. | ||
231 | * Ensures: | ||
232 | * refs >= 0. | ||
233 | */ | ||
234 | typedef void (*dbll_exit_fxn) (void); | ||
235 | |||
236 | /* | ||
237 | * ======== dbll_get_addr ======== | ||
238 | * Get address of name in the specified library. | ||
239 | * Parameters: | ||
240 | * lib - Handle returned from dbll_open(). | ||
241 | * name - Name of symbol | ||
242 | * sym_val - Location to store symbol address on output. | ||
243 | * Returns: | ||
244 | * TRUE: Success. | ||
245 | * FALSE: Symbol not found. | ||
246 | * Requires: | ||
247 | * DBL initialized. | ||
248 | * Valid library. | ||
249 | * name != NULL. | ||
250 | * sym_val != NULL. | ||
251 | * Ensures: | ||
252 | */ | ||
253 | typedef bool(*dbll_get_addr_fxn) (struct dbll_library_obj *lib, char *name, | ||
254 | struct dbll_sym_val **sym_val); | ||
255 | |||
256 | /* | ||
257 | * ======== dbll_get_attrs ======== | ||
258 | * Retrieve the attributes of the target. | ||
259 | * Parameters: | ||
260 | * target - Handle returned from DBLL_Create(). | ||
261 | * pattrs - Location to store attributes on output. | ||
262 | * Returns: | ||
263 | * Requires: | ||
264 | * DBL initialized. | ||
265 | * Valid target. | ||
266 | * pattrs != NULL. | ||
267 | * Ensures: | ||
268 | */ | ||
269 | typedef void (*dbll_get_attrs_fxn) (struct dbll_tar_obj *target, | ||
270 | struct dbll_attrs *attrs); | ||
271 | |||
272 | /* | ||
273 | * ======== dbll_get_c_addr ======== | ||
274 | * Get address of "C" name in the specified library. | ||
275 | * Parameters: | ||
276 | * lib - Handle returned from dbll_open(). | ||
277 | * name - Name of symbol | ||
278 | * sym_val - Location to store symbol address on output. | ||
279 | * Returns: | ||
280 | * TRUE: Success. | ||
281 | * FALSE: Symbol not found. | ||
282 | * Requires: | ||
283 | * DBL initialized. | ||
284 | * Valid target. | ||
285 | * name != NULL. | ||
286 | * sym_val != NULL. | ||
287 | * Ensures: | ||
288 | */ | ||
289 | typedef bool(*dbll_get_c_addr_fxn) (struct dbll_library_obj *lib, char *name, | ||
290 | struct dbll_sym_val **sym_val); | ||
291 | |||
292 | /* | ||
293 | * ======== dbll_get_sect ======== | ||
294 | * Get address and size of a named section. | ||
295 | * Parameters: | ||
296 | * lib - Library handle returned from dbll_open(). | ||
297 | * name - Name of section. | ||
298 | * paddr - Location to store section address on output. | ||
299 | * psize - Location to store section size on output. | ||
300 | * Returns: | ||
301 | * 0: Success. | ||
302 | * -ENXIO: Section not found. | ||
303 | * Requires: | ||
304 | * DBL initialized. | ||
305 | * Valid lib. | ||
306 | * name != NULL. | ||
307 | * paddr != NULL; | ||
308 | * psize != NULL. | ||
309 | * Ensures: | ||
310 | */ | ||
311 | typedef int(*dbll_get_sect_fxn) (struct dbll_library_obj *lib, | ||
312 | char *name, u32 *addr, u32 *size); | ||
313 | |||
314 | /* | ||
315 | * ======== dbll_init ======== | ||
316 | * Initialize DBL module. | ||
317 | * Parameters: | ||
318 | * Returns: | ||
319 | * TRUE: Success. | ||
320 | * FALSE: Failure. | ||
321 | * Requires: | ||
322 | * refs >= 0. | ||
323 | * Ensures: | ||
324 | * Success: refs > 0. | ||
325 | * Failure: refs >= 0. | ||
326 | */ | ||
327 | typedef bool(*dbll_init_fxn) (void); | ||
328 | |||
329 | /* | ||
330 | * ======== dbll_load ======== | ||
331 | * Load library onto the target. | ||
332 | * | ||
333 | * Parameters: | ||
334 | * lib - Library handle returned from dbll_open(). | ||
335 | * flags - Load code, data and/or symbols. | ||
336 | * attrs - May contain alloc, free, and write function. | ||
337 | * entry_pt - Location to store program entry on output. | ||
338 | * Returns: | ||
339 | * 0: Success. | ||
340 | * -EBADF: File read failed. | ||
341 | * -EILSEQ: Failure in dynamic loader library. | ||
342 | * Requires: | ||
343 | * DBL initialized. | ||
344 | * Valid lib. | ||
345 | * entry != NULL. | ||
346 | * Ensures: | ||
347 | */ | ||
348 | typedef int(*dbll_load_fxn) (struct dbll_library_obj *lib, | ||
349 | dbll_flags flags, | ||
350 | struct dbll_attrs *attrs, u32 *entry); | ||
351 | /* | ||
352 | * ======== dbll_open ======== | ||
353 | * dbll_open() returns a library handle that can be used to load/unload | ||
354 | * the symbols/code/data via dbll_load()/dbll_unload(). | ||
355 | * Parameters: | ||
356 | * target - Handle returned from dbll_create(). | ||
357 | * file - Name of file to open. | ||
358 | * flags - If flags & DBLL_SYMB, load symbols. | ||
359 | * lib_obj - Location to store library handle on output. | ||
360 | * Returns: | ||
361 | * 0: Success. | ||
362 | * -ENOMEM: Memory allocation failure. | ||
363 | * -EBADF: File open/read failure. | ||
364 | * Unable to determine target type. | ||
365 | * Requires: | ||
366 | * DBL initialized. | ||
367 | * Valid target. | ||
368 | * file != NULL. | ||
369 | * lib_obj != NULL. | ||
370 | * dbll_attrs fopen function non-NULL. | ||
371 | * Ensures: | ||
372 | * Success: Valid *lib_obj. | ||
373 | * Failure: *lib_obj == NULL. | ||
374 | */ | ||
375 | typedef int(*dbll_open_fxn) (struct dbll_tar_obj *target, char *file, | ||
376 | dbll_flags flags, | ||
377 | struct dbll_library_obj **lib_obj); | ||
378 | |||
379 | /* | ||
380 | * ======== dbll_read_sect ======== | ||
381 | * Read COFF section into a character buffer. | ||
382 | * Parameters: | ||
383 | * lib - Library handle returned from dbll_open(). | ||
384 | * name - Name of section. | ||
385 | * pbuf - Buffer to write section contents into. | ||
386 | * size - Buffer size | ||
387 | * Returns: | ||
388 | * 0: Success. | ||
389 | * -ENXIO: Named section does not exist. | ||
390 | * Requires: | ||
391 | * DBL initialized. | ||
392 | * Valid lib. | ||
393 | * name != NULL. | ||
394 | * pbuf != NULL. | ||
395 | * size != 0. | ||
396 | * Ensures: | ||
397 | */ | ||
398 | typedef int(*dbll_read_sect_fxn) (struct dbll_library_obj *lib, | ||
399 | char *name, char *content, | ||
400 | u32 cont_size); | ||
401 | /* | ||
402 | * ======== dbll_unload ======== | ||
403 | * Unload library loaded with dbll_load(). | ||
404 | * Parameters: | ||
405 | * lib - Handle returned from dbll_open(). | ||
406 | * attrs - Contains free() function and handle to pass to it. | ||
407 | * Returns: | ||
408 | * Requires: | ||
409 | * DBL initialized. | ||
410 | * Valid lib. | ||
411 | * Ensures: | ||
412 | */ | ||
413 | typedef void (*dbll_unload_fxn) (struct dbll_library_obj *library, | ||
414 | struct dbll_attrs *attrs); | ||
415 | struct dbll_fxns { | ||
416 | dbll_close_fxn close_fxn; | ||
417 | dbll_create_fxn create_fxn; | ||
418 | dbll_delete_fxn delete_fxn; | ||
419 | dbll_exit_fxn exit_fxn; | ||
420 | dbll_get_attrs_fxn get_attrs_fxn; | ||
421 | dbll_get_addr_fxn get_addr_fxn; | ||
422 | dbll_get_c_addr_fxn get_c_addr_fxn; | ||
423 | dbll_get_sect_fxn get_sect_fxn; | ||
424 | dbll_init_fxn init_fxn; | ||
425 | dbll_load_fxn load_fxn; | ||
426 | dbll_open_fxn open_fxn; | ||
427 | dbll_read_sect_fxn read_sect_fxn; | ||
428 | dbll_unload_fxn unload_fxn; | ||
429 | }; | ||
430 | |||
431 | #endif /* DBLLDEFS_ */ | ||
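/*
 * Editor's sketch: struct dbll_fxns is an indirection table that lets a
 * caller be pointed at a particular loader implementation.  Filled with the
 * dbll_* functions declared in dbll.h it would look roughly like this (a
 * sketch, not a verbatim copy of the driver's own table):
 */
static struct dbll_fxns example_ldr_fxns = {
	.close_fxn	= dbll_close,
	.create_fxn	= dbll_create,
	.delete_fxn	= dbll_delete,
	.exit_fxn	= dbll_exit,
	.get_attrs_fxn	= dbll_get_attrs,
	.get_addr_fxn	= dbll_get_addr,
	.get_c_addr_fxn	= dbll_get_c_addr,
	.get_sect_fxn	= dbll_get_sect,
	.init_fxn	= dbll_init,
	.load_fxn	= dbll_load,
	.open_fxn	= dbll_open,
	.read_sect_fxn	= dbll_read_sect,
	.unload_fxn	= dbll_unload,
};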
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h deleted file mode 100644 index fa2d79ef6cc8..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dev.h +++ /dev/null | |||
@@ -1,620 +0,0 @@ | |||
1 | /* | ||
2 | * dev.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Bridge driver device operations. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef DEV_ | ||
20 | #define DEV_ | ||
21 | |||
22 | /* ----------------------------------- Module Dependent Headers */ | ||
23 | #include <dspbridge/chnldefs.h> | ||
24 | #include <dspbridge/cmm.h> | ||
25 | #include <dspbridge/cod.h> | ||
26 | #include <dspbridge/dspdeh.h> | ||
27 | #include <dspbridge/nodedefs.h> | ||
28 | #include <dspbridge/disp.h> | ||
29 | #include <dspbridge/dspdefs.h> | ||
30 | #include <dspbridge/dmm.h> | ||
31 | #include <dspbridge/host_os.h> | ||
32 | |||
33 | /* ----------------------------------- This */ | ||
34 | #include <dspbridge/devdefs.h> | ||
35 | |||
36 | /* | ||
37 | * ======== dev_brd_write_fxn ======== | ||
38 | * Purpose: | ||
39 | * Exported function to be used as the COD write function. This function | ||
40 | * is passed a handle to a DEV_hObject by ZL in arb, then calls the | ||
41 | * device's bridge_brd_write() function. | ||
42 | * Parameters: | ||
43 | * arb: Handle to a Device Object. | ||
44 | * dev_ctxt: Handle to Bridge driver defined device info. | ||
45 | * dsp_addr: Address on DSP board (Destination). | ||
46 | * host_buf: Pointer to host buffer (Source). | ||
47 | * ul_num_bytes: Number of bytes to transfer. | ||
48 | * mem_type: Memory space on DSP to which to transfer. | ||
49 | * Returns: | ||
50 | * Number of bytes written. Returns 0 if the DEV_hObject passed in via | ||
51 | * arb is invalid. | ||
52 | * Requires: | ||
53 | * DEV Initialized. | ||
54 | * host_buf != NULL | ||
55 | * Ensures: | ||
56 | */ | ||
57 | extern u32 dev_brd_write_fxn(void *arb, | ||
58 | u32 dsp_add, | ||
59 | void *host_buf, u32 ul_num_bytes, u32 mem_space); | ||
60 | |||
61 | /* | ||
62 | * ======== dev_create_device ======== | ||
63 | * Purpose: | ||
64 | * Called by the operating system to load the Bridge Driver for a | ||
65 | * 'Bridge device. | ||
66 | * Parameters: | ||
67 | * device_obj: Ptr to location to receive the device object handle. | ||
68 | * driver_file_name: Name of Bridge driver PE DLL file to load. If the | ||
69 | * absolute path is not provided, the file is loaded | ||
70 | * through 'Bridge's module search path. | ||
71 | * host_config: Host configuration information, to be passed down | ||
72 | * to the Bridge driver when bridge_dev_create() is called. | ||
73 | * pDspConfig: DSP resources, to be passed down to the Bridge driver | ||
74 | * when bridge_dev_create() is called. | ||
75 | * dev_node_obj: Platform specific device node. | ||
76 | * Returns: | ||
77 | * 0: Module is loaded, device object has been created | ||
78 | * -ENOMEM: Insufficient memory to create needed resources. | ||
79 | * -EPERM: Unable to find Bridge driver entry point function. | ||
80 | * -ESPIPE: Unable to load ZL DLL. | ||
81 | * Requires: | ||
82 | * DEV Initialized. | ||
83 | * device_obj != NULL. | ||
84 | * driver_file_name != NULL. | ||
85 | * host_config != NULL. | ||
86 | * pDspConfig != NULL. | ||
87 | * Ensures: | ||
88 | * 0: *device_obj will contain handle to the new device object. | ||
89 | * Otherwise, does not create the device object, ensures the Bridge driver | ||
90 | * module is unloaded, and sets *device_obj to NULL. | ||
91 | */ | ||
92 | extern int dev_create_device(struct dev_object | ||
93 | **device_obj, | ||
94 | const char *driver_file_name, | ||
95 | struct cfg_devnode *dev_node_obj); | ||
96 | |||
97 | /* | ||
98 | * ======== dev_create2 ======== | ||
99 | * Purpose: | ||
100 | * After successful loading of the image from api_init_complete2 | ||
101 | * (PROC Auto_Start) or proc_load this fxn is called. This creates | ||
102 | * the Node Manager and updates the DEV Object. | ||
103 | * Parameters: | ||
104 | * hdev_obj: Handle to device object created with dev_create_device(). | ||
105 | * Returns: | ||
106 | * 0: Successful Creation of Node Manager | ||
107 | * -EPERM: Some Error Occurred. | ||
108 | * Requires: | ||
109 | * DEV Initialized | ||
110 | * Valid hdev_obj | ||
111 | * Ensures: | ||
112 | * 0 and hdev_obj->node_mgr != NULL | ||
113 | * else hdev_obj->node_mgr == NULL | ||
114 | */ | ||
115 | extern int dev_create2(struct dev_object *hdev_obj); | ||
116 | |||
117 | /* | ||
118 | * ======== dev_destroy2 ======== | ||
119 | * Purpose: | ||
120 | * Destroys the Node manager for this device. | ||
121 | * Parameters: | ||
122 | * hdev_obj: Handle to device object created with dev_create_device(). | ||
123 | * Returns: | ||
124 | * 0: Successful destruction of the Node manager. | ||
125 | * -EPERM: Some Error Occurred. | ||
126 | * Requires: | ||
127 | * DEV Initialized | ||
128 | * Valid hdev_obj | ||
129 | * Ensures: | ||
130 | * 0 and hdev_obj->node_mgr == NULL | ||
131 | * else -EPERM. | ||
132 | */ | ||
133 | extern int dev_destroy2(struct dev_object *hdev_obj); | ||
134 | |||
135 | /* | ||
136 | * ======== dev_destroy_device ======== | ||
137 | * Purpose: | ||
138 | * Destroys the channel manager for this device, if any, calls | ||
139 | * bridge_dev_destroy(), and then attempts to unload the Bridge module. | ||
140 | * Parameters: | ||
141 | * hdev_obj: Handle to device object created with | ||
142 | * dev_create_device(). | ||
143 | * Returns: | ||
144 | * 0: Success. | ||
145 | * -EFAULT: Invalid hdev_obj. | ||
146 | * -EPERM: The Bridge driver failed its bridge_dev_destroy() function. | ||
147 | * Requires: | ||
148 | * DEV Initialized. | ||
149 | * Ensures: | ||
150 | */ | ||
151 | extern int dev_destroy_device(struct dev_object | ||
152 | *hdev_obj); | ||
153 | |||
154 | /* | ||
155 | * ======== dev_get_chnl_mgr ======== | ||
156 | * Purpose: | ||
157 | * Retrieve the handle to the channel manager created for this device. | ||
158 | * Parameters: | ||
159 | * hdev_obj: Handle to device object created with | ||
160 | * dev_create_device(). | ||
161 | * *mgr: Ptr to location to store handle. | ||
162 | * Returns: | ||
163 | * 0: Success. | ||
164 | * -EFAULT: Invalid hdev_obj. | ||
165 | * Requires: | ||
166 | * mgr != NULL. | ||
167 | * DEV Initialized. | ||
168 | * Ensures: | ||
169 | * 0: *mgr contains a handle to a channel manager object, | ||
170 | * or NULL. | ||
171 | * else: *mgr is NULL. | ||
172 | */ | ||
173 | extern int dev_get_chnl_mgr(struct dev_object *hdev_obj, | ||
174 | struct chnl_mgr **mgr); | ||
175 | |||
176 | /* | ||
177 | * ======== dev_get_cmm_mgr ======== | ||
178 | * Purpose: | ||
179 | * Retrieve the handle to the shared memory manager created for this | ||
180 | * device. | ||
181 | * Parameters: | ||
182 | * hdev_obj: Handle to device object created with | ||
183 | * dev_create_device(). | ||
184 | * *mgr: Ptr to location to store handle. | ||
185 | * Returns: | ||
186 | * 0: Success. | ||
187 | * -EFAULT: Invalid hdev_obj. | ||
188 | * Requires: | ||
189 | * mgr != NULL. | ||
190 | * DEV Initialized. | ||
191 | * Ensures: | ||
192 | * 0: *mgr contains a handle to a channel manager object, | ||
193 | * or NULL. | ||
194 | * else: *mgr is NULL. | ||
195 | */ | ||
196 | extern int dev_get_cmm_mgr(struct dev_object *hdev_obj, | ||
197 | struct cmm_object **mgr); | ||
198 | |||
199 | /* | ||
200 | * ======== dev_get_dmm_mgr ======== | ||
201 | * Purpose: | ||
202 | * Retrieve the handle to the dynamic memory manager created for this | ||
203 | * device. | ||
204 | * Parameters: | ||
205 | * hdev_obj: Handle to device object created with | ||
206 | * dev_create_device(). | ||
207 | * *mgr: Ptr to location to store handle. | ||
208 | * Returns: | ||
209 | * 0: Success. | ||
210 | * -EFAULT: Invalid hdev_obj. | ||
211 | * Requires: | ||
212 | * mgr != NULL. | ||
213 | * DEV Initialized. | ||
214 | * Ensures: | ||
215 | * 0: *mgr contains a handle to a channel manager object, | ||
216 | * or NULL. | ||
217 | * else: *mgr is NULL. | ||
218 | */ | ||
219 | extern int dev_get_dmm_mgr(struct dev_object *hdev_obj, | ||
220 | struct dmm_object **mgr); | ||
221 | |||
222 | /* | ||
223 | * ======== dev_get_cod_mgr ======== | ||
224 | * Purpose: | ||
225 | * Retrieve the COD manager created for this device. | ||
226 | * Parameters: | ||
227 | * hdev_obj: Handle to device object created with | ||
228 | * dev_create_device(). | ||
229 | * *cod_mgr: Ptr to location to store handle. | ||
230 | * Returns: | ||
231 | * 0: Success. | ||
232 | * -EFAULT: Invalid hdev_obj. | ||
233 | * Requires: | ||
234 | * cod_mgr != NULL. | ||
235 | * DEV Initialized. | ||
236 | * Ensures: | ||
237 | * 0: *cod_mgr contains a handle to a COD manager object. | ||
238 | * else: *cod_mgr is NULL. | ||
239 | */ | ||
240 | extern int dev_get_cod_mgr(struct dev_object *hdev_obj, | ||
241 | struct cod_manager **cod_mgr); | ||
242 | |||
243 | /* | ||
244 | * ======== dev_get_deh_mgr ======== | ||
245 | * Purpose: | ||
246 | * Retrieve the DEH manager created for this device. | ||
247 | * Parameters: | ||
248 | * hdev_obj: Handle to device object created with dev_create_device(). | ||
249 | * *deh_manager: Ptr to location to store handle. | ||
250 | * Returns: | ||
251 | * 0: Success. | ||
252 | * -EFAULT: Invalid hdev_obj. | ||
253 | * Requires: | ||
254 | * deh_manager != NULL. | ||
255 | * DEH Initialized. | ||
256 | * Ensures: | ||
257 | * 0: *deh_manager contains a handle to a DEH manager object. | ||
258 | * else: *deh_manager is NULL. | ||
259 | */ | ||
260 | extern int dev_get_deh_mgr(struct dev_object *hdev_obj, | ||
261 | struct deh_mgr **deh_manager); | ||
262 | |||
263 | /* | ||
264 | * ======== dev_get_dev_node ======== | ||
265 | * Purpose: | ||
266 | * Retrieve the platform specific device ID for this device. | ||
267 | * Parameters: | ||
268 | * hdev_obj: Handle to device object created with | ||
269 | * dev_create_device(). | ||
270 | * dev_nde: Ptr to location to get the device node handle. | ||
271 | * Returns: | ||
272 | * 0: Returns a DEVNODE in *dev_node_obj. | ||
273 | * -EFAULT: Invalid hdev_obj. | ||
274 | * Requires: | ||
275 | * dev_nde != NULL. | ||
276 | * DEV Initialized. | ||
277 | * Ensures: | ||
278 | * 0: *dev_nde contains a platform specific device ID; | ||
279 | * else: *dev_nde is NULL. | ||
280 | */ | ||
281 | extern int dev_get_dev_node(struct dev_object *hdev_obj, | ||
282 | struct cfg_devnode **dev_nde); | ||
283 | |||
284 | /* | ||
285 | * ======== dev_get_dev_type ======== | ||
286 | * Purpose: | ||
287 | * Retrieve the type of this device. | ||
288 | * Parameters: | ||
289 | * hdev_obj: Handle to device object created with | ||
290 | * dev_create_device(). | ||
291 | * dev_type: Ptr to location to store the device type. | ||
292 | * Returns: | ||
293 | * 0: Success | ||
294 | * -EFAULT: Invalid hdev_obj. | ||
295 | * Requires: | ||
296 | * dev_type != NULL. | ||
297 | * DEV Initialized. | ||
298 | * Ensures: | ||
299 | * 0: *dev_type contains the device type; | ||
300 | * else: *dev_type is undefined. | ||
301 | */ | ||
302 | extern int dev_get_dev_type(struct dev_object *device_obj, | ||
303 | u8 *dev_type); | ||
304 | |||
305 | /* | ||
306 | * ======== dev_get_first ======== | ||
307 | * Purpose: | ||
308 | * Retrieve the first Device Object handle from an internal linked list of | ||
309 | * DEV_OBJECTs maintained by DEV. | ||
310 | * Parameters: | ||
311 | * Returns: | ||
312 | * NULL if there are no device objects stored; else | ||
313 | * a valid DEV_HOBJECT. | ||
314 | * Requires: | ||
315 | * No calls to dev_create_device or dev_destroy_device (which may modify the | ||
316 | * internal device object list) may occur between calls to dev_get_first | ||
317 | * and dev_get_next. | ||
318 | * Ensures: | ||
319 | * The DEV_HOBJECT returned is valid. | ||
320 | * A subsequent call to dev_get_next will return the next device object in | ||
321 | * the list. | ||
322 | */ | ||
323 | extern struct dev_object *dev_get_first(void); | ||
324 | |||
325 | /* | ||
326 | * ======== dev_get_intf_fxns ======== | ||
327 | * Purpose: | ||
328 | * Retrieve the Bridge driver interface function structure for the | ||
329 | * loaded Bridge driver. | ||
330 | * Parameters: | ||
331 | * hdev_obj: Handle to device object created with | ||
332 | * dev_create_device(). | ||
333 | * *if_fxns: Ptr to location to store fxn interface. | ||
334 | * Returns: | ||
335 | * 0: Success. | ||
336 | * -EFAULT: Invalid hdev_obj. | ||
337 | * Requires: | ||
338 | * if_fxns != NULL. | ||
339 | * DEV Initialized. | ||
340 | * Ensures: | ||
341 | * 0: *if_fxns contains a pointer to the Bridge | ||
342 | * driver interface; | ||
343 | * else: *if_fxns is NULL. | ||
344 | */ | ||
345 | extern int dev_get_intf_fxns(struct dev_object *hdev_obj, | ||
346 | struct bridge_drv_interface **if_fxns); | ||
347 | |||
348 | /* | ||
349 | * ======== dev_get_io_mgr ======== | ||
350 | * Purpose: | ||
351 | * Retrieve the handle to the IO manager created for this device. | ||
352 | * Parameters: | ||
353 | * hdev_obj: Handle to device object created with | ||
354 | * dev_create_device(). | ||
355 | * *mgr: Ptr to location to store handle. | ||
356 | * Returns: | ||
357 | * 0: Success. | ||
358 | * -EFAULT: Invalid hdev_obj. | ||
359 | * Requires: | ||
360 | * mgr != NULL. | ||
361 | * DEV Initialized. | ||
362 | * Ensures: | ||
363 | * 0: *mgr contains a handle to an IO manager object. | ||
364 | * else: *mgr is NULL. | ||
365 | */ | ||
366 | extern int dev_get_io_mgr(struct dev_object *hdev_obj, | ||
367 | struct io_mgr **mgr); | ||
368 | |||
369 | /* | ||
370 | * ======== dev_get_next ======== | ||
371 | * Purpose: | ||
372 | * Retrieve the next Device Object handle from an internal linked list of | ||
373 | * DEV_OBJECTs maintained by DEV, after having previously called | ||
374 | * dev_get_first() and zero or more dev_get_next() calls. | ||
375 | * Parameters: | ||
376 | * hdev_obj: Handle to the device object returned from a previous | ||
377 | * call to dev_get_first() or dev_get_next(). | ||
378 | * Returns: | ||
379 | * NULL if there are no further device objects on the list or hdev_obj | ||
380 | * was invalid; | ||
381 | * else the next valid DEV_HOBJECT in the list. | ||
382 | * Requires: | ||
383 | * No calls to dev_create_device or dev_destroy_device (which may modify the | ||
384 | * internal device object list) may occur between calls to dev_get_first | ||
385 | * and dev_get_next. | ||
386 | * Ensures: | ||
387 | * The DEV_HOBJECT returned is valid. | ||
388 | * A subsequent call to dev_get_next will return the next device object in | ||
389 | * the list. | ||
390 | */ | ||
391 | extern struct dev_object *dev_get_next(struct dev_object | ||
392 | *hdev_obj); | ||
393 | |||
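/*
 * Editor's sketch of walking the DEV object list with the two accessors
 * above.  Per the stated requirements, no dev_create_device() or
 * dev_destroy_device() calls may happen while the walk is in progress.
 */
static void example_walk_devices(void)
{
	struct dev_object *hdev_obj;

	for (hdev_obj = dev_get_first(); hdev_obj;
	     hdev_obj = dev_get_next(hdev_obj)) {
		/* inspect or act on each device object here */
	}
}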
394 | /* | ||
395 | * ========= dev_get_msg_mgr ======== | ||
396 | * Purpose: | ||
397 | * Retrieve the msg_ctrl Manager Handle from the DevObject. | ||
398 | * Parameters: | ||
399 | * hdev_obj: Handle to the Dev Object | ||
400 | * msg_man: Location where msg_ctrl Manager handle will be returned. | ||
401 | * Returns: | ||
402 | * Requires: | ||
403 | * DEV Initialized. | ||
404 | * Valid hdev_obj. | ||
405 | * node_man != NULL. | ||
406 | * Ensures: | ||
407 | */ | ||
408 | extern void dev_get_msg_mgr(struct dev_object *hdev_obj, | ||
409 | struct msg_mgr **msg_man); | ||
410 | |||
411 | /* | ||
412 | * ========= dev_get_node_manager ======== | ||
413 | * Purpose: | ||
414 | * Retrieve the Node Manager Handle from the DevObject. It is an | ||
415 | * accessor function | ||
416 | * Parameters: | ||
417 | * hdev_obj: Handle to the Dev Object | ||
418 | * node_man: Location where Handle to the Node Manager will be | ||
419 | * returned. | ||
420 | * Returns: | ||
421 | * 0: Success | ||
422 | * -EFAULT: Invalid Dev Object handle. | ||
423 | * Requires: | ||
424 | * DEV Initialized. | ||
425 | * node_man is not null | ||
426 | * Ensures: | ||
427 | * 0: *node_man contains a handle to a Node manager object. | ||
428 | * else: *node_man is NULL. | ||
429 | */ | ||
430 | extern int dev_get_node_manager(struct dev_object | ||
431 | *hdev_obj, | ||
432 | struct node_mgr **node_man); | ||
433 | |||
434 | /* | ||
435 | * ======== dev_get_symbol ======== | ||
436 | * Purpose: | ||
437 | * Get the value of a symbol in the currently loaded program. | ||
438 | * Parameters: | ||
439 | * hdev_obj: Handle to device object created with | ||
440 | * dev_create_device(). | ||
441 | * str_sym: Name of symbol to look up. | ||
442 | * pul_value: Ptr to symbol value. | ||
443 | * Returns: | ||
444 | * 0: Success. | ||
445 | * -EFAULT: Invalid hdev_obj. | ||
446 | * -ESPIPE: Symbols could not be found or have not been loaded onto | ||
447 | * the board. | ||
448 | * Requires: | ||
449 | * str_sym != NULL. | ||
450 | * pul_value != NULL. | ||
451 | * DEV Initialized. | ||
452 | * Ensures: | ||
453 | * 0: *pul_value contains the symbol value; | ||
454 | */ | ||
455 | extern int dev_get_symbol(struct dev_object *hdev_obj, | ||
456 | const char *str_sym, u32 * pul_value); | ||
457 | |||
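/*
 * Editor's sketch: dev_get_symbol() is the way to pull a DSP-side address
 * out of the currently loaded program.  The symbol name below is purely
 * illustrative.
 */
static int example_lookup_symbol(struct dev_object *hdev_obj, u32 *dsp_addr)
{
	return dev_get_symbol(hdev_obj, "_EXAMPLE_SYM", dsp_addr);
}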
458 | /* | ||
459 | * ======== dev_get_bridge_context ======== | ||
460 | * Purpose: | ||
461 | * Retrieve the Bridge Context handle, as returned by the | ||
462 | * bridge_dev_create fxn. | ||
463 | * Parameters: | ||
464 | * hdev_obj: Handle to device object created with dev_create_device() | ||
465 | * *phbridge_context: Ptr to location to store context handle. | ||
466 | * Returns: | ||
467 | * 0: Success. | ||
468 | * -EFAULT: Invalid hdev_obj. | ||
469 | * Requires: | ||
470 | * phbridge_context != NULL. | ||
471 | * DEV Initialized. | ||
472 | * Ensures: | ||
473 | * 0: *phbridge_context contains context handle; | ||
474 | * else: *phbridge_context is NULL; | ||
475 | */ | ||
476 | extern int dev_get_bridge_context(struct dev_object *hdev_obj, | ||
477 | struct bridge_dev_context | ||
478 | **phbridge_context); | ||
479 | |||
480 | /* | ||
481 | * ======== dev_insert_proc_object ======== | ||
482 | * Purpose: | ||
483 | * Inserts the Processor Object into the List of PROC Objects | ||
484 | * kept in the DEV Object | ||
485 | * Parameters: | ||
486 | * proc_obj: Handle to the Proc Object | ||
487 | * hdev_obj Handle to the Dev Object | ||
488 | * bAttachedNew: Specifies if there are already processors attached | ||
489 | * Returns: | ||
490 | * 0: Successfully inserted into the list | ||
491 | * Requires: | ||
492 | * proc_obj is not NULL | ||
493 | * hdev_obj is a valid handle to the DEV. | ||
494 | * DEV Initialized. | ||
495 | * List(of Proc object in Dev) Exists. | ||
496 | * Ensures: | ||
497 | * 0 & the PROC Object is inserted and the list is not empty | ||
498 | * Details: | ||
499 | * If the List of Proc Objects is empty, bAttachedNew is TRUE, indicating | ||
500 | * this is the first Processor attaching. | ||
501 | * If it is False, there are already processors attached. | ||
502 | */ | ||
503 | extern int dev_insert_proc_object(struct dev_object | ||
504 | *hdev_obj, | ||
505 | u32 proc_obj, | ||
506 | bool *already_attached); | ||
507 | |||
508 | /* | ||
509 | * ======== dev_remove_proc_object ======== | ||
510 | * Purpose: | ||
511 | * Search for and remove a Proc object from the given list maintained | ||
512 | * by the DEV | ||
513 | * Parameters: | ||
514 | * p_proc_object: Ptr to ProcObject to remove. | ||
515 | * dev_obj: Ptr to Dev Object where the list is. | ||
516 | * already_attached: Ptr to return the bool | ||
517 | * Returns: | ||
518 | * 0: If successful. | ||
519 | * -EPERM Failure to Remove the PROC Object from the list | ||
520 | * Requires: | ||
521 | * DevObject is Valid | ||
522 | * proc_obj != 0 | ||
523 | * dev_obj->proc_list != NULL | ||
524 | * !LST_IS_EMPTY(dev_obj->proc_list) | ||
525 | * already_attached !=NULL | ||
526 | * Ensures: | ||
527 | * Details: | ||
528 | * List will be deleted when the DEV is destroyed. | ||
529 | * | ||
530 | */ | ||
531 | extern int dev_remove_proc_object(struct dev_object | ||
532 | *hdev_obj, u32 proc_obj); | ||
533 | |||
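/*
 * Editor's sketch of the attach/detach pairing described above: a processor
 * object (passed as an opaque u32 handle, as in the prototypes) is recorded
 * in the DEV list on attach and removed again on detach.  The flag reported
 * by dev_insert_proc_object() distinguishes the first attach from later
 * ones, per the header comment.
 */
static int example_attach(struct dev_object *hdev_obj, u32 proc_obj)
{
	bool attach_flag = false;

	return dev_insert_proc_object(hdev_obj, proc_obj, &attach_flag);
}

static int example_detach(struct dev_object *hdev_obj, u32 proc_obj)
{
	return dev_remove_proc_object(hdev_obj, proc_obj);
}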
534 | /* | ||
535 | * ======== dev_notify_clients ======== | ||
536 | * Purpose: | ||
537 | * Notify all clients of this device of a change in device status. | ||
538 | * Clients may include multiple users of BRD, as well as CHNL. | ||
539 | * This function is asynchronous, and may be called by a timer event | ||
540 | * set up by a watchdog timer. | ||
541 | * Parameters: | ||
542 | * hdev_obj: Handle to device object created with dev_create_device(). | ||
543 | * ret: A status word, most likely a BRD_STATUS. | ||
544 | * Returns: | ||
545 | * 0: All registered clients were asynchronously notified. | ||
546 | * -EINVAL: Invalid hdev_obj. | ||
547 | * Requires: | ||
548 | * DEV Initialized. | ||
549 | * Ensures: | ||
550 | * 0: Notifications are queued by the operating system to be | ||
551 | * delivered to clients. This function does not ensure that | ||
552 | * the notifications will ever be delivered. | ||
553 | */ | ||
554 | extern int dev_notify_clients(struct dev_object *hdev_obj, u32 ret); | ||
555 | |||
556 | /* | ||
557 | * ======== dev_remove_device ======== | ||
558 | * Purpose: | ||
559 | * Destroys the Device Object created by dev_start_device. | ||
560 | * Parameters: | ||
561 | * dev_node_obj: Device node as it is known to the OS. | ||
562 | * Returns: | ||
563 | * 0: If success; | ||
564 | * <error code> Otherwise. | ||
565 | * Requires: | ||
566 | * Ensures: | ||
567 | */ | ||
568 | extern int dev_remove_device(struct cfg_devnode *dev_node_obj); | ||
569 | |||
570 | /* | ||
571 | * ======== dev_set_chnl_mgr ======== | ||
572 | * Purpose: | ||
573 | * Set the channel manager for this device. | ||
574 | * Parameters: | ||
575 | * hdev_obj: Handle to device object created with | ||
576 | * dev_create_device(). | ||
577 | * hmgr: Handle to a channel manager, or NULL. | ||
578 | * Returns: | ||
579 | * 0: Success. | ||
580 | * -EFAULT: Invalid hdev_obj. | ||
581 | * Requires: | ||
582 | * DEV Initialized. | ||
583 | * Ensures: | ||
584 | */ | ||
585 | extern int dev_set_chnl_mgr(struct dev_object *hdev_obj, | ||
586 | struct chnl_mgr *hmgr); | ||
587 | |||
588 | /* | ||
589 | * ======== dev_set_msg_mgr ======== | ||
590 | * Purpose: | ||
591 | * Set the Message manager for this device. | ||
592 | * Parameters: | ||
593 | * hdev_obj: Handle to device object created with dev_create_device(). | ||
594 | * hmgr: Handle to a message manager, or NULL. | ||
595 | * Returns: | ||
596 | * Requires: | ||
597 | * DEV Initialized. | ||
598 | * Ensures: | ||
599 | */ | ||
600 | extern void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr); | ||
601 | |||
602 | /* | ||
603 | * ======== dev_start_device ======== | ||
604 | * Purpose: | ||
605 | * Initializes the new device with bridge environment. This involves | ||
606 | * querying CM for allocated resources, querying the registry for | ||
607 | * necessary dsp resources (requested in the INF file), and using this | ||
608 | * information to create a bridge device object. | ||
609 | * Parameters: | ||
610 | * dev_node_obj: Device node as it is known to the OS. | ||
611 | * Returns: | ||
612 | * 0: If success; | ||
613 | * <error code> Otherwise. | ||
614 | * Requires: | ||
615 | * DEV initialized. | ||
616 | * Ensures: | ||
617 | */ | ||
618 | extern int dev_start_device(struct cfg_devnode *dev_node_obj); | ||
619 | |||
620 | #endif /* DEV_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/devdefs.h b/drivers/staging/tidspbridge/include/dspbridge/devdefs.h deleted file mode 100644 index a2f9241ff139..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/devdefs.h +++ /dev/null | |||
@@ -1,26 +0,0 @@ | |||
1 | /* | ||
2 | * devdefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Definition of common include typedef between dspdefs.h and dev.h. Required | ||
7 | * to break circular dependency between Bridge driver and DEV include files. | ||
8 | * | ||
9 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef DEVDEFS_ | ||
21 | #define DEVDEFS_ | ||
22 | |||
23 | /* Bridge Device Object */ | ||
24 | struct dev_object; | ||
25 | |||
26 | #endif /* DEVDEFS_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/disp.h b/drivers/staging/tidspbridge/include/dspbridge/disp.h deleted file mode 100644 index 39d3cea9ca8b..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/disp.h +++ /dev/null | |||
@@ -1,186 +0,0 @@ | |||
1 | /* | ||
2 | * disp.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge Node Dispatcher. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef DISP_ | ||
20 | #define DISP_ | ||
21 | |||
22 | #include <dspbridge/dbdefs.h> | ||
23 | #include <dspbridge/nodedefs.h> | ||
24 | #include <dspbridge/nodepriv.h> | ||
25 | |||
26 | struct disp_object; | ||
27 | |||
28 | /* Node Dispatcher attributes */ | ||
29 | struct disp_attr { | ||
30 | u32 chnl_offset; /* Offset of channel ids reserved for RMS */ | ||
31 | /* Size of buffer for sending data to RMS */ | ||
32 | u32 chnl_buf_size; | ||
33 | int proc_family; /* eg, 5000 */ | ||
34 | int proc_type; /* eg, 5510 */ | ||
35 | void *reserved1; /* Reserved for future use. */ | ||
36 | u32 reserved2; /* Reserved for future use. */ | ||
37 | }; | ||
38 | |||
39 | |||
40 | /* | ||
41 | * ======== disp_create ======== | ||
42 | * Create a NODE Dispatcher object. This object handles the creation, | ||
43 | * deletion, and execution of nodes on the DSP target, through communication | ||
44 | * with the Resource Manager Server running on the target. Each NODE | ||
45 | * Manager object should have exactly one NODE Dispatcher. | ||
46 | * | ||
47 | * Parameters: | ||
48 | * dispatch_obj: Location to store node dispatcher object on output. | ||
49 | * hdev_obj: Device for this processor. | ||
50 | * disp_attrs: Node dispatcher attributes. | ||
51 | * Returns: | ||
52 | * 0: Success; | ||
53 | * -ENOMEM: Insufficient memory for requested resources. | ||
54 | * -EPERM: Unable to create dispatcher. | ||
55 | * Requires: | ||
56 | * disp_attrs != NULL. | ||
57 | * hdev_obj != NULL. | ||
58 | * dispatch_obj != NULL. | ||
59 | * Ensures: | ||
60 | * 0: IS_VALID(*dispatch_obj). | ||
61 | * error: *dispatch_obj == NULL. | ||
62 | */ | ||
63 | extern int disp_create(struct disp_object **dispatch_obj, | ||
64 | struct dev_object *hdev_obj, | ||
65 | const struct disp_attr *disp_attrs); | ||
66 | |||
67 | /* | ||
68 | * ======== disp_delete ======== | ||
69 | * Delete the NODE Dispatcher. | ||
70 | * | ||
71 | * Parameters: | ||
72 | * disp_obj: Node Dispatcher object. | ||
73 | * Returns: | ||
74 | * Requires: | ||
75 | * Valid disp_obj. | ||
76 | * Ensures: | ||
77 | * disp_obj is invalid. | ||
78 | */ | ||
79 | extern void disp_delete(struct disp_object *disp_obj); | ||
80 | |||
81 | /* | ||
82 | * ======== disp_node_change_priority ======== | ||
83 | * Change the priority of a node currently running on the target. | ||
84 | * | ||
85 | * Parameters: | ||
86 | * disp_obj: Node Dispatcher object. | ||
87 | * hnode: Node object representing a node currently | ||
88 | * allocated or running on the DSP. | ||
89 | * ulFxnAddress: Address of RMS function for changing priority. | ||
90 | * node_env: Address of node's environment structure. | ||
91 | * prio: New priority level to set node's priority to. | ||
92 | * Returns: | ||
93 | * 0: Success. | ||
94 | * -ETIME: A timeout occurred before the DSP responded. | ||
95 | * Requires: | ||
96 | * Valid disp_obj. | ||
97 | * hnode != NULL. | ||
98 | * Ensures: | ||
99 | */ | ||
100 | extern int disp_node_change_priority(struct disp_object | ||
101 | *disp_obj, | ||
102 | struct node_object *hnode, | ||
103 | u32 rms_fxn, | ||
104 | nodeenv node_env, s32 prio); | ||
105 | |||
106 | /* | ||
107 | * ======== disp_node_create ======== | ||
108 | * Create a node on the DSP by remotely calling the node's create function. | ||
109 | * | ||
110 | * Parameters: | ||
111 | * disp_obj: Node Dispatcher object. | ||
112 | * hnode: Node handle obtained from node_allocate(). | ||
113 | * ul_fxn_addr: Address of RMS create node function. | ||
114 | * ul_create_fxn: Address of node's create function. | ||
115 | * pargs: Arguments to pass to RMS node create function. | ||
116 | * node_env: Location to store node environment pointer on | ||
117 | * output. | ||
118 | * Returns: | ||
119 | * 0: Success. | ||
120 | * -ETIME: A timeout occurred before the DSP responded. | ||
121 | * -EPERM: A failure occurred, unable to create node. | ||
122 | * Requires: | ||
123 | * Valid disp_obj. | ||
124 | * pargs != NULL. | ||
125 | * hnode != NULL. | ||
126 | * node_env != NULL. | ||
127 | * node_get_type(hnode) != NODE_DEVICE. | ||
128 | * Ensures: | ||
129 | */ | ||
130 | extern int disp_node_create(struct disp_object *disp_obj, | ||
131 | struct node_object *hnode, | ||
132 | u32 rms_fxn, | ||
133 | u32 ul_create_fxn, | ||
134 | const struct node_createargs | ||
135 | *pargs, nodeenv *node_env); | ||
136 | |||
137 | /* | ||
138 | * ======== disp_node_delete ======== | ||
139 | * Delete a node on the DSP by remotely calling the node's delete function. | ||
140 | * | ||
141 | * Parameters: | ||
142 | * disp_obj: Node Dispatcher object. | ||
143 | * hnode: Node object representing a node currently | ||
144 | * loaded on the DSP. | ||
145 | * ul_fxn_addr: Address of RMS delete node function. | ||
146 | * ul_delete_fxn: Address of node's delete function. | ||
147 | * node_env: Address of node's environment structure. | ||
148 | * Returns: | ||
149 | * 0: Success. | ||
150 | * -ETIME: A timeout occurred before the DSP responded. | ||
151 | * Requires: | ||
152 | * Valid disp_obj. | ||
153 | * hnode != NULL. | ||
154 | * Ensures: | ||
155 | */ | ||
156 | extern int disp_node_delete(struct disp_object *disp_obj, | ||
157 | struct node_object *hnode, | ||
158 | u32 rms_fxn, | ||
159 | u32 ul_delete_fxn, nodeenv node_env); | ||
160 | |||
161 | /* | ||
162 | * ======== disp_node_run ======== | ||
163 | * Start execution of a node's execute phase, or resume execution of a node | ||
164 | * that has been suspended (via DISP_NodePause()) on the DSP. | ||
165 | * | ||
166 | * Parameters: | ||
167 | * disp_obj: Node Dispatcher object. | ||
168 | * hnode: Node object representing a node to be executed | ||
169 | * on the DSP. | ||
170 | * ul_fxn_addr: Address of RMS node execute function. | ||
171 | * ul_execute_fxn: Address of node's execute function. | ||
172 | * node_env: Address of node's environment structure. | ||
173 | * Returns: | ||
174 | * 0: Success. | ||
175 | * -ETIME: A timeout occurred before the DSP responded. | ||
176 | * Requires: | ||
177 | * Valid disp_obj. | ||
178 | * hnode != NULL. | ||
179 | * Ensures: | ||
180 | */ | ||
181 | extern int disp_node_run(struct disp_object *disp_obj, | ||
182 | struct node_object *hnode, | ||
183 | u32 rms_fxn, | ||
184 | u32 ul_execute_fxn, nodeenv node_env); | ||
185 | |||
186 | #endif /* DISP_ */ | ||
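/*
 * Editor's sketch of the dispatcher life cycle this interface implies: one
 * dispatcher per node manager, used to create (and later run/delete) nodes
 * on the DSP.  The attribute values follow the examples given in struct
 * disp_attr; the RMS and node function addresses are assumed to come from
 * the node manager, and error handling is trimmed.
 */
static void example_node_dispatch(struct dev_object *hdev_obj,
				  struct node_object *hnode,
				  u32 rms_create_fxn, u32 node_create_fxn,
				  const struct node_createargs *args)
{
	struct disp_attr attrs = {
		.chnl_offset	= 0,	/* assumed: first channel id for RMS */
		.chnl_buf_size	= 1024,	/* assumed RMS buffer size */
		.proc_family	= 5000,	/* values from the struct's examples */
		.proc_type	= 5510,
	};
	struct disp_object *disp;
	nodeenv node_env;

	if (disp_create(&disp, hdev_obj, &attrs))
		return;
	if (!disp_node_create(disp, hnode, rms_create_fxn, node_create_fxn,
			      args, &node_env)) {
		/* disp_node_run()/disp_node_delete() would follow here,
		 * using the RMS execute/delete function addresses */
	}
	disp_delete(disp);
}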
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h deleted file mode 100644 index c3487be8fcf5..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dmm.h +++ /dev/null | |||
@@ -1,71 +0,0 @@ | |||
1 | /* | ||
2 | * dmm.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * The Dynamic Memory Mapping (DMM) module manages the DSP Virtual address | ||
7 | * space that can be directly mapped to any MPU buffer or memory region. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef DMM_ | ||
21 | #define DMM_ | ||
22 | |||
23 | #include <dspbridge/dbdefs.h> | ||
24 | |||
25 | struct dmm_object; | ||
26 | |||
27 | /* DMM attributes used in dmm_create() */ | ||
28 | struct dmm_mgrattrs { | ||
29 | u32 reserved; | ||
30 | }; | ||
31 | |||
32 | #define DMMPOOLSIZE 0x4000000 | ||
33 | |||
34 | /* | ||
35 | * ======== dmm_get_handle ======== | ||
36 | * Purpose: | ||
37 | * Return the dynamic memory manager object for this device. | ||
38 | * This is typically called from the client process. | ||
39 | */ | ||
40 | |||
41 | extern int dmm_get_handle(void *hprocessor, | ||
42 | struct dmm_object **dmm_manager); | ||
43 | |||
44 | extern int dmm_reserve_memory(struct dmm_object *dmm_mgr, | ||
45 | u32 size, u32 *prsv_addr); | ||
46 | |||
47 | extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, | ||
48 | u32 rsv_addr); | ||
49 | |||
50 | extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, | ||
51 | u32 size); | ||
52 | |||
53 | extern int dmm_un_map_memory(struct dmm_object *dmm_mgr, | ||
54 | u32 addr, u32 *psize); | ||
55 | |||
56 | extern int dmm_destroy(struct dmm_object *dmm_mgr); | ||
57 | |||
58 | extern int dmm_delete_tables(struct dmm_object *dmm_mgr); | ||
59 | |||
60 | extern int dmm_create(struct dmm_object **dmm_manager, | ||
61 | struct dev_object *hdev_obj, | ||
62 | const struct dmm_mgrattrs *mgr_attrts); | ||
63 | |||
64 | extern int dmm_create_tables(struct dmm_object *dmm_mgr, | ||
65 | u32 addr, u32 size); | ||
66 | |||
67 | #ifdef DSP_DMM_DEBUG | ||
68 | u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr); | ||
69 | #endif | ||
70 | |||
71 | #endif /* DMM_ */ | ||
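/*
 * Editor's sketch of the reserve/map/unmap/unreserve sequence these
 * declarations suggest.  The hprocessor handle and size are placeholders,
 * and the addresses involved are DSP virtual addresses managed by DMM.
 */
static int example_dmm_sequence(void *hprocessor, u32 size)
{
	struct dmm_object *dmm_mgr;
	u32 rsv_addr, unmap_size;
	int status;

	status = dmm_get_handle(hprocessor, &dmm_mgr);
	if (status)
		return status;

	status = dmm_reserve_memory(dmm_mgr, size, &rsv_addr);
	if (status)
		return status;

	status = dmm_map_memory(dmm_mgr, rsv_addr, size);
	if (!status) {
		/* the DSP can now use the region at rsv_addr */
		dmm_un_map_memory(dmm_mgr, rsv_addr, &unmap_size);
	}
	dmm_un_reserve_memory(dmm_mgr, rsv_addr);
	return status;
}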
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h deleted file mode 100644 index b0c7708321b2..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/drv.h +++ /dev/null | |||
@@ -1,468 +0,0 @@ | |||
1 | /* | ||
2 | * drv.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DRV Resource allocation module. Driver Object gets Created | ||
7 | * at the time of Loading. It holds the List of Device Objects | ||
8 | * in the system. | ||
9 | * | ||
10 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
11 | * | ||
12 | * This package is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | * | ||
16 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
17 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
18 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
19 | */ | ||
20 | |||
21 | #ifndef DRV_ | ||
22 | #define DRV_ | ||
23 | |||
24 | #include <dspbridge/devdefs.h> | ||
25 | |||
26 | #include <linux/idr.h> | ||
27 | |||
28 | /* Bridge Driver Object */ | ||
29 | struct drv_object; | ||
30 | |||
31 | /* Provide the DSP Internal memory windows that can be accessed from L3 address | ||
32 | * space */ | ||
33 | |||
34 | #define OMAP_GEM_BASE 0x107F8000 | ||
35 | #define OMAP_DSP_SIZE 0x00720000 | ||
36 | |||
37 | /* MEM1 is L2 RAM + L2 Cache space */ | ||
38 | #define OMAP_DSP_MEM1_BASE 0x5C7F8000 | ||
39 | #define OMAP_DSP_MEM1_SIZE 0x18000 | ||
40 | |||
41 | /* MEM2 is L1P RAM/CACHE space */ | ||
42 | #define OMAP_DSP_MEM2_BASE 0x5CE00000 | ||
43 | #define OMAP_DSP_MEM2_SIZE 0x8000 | ||
44 | |||
45 | /* MEM3 is L1D RAM/CACHE space */ | ||
46 | #define OMAP_DSP_MEM3_BASE 0x5CF04000 | ||
47 | #define OMAP_DSP_MEM3_SIZE 0x14000 | ||
48 | |||
49 | #define OMAP_PER_CM_BASE 0x48005000 | ||
50 | #define OMAP_PER_CM_SIZE 0x1000 | ||
51 | |||
52 | #define OMAP_PER_PRM_BASE 0x48307000 | ||
53 | #define OMAP_PER_PRM_SIZE 0x1000 | ||
54 | |||
55 | #define OMAP_CORE_PRM_BASE 0x48306A00 | ||
56 | #define OMAP_CORE_PRM_SIZE 0x1000 | ||
57 | |||
58 | #define OMAP_DMMU_BASE 0x5D000000 | ||
59 | #define OMAP_DMMU_SIZE 0x1000 | ||
60 | |||
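/*
 * Editor's note: the windows above are physical L3 addresses, so kernel code
 * would ioremap() a window before touching it (a hypothetical sketch; needs
 * <linux/io.h>).  The caller must iounmap() the returned pointer when done.
 */
static void __iomem *example_map_dmmu(void)
{
	return ioremap(OMAP_DMMU_BASE, OMAP_DMMU_SIZE);
}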
61 | /* GPP PROCESS CLEANUP Data structures */ | ||
62 | |||
63 | /* New structure (member of process context) abstracts NODE resource info */ | ||
64 | struct node_res_object { | ||
65 | void *node; | ||
66 | s32 node_allocated; /* Node status */ | ||
67 | s32 heap_allocated; /* Heap status */ | ||
68 | s32 streams_allocated; /* Streams status */ | ||
69 | int id; | ||
70 | }; | ||
71 | |||
72 | /* used to cache dma mapping information */ | ||
73 | struct bridge_dma_map_info { | ||
74 | /* direction of DMA in action, or DMA_NONE */ | ||
75 | enum dma_data_direction dir; | ||
76 | /* number of elements requested by us */ | ||
77 | int num_pages; | ||
78 | /* number of elements returned from dma_map_sg */ | ||
79 | int sg_num; | ||
80 | /* list of buffers used in this DMA action */ | ||
81 | struct scatterlist *sg; | ||
82 | }; | ||
83 | |||
84 | /* Used for DMM mapped memory accounting */ | ||
85 | struct dmm_map_object { | ||
86 | struct list_head link; | ||
87 | u32 dsp_addr; | ||
88 | u32 mpu_addr; | ||
89 | u32 size; | ||
90 | u32 num_usr_pgs; | ||
91 | struct page **pages; | ||
92 | struct bridge_dma_map_info dma_info; | ||
93 | }; | ||
94 | |||
95 | /* Used for DMM reserved memory accounting */ | ||
96 | struct dmm_rsv_object { | ||
97 | struct list_head link; | ||
98 | u32 dsp_reserved_addr; | ||
99 | }; | ||
100 | |||
101 | /* New structure (member of process context) abstracts stream resource info */ | ||
102 | struct strm_res_object { | ||
103 | s32 stream_allocated; /* Stream status */ | ||
104 | void *stream; | ||
105 | u32 num_bufs; | ||
106 | u32 dir; | ||
107 | int id; | ||
108 | }; | ||
109 | |||
110 | /* Overall Bridge process resource usage state */ | ||
111 | enum gpp_proc_res_state { | ||
112 | PROC_RES_ALLOCATED, | ||
113 | PROC_RES_FREED | ||
114 | }; | ||
115 | |||
116 | /* Bridge Data */ | ||
117 | struct drv_data { | ||
118 | char *base_img; | ||
119 | s32 shm_size; | ||
120 | int tc_wordswapon; | ||
121 | void *drv_object; | ||
122 | void *dev_object; | ||
123 | void *mgr_object; | ||
124 | }; | ||
125 | |||
126 | /* Process Context */ | ||
127 | struct process_context { | ||
128 | /* Process State */ | ||
129 | enum gpp_proc_res_state res_state; | ||
130 | |||
131 | /* Handle to Processor */ | ||
132 | void *processor; | ||
133 | |||
134 | /* DSP Node resources */ | ||
135 | struct idr *node_id; | ||
136 | |||
137 | /* DMM mapped memory resources */ | ||
138 | struct list_head dmm_map_list; | ||
139 | spinlock_t dmm_map_lock; | ||
140 | |||
141 | /* DMM reserved memory resources */ | ||
142 | struct list_head dmm_rsv_list; | ||
143 | spinlock_t dmm_rsv_lock; | ||
144 | |||
145 | /* Stream resources */ | ||
146 | struct idr *stream_id; | ||
147 | }; | ||
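[Editor's note] As a minimal sketch (not taken from the driver source; the helper name is hypothetical), a new DMM mapping would be linked into a process context under its spinlock like this:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    /* Hypothetical helper: remember a new DMM mapping for this process. */
    static void ctx_add_dmm_mapping(struct process_context *ctx,
                                    struct dmm_map_object *map_obj)
    {
            spin_lock(&ctx->dmm_map_lock);
            list_add(&map_obj->link, &ctx->dmm_map_list);
            spin_unlock(&ctx->dmm_map_lock);
    }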
148 | |||
149 | /* | ||
150 | * ======== drv_create ======== | ||
151 | * Purpose: | ||
152 | * Creates the Driver Object. This is done during the driver loading. | ||
153 | * There is only one Driver Object in the DSP/BIOS Bridge. | ||
154 | * Parameters: | ||
155 | * drv_obj: Location to store created DRV Object handle. | ||
156 | * Returns: | ||
157 | * 0: Success | ||
158 | * -ENOMEM: Failed in Memory allocation | ||
159 | * -EPERM: General Failure | ||
160 | * Requires: | ||
161 | * DRV Initialized (refs > 0 ) | ||
162 | * drv_obj != NULL. | ||
163 | * Ensures: | ||
164 | * 0: - *drv_obj is a valid DRV interface to the device. | ||
165 | * - List of DevObject Created and Initialized. | ||
166 | * - List of dev_node String created and initialized. | ||
167 | * - Registry is updated with the DRV Object. | ||
168 | * !0: DRV Object not created | ||
169 | * Details: | ||
170 | * There is one Driver Object, representing the driver | ||
171 | * itself. It contains the list of Device Objects and | ||
172 | * the list of Device Extensions in the system, and can | ||
173 | * also hold other necessary | ||
174 | * information in its storage area. | ||
175 | */ | ||
176 | extern int drv_create(struct drv_object **drv_obj); | ||
177 | |||
178 | /* | ||
179 | * ======== drv_destroy ======== | ||
180 | * Purpose: | ||
181 | * Destroys the Dev Object list and the DrvExt list, | ||
182 | * and destroys the DRV object. | ||
183 | * Called upon driver unloading or unsuccessful loading of the driver. | ||
184 | * Parameters: | ||
185 | * driver_obj: Handle to Driver object. | ||
186 | * Returns: | ||
187 | * 0: Success. | ||
188 | * -EPERM: Failed to destroy DRV Object | ||
189 | * Requires: | ||
190 | * DRV Initialized (refs > 0) | ||
191 | * driver_obj is not NULL and a valid DRV handle. | ||
192 | * List of DevObject is Empty. | ||
193 | * List of DrvExt is Empty | ||
194 | * Ensures: | ||
195 | * 0: - DRV Object destroyed and driver_obj is no longer a valid | ||
196 | * DRV handle. | ||
197 | * - Registry is updated with "0" as the DRV Object. | ||
198 | */ | ||
199 | extern int drv_destroy(struct drv_object *driver_obj); | ||
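[Editor's note] Taken together, the two comment blocks above imply the usual create/destroy pairing at driver load and unload. A hedged sketch, with the surrounding module code assumed and error values as documented above:

    static int example_bridge_load(void)
    {
            struct drv_object *drv_obj;
            int status;

            status = drv_create(&drv_obj);          /* at driver load */
            if (status)
                    return status;                  /* -ENOMEM or -EPERM */
            /* ... create and insert device objects ... */
            drv_destroy(drv_obj);                   /* at unload, once the lists are empty */
            return 0;
    }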
200 | |||
201 | /* | ||
202 | * ======== drv_get_first_dev_object ======== | ||
203 | * Purpose: | ||
204 | * Returns the Ptr to the FirstDev Object in the List | ||
205 | * Parameters: | ||
206 | * Requires: | ||
207 | * DRV Initialized | ||
208 | * Returns: | ||
209 | * dw_dev_object: Ptr to the First Dev Object as a u32 | ||
210 | * 0 if it fails to retrieve the First Dev Object | ||
211 | * Ensures: | ||
212 | */ | ||
213 | extern u32 drv_get_first_dev_object(void); | ||
214 | |||
215 | /* | ||
216 | * ======== drv_get_first_dev_extension ======== | ||
217 | * Purpose: | ||
218 | * Returns the Ptr to the First Device Extension in the List | ||
219 | * Parameters: | ||
220 | * Requires: | ||
221 | * DRV Initialized | ||
222 | * Returns: | ||
223 | * dw_dev_extension: Ptr to the First Device Extension as a u32 | ||
224 | * 0: Failed to Get the Device Extension | ||
225 | * Ensures: | ||
226 | */ | ||
227 | extern u32 drv_get_first_dev_extension(void); | ||
228 | |||
229 | /* | ||
230 | * ======== drv_get_dev_object ======== | ||
231 | * Purpose: | ||
232 | * Given an index, returns a handle to a DevObject from the list | ||
233 | * Parameters: | ||
234 | * hdrv_obj: Handle to the Manager | ||
235 | * device_obj: Location to store the Dev Handle | ||
236 | * Requires: | ||
237 | * DRV Initialized | ||
238 | * index >= 0 | ||
239 | * hdrv_obj is not NULL and Valid DRV Object | ||
240 | * device_obj is not NULL | ||
241 | * Device Object List not Empty | ||
242 | * Returns: | ||
243 | * 0: Success | ||
244 | * -EPERM: Failed to Get the Dev Object | ||
245 | * Ensures: | ||
246 | * 0: *device_obj != NULL | ||
247 | * -EPERM: *device_obj = NULL | ||
248 | */ | ||
249 | extern int drv_get_dev_object(u32 index, | ||
250 | struct drv_object *hdrv_obj, | ||
251 | struct dev_object **device_obj); | ||
252 | |||
253 | /* | ||
254 | * ======== drv_get_next_dev_object ======== | ||
255 | * Purpose: | ||
256 | * Returns the Ptr to the Next Device Object from the List | ||
257 | * Parameters: | ||
258 | * hdev_obj: Handle to the Device Object | ||
259 | * Requires: | ||
260 | * DRV Initialized | ||
261 | * hdev_obj != 0 | ||
262 | * Returns: | ||
263 | * dw_dev_object: Ptr to the Next Dev Object as a u32 | ||
264 | * 0: If it fails to get the next Dev Object. | ||
265 | * Ensures: | ||
266 | */ | ||
267 | extern u32 drv_get_next_dev_object(u32 hdev_obj); | ||
268 | |||
269 | /* | ||
270 | * ======== drv_get_next_dev_extension ======== | ||
271 | * Purpose: | ||
272 | * Returns the Ptr to the Next Device Extension from the List | ||
273 | * Parameters: | ||
274 | * dev_extension: Handle to the Device Extension | ||
275 | * Requires: | ||
276 | * DRV Initialized | ||
277 | * dev_extension != 0. | ||
278 | * Returns: | ||
279 | * dw_dev_extension: Ptr to the Next Dev Extension | ||
280 | * 0: If it fails to get the next Dev Extension | ||
281 | * Ensures: | ||
282 | */ | ||
283 | extern u32 drv_get_next_dev_extension(u32 dev_extension); | ||
284 | |||
285 | /* | ||
286 | * ======== drv_insert_dev_object ======== | ||
287 | * Purpose: | ||
288 | * Insert a DeviceObject into the Driver object's list. | ||
289 | * Parameters: | ||
290 | * driver_obj: Handle to DrvObject | ||
291 | * hdev_obj: Handle to DeviceObject to insert. | ||
292 | * Returns: | ||
293 | * 0: If successful. | ||
294 | * -EPERM: General Failure: | ||
295 | * Requires: | ||
296 | * driver_obj != NULL and a valid DRV Handle. | ||
297 | * hdev_obj != NULL. | ||
298 | * Ensures: | ||
299 | * 0: Device Object is inserted and the List is not empty. | ||
300 | */ | ||
301 | extern int drv_insert_dev_object(struct drv_object *driver_obj, | ||
302 | struct dev_object *hdev_obj); | ||
303 | |||
304 | /* | ||
305 | * ======== drv_remove_dev_object ======== | ||
306 | * Purpose: | ||
307 | * Search for and remove a Device object from the given list of | ||
308 | * Device objects. | ||
309 | * Parameters: | ||
310 | * driver_obj: Handle to DrvObject | ||
311 | * hdev_obj: Handle to DevObject to Remove | ||
312 | * Returns: | ||
313 | * 0: Success. | ||
314 | * -EPERM: Unable to find dev_obj. | ||
315 | * Requires: | ||
316 | * driver_obj != NULL and a valid DRV Handle. | ||
317 | * hdev_obj != NULL. | ||
318 | * List exists and is not empty. | ||
319 | * Ensures: | ||
320 | * List either does not exist (NULL), or is not empty if it does exist. | ||
321 | */ | ||
322 | extern int drv_remove_dev_object(struct drv_object *driver_obj, | ||
323 | struct dev_object *hdev_obj); | ||
324 | |||
325 | /* | ||
326 | * ======== drv_request_resources ======== | ||
327 | * Purpose: | ||
328 | * Requests and assigns the resources needed by the driver. | ||
329 | * Parameters: | ||
330 | * dw_context: Path to the driver Registry Key. | ||
331 | * dev_node_strg: Ptr to dev_node String stored in the Device Ext. | ||
332 | * Returns: | ||
333 | * TRUE if success; FALSE otherwise. | ||
334 | * Requires: | ||
335 | * Ensures: | ||
336 | * The Resources are assigned based on Bus type. | ||
337 | * The hardware is initialized. Resource information is | ||
338 | * gathered from the Registry (ISA, PCMCIA) or scanned (PCI). | ||
339 | * Resource structure is stored in the registry which will be | ||
340 | * later used by the CFG module. | ||
341 | */ | ||
342 | extern int drv_request_resources(u32 dw_context, | ||
343 | u32 *dev_node_strg); | ||
344 | |||
345 | /* | ||
346 | * ======== drv_release_resources ======== | ||
347 | * Purpose: | ||
348 | * Releases the resources assigned by drv_request_resources(). | ||
349 | * Parameters: | ||
350 | * dw_context: Path to the driver Registry Key. | ||
351 | * hdrv_obj: Handle to the Driver Object. | ||
352 | * Returns: | ||
353 | * TRUE if success; FALSE otherwise. | ||
354 | * Requires: | ||
355 | * Ensures: | ||
356 | * The Resources are released based on Bus type. | ||
357 | * Resource structure is deleted from the registry | ||
358 | */ | ||
359 | extern int drv_release_resources(u32 dw_context, | ||
360 | struct drv_object *hdrv_obj); | ||
361 | |||
362 | /** | ||
363 | * drv_request_bridge_res_dsp() - Reserves shared memory for bridge. | ||
364 | * @phost_resources: pointer to host resources. | ||
365 | */ | ||
366 | int drv_request_bridge_res_dsp(void **phost_resources); | ||
367 | |||
368 | #ifdef CONFIG_TIDSPBRIDGE_RECOVERY | ||
369 | void bridge_recover_schedule(void); | ||
370 | #endif | ||
371 | |||
372 | /* | ||
373 | * ======== mem_ext_phys_pool_init ======== | ||
374 | * Purpose: | ||
375 | * Uses the physical memory chunk passed in for internal consistent | ||
376 | * memory allocations. The physical address is derived from | ||
377 | * the page frame address. | ||
378 | * Parameters: | ||
379 | * pool_phys_base starting address of the physical memory pool. | ||
380 | * pool_size size of the physical memory pool. | ||
381 | * Returns: | ||
382 | * none. | ||
383 | * Requires: | ||
384 | * - MEM initialized. | ||
385 | * - valid physical address for the base and size > 0 | ||
386 | */ | ||
387 | extern void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size); | ||
388 | |||
389 | /* | ||
390 | * ======== mem_ext_phys_pool_release ======== | ||
391 | */ | ||
392 | extern void mem_ext_phys_pool_release(void); | ||
393 | |||
394 | /* ======== mem_alloc_phys_mem ======== | ||
395 | * Purpose: | ||
396 | * Allocate physically contiguous, uncached memory | ||
397 | * Parameters: | ||
398 | * byte_size: Number of bytes to allocate. | ||
399 | * align_mask: Alignment Mask. | ||
400 | * physical_address: Physical address of allocated memory. | ||
401 | * Returns: | ||
402 | * Pointer to a block of memory; | ||
403 | * NULL if memory couldn't be allocated, or if byte_size == 0. | ||
404 | * Requires: | ||
405 | * MEM initialized. | ||
406 | * Ensures: | ||
407 | * The returned pointer, if not NULL, points to a valid memory block of | ||
408 | * the size requested. Returned physical address refers to physical | ||
409 | * location of memory. | ||
410 | */ | ||
411 | extern void *mem_alloc_phys_mem(u32 byte_size, | ||
412 | u32 align_mask, u32 *physical_address); | ||
413 | |||
414 | /* | ||
415 | * ======== mem_free_phys_mem ======== | ||
416 | * Purpose: | ||
417 | * Free the given block of physically contiguous memory. | ||
418 | * Parameters: | ||
419 | * virtual_address: Pointer to virtual memory region allocated | ||
420 | * by mem_alloc_phys_mem(). | ||
421 | * physical_address: Pointer to physical memory region allocated | ||
422 | * by mem_alloc_phys_mem(). | ||
423 | * byte_size: Size of the memory region allocated by mem_alloc_phys_mem(). | ||
424 | * Returns: | ||
425 | * Requires: | ||
426 | * MEM initialized. | ||
427 | * virtual_address is a valid memory address returned by | ||
428 | * mem_alloc_phys_mem() | ||
429 | * Ensures: | ||
430 | * virtual_address is no longer a valid pointer to memory. | ||
431 | */ | ||
432 | extern void mem_free_phys_mem(void *virtual_address, | ||
433 | u32 physical_address, u32 byte_size); | ||
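[Editor's note] A brief usage sketch of the allocation pair above; the size and the alignment-mask convention are assumptions for illustration only:

    static void example_phys_pool_use(void)
    {
            u32 pa;
            void *va;

            /* 4 KiB, assuming align_mask uses the usual low-bits-set convention */
            va = mem_alloc_phys_mem(0x1000, 0xFFF, &pa);
            if (!va)
                    return;
            /* ... use the uncached, physically contiguous buffer at va/pa ... */
            mem_free_phys_mem(va, pa, 0x1000);
    }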
434 | |||
435 | /* | ||
436 | * ======== MEM_LINEAR_ADDRESS ======== | ||
437 | * Purpose: | ||
438 | * Get the linear address corresponding to the given physical address. | ||
439 | * Parameters: | ||
440 | * phys_addr: Physical address to be mapped. | ||
441 | * byte_size: Number of bytes in physical range to map. | ||
442 | * Returns: | ||
443 | * The corresponding linear address, or NULL if unsuccessful. | ||
444 | * Requires: | ||
445 | * MEM initialized. | ||
446 | * Ensures: | ||
447 | * Notes: | ||
448 | * If valid linear address is returned, be sure to call | ||
449 | * MEM_UNMAP_LINEAR_ADDRESS(). | ||
450 | */ | ||
451 | #define MEM_LINEAR_ADDRESS(phy_addr, byte_size) phy_addr | ||
452 | |||
453 | /* | ||
454 | * ======== MEM_UNMAP_LINEAR_ADDRESS ======== | ||
455 | * Purpose: | ||
456 | * Unmap the linear address mapped in MEM_LINEAR_ADDRESS. | ||
457 | * Parameters: | ||
458 | * base_addr: Ptr to mapped memory (as returned by MEM_LINEAR_ADDRESS()). | ||
459 | * Returns: | ||
460 | * Requires: | ||
461 | * - MEM initialized. | ||
462 | * - base_addr is a valid linear address mapped in MEM_LINEAR_ADDRESS. | ||
463 | * Ensures: | ||
464 | * - base_addr no longer points to a valid linear address. | ||
465 | */ | ||
466 | #define MEM_UNMAP_LINEAR_ADDRESS(base_addr) {} | ||
467 | |||
468 | #endif /* DRV_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h deleted file mode 100644 index 6ff808297c10..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dspapi-ioctl.h +++ /dev/null | |||
@@ -1,467 +0,0 @@ | |||
1 | /* | ||
2 | * dspapi-ioctl.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Contains structures and commands that are used for interaction | ||
7 | * between the DDSP API and Bridge driver. | ||
8 | * | ||
9 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef DSPAPIIOCTL_ | ||
21 | #define DSPAPIIOCTL_ | ||
22 | |||
23 | #include <dspbridge/cmm.h> | ||
24 | #include <dspbridge/strmdefs.h> | ||
25 | #include <dspbridge/dbdcd.h> | ||
26 | |||
27 | union trapped_args { | ||
28 | |||
29 | /* MGR Module */ | ||
30 | struct { | ||
31 | u32 node_id; | ||
32 | struct dsp_ndbprops __user *ndb_props; | ||
33 | u32 ndb_props_size; | ||
34 | u32 __user *num_nodes; | ||
35 | } args_mgr_enumnode_info; | ||
36 | |||
37 | struct { | ||
38 | u32 processor_id; | ||
39 | struct dsp_processorinfo __user *processor_info; | ||
40 | u32 processor_info_size; | ||
41 | u32 __user *num_procs; | ||
42 | } args_mgr_enumproc_info; | ||
43 | |||
44 | struct { | ||
45 | struct dsp_uuid *uuid_obj; | ||
46 | enum dsp_dcdobjtype obj_type; | ||
47 | char *sz_path_name; | ||
48 | } args_mgr_registerobject; | ||
49 | |||
50 | struct { | ||
51 | struct dsp_uuid *uuid_obj; | ||
52 | enum dsp_dcdobjtype obj_type; | ||
53 | } args_mgr_unregisterobject; | ||
54 | |||
55 | struct { | ||
56 | struct dsp_notification __user *__user *anotifications; | ||
57 | u32 count; | ||
58 | u32 __user *index; | ||
59 | u32 timeout; | ||
60 | } args_mgr_wait; | ||
61 | |||
62 | /* PROC Module */ | ||
63 | struct { | ||
64 | u32 processor_id; | ||
65 | struct dsp_processorattrin __user *attr_in; | ||
66 | void *__user *ph_processor; | ||
67 | } args_proc_attach; | ||
68 | |||
69 | struct { | ||
70 | void *processor; | ||
71 | u32 cmd; | ||
72 | struct dsp_cbdata __user *args; | ||
73 | } args_proc_ctrl; | ||
74 | |||
75 | struct { | ||
76 | void *processor; | ||
77 | } args_proc_detach; | ||
78 | |||
79 | struct { | ||
80 | void *processor; | ||
81 | void *__user *node_tab; | ||
82 | u32 node_tab_size; | ||
83 | u32 __user *num_nodes; | ||
84 | u32 __user *allocated; | ||
85 | } args_proc_enumnode_info; | ||
86 | |||
87 | struct { | ||
88 | void *processor; | ||
89 | u32 resource_type; | ||
90 | struct dsp_resourceinfo *resource_info; | ||
91 | u32 resource_info_size; | ||
92 | } args_proc_enumresources; | ||
93 | |||
94 | struct { | ||
95 | void *processor; | ||
96 | struct dsp_processorstate __user *proc_state_obj; | ||
97 | u32 state_info_size; | ||
98 | } args_proc_getstate; | ||
99 | |||
100 | struct { | ||
101 | void *processor; | ||
102 | u8 __user *buf; | ||
103 | u8 __user *size; | ||
104 | u32 max_size; | ||
105 | } args_proc_gettrace; | ||
106 | |||
107 | struct { | ||
108 | void *processor; | ||
109 | s32 argc_index; | ||
110 | char __user *__user *user_args; | ||
111 | char *__user *user_envp; | ||
112 | } args_proc_load; | ||
113 | |||
114 | struct { | ||
115 | void *processor; | ||
116 | u32 event_mask; | ||
117 | u32 notify_type; | ||
118 | struct dsp_notification __user *notification; | ||
119 | } args_proc_register_notify; | ||
120 | |||
121 | struct { | ||
122 | void *processor; | ||
123 | u32 size; | ||
124 | void *__user *rsv_addr; | ||
125 | } args_proc_rsvmem; | ||
126 | |||
127 | struct { | ||
128 | void *processor; | ||
129 | u32 size; | ||
130 | void *rsv_addr; | ||
131 | } args_proc_unrsvmem; | ||
132 | |||
133 | struct { | ||
134 | void *processor; | ||
135 | void *mpu_addr; | ||
136 | u32 size; | ||
137 | void *req_addr; | ||
138 | void *__user *map_addr; | ||
139 | u32 map_attr; | ||
140 | } args_proc_mapmem; | ||
141 | |||
142 | struct { | ||
143 | void *processor; | ||
144 | u32 size; | ||
145 | void *map_addr; | ||
146 | } args_proc_unmapmem; | ||
147 | |||
148 | struct { | ||
149 | void *processor; | ||
150 | void *mpu_addr; | ||
151 | u32 size; | ||
152 | u32 dir; | ||
153 | } args_proc_dma; | ||
154 | |||
155 | struct { | ||
156 | void *processor; | ||
157 | void *mpu_addr; | ||
158 | u32 size; | ||
159 | u32 flags; | ||
160 | } args_proc_flushmemory; | ||
161 | |||
162 | struct { | ||
163 | void *processor; | ||
164 | void *mpu_addr; | ||
165 | u32 size; | ||
166 | } args_proc_invalidatememory; | ||
167 | |||
168 | /* NODE Module */ | ||
169 | struct { | ||
170 | void *processor; | ||
171 | struct dsp_uuid __user *node_id_ptr; | ||
172 | struct dsp_cbdata __user *args; | ||
173 | struct dsp_nodeattrin __user *attr_in; | ||
174 | void *__user *node; | ||
175 | } args_node_allocate; | ||
176 | |||
177 | struct { | ||
178 | void *node; | ||
179 | u32 size; | ||
180 | struct dsp_bufferattr __user *attr; | ||
181 | u8 *__user *buffer; | ||
182 | } args_node_allocmsgbuf; | ||
183 | |||
184 | struct { | ||
185 | void *node; | ||
186 | s32 prio; | ||
187 | } args_node_changepriority; | ||
188 | |||
189 | struct { | ||
190 | void *node; | ||
191 | u32 stream_id; | ||
192 | void *other_node; | ||
193 | u32 other_stream; | ||
194 | struct dsp_strmattr __user *attrs; | ||
195 | struct dsp_cbdata __user *conn_param; | ||
196 | } args_node_connect; | ||
197 | |||
198 | struct { | ||
199 | void *node; | ||
200 | } args_node_create; | ||
201 | |||
202 | struct { | ||
203 | void *node; | ||
204 | } args_node_delete; | ||
205 | |||
206 | struct { | ||
207 | void *node; | ||
208 | struct dsp_bufferattr __user *attr; | ||
209 | u8 *buffer; | ||
210 | } args_node_freemsgbuf; | ||
211 | |||
212 | struct { | ||
213 | void *node; | ||
214 | struct dsp_nodeattr __user *attr; | ||
215 | u32 attr_size; | ||
216 | } args_node_getattr; | ||
217 | |||
218 | struct { | ||
219 | void *node; | ||
220 | struct dsp_msg __user *message; | ||
221 | u32 timeout; | ||
222 | } args_node_getmessage; | ||
223 | |||
224 | struct { | ||
225 | void *node; | ||
226 | } args_node_pause; | ||
227 | |||
228 | struct { | ||
229 | void *node; | ||
230 | struct dsp_msg __user *message; | ||
231 | u32 timeout; | ||
232 | } args_node_putmessage; | ||
233 | |||
234 | struct { | ||
235 | void *node; | ||
236 | u32 event_mask; | ||
237 | u32 notify_type; | ||
238 | struct dsp_notification __user *notification; | ||
239 | } args_node_registernotify; | ||
240 | |||
241 | struct { | ||
242 | void *node; | ||
243 | } args_node_run; | ||
244 | |||
245 | struct { | ||
246 | void *node; | ||
247 | int __user *status; | ||
248 | } args_node_terminate; | ||
249 | |||
250 | struct { | ||
251 | void *processor; | ||
252 | struct dsp_uuid __user *node_id_ptr; | ||
253 | struct dsp_ndbprops __user *node_props; | ||
254 | } args_node_getuuidprops; | ||
255 | |||
256 | /* STRM module */ | ||
257 | |||
258 | struct { | ||
259 | void *stream; | ||
260 | u32 size; | ||
261 | u8 *__user *ap_buffer; | ||
262 | u32 num_bufs; | ||
263 | } args_strm_allocatebuffer; | ||
264 | |||
265 | struct { | ||
266 | void *stream; | ||
267 | } args_strm_close; | ||
268 | |||
269 | struct { | ||
270 | void *stream; | ||
271 | u8 *__user *ap_buffer; | ||
272 | u32 num_bufs; | ||
273 | } args_strm_freebuffer; | ||
274 | |||
275 | struct { | ||
276 | void *stream; | ||
277 | void **event; | ||
278 | } args_strm_geteventhandle; | ||
279 | |||
280 | struct { | ||
281 | void *stream; | ||
282 | struct stream_info __user *stream_info; | ||
283 | u32 stream_info_size; | ||
284 | } args_strm_getinfo; | ||
285 | |||
286 | struct { | ||
287 | void *stream; | ||
288 | bool flush_flag; | ||
289 | } args_strm_idle; | ||
290 | |||
291 | struct { | ||
292 | void *stream; | ||
293 | u8 *buffer; | ||
294 | u32 bytes; | ||
295 | u32 buf_size; | ||
296 | u32 arg; | ||
297 | } args_strm_issue; | ||
298 | |||
299 | struct { | ||
300 | void *node; | ||
301 | u32 direction; | ||
302 | u32 index; | ||
303 | struct strm_attr __user *attr_in; | ||
304 | void *__user *stream; | ||
305 | } args_strm_open; | ||
306 | |||
307 | struct { | ||
308 | void *stream; | ||
309 | u8 *__user *buf_ptr; | ||
310 | u32 __user *bytes; | ||
311 | u32 __user *buf_size_ptr; | ||
312 | u32 __user *arg; | ||
313 | } args_strm_reclaim; | ||
314 | |||
315 | struct { | ||
316 | void *stream; | ||
317 | u32 event_mask; | ||
318 | u32 notify_type; | ||
319 | struct dsp_notification __user *notification; | ||
320 | } args_strm_registernotify; | ||
321 | |||
322 | struct { | ||
323 | void *__user *stream_tab; | ||
324 | u32 strm_num; | ||
325 | u32 __user *mask; | ||
326 | u32 timeout; | ||
327 | } args_strm_select; | ||
328 | |||
329 | /* CMM Module */ | ||
330 | struct { | ||
331 | struct cmm_object *cmm_mgr; | ||
332 | u32 size; | ||
333 | struct cmm_attrs *attrs; | ||
334 | void **buf_va; | ||
335 | } args_cmm_allocbuf; | ||
336 | |||
337 | struct { | ||
338 | struct cmm_object *cmm_mgr; | ||
339 | void *buf_pa; | ||
340 | u32 seg_id; | ||
341 | } args_cmm_freebuf; | ||
342 | |||
343 | struct { | ||
344 | void *processor; | ||
345 | struct cmm_object *__user *cmm_mgr; | ||
346 | } args_cmm_gethandle; | ||
347 | |||
348 | struct { | ||
349 | struct cmm_object *cmm_mgr; | ||
350 | struct cmm_info __user *cmm_info_obj; | ||
351 | } args_cmm_getinfo; | ||
352 | |||
353 | /* UTIL module */ | ||
354 | struct { | ||
355 | s32 util_argc; | ||
356 | char **argv; | ||
357 | } args_util_testdll; | ||
358 | }; | ||
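[Editor's note] Each wrapper receives one of these argument blocks after it has been copied in from user space; the __user annotations mark pointers that still need copy_to_user()/put_user() handling. A rough sketch under assumptions (proc_map() here is a hypothetical stand-in for the real PROC call, not its actual signature):

    #include <linux/uaccess.h>

    /* Hypothetical stand-in for the PROC mapping call. */
    int proc_map(void *processor, void *mpu_addr, u32 size, void *req_addr,
                 void **map_addr, u32 map_attr, void *pr_ctxt);

    u32 example_procwrap_map(union trapped_args *args, void *pr_ctxt)
    {
            void *map_addr;
            int status;

            status = proc_map(args->args_proc_mapmem.processor,
                              args->args_proc_mapmem.mpu_addr,
                              args->args_proc_mapmem.size,
                              args->args_proc_mapmem.req_addr,
                              &map_addr,
                              args->args_proc_mapmem.map_attr, pr_ctxt);
            /* hand the DSP-side address back through the __user pointer */
            if (!status && put_user(map_addr, args->args_proc_mapmem.map_addr))
                    status = -EFAULT;
            return status;
    }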
359 | |||
360 | /* | ||
361 | * Dspbridge Ioctl numbering scheme | ||
362 | * | ||
363 | * 7 0 | ||
364 | * --------------------------------- | ||
365 | * | Module | Ioctl Number | | ||
366 | * --------------------------------- | ||
367 | * | x | x | x | 0 | 0 | 0 | 0 | 0 | | ||
368 | * --------------------------------- | ||
369 | */ | ||
370 | |||
371 | /* Ioctl driver identifier */ | ||
372 | #define DB 0xDB | ||
373 | |||
374 | /* | ||
375 | * Following are used to distinguish between module ioctls, this is needed | ||
376 | * in case new ioctls are introduced. | ||
377 | */ | ||
378 | #define DB_MODULE_MASK 0xE0 | ||
379 | #define DB_IOC_MASK 0x1F | ||
380 | |||
381 | /* Ioctl module masks */ | ||
382 | #define DB_MGR 0x0 | ||
383 | #define DB_PROC 0x20 | ||
384 | #define DB_NODE 0x40 | ||
385 | #define DB_STRM 0x60 | ||
386 | #define DB_CMM 0x80 | ||
387 | |||
388 | #define DB_MODULE_SHIFT 5 | ||
389 | |||
390 | /* Used to calculate the ioctl per dspbridge module */ | ||
391 | #define DB_IOC(module, num) \ | ||
392 | (((module) & DB_MODULE_MASK) | ((num) & DB_IOC_MASK)) | ||
393 | /* Used to get dspbridge ioctl module */ | ||
394 | #define DB_GET_MODULE(cmd) ((cmd) & DB_MODULE_MASK) | ||
395 | /* Used to get dspbridge ioctl number */ | ||
396 | #define DB_GET_IOC(cmd) ((cmd) & DB_IOC_MASK) | ||
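[Editor's note] As a worked illustration using only the macros defined above, the PROC-module command with per-module index 12 decomposes as follows:

    static void example_decode_ioctl_nr(void)
    {
            unsigned int nr  = DB_IOC(DB_PROC, 12);   /* 0x20 | 0x0C == 0x2C */
            unsigned int mod = DB_GET_MODULE(nr);     /* 0x20, the PROC module */
            unsigned int idx = DB_GET_IOC(nr);        /* 12, the command within PROC */
    }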
397 | |||
398 | /* TODO: Remove deprecated and not implemented */ | ||
399 | |||
400 | /* MGR Module */ | ||
401 | #define MGR_ENUMNODE_INFO _IOWR(DB, DB_IOC(DB_MGR, 0), unsigned long) | ||
402 | #define MGR_ENUMPROC_INFO _IOWR(DB, DB_IOC(DB_MGR, 1), unsigned long) | ||
403 | #define MGR_REGISTEROBJECT _IOWR(DB, DB_IOC(DB_MGR, 2), unsigned long) | ||
404 | #define MGR_UNREGISTEROBJECT _IOWR(DB, DB_IOC(DB_MGR, 3), unsigned long) | ||
405 | #define MGR_WAIT _IOWR(DB, DB_IOC(DB_MGR, 4), unsigned long) | ||
406 | /* MGR_GET_PROC_RES Deprecated */ | ||
407 | #define MGR_GET_PROC_RES _IOR(DB, DB_IOC(DB_MGR, 5), unsigned long) | ||
408 | |||
409 | /* PROC Module */ | ||
410 | #define PROC_ATTACH _IOWR(DB, DB_IOC(DB_PROC, 0), unsigned long) | ||
411 | #define PROC_CTRL _IOR(DB, DB_IOC(DB_PROC, 1), unsigned long) | ||
412 | /* PROC_DETACH Deprecated */ | ||
413 | #define PROC_DETACH _IOR(DB, DB_IOC(DB_PROC, 2), unsigned long) | ||
414 | #define PROC_ENUMNODE _IOWR(DB, DB_IOC(DB_PROC, 3), unsigned long) | ||
415 | #define PROC_ENUMRESOURCES _IOWR(DB, DB_IOC(DB_PROC, 4), unsigned long) | ||
416 | #define PROC_GET_STATE _IOWR(DB, DB_IOC(DB_PROC, 5), unsigned long) | ||
417 | #define PROC_GET_TRACE _IOWR(DB, DB_IOC(DB_PROC, 6), unsigned long) | ||
418 | #define PROC_LOAD _IOW(DB, DB_IOC(DB_PROC, 7), unsigned long) | ||
419 | #define PROC_REGISTERNOTIFY _IOWR(DB, DB_IOC(DB_PROC, 8), unsigned long) | ||
420 | #define PROC_START _IOW(DB, DB_IOC(DB_PROC, 9), unsigned long) | ||
421 | #define PROC_RSVMEM _IOWR(DB, DB_IOC(DB_PROC, 10), unsigned long) | ||
422 | #define PROC_UNRSVMEM _IOW(DB, DB_IOC(DB_PROC, 11), unsigned long) | ||
423 | #define PROC_MAPMEM _IOWR(DB, DB_IOC(DB_PROC, 12), unsigned long) | ||
424 | #define PROC_UNMAPMEM _IOR(DB, DB_IOC(DB_PROC, 13), unsigned long) | ||
425 | #define PROC_FLUSHMEMORY _IOW(DB, DB_IOC(DB_PROC, 14), unsigned long) | ||
426 | #define PROC_STOP _IOWR(DB, DB_IOC(DB_PROC, 15), unsigned long) | ||
427 | #define PROC_INVALIDATEMEMORY _IOW(DB, DB_IOC(DB_PROC, 16), unsigned long) | ||
428 | #define PROC_BEGINDMA _IOW(DB, DB_IOC(DB_PROC, 17), unsigned long) | ||
429 | #define PROC_ENDDMA _IOW(DB, DB_IOC(DB_PROC, 18), unsigned long) | ||
430 | |||
431 | /* NODE Module */ | ||
432 | #define NODE_ALLOCATE _IOWR(DB, DB_IOC(DB_NODE, 0), unsigned long) | ||
433 | #define NODE_ALLOCMSGBUF _IOWR(DB, DB_IOC(DB_NODE, 1), unsigned long) | ||
434 | #define NODE_CHANGEPRIORITY _IOW(DB, DB_IOC(DB_NODE, 2), unsigned long) | ||
435 | #define NODE_CONNECT _IOW(DB, DB_IOC(DB_NODE, 3), unsigned long) | ||
436 | #define NODE_CREATE _IOW(DB, DB_IOC(DB_NODE, 4), unsigned long) | ||
437 | #define NODE_DELETE _IOW(DB, DB_IOC(DB_NODE, 5), unsigned long) | ||
438 | #define NODE_FREEMSGBUF _IOW(DB, DB_IOC(DB_NODE, 6), unsigned long) | ||
439 | #define NODE_GETATTR _IOWR(DB, DB_IOC(DB_NODE, 7), unsigned long) | ||
440 | #define NODE_GETMESSAGE _IOWR(DB, DB_IOC(DB_NODE, 8), unsigned long) | ||
441 | #define NODE_PAUSE _IOW(DB, DB_IOC(DB_NODE, 9), unsigned long) | ||
442 | #define NODE_PUTMESSAGE _IOW(DB, DB_IOC(DB_NODE, 10), unsigned long) | ||
443 | #define NODE_REGISTERNOTIFY _IOWR(DB, DB_IOC(DB_NODE, 11), unsigned long) | ||
444 | #define NODE_RUN _IOW(DB, DB_IOC(DB_NODE, 12), unsigned long) | ||
445 | #define NODE_TERMINATE _IOWR(DB, DB_IOC(DB_NODE, 13), unsigned long) | ||
446 | #define NODE_GETUUIDPROPS _IOWR(DB, DB_IOC(DB_NODE, 14), unsigned long) | ||
447 | |||
448 | /* STRM Module */ | ||
449 | #define STRM_ALLOCATEBUFFER _IOWR(DB, DB_IOC(DB_STRM, 0), unsigned long) | ||
450 | #define STRM_CLOSE _IOW(DB, DB_IOC(DB_STRM, 1), unsigned long) | ||
451 | #define STRM_FREEBUFFER _IOWR(DB, DB_IOC(DB_STRM, 2), unsigned long) | ||
452 | #define STRM_GETEVENTHANDLE _IO(DB, DB_IOC(DB_STRM, 3)) /* Not Impl'd */ | ||
453 | #define STRM_GETINFO _IOWR(DB, DB_IOC(DB_STRM, 4), unsigned long) | ||
454 | #define STRM_IDLE _IOW(DB, DB_IOC(DB_STRM, 5), unsigned long) | ||
455 | #define STRM_ISSUE _IOW(DB, DB_IOC(DB_STRM, 6), unsigned long) | ||
456 | #define STRM_OPEN _IOWR(DB, DB_IOC(DB_STRM, 7), unsigned long) | ||
457 | #define STRM_RECLAIM _IOWR(DB, DB_IOC(DB_STRM, 8), unsigned long) | ||
458 | #define STRM_REGISTERNOTIFY _IOWR(DB, DB_IOC(DB_STRM, 9), unsigned long) | ||
459 | #define STRM_SELECT _IOWR(DB, DB_IOC(DB_STRM, 10), unsigned long) | ||
460 | |||
461 | /* CMM Module */ | ||
462 | #define CMM_ALLOCBUF _IO(DB, DB_IOC(DB_CMM, 0)) /* Not Impl'd */ | ||
463 | #define CMM_FREEBUF _IO(DB, DB_IOC(DB_CMM, 1)) /* Not Impl'd */ | ||
464 | #define CMM_GETHANDLE _IOR(DB, DB_IOC(DB_CMM, 2), unsigned long) | ||
465 | #define CMM_GETINFO _IOR(DB, DB_IOC(DB_CMM, 3), unsigned long) | ||
466 | |||
467 | #endif /* DSPAPIIOCTL_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspapi.h b/drivers/staging/tidspbridge/include/dspbridge/dspapi.h deleted file mode 100644 index c99c68738b0f..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dspapi.h +++ /dev/null | |||
@@ -1,167 +0,0 @@ | |||
1 | /* | ||
2 | * dspapi.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Includes the wrapper functions called directly by the | ||
7 | * DeviceIOControl interface. | ||
8 | * | ||
9 | * Notes: | ||
10 | * Bridge services exported to the Bridge driver are initialized by the | ||
11 | * DSPAPI on behalf of the Bridge driver. The Bridge driver must not call | ||
12 | * module Init/Exit functions. | ||
13 | * | ||
14 | * To ensure Bridge driver binary compatibility across different platforms, | ||
15 | * for the same processor, a Bridge driver must restrict its usage of system | ||
16 | * services to those exported by the DSPAPI library. | ||
17 | * | ||
18 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
19 | * | ||
20 | * This package is free software; you can redistribute it and/or modify | ||
21 | * it under the terms of the GNU General Public License version 2 as | ||
22 | * published by the Free Software Foundation. | ||
23 | * | ||
24 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
25 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
26 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
27 | */ | ||
28 | |||
29 | #ifndef DSPAPI_ | ||
30 | #define DSPAPI_ | ||
31 | |||
32 | #include <dspbridge/dspapi-ioctl.h> | ||
33 | |||
34 | /* This BRD API Library Version: */ | ||
35 | #define BRD_API_MAJOR_VERSION (u32)8 /* .8x - Alpha, .9x - Beta, 1.x FCS */ | ||
36 | #define BRD_API_MINOR_VERSION (u32)0 | ||
37 | |||
38 | /* | ||
39 | * ======== api_call_dev_ioctl ======== | ||
40 | * Purpose: | ||
41 | * Call the (wrapper) function for the corresponding API IOCTL. | ||
42 | * Parameters: | ||
43 | * cmd: IOCTL id, base 0. | ||
44 | * args: Argument structure. | ||
45 | * result: | ||
46 | * Returns: | ||
47 | * 0 if command called; -EINVAL if command not in IOCTL | ||
48 | * table. | ||
49 | * Requires: | ||
50 | * Ensures: | ||
51 | */ | ||
52 | extern int api_call_dev_ioctl(unsigned int cmd, | ||
53 | union trapped_args *args, | ||
54 | u32 *result, void *pr_ctxt); | ||
55 | |||
56 | /* | ||
57 | * ======== api_init ======== | ||
58 | * Purpose: | ||
59 | * Initialize modules used by Bridge API. | ||
60 | * This procedure is called when the driver is loaded. | ||
61 | * Parameters: | ||
62 | * Returns: | ||
63 | * TRUE if success; FALSE otherwise. | ||
64 | * Requires: | ||
65 | * Ensures: | ||
66 | */ | ||
67 | extern bool api_init(void); | ||
68 | |||
69 | /* | ||
70 | * ======== api_init_complete2 ======== | ||
71 | * Purpose: | ||
72 | * Perform any required bridge initialization which cannot | ||
73 | * be performed in api_init() or dev_start_device() due | ||
74 | * to the fact that some services are not yet | ||
75 | * completely initialized. | ||
76 | * Parameters: | ||
77 | * Returns: | ||
78 | * 0: Allow this device to load | ||
79 | * -EPERM: Failure. | ||
80 | * Requires: | ||
81 | * Bridge API initialized. | ||
82 | * Ensures: | ||
83 | */ | ||
84 | extern int api_init_complete2(void); | ||
85 | |||
86 | /* | ||
87 | * ======== api_exit ======== | ||
88 | * Purpose: | ||
89 | * Exit all modules initialized in api_init(void). | ||
90 | * This procedure is called when the driver is unloaded. | ||
91 | * Parameters: | ||
92 | * Returns: | ||
93 | * Requires: | ||
94 | * api_init(void) was previously called. | ||
95 | * Ensures: | ||
96 | * Resources acquired in api_init(void) are freed. | ||
97 | */ | ||
98 | extern void api_exit(void); | ||
99 | |||
100 | /* MGR wrapper functions */ | ||
101 | extern u32 mgrwrap_enum_node_info(union trapped_args *args, void *pr_ctxt); | ||
102 | extern u32 mgrwrap_enum_proc_info(union trapped_args *args, void *pr_ctxt); | ||
103 | extern u32 mgrwrap_register_object(union trapped_args *args, void *pr_ctxt); | ||
104 | extern u32 mgrwrap_unregister_object(union trapped_args *args, void *pr_ctxt); | ||
105 | extern u32 mgrwrap_wait_for_bridge_events(union trapped_args *args, | ||
106 | void *pr_ctxt); | ||
107 | |||
108 | extern u32 mgrwrap_get_process_resources_info(union trapped_args *args, | ||
109 | void *pr_ctxt); | ||
110 | |||
111 | /* CPRC (Processor) wrapper Functions */ | ||
112 | extern u32 procwrap_attach(union trapped_args *args, void *pr_ctxt); | ||
113 | extern u32 procwrap_ctrl(union trapped_args *args, void *pr_ctxt); | ||
114 | extern u32 procwrap_detach(union trapped_args *args, void *pr_ctxt); | ||
115 | extern u32 procwrap_enum_node_info(union trapped_args *args, void *pr_ctxt); | ||
116 | extern u32 procwrap_enum_resources(union trapped_args *args, void *pr_ctxt); | ||
117 | extern u32 procwrap_get_state(union trapped_args *args, void *pr_ctxt); | ||
118 | extern u32 procwrap_get_trace(union trapped_args *args, void *pr_ctxt); | ||
119 | extern u32 procwrap_load(union trapped_args *args, void *pr_ctxt); | ||
120 | extern u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt); | ||
121 | extern u32 procwrap_start(union trapped_args *args, void *pr_ctxt); | ||
122 | extern u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt); | ||
123 | extern u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt); | ||
124 | extern u32 procwrap_map(union trapped_args *args, void *pr_ctxt); | ||
125 | extern u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt); | ||
126 | extern u32 procwrap_flush_memory(union trapped_args *args, void *pr_ctxt); | ||
127 | extern u32 procwrap_stop(union trapped_args *args, void *pr_ctxt); | ||
128 | extern u32 procwrap_invalidate_memory(union trapped_args *args, void *pr_ctxt); | ||
129 | extern u32 procwrap_begin_dma(union trapped_args *args, void *pr_ctxt); | ||
130 | extern u32 procwrap_end_dma(union trapped_args *args, void *pr_ctxt); | ||
131 | |||
132 | /* NODE wrapper functions */ | ||
133 | extern u32 nodewrap_allocate(union trapped_args *args, void *pr_ctxt); | ||
134 | extern u32 nodewrap_alloc_msg_buf(union trapped_args *args, void *pr_ctxt); | ||
135 | extern u32 nodewrap_change_priority(union trapped_args *args, void *pr_ctxt); | ||
136 | extern u32 nodewrap_connect(union trapped_args *args, void *pr_ctxt); | ||
137 | extern u32 nodewrap_create(union trapped_args *args, void *pr_ctxt); | ||
138 | extern u32 nodewrap_delete(union trapped_args *args, void *pr_ctxt); | ||
139 | extern u32 nodewrap_free_msg_buf(union trapped_args *args, void *pr_ctxt); | ||
140 | extern u32 nodewrap_get_attr(union trapped_args *args, void *pr_ctxt); | ||
141 | extern u32 nodewrap_get_message(union trapped_args *args, void *pr_ctxt); | ||
142 | extern u32 nodewrap_pause(union trapped_args *args, void *pr_ctxt); | ||
143 | extern u32 nodewrap_put_message(union trapped_args *args, void *pr_ctxt); | ||
144 | extern u32 nodewrap_register_notify(union trapped_args *args, void *pr_ctxt); | ||
145 | extern u32 nodewrap_run(union trapped_args *args, void *pr_ctxt); | ||
146 | extern u32 nodewrap_terminate(union trapped_args *args, void *pr_ctxt); | ||
147 | extern u32 nodewrap_get_uuid_props(union trapped_args *args, void *pr_ctxt); | ||
148 | |||
149 | /* STRM wrapper functions */ | ||
150 | extern u32 strmwrap_allocate_buffer(union trapped_args *args, void *pr_ctxt); | ||
151 | extern u32 strmwrap_close(union trapped_args *args, void *pr_ctxt); | ||
152 | extern u32 strmwrap_free_buffer(union trapped_args *args, void *pr_ctxt); | ||
153 | extern u32 strmwrap_get_event_handle(union trapped_args *args, void *pr_ctxt); | ||
154 | extern u32 strmwrap_get_info(union trapped_args *args, void *pr_ctxt); | ||
155 | extern u32 strmwrap_idle(union trapped_args *args, void *pr_ctxt); | ||
156 | extern u32 strmwrap_issue(union trapped_args *args, void *pr_ctxt); | ||
157 | extern u32 strmwrap_open(union trapped_args *args, void *pr_ctxt); | ||
158 | extern u32 strmwrap_reclaim(union trapped_args *args, void *pr_ctxt); | ||
159 | extern u32 strmwrap_register_notify(union trapped_args *args, void *pr_ctxt); | ||
160 | extern u32 strmwrap_select(union trapped_args *args, void *pr_ctxt); | ||
161 | |||
162 | extern u32 cmmwrap_calloc_buf(union trapped_args *args, void *pr_ctxt); | ||
163 | extern u32 cmmwrap_free_buf(union trapped_args *args, void *pr_ctxt); | ||
164 | extern u32 cmmwrap_get_handle(union trapped_args *args, void *pr_ctxt); | ||
165 | extern u32 cmmwrap_get_info(union trapped_args *args, void *pr_ctxt); | ||
166 | |||
167 | #endif /* DSPAPI_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspchnl.h b/drivers/staging/tidspbridge/include/dspbridge/dspchnl.h deleted file mode 100644 index 7146a5057e29..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dspchnl.h +++ /dev/null | |||
@@ -1,72 +0,0 @@ | |||
1 | /* | ||
2 | * dspchnl.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Declares the upper edge channel class library functions required by | ||
7 | * all Bridge driver / DSP API driver interface tables. These functions are | ||
8 | * implemented by every class of Bridge channel library. | ||
9 | * | ||
10 | * Notes: | ||
11 | * The function comment headers reside in dspdefs.h. | ||
12 | * | ||
13 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
14 | * | ||
15 | * This package is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License version 2 as | ||
17 | * published by the Free Software Foundation. | ||
18 | * | ||
19 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
20 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | */ | ||
23 | |||
24 | #ifndef DSPCHNL_ | ||
25 | #define DSPCHNL_ | ||
26 | |||
27 | extern int bridge_chnl_create(struct chnl_mgr **channel_mgr, | ||
28 | struct dev_object *hdev_obj, | ||
29 | const struct chnl_mgrattrs | ||
30 | *mgr_attrts); | ||
31 | |||
32 | extern int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr); | ||
33 | |||
34 | extern int bridge_chnl_open(struct chnl_object **chnl, | ||
35 | struct chnl_mgr *hchnl_mgr, | ||
36 | s8 chnl_mode, | ||
37 | u32 ch_id, | ||
38 | const struct chnl_attr | ||
39 | *pattrs); | ||
40 | |||
41 | extern int bridge_chnl_close(struct chnl_object *chnl_obj); | ||
42 | |||
43 | extern int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, | ||
44 | void *host_buf, | ||
45 | u32 byte_size, u32 buf_size, | ||
46 | u32 dw_dsp_addr, u32 dw_arg); | ||
47 | |||
48 | extern int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, | ||
49 | u32 timeout, struct chnl_ioc *chan_ioc); | ||
50 | |||
51 | extern int bridge_chnl_cancel_io(struct chnl_object *chnl_obj); | ||
52 | |||
53 | extern int bridge_chnl_flush_io(struct chnl_object *chnl_obj, | ||
54 | u32 timeout); | ||
55 | |||
56 | extern int bridge_chnl_get_info(struct chnl_object *chnl_obj, | ||
57 | struct chnl_info *channel_info); | ||
58 | |||
59 | extern int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, | ||
60 | u32 ch_id, struct chnl_mgrinfo | ||
61 | *mgr_info); | ||
62 | |||
63 | extern int bridge_chnl_idle(struct chnl_object *chnl_obj, | ||
64 | u32 timeout, bool flush_data); | ||
65 | |||
66 | extern int bridge_chnl_register_notify(struct chnl_object *chnl_obj, | ||
67 | u32 event_mask, | ||
68 | u32 notify_type, | ||
69 | struct dsp_notification | ||
70 | *hnotification); | ||
71 | |||
72 | #endif /* DSPCHNL_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h deleted file mode 100644 index ed32bf383132..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h +++ /dev/null | |||
@@ -1,1048 +0,0 @@ | |||
1 | /* | ||
2 | * dspdefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Bridge driver entry point and interface function declarations. | ||
7 | * | ||
8 | * Notes: | ||
9 | * The DSP API obtains its function interface to | ||
10 | * the Bridge driver via a call to bridge_drv_entry(). | ||
11 | * | ||
12 | * Bridge services exported to Bridge drivers are initialized by the | ||
13 | * DSP API on behalf of the Bridge driver. | ||
14 | * | ||
15 | * Bridge function DBC Requires and Ensures are also made by the DSP API on | ||
16 | * behalf of the Bridge driver, to simplify the Bridge driver code. | ||
17 | * | ||
18 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
19 | * | ||
20 | * This package is free software; you can redistribute it and/or modify | ||
21 | * it under the terms of the GNU General Public License version 2 as | ||
22 | * published by the Free Software Foundation. | ||
23 | * | ||
24 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
25 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
26 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
27 | */ | ||
28 | |||
29 | #ifndef DSPDEFS_ | ||
30 | #define DSPDEFS_ | ||
31 | |||
32 | #include <dspbridge/brddefs.h> | ||
33 | #include <dspbridge/cfgdefs.h> | ||
34 | #include <dspbridge/chnlpriv.h> | ||
35 | #include <dspbridge/dspdeh.h> | ||
36 | #include <dspbridge/devdefs.h> | ||
37 | #include <dspbridge/io.h> | ||
38 | #include <dspbridge/msgdefs.h> | ||
39 | |||
40 | /* Handle to Bridge driver's private device context. */ | ||
41 | struct bridge_dev_context; | ||
42 | |||
43 | /*--------------------------------------------------------------------------- */ | ||
44 | /* BRIDGE DRIVER FUNCTION TYPES */ | ||
45 | /*--------------------------------------------------------------------------- */ | ||
46 | |||
47 | /* | ||
48 | * ======== bridge_brd_monitor ======== | ||
49 | * Purpose: | ||
50 | * Bring the board to the BRD_IDLE (monitor) state. | ||
51 | * Parameters: | ||
52 | * dev_ctxt: Handle to Bridge driver defined device context. | ||
53 | * Returns: | ||
54 | * 0: Success. | ||
55 | * -ETIMEDOUT: Timeout occurred waiting for a response from hardware. | ||
56 | * -EPERM: Other, unspecified error. | ||
57 | * Requires: | ||
58 | * dev_ctxt != NULL | ||
59 | * Ensures: | ||
60 | * 0: Board is in BRD_IDLE state; | ||
61 | * else: Board state is indeterminate. | ||
62 | */ | ||
63 | typedef int(*fxn_brd_monitor) (struct bridge_dev_context *dev_ctxt); | ||
64 | |||
65 | /* | ||
66 | * ======== fxn_brd_setstate ======== | ||
67 | * Purpose: | ||
68 | * Sets the Bridge driver state | ||
69 | * Parameters: | ||
70 | * dev_ctxt: Handle to Bridge driver defined device info. | ||
71 | * brd_state: Board state | ||
72 | * Returns: | ||
73 | * 0: Success. | ||
74 | * -EPERM: Other, unspecified error. | ||
75 | * Requires: | ||
76 | * dev_ctxt != NULL; | ||
77 | * brd_state <= BRD_LASTSTATE. | ||
78 | * Ensures: | ||
79 | * brd_state <= BRD_LASTSTATE. | ||
80 | * Update the Board state to the specified state. | ||
81 | */ | ||
82 | typedef int(*fxn_brd_setstate) (struct bridge_dev_context | ||
83 | * dev_ctxt, u32 brd_state); | ||
84 | |||
85 | /* | ||
86 | * ======== bridge_brd_start ======== | ||
87 | * Purpose: | ||
88 | * Bring board to the BRD_RUNNING (start) state. | ||
89 | * Parameters: | ||
90 | * dev_ctxt: Handle to Bridge driver defined device context. | ||
91 | * dsp_addr: DSP address at which to start execution. | ||
92 | * Returns: | ||
93 | * 0: Success. | ||
94 | * -ETIMEDOUT: Timeout occurred waiting for a response from hardware. | ||
95 | * -EPERM: Other, unspecified error. | ||
96 | * Requires: | ||
97 | * dev_ctxt != NULL | ||
98 | * Board is in monitor (BRD_IDLE) state. | ||
99 | * Ensures: | ||
100 | * 0: Board is in BRD_RUNNING state. | ||
101 | * Interrupts to the PC are enabled. | ||
102 | * else: Board state is indeterminate. | ||
103 | */ | ||
104 | typedef int(*fxn_brd_start) (struct bridge_dev_context | ||
105 | * dev_ctxt, u32 dsp_addr); | ||
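[Editor's note] Function types such as the three above are what the DSP API collects into the interface table it obtains from bridge_drv_entry(). A minimal sketch of such a grouping; the struct and field names here are hypothetical stand-ins, not the real interface table:

    /* Hypothetical illustration of how the entry points might be grouped. */
    struct example_brd_fxns {
            fxn_brd_monitor  brd_monitor;    /* put the board in BRD_IDLE        */
            fxn_brd_setstate brd_set_state;  /* record a new board state         */
            fxn_brd_start    brd_start;      /* run the DSP from a given address */
    };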
106 | |||
107 | /* | ||
108 | * ======== bridge_brd_mem_copy ======== | ||
109 | * Purpose: | ||
110 | * Copy memory from one DSP address to another | ||
111 | * Parameters: | ||
112 | * dev_context: Pointer to context handle | ||
113 | * dsp_dest_addr: DSP address to copy to | ||
114 | * dsp_src_addr: DSP address to copy from | ||
115 | * ul_num_bytes: Number of bytes to copy | ||
116 | * mem_type: What section of memory to copy to | ||
117 | * Returns: | ||
118 | * 0: Success. | ||
119 | * -EPERM: Other, unspecified error. | ||
120 | * Requires: | ||
121 | * dev_context != NULL | ||
122 | * Ensures: | ||
123 | * 0: Board is in BRD_RUNNING state. | ||
124 | * Interrupts to the PC are enabled. | ||
125 | * else: Board state is indeterminate. | ||
126 | */ | ||
127 | typedef int(*fxn_brd_memcopy) (struct bridge_dev_context | ||
128 | * dev_ctxt, | ||
129 | u32 dsp_dest_addr, | ||
130 | u32 dsp_src_addr, | ||
131 | u32 ul_num_bytes, u32 mem_type); | ||
132 | /* | ||
133 | * ======== bridge_brd_mem_write ======== | ||
134 | * Purpose: | ||
135 | * Write a block of host memory into a DSP address, into a given memory | ||
136 | * space. Unlike bridge_brd_write, this API does reset the DSP | ||
137 | * Parameters: | ||
138 | * dev_ctxt: Handle to Bridge driver defined device info. | ||
139 | * dsp_addr: Address on DSP board (Destination). | ||
140 | * host_buf: Pointer to host buffer (Source). | ||
141 | * ul_num_bytes: Number of bytes to transfer. | ||
142 | * mem_type: Memory space on DSP to which to transfer. | ||
143 | * Returns: | ||
144 | * 0: Success. | ||
145 | * -ETIMEDOUT: Timeout occurred waiting for a response from hardware. | ||
146 | * -EPERM: Other, unspecified error. | ||
147 | * Requires: | ||
148 | * dev_ctxt != NULL; | ||
149 | * host_buf != NULL. | ||
150 | * Ensures: | ||
151 | */ | ||
152 | typedef int(*fxn_brd_memwrite) (struct bridge_dev_context | ||
153 | * dev_ctxt, | ||
154 | u8 *host_buf, | ||
155 | u32 dsp_addr, u32 ul_num_bytes, | ||
156 | u32 mem_type); | ||
157 | |||
158 | /* | ||
159 | * ======== bridge_brd_mem_map ======== | ||
160 | * Purpose: | ||
161 | * Map an MPU memory region to a DSP/IVA memory space | ||
162 | * Parameters: | ||
163 | * dev_ctxt: Handle to Bridge driver defined device info. | ||
164 | * ul_mpu_addr: MPU memory region start address. | ||
165 | * virt_addr: DSP/IVA memory region u8 address. | ||
166 | * ul_num_bytes: Number of bytes to map. | ||
167 | * map_attrs: Mapping attributes (e.g. endianness). | ||
168 | * Returns: | ||
169 | * 0: Success. | ||
170 | * -EPERM: Other, unspecified error. | ||
171 | * Requires: | ||
172 | * dev_ctxt != NULL; | ||
173 | * Ensures: | ||
174 | */ | ||
175 | typedef int(*fxn_brd_memmap) (struct bridge_dev_context | ||
176 | * dev_ctxt, u32 ul_mpu_addr, | ||
177 | u32 virt_addr, u32 ul_num_bytes, | ||
178 | u32 map_attr, | ||
179 | struct page **mapped_pages); | ||
180 | |||
181 | /* | ||
182 | * ======== bridge_brd_mem_un_map ======== | ||
183 | * Purpose: | ||
184 | * Unmap an MPU memory region from DSP/IVA memory space | ||
185 | * Parameters: | ||
186 | * dev_ctxt: Handle to Bridge driver defined device info. | ||
187 | * virt_addr: DSP/IVA memory region u8 address. | ||
188 | * ul_num_bytes: Number of bytes to unmap. | ||
189 | * Returns: | ||
190 | * 0: Success. | ||
191 | * -EPERM: Other, unspecified error. | ||
192 | * Requires: | ||
193 | * dev_ctxt != NULL; | ||
194 | * Ensures: | ||
195 | */ | ||
196 | typedef int(*fxn_brd_memunmap) (struct bridge_dev_context | ||
197 | * dev_ctxt, | ||
198 | u32 virt_addr, u32 ul_num_bytes); | ||
199 | |||
200 | /* | ||
201 | * ======== bridge_brd_stop ======== | ||
202 | * Purpose: | ||
203 | * Bring board to the BRD_STOPPED state. | ||
204 | * Parameters: | ||
205 | * dev_ctxt: Handle to Bridge driver defined device context. | ||
206 | * Returns: | ||
207 | * 0: Success. | ||
208 | * -ETIMEDOUT: Timeout occurred waiting for a response from hardware. | ||
209 | * -EPERM: Other, unspecified error. | ||
210 | * Requires: | ||
211 | * dev_ctxt != NULL | ||
212 | * Ensures: | ||
213 | * 0: Board is in BRD_STOPPED (stop) state; | ||
214 | * Interrupts to the PC are disabled. | ||
215 | * else: Board state is indeterminate. | ||
216 | */ | ||
217 | typedef int(*fxn_brd_stop) (struct bridge_dev_context *dev_ctxt); | ||
218 | |||
219 | /* | ||
220 | * ======== bridge_brd_status ======== | ||
221 | * Purpose: | ||
222 | * Report the current state of the board. | ||
223 | * Parameters: | ||
224 | * dev_ctxt: Handle to Bridge driver defined device context. | ||
225 | * board_state: Ptr to BRD status variable. | ||
226 | * Returns: | ||
227 | * 0: | ||
228 | * Requires: | ||
229 | * board_state != NULL; | ||
230 | * dev_ctxt != NULL | ||
231 | * Ensures: | ||
232 | * *board_state is one of | ||
233 | * {BRD_STOPPED, BRD_IDLE, BRD_RUNNING, BRD_UNKNOWN}; | ||
234 | */ | ||
235 | typedef int(*fxn_brd_status) (struct bridge_dev_context *dev_ctxt, | ||
236 | int *board_state); | ||
237 | |||
238 | /* | ||
239 | * ======== bridge_brd_read ======== | ||
240 | * Purpose: | ||
241 | * Read a block of DSP memory, from a given memory space, into a host | ||
242 | * buffer. | ||
243 | * Parameters: | ||
244 | * dev_ctxt: Handle to Bridge driver defined device info. | ||
245 | * host_buf: Pointer to host buffer (Destination). | ||
246 | * dsp_addr: Address on DSP board (Source). | ||
247 | * ul_num_bytes: Number of bytes to transfer. | ||
248 | * mem_type: Memory space on DSP from which to transfer. | ||
249 | * Returns: | ||
250 | * 0: Success. | ||
251 | * -ETIMEDOUT: Timeout occurred waiting for a response from hardware. | ||
252 | * -EPERM: Other, unspecified error. | ||
253 | * Requires: | ||
254 | * dev_ctxt != NULL; | ||
255 | * host_buf != NULL. | ||
256 | * Ensures: | ||
257 | * Will not write more than ul_num_bytes bytes into host_buf. | ||
258 | */ | ||
259 | typedef int(*fxn_brd_read) (struct bridge_dev_context *dev_ctxt, | ||
260 | u8 *host_buf, | ||
261 | u32 dsp_addr, | ||
262 | u32 ul_num_bytes, u32 mem_type); | ||
263 | |||
264 | /* | ||
265 | * ======== bridge_brd_write ======== | ||
266 | * Purpose: | ||
267 | * Write a block of host memory into a DSP address, into a given memory | ||
268 | * space. | ||
269 | * Parameters: | ||
270 | * dev_ctxt: Handle to Bridge driver defined device info. | ||
271 | * dsp_addr: Address on DSP board (Destination). | ||
272 | * host_buf: Pointer to host buffer (Source). | ||
273 | * ul_num_bytes: Number of bytes to transfer. | ||
274 | * mem_type: Memory space on DSP to which to transfer. | ||
275 | * Returns: | ||
276 | * 0: Success. | ||
277 | * -ETIMEDOUT: Timeout occurred waiting for a response from hardware. | ||
278 | * -EPERM: Other, unspecified error. | ||
279 | * Requires: | ||
280 | * dev_ctxt != NULL; | ||
281 | * host_buf != NULL. | ||
282 | * Ensures: | ||
283 | */ | ||
284 | typedef int(*fxn_brd_write) (struct bridge_dev_context *dev_ctxt, | ||
285 | u8 *host_buf, | ||
286 | u32 dsp_addr, | ||
287 | u32 ul_num_bytes, u32 mem_type); | ||
288 | |||
289 | /* | ||
290 | * ======== bridge_chnl_create ======== | ||
291 | * Purpose: | ||
292 | * Create a channel manager object, responsible for opening new channels | ||
293 | * and closing old ones for a given Bridge board. | ||
294 | * Parameters: | ||
295 | * channel_mgr: Location to store a channel manager object on output. | ||
296 | * hdev_obj: Handle to a device object. | ||
297 | * mgr_attrts: Channel manager attributes. | ||
298 | * mgr_attrts->max_channels: Max channels | ||
299 | * mgr_attrts->birq: Channel's I/O IRQ number. | ||
300 | * mgr_attrts->irq_shared: TRUE if the IRQ is shareable. | ||
301 | * mgr_attrts->word_size: DSP word size in equivalent PC bytes. | ||
302 | * mgr_attrts->shm_base: Base physical address of shared memory, if any. | ||
303 | * mgr_attrts->sm_length: Bytes of shared memory block. | ||
304 | * Returns: | ||
305 | * 0: Success; | ||
306 | * -ENOMEM: Insufficient memory for requested resources. | ||
307 | * -EIO: Unable to plug ISR for given IRQ. | ||
308 | * -EFAULT: Couldn't map physical address to a virtual one. | ||
309 | * Requires: | ||
310 | * channel_mgr != NULL. | ||
311 | * mgr_attrts != NULL | ||
312 | * mgr_attrts field are all valid: | ||
313 | * 0 < max_channels <= CHNL_MAXCHANNELS. | ||
314 | * birq <= 15. | ||
315 | * word_size > 0. | ||
316 | * hdev_obj != NULL | ||
317 | * No channel manager exists for this board. | ||
318 | * Ensures: | ||
319 | */ | ||
320 | typedef int(*fxn_chnl_create) (struct chnl_mgr | ||
321 | **channel_mgr, | ||
322 | struct dev_object | ||
323 | * hdev_obj, | ||
324 | const struct | ||
325 | chnl_mgrattrs * mgr_attrts); | ||
326 | |||
327 | /* | ||
328 | * ======== bridge_chnl_destroy ======== | ||
329 | * Purpose: | ||
330 | * Close all open channels, and destroy the channel manager. | ||
331 | * Parameters: | ||
332 | * hchnl_mgr: Channel manager object. | ||
333 | * Returns: | ||
334 | * 0: Success. | ||
335 | * -EFAULT: hchnl_mgr was invalid. | ||
336 | * Requires: | ||
337 | * Ensures: | ||
338 | * 0: Cancels I/O on each open channel. Closes each open channel. | ||
339 | * chnl_create may subsequently be called for the same device. | ||
340 | */ | ||
341 | typedef int(*fxn_chnl_destroy) (struct chnl_mgr *hchnl_mgr); | ||
342 | /* | ||
343 | * ======== bridge_deh_notify ======== | ||
344 | * Purpose: | ||
345 | * When notified of DSP error, take appropriate action. | ||
346 | * Parameters: | ||
347 | * hdeh_mgr: Handle to DEH manager object. | ||
348 | * evnt_mask: Indicate the type of exception | ||
349 | * error_info: Error information | ||
350 | * Returns: | ||
351 | * | ||
352 | * Requires: | ||
353 | * hdeh_mgr != NULL; | ||
354 | * evnt_mask with a valid exception | ||
355 | * Ensures: | ||
356 | */ | ||
357 | typedef void (*fxn_deh_notify) (struct deh_mgr *hdeh_mgr, | ||
358 | u32 evnt_mask, u32 error_info); | ||
359 | |||
360 | /* | ||
361 | * ======== bridge_chnl_open ======== | ||
362 | * Purpose: | ||
363 | * Open a new half-duplex channel to the DSP board. | ||
364 | * Parameters: | ||
365 | * chnl: Location to store a channel object handle. | ||
366 | * hchnl_mgr: Handle to channel manager, as returned by | ||
367 | * CHNL_GetMgr(). | ||
368 | * chnl_mode: One of {CHNL_MODETODSP, CHNL_MODEFROMDSP} specifies | ||
369 | * direction of data transfer. | ||
370 | * ch_id: If CHNL_PICKFREE is specified, the channel manager will | ||
371 | * select a free channel id (default); | ||
372 | * otherwise this field specifies the id of the channel. | ||
373 | * pattrs: Channel attributes. Attribute fields are as follows: | ||
374 | * pattrs->uio_reqs: Specifies the maximum number of I/O requests which can | ||
375 | * be pending at any given time. All request packets are | ||
376 | * preallocated when the channel is opened. | ||
377 | * pattrs->event_obj: This field allows the user to supply an auto reset | ||
378 | * event object for channel I/O completion notifications. | ||
379 | * It is the responsibility of the user to destroy this | ||
380 | * object AFTER closing the channel. | ||
381 | * This channel event object can be retrieved using | ||
382 | * CHNL_GetEventHandle(). | ||
383 | * pattrs->hReserved: The kernel mode handle of this event object. | ||
384 | * | ||
385 | * Returns: | ||
386 | * 0: Success. | ||
387 | * -EFAULT: hchnl_mgr is invalid. | ||
388 | * -ENOMEM: Insufficient memory for requested resources. | ||
389 | * -EINVAL: Invalid number of IOReqs. | ||
390 | * -ENOSR: No free channels available. | ||
391 | * -ECHRNG: Channel ID is out of range. | ||
392 | * -EALREADY: Channel is in use. | ||
393 | * -EIO: No free IO request packets available for | ||
394 | * queuing. | ||
395 | * Requires: | ||
396 | * chnl != NULL. | ||
397 | * pattrs != NULL. | ||
398 | * pattrs->event_obj is a valid event handle. | ||
399 | * pattrs->hReserved is the kernel mode handle for pattrs->event_obj. | ||
400 | * Ensures: | ||
401 | * 0: *chnl is a valid channel. | ||
402 | * else: *chnl is set to NULL if (chnl != NULL); | ||
403 | */ | ||
404 | typedef int(*fxn_chnl_open) (struct chnl_object | ||
405 | **chnl, | ||
406 | struct chnl_mgr *hchnl_mgr, | ||
407 | s8 chnl_mode, | ||
408 | u32 ch_id, | ||
409 | const struct | ||
410 | chnl_attr * pattrs); | ||
411 | |||
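To make the calling convention above concrete, here is a minimal sketch of how a client holding the driver's interface table might open a to-DSP channel. It is illustrative only: example_open_channel() and the attribute values are invented, and the chnl_attr field names follow the pattrs description above.

/* Hedged sketch: open an output (to-DSP) channel through the function table.
 * 'intf' and 'mgr' are assumed to have been obtained elsewhere; CHNL_MODETODSP
 * and CHNL_PICKFREE are the constants referenced in the comment above. */
static int example_open_channel(struct bridge_drv_interface *intf,
                                struct chnl_mgr *mgr,
                                struct chnl_object **chnl)
{
        struct chnl_attr attrs = {
                .uio_reqs  = 16,        /* preallocate 16 I/O request packets */
                .event_obj = NULL,      /* no user-supplied completion event here */
        };

        return intf->chnl_open(chnl, mgr, CHNL_MODETODSP, CHNL_PICKFREE, &attrs);
}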
412 | /* | ||
413 | * ======== bridge_chnl_close ======== | ||
414 | * Purpose: | ||
415 | * Ensures all pending I/O on this channel is cancelled, discards all | ||
416 | * queued I/O completion notifications, then frees the resources allocated | ||
417 | * for this channel, and makes the corresponding logical channel id | ||
418 | * available for subsequent use. | ||
419 | * Parameters: | ||
420 | * chnl_obj: Handle to a channel object. | ||
421 | * Returns: | ||
422 | * 0: Success; | ||
423 | * -EFAULT: Invalid chnl_obj. | ||
424 | * Requires: | ||
425 | * No thread must be blocked on this channel's I/O completion event. | ||
426 | * Ensures: | ||
427 | * 0: chnl_obj is no longer valid. | ||
428 | */ | ||
429 | typedef int(*fxn_chnl_close) (struct chnl_object *chnl_obj); | ||
430 | |||
431 | /* | ||
432 | * ======== bridge_chnl_add_io_req ======== | ||
433 | * Purpose: | ||
434 | * Enqueue an I/O request for data transfer on a channel to the DSP. | ||
435 | * The direction (mode) is specified in the channel object. Note the DSP | ||
436 | * address is specified for channels opened in direct I/O mode. | ||
437 | * Parameters: | ||
438 | * chnl_obj: Channel object handle. | ||
439 | * host_buf: Host buffer address source. | ||
440 | * byte_size: Number of PC bytes to transfer. A zero value indicates | ||
441 | * that this buffer is the last in the output channel. | ||
442 | * A zero value is invalid for an input channel. | ||
443 | * buf_size: Actual buffer size in host bytes. | ||
444 | * dw_dsp_addr: DSP address for transfer. (Currently ignored). | ||
445 | * dw_arg: A user argument that travels with the buffer. | ||
446 | * Returns: | ||
447 | * 0: Success; | ||
448 | * -EFAULT: Invalid chnl_obj or host_buf. | ||
449 | * -EPERM: User cannot mark EOS on an input channel. | ||
450 | * -ECANCELED: I/O has been cancelled on this channel. No further | ||
451 | * I/O is allowed. | ||
452 | * -EPIPE: End of stream was already marked on a previous | ||
453 | * IORequest on this channel. No further I/O is expected. | ||
454 | * -EINVAL: Buffer submitted to this output channel is larger than | ||
455 | * the size of the physical shared memory output window. | ||
456 | * Requires: | ||
457 | * Ensures: | ||
458 | * 0: The buffer will be transferred if the channel is ready; | ||
459 | * otherwise, will be queued for transfer when the channel becomes | ||
460 | * ready. In any case, notifications of I/O completion are | ||
461 | * asynchronous. | ||
462 | * If byte_size is 0 for an output channel, subsequent CHNL_AddIOReq's | ||
463 | * on this channel will fail with error code -EPIPE. The | ||
464 | * corresponding IOC for this I/O request will have its status flag | ||
465 | * set to CHNL_IOCSTATEOS. | ||
466 | */ | ||
467 | typedef int(*fxn_chnl_addioreq) (struct chnl_object | ||
468 | * chnl_obj, | ||
469 | void *host_buf, | ||
470 | u32 byte_size, | ||
471 | u32 buf_size, | ||
472 | u32 dw_dsp_addr, u32 dw_arg); | ||
473 | |||
474 | /* | ||
475 | * ======== bridge_chnl_get_ioc ======== | ||
476 | * Purpose: | ||
477 | * Dequeue an I/O completion record, which contains information about the | ||
478 | * completed I/O request. | ||
479 | * Parameters: | ||
480 | * chnl_obj: Channel object handle. | ||
481 | * timeout: A value of CHNL_IOCNOWAIT will simply dequeue the | ||
482 | * first available IOC. | ||
483 | * chan_ioc: On output, contains host buffer address, bytes | ||
484 | * transferred, and status of I/O completion. | ||
485 | * chan_ioc->status: See chnldefs.h. | ||
486 | * Returns: | ||
487 | * 0: Success. | ||
488 | * -EFAULT: Invalid chnl_obj or chan_ioc. | ||
489 | * -EREMOTEIO: CHNL_IOCNOWAIT was specified as the timeout parameter | ||
490 | * yet no I/O completions were queued. | ||
491 | * Requires: | ||
492 | * timeout == CHNL_IOCNOWAIT. | ||
493 | * Ensures: | ||
494 | * 0: if there are any remaining IOC's queued before this call | ||
495 | * returns, the channel event object will be left in a signalled | ||
496 | * state. | ||
497 | */ | ||
498 | typedef int(*fxn_chnl_getioc) (struct chnl_object *chnl_obj, | ||
499 | u32 timeout, | ||
500 | struct chnl_ioc *chan_ioc); | ||
501 | |||
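The request/completion hooks above are normally used as a pair: queue a buffer, then reap its completion record. A hedged sketch follows, assuming a channel opened as in the earlier example and the CHNL_IOCNOWAIT constant and struct chnl_ioc layout referenced above; error handling is deliberately abbreviated.

/* Sketch only: submit one buffer and poll until its IOC can be dequeued. */
static int example_transfer(struct bridge_drv_interface *intf,
                            struct chnl_object *chnl, void *buf, u32 len)
{
        struct chnl_ioc ioc;
        int status;

        /* dw_dsp_addr is currently ignored per the comment above; dw_arg is
         * an opaque cookie that travels with the buffer. */
        status = intf->chnl_add_io_req(chnl, buf, len, len, 0, 0);
        if (status)
                return status;

        /* CHNL_IOCNOWAIT dequeues the first available IOC, or fails with
         * -EREMOTEIO when nothing has completed yet. */
        do {
                status = intf->chnl_get_ioc(chnl, CHNL_IOCNOWAIT, &ioc);
        } while (status == -EREMOTEIO);

        return status;
}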
502 | /* | ||
503 | * ======== bridge_chnl_cancel_io ======== | ||
504 | * Purpose: | ||
505 | * Return all I/O requests to the client which have not yet been | ||
506 | * transferred. The channel's I/O completion object is | ||
507 | * signalled, and all the I/O requests are queued as IOC's, with the | ||
508 | * status field set to CHNL_IOCSTATCANCEL. | ||
509 | * This call is typically used in abort situations, and is a prelude to | ||
510 | * chnl_close(); | ||
511 | * Parameters: | ||
512 | * chnl_obj: Channel object handle. | ||
513 | * Returns: | ||
514 | * 0: Success; | ||
515 | * -EFAULT: Invalid chnl_obj. | ||
516 | * Requires: | ||
517 | * Ensures: | ||
518 | * Subsequent I/O requests to this channel will not be accepted. | ||
519 | */ | ||
520 | typedef int(*fxn_chnl_cancelio) (struct chnl_object *chnl_obj); | ||
521 | |||
522 | /* | ||
523 | * ======== bridge_chnl_flush_io ======== | ||
524 | * Purpose: | ||
525 | * For an output stream (to the DSP), indicates if any IO requests are in | ||
526 | * the output request queue. For input streams (from the DSP), will | ||
527 | * cancel all pending IO requests. | ||
528 | * Parameters: | ||
529 | * chnl_obj: Channel object handle. | ||
530 | * timeout: Timeout value for flush operation. | ||
531 | * Returns: | ||
532 | * 0: Success; | ||
533 | * S_CHNLIOREQUEST: Returned if any IORequests are in the output queue. | ||
534 | * -EFAULT: Invalid chnl_obj. | ||
535 | * Requires: | ||
536 | * Ensures: | ||
537 | * 0: No I/O requests will be pending on this channel. | ||
538 | */ | ||
539 | typedef int(*fxn_chnl_flushio) (struct chnl_object *chnl_obj, | ||
540 | u32 timeout); | ||
541 | |||
542 | /* | ||
543 | * ======== bridge_chnl_get_info ======== | ||
544 | * Purpose: | ||
545 | * Retrieve information related to a channel. | ||
546 | * Parameters: | ||
547 | * chnl_obj: Handle to a valid channel object, or NULL. | ||
548 | * channel_info: Location to store channel info. | ||
549 | * Returns: | ||
550 | * 0: Success; | ||
551 | * -EFAULT: Invalid chnl_obj or channel_info. | ||
552 | * Requires: | ||
553 | * Ensures: | ||
554 | * 0: channel_info points to a filled in chnl_info struct, | ||
555 | * if (channel_info != NULL). | ||
556 | */ | ||
557 | typedef int(*fxn_chnl_getinfo) (struct chnl_object *chnl_obj, | ||
558 | struct chnl_info *channel_info); | ||
559 | |||
560 | /* | ||
561 | * ======== bridge_chnl_get_mgr_info ======== | ||
562 | * Purpose: | ||
563 | * Retrieve information related to the channel manager. | ||
564 | * Parameters: | ||
565 | * hchnl_mgr: Handle to a valid channel manager, or NULL. | ||
566 | * ch_id: Channel ID. | ||
567 | * mgr_info: Location to store channel manager info. | ||
568 | * Returns: | ||
569 | * 0: Success; | ||
570 | * -EFAULT: Invalid hchnl_mgr or mgr_info. | ||
571 | * -ECHRNG: Invalid channel ID. | ||
572 | * Requires: | ||
573 | * Ensures: | ||
574 | * 0: mgr_info points to a filled in chnl_mgrinfo | ||
575 | * struct, if (mgr_info != NULL). | ||
576 | */ | ||
577 | typedef int(*fxn_chnl_getmgrinfo) (struct chnl_mgr | ||
578 | * hchnl_mgr, | ||
579 | u32 ch_id, | ||
580 | struct chnl_mgrinfo *mgr_info); | ||
581 | |||
582 | /* | ||
583 | * ======== bridge_chnl_idle ======== | ||
584 | * Purpose: | ||
585 | * Idle a channel. If this is an input channel, or if this is an output | ||
586 | * channel and flush_data is TRUE, all currently enqueued buffers will be | ||
587 | * dequeued (data discarded for output channel). | ||
588 | * If this is an output channel and flush_data is FALSE, this function | ||
589 | * will block until all currently buffered data is output, or the timeout | ||
590 | * specified has been reached. | ||
591 | * | ||
592 | * Parameters: | ||
593 | * chnl_obj: Channel object handle. | ||
594 | * timeout: If output channel and flush_data is FALSE, timeout value | ||
595 | * to wait for buffers to be output. (Not used for | ||
596 | * input channel). | ||
597 | * flush_data: If output channel and flush_data is TRUE, discard any | ||
598 | * currently buffered data. If FALSE, wait for currently | ||
599 | * buffered data to be output, or timeout, whichever | ||
600 | * occurs first. flush_data is ignored for input channel. | ||
601 | * Returns: | ||
602 | * 0: Success; | ||
603 | * -EFAULT: Invalid chnl_obj. | ||
604 | * -ETIMEDOUT: Timeout occurred before channel could be idled. | ||
605 | * Requires: | ||
606 | * Ensures: | ||
607 | */ | ||
608 | typedef int(*fxn_chnl_idle) (struct chnl_object *chnl_obj, | ||
609 | u32 timeout, bool flush_data); | ||
610 | |||
611 | /* | ||
612 | * ======== bridge_chnl_register_notify ======== | ||
613 | * Purpose: | ||
614 | * Register for notification of events on a channel. | ||
615 | * Parameters: | ||
616 | * chnl_obj: Channel object handle. | ||
617 | * event_mask: Type of events to be notified about: IO completion | ||
618 | * (DSP_STREAMIOCOMPLETION) or end of stream | ||
619 | * (DSP_STREAMDONE). | ||
620 | * notify_type: DSP_SIGNALEVENT. | ||
621 | * hnotification: Handle of a dsp_notification object. | ||
622 | * Returns: | ||
623 | * 0: Success. | ||
624 | * -ENOMEM: Insufficient memory. | ||
625 | * -EINVAL: event_mask is 0 and hnotification was not | ||
626 | * previously registered. | ||
627 | * -EFAULT: NULL hnotification, hnotification event name | ||
628 | * too long, or hnotification event name NULL. | ||
629 | * Requires: | ||
630 | * Valid chnl_obj. | ||
631 | * hnotification != NULL. | ||
632 | * (event_mask & ~(DSP_STREAMIOCOMPLETION | DSP_STREAMDONE)) == 0. | ||
633 | * notify_type == DSP_SIGNALEVENT. | ||
634 | * Ensures: | ||
635 | */ | ||
636 | typedef int(*fxn_chnl_registernotify) | ||
637 | (struct chnl_object *chnl_obj, | ||
638 | u32 event_mask, u32 notify_type, struct dsp_notification *hnotification); | ||
639 | |||
640 | /* | ||
641 | * ======== bridge_dev_create ======== | ||
642 | * Purpose: | ||
643 | * Complete creation of the device object for this board. | ||
644 | * Parameters: | ||
645 | * device_ctx: Ptr to location to store a Bridge device context. | ||
646 | * hdev_obj: Handle to a Device Object, created and managed by DSP API. | ||
647 | * config_param: Ptr to configuration parameters provided by the | ||
648 | * Configuration Manager during device loading. | ||
649 | * pDspConfig: DSP resources, as specified in the registry key for this | ||
650 | * device. | ||
651 | * Returns: | ||
652 | * 0: Success. | ||
653 | * -ENOMEM: Unable to allocate memory for device context. | ||
654 | * Requires: | ||
655 | * device_ctx != NULL; | ||
656 | * hdev_obj != NULL; | ||
657 | * config_param != NULL; | ||
658 | * pDspConfig != NULL; | ||
659 | * Fields in config_param and pDspConfig contain valid values. | ||
660 | * Ensures: | ||
661 | * 0: All Bridge driver specific DSP resource and other | ||
662 | * board context has been allocated. | ||
663 | * -ENOMEM: Bridge failed to allocate resources. | ||
664 | * Any acquired resources have been freed. The DSP API | ||
665 | * will not call bridge_dev_destroy() if | ||
666 | * bridge_dev_create() fails. | ||
667 | * Details: | ||
668 | * Called during the CONFIGMG's Device_Init phase. Based on host and | ||
669 | * DSP configuration information, create a board context, a handle to | ||
670 | * which is passed into other Bridge BRD and CHNL functions. The | ||
671 | * board context contains state information for the device. Since the | ||
672 | * addresses of all pointer parameters may be invalid when this | ||
673 | * function returns, they must not be stored into the device context | ||
674 | * structure. | ||
675 | */ | ||
676 | typedef int(*fxn_dev_create) (struct bridge_dev_context | ||
677 | **device_ctx, | ||
678 | struct dev_object | ||
679 | * hdev_obj, | ||
680 | struct cfg_hostres | ||
681 | * config_param); | ||
682 | |||
683 | /* | ||
684 | * ======== bridge_dev_ctrl ======== | ||
685 | * Purpose: | ||
686 | * Bridge driver specific interface. | ||
687 | * Parameters: | ||
688 | * dev_ctxt: Handle to Bridge driver defined device info. | ||
689 | * dw_cmd: Bridge driver defined command code. | ||
690 | * pargs: Pointer to an arbitrary argument structure. | ||
691 | * Returns: | ||
692 | * 0 or -EPERM. Actual command error codes should be passed back in | ||
693 | * the pargs structure, and are defined by the Bridge driver implementor. | ||
694 | * Requires: | ||
695 | * All calls are currently assumed to be synchronous. There are no | ||
696 | * IOCTL completion routines provided. | ||
697 | * Ensures: | ||
698 | */ | ||
699 | typedef int(*fxn_dev_ctrl) (struct bridge_dev_context *dev_ctxt, | ||
700 | u32 dw_cmd, void *pargs); | ||
701 | |||
702 | /* | ||
703 | * ======== bridge_dev_destroy ======== | ||
704 | * Purpose: | ||
705 | * Deallocate Bridge device extension structures and all other resources | ||
706 | * acquired by the Bridge driver. | ||
707 | * No calls to other Bridge driver functions may subsequently | ||
708 | * occur, except for bridge_dev_create(). | ||
709 | * Parameters: | ||
710 | * dev_ctxt: Handle to Bridge driver defined device information. | ||
711 | * Returns: | ||
712 | * 0: Success. | ||
713 | * -EPERM: Failed to release a resource previously acquired. | ||
714 | * Requires: | ||
715 | * dev_ctxt != NULL; | ||
716 | * Ensures: | ||
717 | * 0: Device context is freed. | ||
718 | */ | ||
719 | typedef int(*fxn_dev_destroy) (struct bridge_dev_context *dev_ctxt); | ||
720 | |||
721 | /* | ||
722 | * ======== bridge_io_create ======== | ||
723 | * Purpose: | ||
724 | * Create an object that manages I/O between CHNL and msg_ctrl. | ||
725 | * Parameters: | ||
726 | * io_man: Location to store IO manager on output. | ||
727 | * hdev_obj: Handle to a device object. | ||
728 | * mgr_attrts: IO manager attributes. | ||
729 | * Returns: | ||
730 | * 0: Success. | ||
731 | * -ENOMEM: Memory allocation failure. | ||
732 | * -EPERM: Creation failed. | ||
733 | * Requires: | ||
734 | * hdev_obj != NULL; | ||
735 | * Channel manager already created; | ||
736 | * Message manager already created; | ||
737 | * mgr_attrts != NULL; | ||
738 | * io_man != NULL; | ||
739 | * Ensures: | ||
740 | */ | ||
741 | typedef int(*fxn_io_create) (struct io_mgr **io_man, | ||
742 | struct dev_object *hdev_obj, | ||
743 | const struct io_attrs *mgr_attrts); | ||
744 | |||
745 | /* | ||
746 | * ======== bridge_io_destroy ======== | ||
747 | * Purpose: | ||
748 | * Destroy object created in bridge_io_create. | ||
749 | * Parameters: | ||
750 | * hio_mgr: IO Manager. | ||
751 | * Returns: | ||
752 | * 0: Success. | ||
753 | * -ENOMEM: Memory allocation failure. | ||
754 | * -EPERM: Creation failed. | ||
755 | * Requires: | ||
756 | * Valid hio_mgr; | ||
757 | * Ensures: | ||
758 | */ | ||
759 | typedef int(*fxn_io_destroy) (struct io_mgr *hio_mgr); | ||
760 | |||
761 | /* | ||
762 | * ======== bridge_io_on_loaded ======== | ||
763 | * Purpose: | ||
764 | * Called whenever a program is loaded to update internal data. For | ||
765 | * example, if shared memory is used, this function would update the | ||
766 | * shared memory location and address. | ||
767 | * Parameters: | ||
768 | * hio_mgr: IO Manager. | ||
769 | * Returns: | ||
770 | * 0: Success. | ||
771 | * -EPERM: Internal failure occurred. | ||
772 | * Requires: | ||
773 | * Valid hio_mgr; | ||
774 | * Ensures: | ||
775 | */ | ||
776 | typedef int(*fxn_io_onloaded) (struct io_mgr *hio_mgr); | ||
777 | |||
778 | /* | ||
779 | * ======== fxn_io_getprocload ======== | ||
780 | * Purpose: | ||
781 | * Called to get the Processor's current and predicted load. | ||
782 | * Parameters: | ||
783 | * hio_mgr: IO Manager. | ||
784 | * proc_load_stat: Processor Load statistics. | ||
785 | * Returns: | ||
786 | * 0: Success. | ||
787 | * -EPERM: Internal failure occurred. | ||
788 | * Requires: | ||
789 | * Valid hio_mgr; | ||
790 | * Ensures: | ||
791 | */ | ||
792 | typedef int(*fxn_io_getprocload) (struct io_mgr *hio_mgr, | ||
793 | struct dsp_procloadstat * | ||
794 | proc_load_stat); | ||
795 | |||
796 | /* | ||
797 | * ======== bridge_msg_create ======== | ||
798 | * Purpose: | ||
799 | * Create an object to manage message queues. Only one of these objects | ||
800 | * can exist per device object. | ||
801 | * Parameters: | ||
802 | * msg_man: Location to store msg_ctrl manager on output. | ||
803 | * hdev_obj: Handle to a device object. | ||
804 | * msg_callback: Called whenever an RMS_EXIT message is received. | ||
805 | * Returns: | ||
806 | * 0: Success. | ||
807 | * -ENOMEM: Insufficient memory. | ||
808 | * Requires: | ||
809 | * msg_man != NULL. | ||
810 | * msg_callback != NULL. | ||
811 | * hdev_obj != NULL. | ||
812 | * Ensures: | ||
813 | */ | ||
814 | typedef int(*fxn_msg_create) | ||
815 | (struct msg_mgr **msg_man, | ||
816 | struct dev_object *hdev_obj, msg_onexit msg_callback); | ||
817 | |||
818 | /* | ||
819 | * ======== bridge_msg_create_queue ======== | ||
820 | * Purpose: | ||
821 | * Create a msg_ctrl queue for sending or receiving messages from a Message | ||
822 | * node on the DSP. | ||
823 | * Parameters: | ||
824 | * hmsg_mgr: msg_ctrl queue manager handle returned from | ||
825 | * bridge_msg_create. | ||
826 | * msgq: Location to store msg_ctrl queue on output. | ||
827 | * msgq_id: Identifier for messages (node environment pointer). | ||
828 | * max_msgs: Max number of simultaneous messages for the node. | ||
829 | * h: Handle passed to hmsg_mgr->msg_callback(). | ||
830 | * Returns: | ||
831 | * 0: Success. | ||
832 | * -ENOMEM: Insufficient memory. | ||
833 | * Requires: | ||
834 | * msgq != NULL. | ||
835 | * h != NULL. | ||
836 | * max_msgs > 0. | ||
837 | * Ensures: | ||
838 | * msgq != NULL <==> 0. | ||
839 | */ | ||
840 | typedef int(*fxn_msg_createqueue) | ||
841 | (struct msg_mgr *hmsg_mgr, | ||
842 | struct msg_queue **msgq, u32 msgq_id, u32 max_msgs, void *h); | ||
843 | |||
844 | /* | ||
845 | * ======== bridge_msg_delete ======== | ||
846 | * Purpose: | ||
847 | * Delete a msg_ctrl manager allocated in bridge_msg_create(). | ||
848 | * Parameters: | ||
849 | * hmsg_mgr: Handle returned from bridge_msg_create(). | ||
850 | * Returns: | ||
851 | * Requires: | ||
852 | * Valid hmsg_mgr. | ||
853 | * Ensures: | ||
854 | */ | ||
855 | typedef void (*fxn_msg_delete) (struct msg_mgr *hmsg_mgr); | ||
856 | |||
857 | /* | ||
858 | * ======== bridge_msg_delete_queue ======== | ||
859 | * Purpose: | ||
860 | * Delete a msg_ctrl queue allocated in bridge_msg_create_queue. | ||
861 | * Parameters: | ||
862 | * msg_queue_obj: Handle to msg_ctrl queue returned from | ||
863 | * bridge_msg_create_queue. | ||
864 | * Returns: | ||
865 | * Requires: | ||
866 | * Valid msg_queue_obj. | ||
867 | * Ensures: | ||
868 | */ | ||
869 | typedef void (*fxn_msg_deletequeue) (struct msg_queue *msg_queue_obj); | ||
870 | |||
871 | /* | ||
872 | * ======== bridge_msg_get ======== | ||
873 | * Purpose: | ||
874 | * Get a message from a msg_ctrl queue. | ||
875 | * Parameters: | ||
876 | * msg_queue_obj: Handle to msg_ctrl queue returned from | ||
877 | * bridge_msg_create_queue. | ||
878 | * pmsg: Location to copy message into. | ||
879 | * utimeout: Timeout to wait for a message. | ||
880 | * Returns: | ||
881 | * 0: Success. | ||
882 | * -ETIME: Timeout occurred. | ||
883 | * -EPERM: No frames available for message (max_msgs too | ||
884 | * small). | ||
885 | * Requires: | ||
886 | * Valid msg_queue_obj. | ||
887 | * pmsg != NULL. | ||
888 | * Ensures: | ||
889 | */ | ||
890 | typedef int(*fxn_msg_get) (struct msg_queue *msg_queue_obj, | ||
891 | struct dsp_msg *pmsg, u32 utimeout); | ||
892 | |||
893 | /* | ||
894 | * ======== bridge_msg_put ======== | ||
895 | * Purpose: | ||
896 | * Put a message onto a msg_ctrl queue. | ||
897 | * Parameters: | ||
898 | * msg_queue_obj: Handle to msg_ctrl queue returned from | ||
899 | * bridge_msg_create_queue. | ||
900 | * pmsg: Pointer to message. | ||
901 | * utimeout: Timeout to wait for a message. | ||
902 | * Returns: | ||
903 | * 0: Success. | ||
904 | * -ETIME: Timeout occurred. | ||
905 | * -EPERM: No frames available for message (max_msgs too | ||
906 | * small). | ||
907 | * Requires: | ||
908 | * Valid msg_queue_obj. | ||
909 | * pmsg != NULL. | ||
910 | * Ensures: | ||
911 | */ | ||
912 | typedef int(*fxn_msg_put) (struct msg_queue *msg_queue_obj, | ||
913 | const struct dsp_msg *pmsg, u32 utimeout); | ||
914 | |||
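Taken together, msg_put and msg_get give a simple mailbox-style exchange with a message node. Below is a hedged sketch of a blocking round trip; the queue is assumed to have been created with msg_create_queue, and treating the utimeout unit as milliseconds is an assumption.

/* Sketch only: send a command to the DSP node and wait for its reply. */
static int example_msg_round_trip(struct bridge_drv_interface *intf,
                                  struct msg_queue *q,
                                  const struct dsp_msg *cmd,
                                  struct dsp_msg *reply)
{
        int status;

        status = intf->msg_put(q, cmd, 1000);
        if (status)
                return status;  /* -ETIME on timeout, -EPERM if no frames */

        return intf->msg_get(q, reply, 1000);
}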
915 | /* | ||
916 | * ======== bridge_msg_register_notify ======== | ||
917 | * Purpose: | ||
918 | * Register notification for when a message is ready. | ||
919 | * Parameters: | ||
920 | * msg_queue_obj: Handle to msg_ctrl queue returned from | ||
921 | * bridge_msg_create_queue. | ||
922 | * event_mask: Type of events to be notified about: Must be | ||
923 | * DSP_NODEMESSAGEREADY, or 0 to unregister. | ||
924 | * notify_type: DSP_SIGNALEVENT. | ||
925 | * hnotification: Handle of notification object. | ||
926 | * Returns: | ||
927 | * 0: Success. | ||
928 | * -ENOMEM: Insufficient memory. | ||
929 | * Requires: | ||
930 | * Valid msg_queue_obj. | ||
931 | * hnotification != NULL. | ||
932 | * notify_type == DSP_SIGNALEVENT. | ||
933 | * event_mask == DSP_NODEMESSAGEREADY || event_mask == 0. | ||
934 | * Ensures: | ||
935 | */ | ||
936 | typedef int(*fxn_msg_registernotify) | ||
937 | (struct msg_queue *msg_queue_obj, | ||
938 | u32 event_mask, u32 notify_type, struct dsp_notification *hnotification); | ||
939 | |||
940 | /* | ||
941 | * ======== bridge_msg_set_queue_id ======== | ||
942 | * Purpose: | ||
943 | * Set the message queue id to the node environment. Allows bridge_msg_create_queue | ||
944 | * to be called in node_allocate, before the node environment is known. | ||
945 | * Parameters: | ||
946 | * msg_queue_obj: Handle to msg_ctrl queue returned from | ||
947 | * bridge_msg_create_queue. | ||
948 | * msgq_id: Node environment pointer. | ||
949 | * Returns: | ||
950 | * Requires: | ||
951 | * Valid msg_queue_obj. | ||
952 | * msgq_id != 0. | ||
953 | * Ensures: | ||
954 | */ | ||
955 | typedef void (*fxn_msg_setqueueid) (struct msg_queue *msg_queue_obj, | ||
956 | u32 msgq_id); | ||
957 | |||
958 | /* | ||
959 | * Bridge Driver interface function table. | ||
960 | * | ||
961 | * The information in this table is filled in by the specific Bridge driver, | ||
962 | * and copied into the DSP API's own space. If any interface | ||
963 | * function field is set to a value of NULL, then the DSP API will | ||
964 | * consider that function not implemented, and return the error code | ||
965 | * -ENOSYS when a Bridge driver client attempts to call that function. | ||
966 | * | ||
967 | * This function table contains DSP API version numbers, which are used by the | ||
968 | * Bridge driver loader to help ensure backwards compatibility between older | ||
969 | * Bridge drivers and newer DSP API. These must be set to | ||
970 | * BRD_API_MAJOR_VERSION and BRD_API_MINOR_VERSION, respectively. | ||
971 | * | ||
972 | * A Bridge driver need not export a CHNL interface. In this case, *all* of | ||
973 | * the bridge_chnl_* entries must be set to NULL. | ||
974 | */ | ||
975 | struct bridge_drv_interface { | ||
976 | u32 brd_api_major_version; /* Set to BRD_API_MAJOR_VERSION. */ | ||
977 | u32 brd_api_minor_version; /* Set to BRD_API_MINOR_VERSION. */ | ||
978 | fxn_dev_create dev_create; /* Create device context */ | ||
979 | fxn_dev_destroy dev_destroy; /* Destroy device context */ | ||
980 | fxn_dev_ctrl dev_cntrl; /* Optional vendor interface */ | ||
981 | fxn_brd_monitor brd_monitor; /* Load and/or start monitor */ | ||
982 | fxn_brd_start brd_start; /* Start DSP program. */ | ||
983 | fxn_brd_stop brd_stop; /* Stop/reset board. */ | ||
984 | fxn_brd_status brd_status; /* Get current board status. */ | ||
985 | fxn_brd_read brd_read; /* Read board memory */ | ||
986 | fxn_brd_write brd_write; /* Write board memory. */ | ||
987 | fxn_brd_setstate brd_set_state; /* Sets the Board State */ | ||
988 | fxn_brd_memcopy brd_mem_copy; /* Copies DSP Memory */ | ||
989 | fxn_brd_memwrite brd_mem_write; /* Write DSP Memory w/o halt */ | ||
990 | fxn_brd_memmap brd_mem_map; /* Maps MPU mem to DSP mem */ | ||
991 | fxn_brd_memunmap brd_mem_un_map; /* Unmaps MPU mem to DSP mem */ | ||
992 | fxn_chnl_create chnl_create; /* Create channel manager. */ | ||
993 | fxn_chnl_destroy chnl_destroy; /* Destroy channel manager. */ | ||
994 | fxn_chnl_open chnl_open; /* Create a new channel. */ | ||
995 | fxn_chnl_close chnl_close; /* Close a channel. */ | ||
996 | fxn_chnl_addioreq chnl_add_io_req; /* Req I/O on a channel. */ | ||
997 | fxn_chnl_getioc chnl_get_ioc; /* Wait for I/O completion. */ | ||
998 | fxn_chnl_cancelio chnl_cancel_io; /* Cancel I/O on a channel. */ | ||
999 | fxn_chnl_flushio chnl_flush_io; /* Flush I/O. */ | ||
1000 | fxn_chnl_getinfo chnl_get_info; /* Get channel specific info */ | ||
1001 | /* Get channel manager info. */ | ||
1002 | fxn_chnl_getmgrinfo chnl_get_mgr_info; | ||
1003 | fxn_chnl_idle chnl_idle; /* Idle the channel */ | ||
1004 | /* Register for notif. */ | ||
1005 | fxn_chnl_registernotify chnl_register_notify; | ||
1006 | fxn_io_create io_create; /* Create IO manager */ | ||
1007 | fxn_io_destroy io_destroy; /* Destroy IO manager */ | ||
1008 | fxn_io_onloaded io_on_loaded; /* Notify of program loaded */ | ||
1009 | /* Get Processor's current and predicted load */ | ||
1010 | fxn_io_getprocload io_get_proc_load; | ||
1011 | fxn_msg_create msg_create; /* Create message manager */ | ||
1012 | /* Create message queue */ | ||
1013 | fxn_msg_createqueue msg_create_queue; | ||
1014 | fxn_msg_delete msg_delete; /* Delete message manager */ | ||
1015 | /* Delete message queue */ | ||
1016 | fxn_msg_deletequeue msg_delete_queue; | ||
1017 | fxn_msg_get msg_get; /* Get a message */ | ||
1018 | fxn_msg_put msg_put; /* Send a message */ | ||
1019 | /* Register for notif. */ | ||
1020 | fxn_msg_registernotify msg_register_notify; | ||
1021 | /* Set message queue id */ | ||
1022 | fxn_msg_setqueueid msg_set_queue_id; | ||
1023 | }; | ||
1024 | |||
1025 | /* | ||
1026 | * ======== bridge_drv_entry ======== | ||
1027 | * Purpose: | ||
1028 | * Registers Bridge driver functions with the DSP API. Called only once | ||
1029 | * by the DSP API. The caller will first check DSP API version | ||
1030 | * compatibility, and then copy the interface functions into its own | ||
1031 | * memory space. | ||
1032 | * Parameters: | ||
1033 | * drv_intf Pointer to a location to receive a pointer to the | ||
1034 | * Bridge driver interface. | ||
1035 | * Returns: | ||
1036 | * Requires: | ||
1037 | * The code segment this function resides in must expect to be discarded | ||
1038 | * after completion. | ||
1039 | * Ensures: | ||
1040 | * drv_intf pointer initialized to Bridge driver's function | ||
1041 | * interface. No system resources are acquired by this function. | ||
1042 | * Details: | ||
1043 | * Called during the Device_Init phase. | ||
1044 | */ | ||
1045 | void bridge_drv_entry(struct bridge_drv_interface **drv_intf, | ||
1046 | const char *driver_file_name); | ||
1047 | |||
1048 | #endif /* DSPDEFS_ */ | ||
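For reference, a Bridge driver exports the table above through bridge_drv_entry(). The following is a hypothetical sketch: the wb_* implementations are invented names and only a few entries are shown; per the table description, unimplemented entries are left NULL and report -ENOSYS.

/* Hedged sketch of the registration path described above. */
static struct bridge_drv_interface drv_interface = {
        .brd_api_major_version = BRD_API_MAJOR_VERSION,
        .brd_api_minor_version = BRD_API_MINOR_VERSION,
        .dev_create  = wb_dev_create,   /* hypothetical implementations */
        .dev_destroy = wb_dev_destroy,
        .dev_cntrl   = wb_dev_ctrl,
        /* ...remaining fxn_* entries, or NULL for "not implemented"... */
};

void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
                      const char *driver_file_name)
{
        /* The DSP API copies the table into its own space, so returning a
         * pointer to this static table is sufficient. */
        *drv_intf = &drv_interface;
}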
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdeh.h b/drivers/staging/tidspbridge/include/dspbridge/dspdeh.h deleted file mode 100644 index d258ab6a41d1..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dspdeh.h +++ /dev/null | |||
@@ -1,43 +0,0 @@ | |||
1 | /* | ||
2 | * dspdeh.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Defines upper edge DEH functions required by all Bridge driver/DSP API | ||
7 | * interface tables. | ||
8 | * | ||
9 | * Notes: | ||
10 | * Function comment headers reside with the function typedefs in dspdefs.h. | ||
11 | * | ||
12 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
13 | * Copyright (C) 2010 Felipe Contreras | ||
14 | * | ||
15 | * This package is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License version 2 as | ||
17 | * published by the Free Software Foundation. | ||
18 | * | ||
19 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
20 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | */ | ||
23 | |||
24 | #ifndef DSPDEH_ | ||
25 | #define DSPDEH_ | ||
26 | |||
27 | struct deh_mgr; | ||
28 | struct dev_object; | ||
29 | struct dsp_notification; | ||
30 | |||
31 | int bridge_deh_create(struct deh_mgr **ret_deh, | ||
32 | struct dev_object *hdev_obj); | ||
33 | |||
34 | int bridge_deh_destroy(struct deh_mgr *deh); | ||
35 | |||
36 | int bridge_deh_register_notify(struct deh_mgr *deh, | ||
37 | u32 event_mask, | ||
38 | u32 notify_type, | ||
39 | struct dsp_notification *hnotification); | ||
40 | |||
41 | void bridge_deh_notify(struct deh_mgr *deh, int event, int info); | ||
42 | |||
43 | #endif /* DSPDEH_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdrv.h b/drivers/staging/tidspbridge/include/dspbridge/dspdrv.h deleted file mode 100644 index 7adf1e705314..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dspdrv.h +++ /dev/null | |||
@@ -1,60 +0,0 @@ | |||
1 | /* | ||
2 | * dspdrv.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * This is the Stream Interface for the DSP API. | ||
7 | * All Device operations are performed via DeviceIOControl. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #if !defined _DSPDRV_H_ | ||
21 | #define _DSPDRV_H_ | ||
22 | |||
23 | /* | ||
24 | * ======== dsp_deinit ======== | ||
25 | * Purpose: | ||
26 | * This function is called by Device Manager to de-initialize a device. | ||
27 | * This function is not called by applications. | ||
28 | * Parameters: | ||
29 | * device_context: Handle to the device context. The XXX_Init function | ||
30 | * creates and returns this identifier. | ||
31 | * Returns: | ||
32 | * TRUE indicates the device successfully de-initialized. Otherwise it | ||
33 | * returns FALSE. | ||
34 | * Requires: | ||
35 | * device_context != NULL. For a built-in device this should never | ||
36 | * get called. | ||
37 | * Ensures: | ||
38 | */ | ||
39 | extern bool dsp_deinit(u32 device_context); | ||
40 | |||
41 | /* | ||
42 | * ======== dsp_init ======== | ||
43 | * Purpose: | ||
44 | * This function is called by Device Manager to initialize a device. | ||
45 | * This function is not called by applications | ||
46 | * Parameters: | ||
47 | * dw_context: Specifies a pointer to a string containing the registry | ||
48 | * path to the active key for the stream interface driver. | ||
49 | * HKEY_LOCAL_MACHINE\Drivers\Active | ||
50 | * Returns: | ||
51 | * Returns a handle to the device context created. This is our actual | ||
52 | * Device Object representing the DSP Device instance. | ||
53 | * Requires: | ||
54 | * Ensures: | ||
55 | * Succeeded: device context > 0 | ||
56 | * Failed: device Context = 0 | ||
57 | */ | ||
58 | extern u32 dsp_init(u32 *init_status); | ||
59 | |||
60 | #endif | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspio.h b/drivers/staging/tidspbridge/include/dspbridge/dspio.h deleted file mode 100644 index 66b64fadf197..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dspio.h +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | /* | ||
2 | * dspio.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Declares the upper edge IO functions required by all Bridge driver /DSP API | ||
7 | * interface tables. | ||
8 | * | ||
9 | * Notes: | ||
10 | * Function comment headers reside in dspdefs.h. | ||
11 | * | ||
12 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
13 | * | ||
14 | * This package is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License version 2 as | ||
16 | * published by the Free Software Foundation. | ||
17 | * | ||
18 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
19 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
20 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
21 | */ | ||
22 | |||
23 | #ifndef DSPIO_ | ||
24 | #define DSPIO_ | ||
25 | |||
26 | #include <dspbridge/devdefs.h> | ||
27 | #include <dspbridge/io.h> | ||
28 | |||
29 | |||
30 | extern int bridge_io_create(struct io_mgr **io_man, | ||
31 | struct dev_object *hdev_obj, | ||
32 | const struct io_attrs *mgr_attrts); | ||
33 | |||
34 | extern int bridge_io_destroy(struct io_mgr *hio_mgr); | ||
35 | |||
36 | extern int bridge_io_on_loaded(struct io_mgr *hio_mgr); | ||
37 | |||
38 | extern int bridge_io_get_proc_load(struct io_mgr *hio_mgr, | ||
39 | struct dsp_procloadstat *proc_lstat); | ||
40 | |||
41 | #endif /* DSPIO_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h deleted file mode 100644 index 0fcda1978921..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h +++ /dev/null | |||
@@ -1,68 +0,0 @@ | |||
1 | /* | ||
2 | * dspioctl.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Bridge driver BRD_IOCtl reserved command definitions. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef DSPIOCTL_ | ||
20 | #define DSPIOCTL_ | ||
21 | |||
22 | /* ------------------------------------ Hardware Abstraction Layer */ | ||
23 | #include <hw_defs.h> | ||
24 | #include <hw_mmu.h> | ||
25 | |||
26 | /* | ||
27 | * Any IOCTLS at or above this value are reserved for standard Bridge driver | ||
28 | * interfaces. | ||
29 | */ | ||
30 | #define BRDIOCTL_RESERVEDBASE 0x8000 | ||
31 | |||
32 | #define BRDIOCTL_CHNLREAD (BRDIOCTL_RESERVEDBASE + 0x10) | ||
33 | #define BRDIOCTL_CHNLWRITE (BRDIOCTL_RESERVEDBASE + 0x20) | ||
34 | /* DMMU */ | ||
35 | #define BRDIOCTL_SETMMUCONFIG (BRDIOCTL_RESERVEDBASE + 0x60) | ||
36 | /* PWR */ | ||
37 | #define BRDIOCTL_PWRCONTROL (BRDIOCTL_RESERVEDBASE + 0x70) | ||
38 | |||
39 | /* attention, modifiers: | ||
40 | * Some of these control enumerations are made visible to the user for power | ||
41 | * control, so any changes to this list should also be reflected in the user | ||
42 | * header file 'dbdefs.h' ***/ | ||
43 | /* These ioctls are reserved for PWR power commands for the DSP */ | ||
44 | #define BRDIOCTL_DEEPSLEEP (BRDIOCTL_PWRCONTROL + 0x0) | ||
45 | #define BRDIOCTL_EMERGENCYSLEEP (BRDIOCTL_PWRCONTROL + 0x1) | ||
46 | #define BRDIOCTL_WAKEUP (BRDIOCTL_PWRCONTROL + 0x2) | ||
47 | #define BRDIOCTL_CLK_CTRL (BRDIOCTL_PWRCONTROL + 0x7) | ||
48 | /* DSP Initiated Hibernate */ | ||
49 | #define BRDIOCTL_PWR_HIBERNATE (BRDIOCTL_PWRCONTROL + 0x8) | ||
50 | #define BRDIOCTL_PRESCALE_NOTIFY (BRDIOCTL_PWRCONTROL + 0x9) | ||
51 | #define BRDIOCTL_POSTSCALE_NOTIFY (BRDIOCTL_PWRCONTROL + 0xA) | ||
52 | #define BRDIOCTL_CONSTRAINT_REQUEST (BRDIOCTL_PWRCONTROL + 0xB) | ||
53 | |||
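These command codes are consumed through the fxn_dev_ctrl hook declared in dspdefs.h. A hedged sketch of a caller requesting DSP deep sleep follows; passing NULL for pargs is an assumption for this particular command.

/* Sketch only: route a reserved power command to the Bridge driver. */
static int example_request_deep_sleep(struct bridge_drv_interface *intf,
                                      struct bridge_dev_context *dev_ctxt)
{
        /* Per dspdefs.h, dev_cntrl returns 0 or -EPERM; any command-specific
         * error codes travel back through the pargs structure. */
        return intf->dev_cntrl(dev_ctxt, BRDIOCTL_DEEPSLEEP, NULL);
}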
54 | /* Number of actual DSP-MMU TLB entries */ | ||
55 | #define BRDIOCTL_NUMOFMMUTLB 32 | ||
56 | |||
57 | struct bridge_ioctl_extproc { | ||
58 | u32 dsp_va; /* DSP virtual address */ | ||
59 | u32 gpp_pa; /* GPP physical address */ | ||
60 | /* GPP virtual address. __va does not work for ioremapped addresses */ | ||
61 | u32 gpp_va; | ||
62 | u32 size; /* Size of the mapped memory in bytes */ | ||
63 | enum hw_endianism_t endianism; | ||
64 | enum hw_mmu_mixed_size_t mixed_mode; | ||
65 | enum hw_element_size_t elem_size; | ||
66 | }; | ||
67 | |||
68 | #endif /* DSPIOCTL_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspmsg.h b/drivers/staging/tidspbridge/include/dspbridge/dspmsg.h deleted file mode 100644 index d4bd458bc8be..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dspmsg.h +++ /dev/null | |||
@@ -1,56 +0,0 @@ | |||
1 | /* | ||
2 | * dspmsg.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Declares the upper edge message class library functions required by | ||
7 | * all Bridge driver / DSP API interface tables. These functions are | ||
8 | * implemented by every class of Bridge driver channel library. | ||
9 | * | ||
10 | * Notes: | ||
11 | * Function comment headers reside in dspdefs.h. | ||
12 | * | ||
13 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
14 | * | ||
15 | * This package is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License version 2 as | ||
17 | * published by the Free Software Foundation. | ||
18 | * | ||
19 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
20 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | */ | ||
23 | |||
24 | #ifndef DSPMSG_ | ||
25 | #define DSPMSG_ | ||
26 | |||
27 | #include <dspbridge/msgdefs.h> | ||
28 | |||
29 | extern int bridge_msg_create(struct msg_mgr **msg_man, | ||
30 | struct dev_object *hdev_obj, | ||
31 | msg_onexit msg_callback); | ||
32 | |||
33 | extern int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, | ||
34 | struct msg_queue **msgq, | ||
35 | u32 msgq_id, u32 max_msgs, void *arg); | ||
36 | |||
37 | extern void bridge_msg_delete(struct msg_mgr *hmsg_mgr); | ||
38 | |||
39 | extern void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj); | ||
40 | |||
41 | extern int bridge_msg_get(struct msg_queue *msg_queue_obj, | ||
42 | struct dsp_msg *pmsg, u32 utimeout); | ||
43 | |||
44 | extern int bridge_msg_put(struct msg_queue *msg_queue_obj, | ||
45 | const struct dsp_msg *pmsg, u32 utimeout); | ||
46 | |||
47 | extern int bridge_msg_register_notify(struct msg_queue *msg_queue_obj, | ||
48 | u32 event_mask, | ||
49 | u32 notify_type, | ||
50 | struct dsp_notification | ||
51 | *hnotification); | ||
52 | |||
53 | extern void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, | ||
54 | u32 msgq_id); | ||
55 | |||
56 | #endif /* DSPMSG_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h b/drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h deleted file mode 100644 index 052d27ee8b1a..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dynamic_loader.h +++ /dev/null | |||
@@ -1,490 +0,0 @@ | |||
1 | /* | ||
2 | * dynamic_loader.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef _DYNAMIC_LOADER_H_ | ||
18 | #define _DYNAMIC_LOADER_H_ | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/types.h> | ||
21 | |||
22 | /* | ||
23 | * Dynamic Loader | ||
24 | * | ||
25 | * The function of the dynamic loader is to load a "module" containing | ||
26 | * instructions for a "target" processor into that processor. In the process | ||
27 | * it assigns memory for the module, resolves symbol references made by the | ||
28 | * module, and remembers symbols defined by the module. | ||
29 | * | ||
30 | * The dynamic loader is parameterized for a particular system by 4 classes | ||
31 | * that supply the module- and system-specific functions it requires. | ||
32 | */ | ||
33 | /* The read functions for the module image to be loaded */ | ||
34 | struct dynamic_loader_stream; | ||
35 | |||
36 | /* This class defines "host" symbol and support functions */ | ||
37 | struct dynamic_loader_sym; | ||
38 | |||
39 | /* This class defines the allocator for "target" memory */ | ||
40 | struct dynamic_loader_allocate; | ||
41 | |||
42 | /* This class defines the copy-into-target-memory functions */ | ||
43 | struct dynamic_loader_initialize; | ||
44 | |||
45 | /* | ||
46 | * Option flags to modify the behavior of module loading | ||
47 | */ | ||
48 | #define DLOAD_INITBSS 0x1 /* initialize BSS sections to zero */ | ||
49 | |||
50 | /***************************************************************************** | ||
51 | * Procedure dynamic_load_module | ||
52 | * | ||
53 | * Parameters: | ||
54 | * module The input stream that supplies the module image | ||
55 | * syms Host-side symbol table and malloc/free functions | ||
56 | * alloc Target-side memory allocation | ||
57 | * init Target-side memory initialization, or NULL for symbol read only | ||
58 | * options Option flags DLOAD_* | ||
59 | * mhandle A module handle for use with Dynamic_Unload | ||
60 | * | ||
61 | * Effect: | ||
62 | * The module image is read using *module. Target storage for the new image is | ||
63 | * obtained from *alloc. Symbols defined and referenced by the module are | ||
64 | * managed using *syms. The image is then relocated and references resolved | ||
65 | * as necessary, and the resulting executable bits are placed into target memory | ||
66 | * using *init. | ||
67 | * | ||
68 | * Returns: | ||
69 | * On a successful load, a module handle is placed in *mhandle, and zero is | ||
70 | * returned. On error, the number of errors detected is returned. Individual | ||
71 | * errors are reported during the load process using syms->error_report(). | ||
72 | **************************************************************************** */ | ||
73 | extern int dynamic_load_module( | ||
74 | /* the source for the module image */ | ||
75 | struct dynamic_loader_stream *module, | ||
76 | /* host support for symbols and storage */ | ||
77 | struct dynamic_loader_sym *syms, | ||
78 | /* the target memory allocator */ | ||
79 | struct dynamic_loader_allocate *alloc, | ||
80 | /* the target memory initializer */ | ||
81 | struct dynamic_loader_initialize *init, | ||
82 | unsigned options, /* option flags */ | ||
83 | /* the returned module handle */ | ||
84 | void **mhandle); | ||
85 | |||
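A hedged usage sketch of the procedure above, assuming the four parameterization objects (stream, symbol handler, allocator, initializer) have already been set up by the caller; example_load() is an invented wrapper.

/* Sketch only: load a module image and keep the handle for a later unload. */
static int example_load(struct dynamic_loader_stream *strm,
                        struct dynamic_loader_sym *syms,
                        struct dynamic_loader_allocate *alloc,
                        struct dynamic_loader_initialize *init,
                        void **mhandle)
{
        int errors;

        /* DLOAD_INITBSS zero-fills BSS sections; the return value is the
         * number of errors detected, so non-zero means failure. */
        errors = dynamic_load_module(strm, syms, alloc, init,
                                     DLOAD_INITBSS, mhandle);
        return errors ? -EINVAL : 0;
}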
86 | /***************************************************************************** | ||
87 | * Procedure dynamic_open_module | ||
88 | * | ||
89 | * Parameters: | ||
90 | * module The input stream that supplies the module image | ||
91 | * syms Host-side symbol table and malloc/free functions | ||
92 | * alloc Target-side memory allocation | ||
93 | * init Target-side memory initialization, or NULL for symbol read only | ||
94 | * options Option flags DLOAD_* | ||
95 | * mhandle A module handle for use with Dynamic_Unload | ||
96 | * | ||
97 | * Effect: | ||
98 | * The module image is read using *module. Target storage for the new image is | ||
99 | * obtained from *alloc. Symbols defined and referenced by the module are | ||
100 | * managed using *syms. The image is then relocated and references resolved | ||
101 | * as necessary, and the resulting executable bits are placed into target memory | ||
102 | * using *init. | ||
103 | * | ||
104 | * Returns: | ||
105 | * On a successful load, a module handle is placed in *mhandle, and zero is | ||
106 | * returned. On error, the number of errors detected is returned. Individual | ||
107 | * errors are reported during the load process using syms->error_report(). | ||
108 | **************************************************************************** */ | ||
109 | extern int dynamic_open_module( | ||
110 | /* the source for the module image */ | ||
111 | struct dynamic_loader_stream *module, | ||
112 | /* host support for symbols and storage */ | ||
113 | struct dynamic_loader_sym *syms, | ||
114 | /* the target memory allocator */ | ||
115 | struct dynamic_loader_allocate *alloc, | ||
116 | /* the target memory initializer */ | ||
117 | struct dynamic_loader_initialize *init, | ||
118 | unsigned options, /* option flags */ | ||
119 | /* the returned module handle */ | ||
120 | void **mhandle); | ||
121 | |||
122 | /***************************************************************************** | ||
123 | * Procedure dynamic_unload_module | ||
124 | * | ||
125 | * Parameters: | ||
126 | * mhandle A module handle from dynamic_load_module | ||
127 | * syms Host-side symbol table and malloc/free functions | ||
128 | * alloc Target-side memory allocation | ||
129 | * | ||
130 | * Effect: | ||
131 | * The module specified by mhandle is unloaded. Unloading causes all | ||
132 | * target memory to be deallocated, all symbols defined by the module to | ||
133 | * be purged, and any host-side storage used by the dynamic loader for | ||
134 | * this module to be released. | ||
135 | * | ||
136 | * Returns: | ||
137 | * Zero for success. On error, the number of errors detected is returned. | ||
138 | * Individual errors are reported using syms->error_report(). | ||
139 | **************************************************************************** */ | ||
140 | extern int dynamic_unload_module(void *mhandle, /* the module | ||
141 | * handle */ | ||
142 | /* host support for symbols and | ||
143 | * storage */ | ||
144 | struct dynamic_loader_sym *syms, | ||
145 | /* the target memory allocator */ | ||
146 | struct dynamic_loader_allocate *alloc, | ||
147 | /* the target memory initializer */ | ||
148 | struct dynamic_loader_initialize *init); | ||
149 | |||
150 | /***************************************************************************** | ||
151 | ***************************************************************************** | ||
152 | * A class used by the dynamic loader for input of the module image | ||
153 | ***************************************************************************** | ||
154 | **************************************************************************** */ | ||
155 | struct dynamic_loader_stream { | ||
156 | /* public: */ | ||
157 | /************************************************************************* | ||
158 | * read_buffer | ||
159 | * | ||
160 | * PARAMETERS : | ||
161 | * buffer Pointer to the buffer to fill | ||
162 | * bufsiz Amount of data desired in sizeof() units | ||
163 | * | ||
164 | * EFFECT : | ||
165 | * Reads the specified amount of data from the module input stream | ||
166 | * into the specified buffer. Returns the amount of data read in sizeof() | ||
167 | * units (which, if less than the amount requested, indicates an error). | ||
168 | * | ||
169 | * NOTES: | ||
170 | * In release 1, this increments the file position by the number of bytes read. | ||
171 | * | ||
172 | ************************************************************************ */ | ||
173 | int (*read_buffer) (struct dynamic_loader_stream *thisptr, | ||
174 | void *buffer, unsigned bufsiz); | ||
175 | |||
176 | /************************************************************************* | ||
177 | * set_file_posn (release 1 only) | ||
178 | * | ||
179 | * PARAMETERS : | ||
180 | * posn Desired file position relative to start of file in sizeof() units. | ||
181 | * | ||
182 | * EFFECT : | ||
183 | * Adjusts the internal state of the stream object so that the next | ||
184 | * read_buffer call will begin to read at the specified offset from | ||
185 | * the beginning of the input module. Returns 0 for success, non-zero | ||
186 | * for failure. | ||
187 | * | ||
188 | ************************************************************************ */ | ||
189 | int (*set_file_posn) (struct dynamic_loader_stream *thisptr, | ||
190 | /* to be eliminated in release 2 */ | ||
191 | unsigned int posn); | ||
192 | |||
193 | }; | ||
194 | |||
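As an illustration of how a client supplies this class, here is a hedged sketch of a stream backed by an in-memory firmware image. The buffer_stream wrapper is invented for this example, and memcpy()/min() are assumed to come from <linux/string.h> and <linux/kernel.h>.

/* Sketch only: a read-only stream over a module image held in memory. */
struct buffer_stream {
        struct dynamic_loader_stream strm;      /* must be the first member */
        const u8 *image;
        unsigned size;
        unsigned pos;
};

static int bs_read_buffer(struct dynamic_loader_stream *thisptr,
                          void *buffer, unsigned bufsiz)
{
        struct buffer_stream *bs = (struct buffer_stream *)thisptr;
        unsigned n = min(bufsiz, bs->size - bs->pos);

        memcpy(buffer, bs->image + bs->pos, n);
        bs->pos += n;
        return n;       /* a short read signals an error to the loader */
}

static int bs_set_file_posn(struct dynamic_loader_stream *thisptr,
                            unsigned int posn)
{
        struct buffer_stream *bs = (struct buffer_stream *)thisptr;

        if (posn > bs->size)
                return 1;       /* non-zero means failure */
        bs->pos = posn;
        return 0;
}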
195 | /***************************************************************************** | ||
196 | ***************************************************************************** | ||
197 | * A class used by the dynamic loader for symbol table support and | ||
198 | * miscellaneous host-side functions | ||
199 | ***************************************************************************** | ||
200 | **************************************************************************** */ | ||
201 | |||
202 | typedef u32 ldr_addr; | ||
203 | |||
204 | /* | ||
205 | * the structure of a symbol known to the dynamic loader | ||
206 | */ | ||
207 | struct dynload_symbol { | ||
208 | ldr_addr value; | ||
209 | }; | ||
210 | |||
211 | struct dynamic_loader_sym { | ||
212 | /* public: */ | ||
213 | /************************************************************************* | ||
214 | * find_matching_symbol | ||
215 | * | ||
216 | * PARAMETERS : | ||
217 | * name The name of the desired symbol | ||
218 | * | ||
219 | * EFFECT : | ||
220 | * Locates a symbol matching the name specified. A pointer to the | ||
221 | * symbol is returned if it exists; 0 is returned if no such symbol is | ||
222 | * found. | ||
223 | * | ||
224 | ************************************************************************ */ | ||
225 | struct dynload_symbol *(*find_matching_symbol) | ||
226 | (struct dynamic_loader_sym *thisptr, const char *name); | ||
227 | |||
228 | /************************************************************************* | ||
229 | * add_to_symbol_table | ||
230 | * | ||
231 | * PARAMETERS : | ||
232 | * nname Pointer to the name of the new symbol | ||
233 | * moduleid An opaque module id assigned by the dynamic loader | ||
234 | * | ||
235 | * EFFECT : | ||
236 | * The new symbol is added to the table. A pointer to the symbol is | ||
237 | * returned, or NULL is returned for failure. | ||
238 | * | ||
239 | * NOTES: | ||
240 | * It is permissible for this function to return NULL; the effect is that | ||
241 | * the named symbol will not be available to resolve references in | ||
242 | * subsequent loads. Returning NULL will not cause the current load | ||
243 | * to fail. | ||
244 | ************************************************************************ */ | ||
245 | struct dynload_symbol *(*add_to_symbol_table) | ||
246 | (struct dynamic_loader_sym * | ||
247 | thisptr, const char *nname, unsigned moduleid); | ||
248 | |||
249 | /************************************************************************* | ||
250 | * purge_symbol_table | ||
251 | * | ||
252 | * PARAMETERS : | ||
253 | * moduleid An opaque module id assigned by the dynamic loader | ||
254 | * | ||
255 | * EFFECT : | ||
256 | * Each symbol in the symbol table whose moduleid matches the argument | ||
257 | * is removed from the table. | ||
258 | ************************************************************************ */ | ||
259 | void (*purge_symbol_table) (struct dynamic_loader_sym *thisptr, | ||
260 | unsigned moduleid); | ||
261 | |||
262 | /************************************************************************* | ||
263 | * dload_allocate | ||
264 | * | ||
265 | * PARAMETERS : | ||
266 | * memsiz size of desired memory in sizeof() units | ||
267 | * | ||
268 | * EFFECT : | ||
269 | * Returns a pointer to some "host" memory for use by the dynamic | ||
270 | * loader, or NULL for failure. | ||
271 | * This function serves as a replaceable form of "malloc" to | ||
272 | * allow the user to configure the memory usage of the dynamic loader. | ||
273 | ************************************************************************ */ | ||
274 | void *(*dload_allocate) (struct dynamic_loader_sym *thisptr, | ||
275 | unsigned memsiz); | ||
276 | |||
277 | /************************************************************************* | ||
278 | * dload_deallocate | ||
279 | * | ||
280 | * PARAMETERS : | ||
281 | * memptr pointer to previously allocated memory | ||
282 | * | ||
283 | * EFFECT : | ||
284 | * Releases the previously allocated "host" memory. | ||
285 | ************************************************************************ */ | ||
286 | void (*dload_deallocate) (struct dynamic_loader_sym *thisptr, | ||
287 | void *memptr); | ||
288 | |||
289 | /************************************************************************* | ||
290 | * error_report | ||
291 | * | ||
292 | * PARAMETERS : | ||
293 | * errstr pointer to an error string | ||
294 | * args additional arguments | ||
295 | * | ||
296 | * EFFECT : | ||
297 | * This function provides an error reporting interface for the dynamic | ||
298 | * loader. The error string and arguments are designed as for the | ||
299 | * library function vprintf. | ||
300 | ************************************************************************ */ | ||
301 | void (*error_report) (struct dynamic_loader_sym *thisptr, | ||
302 | const char *errstr, va_list args); | ||
303 | |||
304 | }; /* class dynamic_loader_sym */ | ||
305 | |||
306 | /***************************************************************************** | ||
307 | ***************************************************************************** | ||
308 | * A class used by the dynamic loader to allocate and deallocate target memory. | ||
309 | ***************************************************************************** | ||
310 | **************************************************************************** */ | ||
311 | |||
312 | struct ldr_section_info { | ||
313 | /* Name of the memory section assigned at build time */ | ||
314 | const char *name; | ||
315 | ldr_addr run_addr; /* execution address of the section */ | ||
316 | ldr_addr load_addr; /* load address of the section */ | ||
317 | ldr_addr size; /* size of the section in addressable units */ | ||
318 | #ifndef _BIG_ENDIAN | ||
319 | u16 page; /* memory page or view */ | ||
320 | u16 type; /* one of the section types below */ | ||
321 | #else | ||
322 | u16 type; /* one of the section types below */ | ||
323 | u16 page; /* memory page or view */ | ||
324 | #endif | ||
325 | /* a context field for use by dynamic_loader_allocate; | ||
326 | * ignored but maintained by the dynamic loader */ | ||
327 | u32 context; | ||
328 | }; | ||
329 | |||
330 | /* use this macro to extract type of section from ldr_section_info.type field */ | ||
331 | #define DLOAD_SECTION_TYPE(typeinfo) (typeinfo & 0xF) | ||
332 | |||
333 | /* type of section to be allocated */ | ||
334 | #define DLOAD_TEXT 0 | ||
335 | #define DLOAD_DATA 1 | ||
336 | #define DLOAD_BSS 2 | ||
337 | /* internal use only, run-time cinit will be of type DLOAD_DATA */ | ||
338 | #define DLOAD_CINIT 3 | ||
339 | |||
340 | struct dynamic_loader_allocate { | ||
341 | /* public: */ | ||
342 | |||
343 | /************************************************************************* | ||
344 | * Function allocate | ||
345 | * | ||
346 | * Parameters: | ||
347 | * info A pointer to an information block for the section | ||
348 | * align The alignment of the storage in target AUs | ||
349 | * | ||
350 | * Effect: | ||
351 | * Allocates target memory for the specified section and fills in the | ||
352 | * load_addr and run_addr fields of the section info structure. Returns TRUE | ||
353 | * for success, FALSE for failure. | ||
354 | * | ||
355 | * Notes: | ||
356 | * Frequently load_addr and run_addr are the same, but if they are not | ||
357 | * load_addr is used with dynamic_loader_initialize, and run_addr is | ||
358 | * used for almost all relocations. This function should always initialize | ||
359 | * both fields. | ||
360 | ************************************************************************ */ | ||
361 | int (*dload_allocate) (struct dynamic_loader_allocate *thisptr, | ||
362 | struct ldr_section_info *info, unsigned align); | ||
363 | |||
364 | /************************************************************************* | ||
365 | * Function deallocate | ||
366 | * | ||
367 | * Parameters: | ||
368 | * info A pointer to an information block for the section | ||
369 | * | ||
370 | * Effect: | ||
371 | * Releases the target memory previously allocated. | ||
372 | * | ||
373 | * Notes: | ||
374 | * The content of the info->name field is undefined on call to this function. | ||
375 | ************************************************************************ */ | ||
376 | void (*dload_deallocate) (struct dynamic_loader_allocate *thisptr, | ||
377 | struct ldr_section_info *info); | ||
378 | |||
379 | }; /* class dynamic_loader_allocate */ | ||
380 | |||
381 | /***************************************************************************** | ||
382 | ***************************************************************************** | ||
383 | * A class used by the dynamic loader to load data into a target. This class | ||
384 | * provides the interface-specific functions needed to load data. | ||
385 | ***************************************************************************** | ||
386 | **************************************************************************** */ | ||
387 | |||
388 | struct dynamic_loader_initialize { | ||
389 | /* public: */ | ||
390 | /************************************************************************* | ||
391 | * Function connect | ||
392 | * | ||
393 | * Parameters: | ||
394 | * none | ||
395 | * | ||
396 | * Effect: | ||
397 | * Connect to the initialization interface. Returns TRUE for success, | ||
398 | * FALSE for failure. | ||
399 | * | ||
400 | * Notes: | ||
401 | * This function is called prior to use of any other functions in | ||
402 | * this interface. | ||
403 | ************************************************************************ */ | ||
404 | int (*connect) (struct dynamic_loader_initialize *thisptr); | ||
405 | |||
406 | /************************************************************************* | ||
407 | * Function readmem | ||
408 | * | ||
409 | * Parameters: | ||
410 | * bufr Pointer to a word-aligned buffer for the result | ||
411 | * locn Target address of first data element | ||
412 | * info Section info for the section in which the address resides | ||
413 | * bytsiz Size of the data to be read in sizeof() units | ||
414 | * | ||
415 | * Effect: | ||
416 | * Fills the specified buffer with data from the target. Returns TRUE for | ||
417 | * success, FALSE for failure. | ||
418 | ************************************************************************ */ | ||
419 | int (*readmem) (struct dynamic_loader_initialize *thisptr, | ||
420 | void *bufr, | ||
421 | ldr_addr locn, | ||
422 | struct ldr_section_info *info, unsigned bytsiz); | ||
423 | |||
424 | /************************************************************************* | ||
425 | * Function writemem | ||
426 | * | ||
427 | * Parameters: | ||
428 | * bufr Pointer to a word-aligned buffer of data | ||
429 | * locn Target address of first data element to be written | ||
430 | * info Section info for the section in which the address resides | ||
431 | * bytsiz Size of the data to be written in sizeof() units | ||
432 | * | ||
433 | * Effect: | ||
434 | * Writes the specified buffer to the target. Returns TRUE for success, | ||
435 | * FALSE for failure. | ||
436 | ************************************************************************ */ | ||
437 | int (*writemem) (struct dynamic_loader_initialize *thisptr, | ||
438 | void *bufr, | ||
439 | ldr_addr locn, | ||
440 | struct ldr_section_info *info, unsigned bytsiz); | ||
441 | |||
442 | /************************************************************************* | ||
443 | * Function fillmem | ||
444 | * | ||
445 | * Parameters: | ||
446 | * locn Target address of first data element to be written | ||
447 | * info Section info for the section in which the address resides | ||
448 | * bytsiz Size of the data to be written in sizeof() units | ||
449 | * val Value to be written in each byte | ||
450 | * Effect: | ||
451 | * Fills the specified area of target memory. Returns TRUE for success, | ||
452 | * FALSE for failure. | ||
453 | ************************************************************************ */ | ||
454 | int (*fillmem) (struct dynamic_loader_initialize *thisptr, | ||
455 | ldr_addr locn, struct ldr_section_info *info, | ||
456 | unsigned bytsiz, unsigned val); | ||
457 | |||
458 | /************************************************************************* | ||
459 | * Function execute | ||
460 | * | ||
461 | * Parameters: | ||
462 | * start Starting address | ||
463 | * | ||
464 | * Effect: | ||
465 | * The target code at the specified starting address is executed. | ||
466 | * | ||
467 | * Notes: | ||
468 | * This function is called at the end of the dynamic load process | ||
469 | * if the input module has specified a starting address. | ||
470 | ************************************************************************ */ | ||
471 | int (*execute) (struct dynamic_loader_initialize *thisptr, | ||
472 | ldr_addr start); | ||
473 | |||
474 | /************************************************************************* | ||
475 | * Function release | ||
476 | * | ||
477 | * Parameters: | ||
478 | * none | ||
479 | * | ||
480 | * Effect: | ||
481 | * Releases the connection to the load interface. | ||
482 | * | ||
483 | * Notes: | ||
484 | * This function is called at the end of the dynamic load process. | ||
485 | ************************************************************************ */ | ||
486 | void (*release) (struct dynamic_loader_initialize *thisptr); | ||
487 | |||
488 | }; /* class dynamic_loader_initialize */ | ||
489 | |||
490 | #endif /* _DYNAMIC_LOADER_H_ */ | ||
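For context, here is a minimal sketch (not part of this patch) of how a host-side client could implement the dynamic_loader_allocate callbacks declared above. The my_alloc wrapper, the kmalloc backing store, and the pointer-to-ldr_addr casts are illustrative assumptions only; the real bridge code allocates DSP-side memory and kmalloc does not honour the requested alignment.

#include <linux/slab.h>

struct my_alloc {
	struct dynamic_loader_allocate alloc;	/* interface must come first */
};

static int my_dload_allocate(struct dynamic_loader_allocate *thisptr,
			     struct ldr_section_info *info, unsigned align)
{
	void *mem = kmalloc(info->size, GFP_KERNEL);

	if (!mem)
		return 0;	/* FALSE: allocation failed */

	/* Always fill in both fields, even when load and run coincide. */
	info->load_addr = (ldr_addr)(uintptr_t)mem;
	info->run_addr = info->load_addr;
	info->context = 0;	/* maintained, not interpreted, by the loader */
	return 1;		/* TRUE: success */
}

static void my_dload_deallocate(struct dynamic_loader_allocate *thisptr,
				struct ldr_section_info *info)
{
	/* info->name is undefined here; only the stored addresses are valid. */
	kfree((void *)(uintptr_t)info->load_addr);
}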
diff --git a/drivers/staging/tidspbridge/include/dspbridge/getsection.h b/drivers/staging/tidspbridge/include/dspbridge/getsection.h deleted file mode 100644 index 626063dd9dfe..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/getsection.h +++ /dev/null | |||
@@ -1,108 +0,0 @@ | |||
1 | /* | ||
2 | * getsection.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * This file provides an API add-on to the dynamic loader that allows the user | ||
7 | * to query section information and extract section data from dynamic load | ||
8 | * modules. | ||
9 | * | ||
10 | * Notes: | ||
11 | * Functions in this API assume that the supplied dynamic_loader_stream | ||
12 | * object supports the set_file_posn method. | ||
13 | * | ||
14 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
15 | * | ||
16 | * This package is free software; you can redistribute it and/or modify | ||
17 | * it under the terms of the GNU General Public License version 2 as | ||
18 | * published by the Free Software Foundation. | ||
19 | * | ||
20 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
21 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
22 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
23 | */ | ||
24 | |||
25 | #ifndef _GETSECTION_H_ | ||
26 | #define _GETSECTION_H_ | ||
27 | |||
28 | #include "dynamic_loader.h" | ||
29 | |||
30 | /* | ||
31 | * Procedure dload_module_open | ||
32 | * | ||
33 | * Parameters: | ||
34 | * module The input stream that supplies the module image | ||
35 | * syms Host-side malloc/free and error reporting functions. | ||
36 | * Other methods are unused. | ||
37 | * | ||
38 | * Effect: | ||
39 | * Reads header information from a dynamic loader module using the specified | ||
40 | * stream object, and returns a handle for the module information. This | ||
41 | * handle may be used in subsequent query calls to obtain information | ||
42 | * contained in the module. | ||
43 | * | ||
44 | * Returns: | ||
45 | * NULL if an error is encountered, otherwise a module handle for use | ||
46 | * in subsequent operations. | ||
47 | */ | ||
48 | extern void *dload_module_open(struct dynamic_loader_stream | ||
49 | *module, struct dynamic_loader_sym | ||
50 | *syms); | ||
51 | |||
52 | /* | ||
53 | * Procedure dload_get_section_info | ||
54 | * | ||
55 | * Parameters: | ||
56 | * minfo Handle from dload_module_open for this module | ||
57 | * section_name Pointer to the string name of the section desired | ||
58 | * section_info Address of a section info structure pointer to be initialized | ||
59 | * | ||
60 | * Effect: | ||
61 | * Finds the specified section in the module information, and fills in | ||
62 | * the provided ldr_section_info structure. | ||
63 | * | ||
64 | * Returns: | ||
65 | * TRUE for success, FALSE for section not found | ||
66 | */ | ||
67 | extern int dload_get_section_info(void *minfo, | ||
68 | const char *section_name, | ||
69 | const struct ldr_section_info | ||
70 | **const section_info); | ||
71 | |||
72 | /* | ||
73 | * Procedure dload_get_section | ||
74 | * | ||
75 | * Parameters: | ||
76 | * minfo Handle from dload_module_open for this module | ||
77 | * section_info Pointer to a section info structure for the desired section | ||
78 | * section_data Buffer to contain the section initialized data | ||
79 | * | ||
80 | * Effect: | ||
81 | * Copies the initialized data for the specified section into the | ||
82 | * supplied buffer. | ||
83 | * | ||
84 | * Returns: | ||
85 | * TRUE for success, FALSE for section not found | ||
86 | */ | ||
87 | extern int dload_get_section(void *minfo, | ||
88 | const struct ldr_section_info *section_info, | ||
89 | void *section_data); | ||
90 | |||
91 | /* | ||
92 | * Procedure dload_module_close | ||
93 | * | ||
94 | * Parameters: | ||
95 | * minfo Handle from dload_module_open for this module | ||
96 | * | ||
97 | * Effect: | ||
98 | * Releases any storage associated with the module handle. On return, | ||
99 | * the module handle is invalid. | ||
100 | * | ||
101 | * Returns: | ||
102 | * Nothing (the function is declared void). Any errors detected during | ||
103 | * cleanup are reported using syms->error_report(), where syms was | ||
104 | * an argument to dload_module_open. | ||
105 | */ | ||
106 | extern void dload_module_close(void *minfo); | ||
107 | |||
108 | #endif /* _GETSECTION_H_ */ | ||
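As a rough illustration (not part of this patch), the intended calling sequence for this API is open, query, then close. The stream and syms arguments stand for caller-supplied dynamic_loader_stream and dynamic_loader_sym implementations, and the helper name and ".text" lookup are assumptions for the sketch; the usual kernel headers for pr_info() and errno values are taken as given.

static int report_text_size(struct dynamic_loader_stream *stream,
			    struct dynamic_loader_sym *syms)
{
	const struct ldr_section_info *info;
	void *mod;

	mod = dload_module_open(stream, syms);
	if (!mod)
		return -EIO;	/* open errors were reported via syms */

	/* Both query routines use the TRUE/FALSE convention noted above. */
	if (dload_get_section_info(mod, ".text", &info))
		pr_info(".text: %u addressable units\n", (unsigned)info->size);

	dload_module_close(mod);
	return 0;
}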
diff --git a/drivers/staging/tidspbridge/include/dspbridge/gh.h b/drivers/staging/tidspbridge/include/dspbridge/gh.h deleted file mode 100644 index e4303b4bf5fd..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/gh.h +++ /dev/null | |||
@@ -1,32 +0,0 @@ | |||
1 | /* | ||
2 | * gh.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef GH_ | ||
18 | #define GH_ | ||
19 | #include <dspbridge/host_os.h> | ||
20 | |||
21 | extern struct gh_t_hash_tab *gh_create(u32 val_size, | ||
22 | u32 (*hash)(const void *), bool (*match)(const void *, | ||
23 | const void *), void (*delete) (void *)); | ||
24 | extern void gh_delete(struct gh_t_hash_tab *hash_tab); | ||
25 | extern void *gh_find(struct gh_t_hash_tab *hash_tab, const void *key); | ||
26 | extern void *gh_insert(struct gh_t_hash_tab *hash_tab, const void *key, | ||
27 | const void *value); | ||
28 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
29 | void gh_iterate(struct gh_t_hash_tab *hash_tab, | ||
30 | void (*callback)(void *, void *), void *user_data); | ||
31 | #endif | ||
32 | #endif /* GH_ */ | ||
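A minimal sketch (not part of this patch) of how the gh_ hash API above was meant to be driven. The string-keyed callbacks, the choice of storing the key itself as the value, and the assumption that gh_create()/gh_find() return NULL on failure are illustrative only.

static u32 str_hash(const void *key)
{
	const char *s = key;
	u32 hash = 0;

	while (*s)
		hash = hash * 33 + *s++;
	return hash;
}

static bool str_match(const void *key, const void *value)
{
	/* In this sketch the stored value is simply a copy of its key. */
	return !strcmp(key, value);
}

static void str_delete(void *value)
{
	/* Values own no extra storage here; nothing to release. */
}

static void gh_demo(void)
{
	struct gh_t_hash_tab *tab;
	char name[16] = "dsp_node_a";

	tab = gh_create(sizeof(name), str_hash, str_match, str_delete);
	if (!tab)
		return;

	gh_insert(tab, name, name);
	if (gh_find(tab, "dsp_node_a"))
		pr_debug("dsp_node_a found\n");

	gh_delete(tab);
}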
diff --git a/drivers/staging/tidspbridge/include/dspbridge/host_os.h b/drivers/staging/tidspbridge/include/dspbridge/host_os.h deleted file mode 100644 index d1441db469fc..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/host_os.h +++ /dev/null | |||
@@ -1,57 +0,0 @@ | |||
1 | /* | ||
2 | * host_os.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef _HOST_OS_H_ | ||
18 | #define _HOST_OS_H_ | ||
19 | |||
20 | #include <linux/atomic.h> | ||
21 | #include <linux/semaphore.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/irq.h> | ||
24 | #include <linux/io.h> | ||
25 | #include <linux/syscalls.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <linux/stddef.h> | ||
29 | #include <linux/types.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/spinlock.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/fs.h> | ||
34 | #include <linux/file.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/ctype.h> | ||
38 | #include <linux/mm.h> | ||
39 | #include <linux/device.h> | ||
40 | #include <linux/vmalloc.h> | ||
41 | #include <linux/ioport.h> | ||
42 | #include <linux/platform_device.h> | ||
43 | #include <linux/clk.h> | ||
44 | #include <linux/omap-mailbox.h> | ||
45 | #include <linux/pagemap.h> | ||
46 | #include <asm/cacheflush.h> | ||
47 | #include <linux/dma-mapping.h> | ||
48 | |||
49 | /* TODO -- Remove, once omap-iommu is used */ | ||
50 | #define INT_DSP_MMU_IRQ (28 + NR_IRQS) | ||
51 | |||
52 | #define PRCM_VDD1 1 | ||
53 | |||
54 | extern struct platform_device *omap_dspbridge_dev; | ||
55 | extern struct device *bridge; | ||
56 | |||
57 | #endif | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io.h b/drivers/staging/tidspbridge/include/dspbridge/io.h deleted file mode 100644 index 750571856908..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/io.h +++ /dev/null | |||
@@ -1,80 +0,0 @@ | |||
1 | /* | ||
2 | * io.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * The io module manages IO between CHNL and msg_ctrl. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef IO_ | ||
20 | #define IO_ | ||
21 | |||
22 | #include <dspbridge/cfgdefs.h> | ||
23 | #include <dspbridge/devdefs.h> | ||
24 | |||
25 | /* IO Objects: */ | ||
26 | struct io_mgr; | ||
27 | |||
28 | /* IO manager attributes: */ | ||
29 | struct io_attrs { | ||
30 | u8 birq; /* Channel's I/O IRQ number. */ | ||
31 | bool irq_shared; /* TRUE if the IRQ is shareable. */ | ||
32 | u32 word_size; /* DSP Word size. */ | ||
33 | u32 shm_base; /* Physical base address of shared memory. */ | ||
34 | u32 sm_length; /* Size (in bytes) of shared memory. */ | ||
35 | }; | ||
36 | |||
37 | |||
38 | /* | ||
39 | * ======== io_create ======== | ||
40 | * Purpose: | ||
41 | * Create an IO manager object, responsible for managing IO between | ||
42 | * CHNL and msg_ctrl. | ||
43 | * Parameters: | ||
44 | * io_man: Location to store the created IO manager object on | ||
45 | * output. | ||
46 | * hdev_obj: Handle to a device object. | ||
47 | * mgr_attrts: IO manager attributes. | ||
48 | * mgr_attrts->birq: I/O IRQ number. | ||
49 | * mgr_attrts->irq_shared: TRUE if the IRQ is shareable. | ||
50 | * mgr_attrts->word_size: DSP Word size in equivalent PC bytes. | ||
51 | * Returns: | ||
52 | * 0: Success; | ||
53 | * -ENOMEM: Insufficient memory for requested resources. | ||
54 | * -EIO: Unable to plug channel ISR for configured IRQ. | ||
55 | * -EINVAL: Invalid DSP word size (must be > 0). | ||
56 | * Invalid base address for DSP communications. | ||
57 | * Requires: | ||
58 | * io_man != NULL. | ||
59 | * mgr_attrts != NULL. | ||
60 | * Ensures: | ||
61 | */ | ||
62 | extern int io_create(struct io_mgr **io_man, | ||
63 | struct dev_object *hdev_obj, | ||
64 | const struct io_attrs *mgr_attrts); | ||
65 | |||
66 | /* | ||
67 | * ======== io_destroy ======== | ||
68 | * Purpose: | ||
69 | * Destroy the IO manager. | ||
70 | * Parameters: | ||
71 | * hio_mgr: IO manager object. | ||
72 | * Returns: | ||
73 | * 0: Success. | ||
74 | * -EFAULT: hio_mgr was invalid. | ||
75 | * Requires: | ||
76 | * Ensures: | ||
77 | */ | ||
78 | extern int io_destroy(struct io_mgr *hio_mgr); | ||
79 | |||
80 | #endif /* IO_ */ | ||
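A short sketch (not part of this patch) of the create/destroy pairing described above. The helper name and the attribute values are hypothetical placeholders; real values would come from the platform configuration.

static int bring_up_io(struct dev_object *hdev_obj, struct io_mgr **io_man)
{
	struct io_attrs attrs = {
		.birq = 26,		/* hypothetical mailbox IRQ */
		.irq_shared = true,
		.word_size = 2,		/* DSP word size in GPP bytes */
		.shm_base = 0,		/* physical SHM base; placeholder */
		.sm_length = 0,
	};
	int status;

	status = io_create(io_man, hdev_obj, &attrs);
	if (status)
		return status;	/* -ENOMEM, -EIO or -EINVAL per the comment above */

	/* ... channel traffic runs through the manager here ... */

	return io_destroy(*io_man);
}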
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h deleted file mode 100644 index 903ff12b14de..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h +++ /dev/null | |||
@@ -1,160 +0,0 @@ | |||
1 | /* | ||
2 | * io_sm.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * IO dispatcher for a shared memory channel driver. | ||
7 | * Also, includes macros to simulate shm via port io calls. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef IOSM_ | ||
21 | #define IOSM_ | ||
22 | |||
23 | #include <dspbridge/_chnl_sm.h> | ||
24 | #include <dspbridge/host_os.h> | ||
25 | |||
26 | #include <dspbridge/io.h> | ||
27 | #include <dspbridge/mbx_sh.h> /* shared mailbox codes */ | ||
28 | |||
29 | /* Magic code used to determine if DSP signaled exception. */ | ||
30 | #define DEH_BASE MBX_DEH_BASE | ||
31 | #define DEH_LIMIT MBX_DEH_LIMIT | ||
32 | |||
33 | #define IO_INPUT 0 | ||
34 | #define IO_OUTPUT 1 | ||
35 | #define IO_SERVICE 2 | ||
36 | |||
37 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
38 | /* The maximum number of OPPs that are supported */ | ||
39 | extern s32 dsp_max_opps; | ||
40 | /* The Vdd1 opp table information */ | ||
41 | extern u32 vdd1_dsp_freq[6][4]; | ||
42 | #endif | ||
43 | |||
44 | /* | ||
45 | * ======== io_cancel_chnl ======== | ||
46 | * Purpose: | ||
47 | * Cancel IO on a given channel. | ||
48 | * Parameters: | ||
49 | * hio_mgr: IO Manager. | ||
50 | * chnl: Index of channel to cancel IO on. | ||
51 | * Returns: | ||
52 | * Requires: | ||
53 | * Valid hio_mgr. | ||
54 | * Ensures: | ||
55 | */ | ||
56 | extern void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl); | ||
57 | |||
58 | /* | ||
59 | * ======== io_dpc ======== | ||
60 | * Purpose: | ||
61 | * Deferred procedure call for shared memory channel driver ISR. Carries | ||
62 | * out the dispatch of I/O. | ||
63 | * Parameters: | ||
64 | * ref_data: Pointer to reference data registered via a call to | ||
65 | * DPC_Create(). | ||
66 | * Returns: | ||
67 | * Requires: | ||
68 | * Must not block. | ||
69 | * Must not acquire resources. | ||
70 | * All data touched must be locked in memory if running in kernel mode. | ||
71 | * Ensures: | ||
72 | * Non-preemptible (but interruptible). | ||
73 | */ | ||
74 | extern void io_dpc(unsigned long ref_data); | ||
75 | |||
76 | /* | ||
77 | * ======== io_mbox_msg ======== | ||
78 | * Purpose: | ||
79 | * Main message handler for the shared memory Bridge channel manager. | ||
80 | * Determines if this message is ours, then schedules a DPC to | ||
81 | * dispatch I/O. | ||
82 | * Parameters: | ||
83 | * self: Pointer to its own notifier_block struct. | ||
84 | * len: Length of message. | ||
85 | * msg: Message code received. | ||
86 | * Returns: | ||
87 | * NOTIFY_OK if handled; NOTIFY_BAD otherwise. | ||
88 | */ | ||
89 | int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg); | ||
90 | |||
91 | /* | ||
92 | * ======== io_request_chnl ======== | ||
93 | * Purpose: | ||
94 | * Request I/O from the DSP. Sets flags in shared memory, then interrupts | ||
95 | * the DSP. | ||
96 | * Parameters: | ||
97 | * hio_mgr: IO manager handle. | ||
98 | * pchnl: Ptr to the channel requesting I/O. | ||
99 | * io_mode: Mode of channel: {IO_INPUT | IO_OUTPUT}. | ||
100 | * Returns: | ||
101 | * Requires: | ||
102 | * pchnl != NULL | ||
103 | * Ensures: | ||
104 | */ | ||
105 | extern void io_request_chnl(struct io_mgr *io_manager, | ||
106 | struct chnl_object *pchnl, | ||
107 | u8 io_mode, u16 *mbx_val); | ||
108 | |||
109 | /* | ||
110 | * ======== iosm_schedule ======== | ||
111 | * Purpose: | ||
112 | * Schedule DPC for IO. | ||
113 | * Parameters: | ||
114 | * pio_mgr: Ptr to an I/O manager. | ||
115 | * Returns: | ||
116 | * Requires: | ||
117 | * pchnl != NULL | ||
118 | * Ensures: | ||
119 | */ | ||
120 | extern void iosm_schedule(struct io_mgr *io_manager); | ||
121 | |||
122 | /* | ||
123 | * ======== io_sh_msetting ======== | ||
124 | * Purpose: | ||
125 | * Sets the shared memory setting | ||
126 | * Parameters: | ||
127 | * hio_mgr: Handle to an I/O manager. | ||
128 | * desc: Shared memory type | ||
129 | * pargs: Ptr to shm setting | ||
130 | * Returns: | ||
131 | * Requires: | ||
132 | * hio_mgr != NULL | ||
133 | * pargs != NULL | ||
134 | * Ensures: | ||
135 | */ | ||
136 | extern int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs); | ||
137 | |||
138 | /* | ||
139 | * Misc functions for the CHNL_IO shared memory library: | ||
140 | */ | ||
141 | |||
142 | /* Maximum channel bufsize that can be used. */ | ||
143 | extern u32 io_buf_size(struct io_mgr *hio_mgr); | ||
144 | |||
145 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
146 | /* | ||
147 | * ========print_dsp_trace_buffer ======== | ||
148 | * Print DSP tracebuffer. | ||
149 | */ | ||
150 | extern int print_dsp_trace_buffer(struct bridge_dev_context | ||
151 | *hbridge_context); | ||
152 | |||
153 | int dump_dsp_stack(struct bridge_dev_context *bridge_context); | ||
154 | |||
155 | void dump_dl_modules(struct bridge_dev_context *bridge_context); | ||
156 | |||
157 | void print_dsp_debug_trace(struct io_mgr *hio_mgr); | ||
158 | #endif | ||
159 | |||
160 | #endif /* IOSM_ */ | ||
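io_dpc() above has the classic tasklet callback signature and carries "must not block" constraints, so a fragment like the one below (not part of this patch) shows how such a DPC would typically be wired up. The my_io_ctx container and helper names are hypothetical.

#include <linux/interrupt.h>

struct my_io_ctx {
	struct tasklet_struct dpc;
	/* ... shared-memory bookkeeping would live here ... */
};

static void wire_up_dpc(struct my_io_ctx *ctx)
{
	/* io_dpc() runs in softirq context, so it must not sleep or allocate. */
	tasklet_init(&ctx->dpc, io_dpc, (unsigned long)ctx);
}

/* Later, e.g. from the mailbox notifier, kick the deferred work: */
static void kick_dpc(struct my_io_ctx *ctx)
{
	tasklet_schedule(&ctx->dpc);
}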
diff --git a/drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h b/drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h deleted file mode 100644 index d4cb3948baba..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/mbx_sh.h +++ /dev/null | |||
@@ -1,144 +0,0 @@ | |||
1 | /* | ||
2 | * mbx_sh.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Definitions for shared mailbox cmd/data values.(used on both | ||
7 | * the GPP and DSP sides). | ||
8 | * | ||
9 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | /* | ||
21 | * Bridge usage of OMAP mailbox 1 is determined by the "class" of the | ||
22 | * mailbox interrupt's cmd value received. The class values are defined | ||
23 | * by a bit (10 through 15) being set. | ||
24 | * | ||
25 | * Note: Only 16 bits of each register are used; the other 16-bit data reg is available. | ||
26 | * | ||
27 | * 16 bit Mbx bit defns: | ||
28 | * | ||
29 | * A). Exception/Error handling (Module DEH) : class = 0. | ||
30 | * | ||
31 | * 15 10 0 | ||
32 | * --------------------------------- | ||
33 | * |0|0|0|0|0|0|x|x|x|x|x|x|x|x|x|x| | ||
34 | * --------------------------------- | ||
35 | * | (class) | (module specific) | | ||
36 | * | ||
37 | * | ||
38 | * B: DSP-DMA link driver channels (DDMA) : class = 1. | ||
39 | * | ||
40 | * 15 10 0 | ||
41 | * --------------------------------- | ||
42 | * |0|0|0|0|0|1|b|b|b|b|b|c|c|c|c|c| | ||
43 | * --------------------------------- | ||
44 | * | (class) | (module specific) | | ||
45 | * | ||
46 | * where b -> buffer index (32 DDMA buffers/chnl max) | ||
47 | * c -> channel Id (32 DDMA chnls max) | ||
48 | * | ||
49 | * | ||
50 | * C: Proc-copy link driver channels (PCPY) : class = 2. | ||
51 | * | ||
52 | * 15 10 0 | ||
53 | * --------------------------------- | ||
54 | * |0|0|0|0|1|0|x|x|x|x|x|x|x|x|x|x| | ||
55 | * --------------------------------- | ||
56 | * | (class) | (module specific) | | ||
57 | * | ||
58 | * | ||
59 | * D: Zero-copy link driver channels (DDZC) : class = 4. | ||
60 | * | ||
61 | * 15 10 0 | ||
62 | * --------------------------------- | ||
63 | * |0|0|0|1|0|0|x|x|x|x|x|c|c|c|c|c| | ||
64 | * --------------------------------- | ||
65 | * | (class) | (module specific) | | ||
66 | * | ||
67 | * where x -> not used | ||
68 | * c -> channel Id (32 ZCPY chnls max) | ||
69 | * | ||
70 | * | ||
71 | * E: Power management : class = 8. | ||
72 | * | ||
73 | * 15 10 0 | ||
74 | * --------------------------------- | ||
75 | * |0|0|1|0|0|0|x|x|x|x|x|c|c|c|c|c| | ||
76 | * | ||
77 | * 0010 00xx xxxc cccc | ||
78 | * 0010 00nn pppp qqqq | ||
79 | * nn: | ||
80 | * 00 = reserved | ||
81 | * 01 = pwr state change | ||
82 | * 10 = opp pre-change | ||
83 | * 11 = opp post-change | ||
84 | * | ||
85 | * if nn = pwr state change: | ||
86 | * pppp = don't care | ||
87 | * qqqq: | ||
88 | * 0010 = hibernate | ||
89 | * 0010 0001 0000 0010 | ||
90 | * 0110 = retention | ||
91 | * 0010 0001 0000 0110 | ||
92 | * others reserved | ||
93 | * | ||
94 | * if nn = opp pre-change: | ||
95 | * pppp = current opp | ||
96 | * qqqq = next opp | ||
97 | * | ||
98 | * if nn = opp post-change: | ||
99 | * pppp = prev opp | ||
100 | * qqqq = current opp | ||
101 | * | ||
102 | * --------------------------------- | ||
103 | * | (class) | (module specific) | | ||
104 | * | ||
105 | * where x -> not used | ||
106 | * c -> Power management command | ||
107 | * | ||
108 | */ | ||
109 | |||
110 | #ifndef _MBX_SH_H | ||
111 | #define _MBX_SH_H | ||
112 | |||
113 | #define MBX_PCPY_CLASS 0x0800 /* PROC-COPY class */ | ||
114 | #define MBX_PM_CLASS 0x2000 /* Power Management */ | ||
115 | #define MBX_DBG_CLASS 0x4000 /* For debugging purpose */ | ||
116 | |||
117 | /* | ||
118 | * Exception Handler codes | ||
119 | * Magic code used to determine if DSP signaled exception. | ||
120 | */ | ||
121 | #define MBX_DEH_BASE 0x0 | ||
122 | #define MBX_DEH_USERS_BASE 0x100 /* 256 */ | ||
123 | #define MBX_DEH_LIMIT 0x3FF /* 1023 */ | ||
124 | #define MBX_DEH_RESET 0x101 /* DSP RESET (DEH) */ | ||
125 | |||
126 | /* | ||
127 | * Link driver command/status codes. | ||
128 | */ | ||
129 | |||
130 | /* Power Management Commands */ | ||
131 | #define MBX_PM_DSPIDLE (MBX_PM_CLASS + 0x0) | ||
132 | #define MBX_PM_DSPWAKEUP (MBX_PM_CLASS + 0x1) | ||
133 | #define MBX_PM_EMERGENCYSLEEP (MBX_PM_CLASS + 0x2) | ||
134 | #define MBX_PM_SETPOINT_PRENOTIFY (MBX_PM_CLASS + 0x6) | ||
135 | #define MBX_PM_SETPOINT_POSTNOTIFY (MBX_PM_CLASS + 0x7) | ||
136 | #define MBX_PM_DSPRETENTION (MBX_PM_CLASS + 0x8) | ||
137 | #define MBX_PM_DSPHIBERNATE (MBX_PM_CLASS + 0x9) | ||
138 | #define MBX_PM_HIBERNATE_EN (MBX_PM_CLASS + 0xA) | ||
139 | #define MBX_PM_OPP_REQ (MBX_PM_CLASS + 0xB) | ||
140 | |||
141 | /* Bridge Debug Commands */ | ||
142 | #define MBX_DBG_SYSPRINTF (MBX_DBG_CLASS + 0x0) | ||
143 | |||
144 | #endif /* _MBX_SH_H */ | ||
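A small decoding helper (not part of this patch) that illustrates how the class constants above partition the 16-bit command space; the function name is made up for the sketch.

static const char *mbx_cmd_class(u16 cmd)
{
	/* DEH exception codes occupy MBX_DEH_BASE..MBX_DEH_LIMIT (0x0-0x3FF). */
	if (cmd <= MBX_DEH_LIMIT)
		return "exception (DEH)";
	if (cmd & MBX_PM_CLASS)
		return "power management";
	if (cmd & MBX_DBG_CLASS)
		return "debug";
	if (cmd & MBX_PCPY_CLASS)
		return "proc-copy channel";
	return "unclassified";
}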
diff --git a/drivers/staging/tidspbridge/include/dspbridge/memdefs.h b/drivers/staging/tidspbridge/include/dspbridge/memdefs.h deleted file mode 100644 index 78d2c5d0045b..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/memdefs.h +++ /dev/null | |||
@@ -1,30 +0,0 @@ | |||
1 | /* | ||
2 | * memdefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global MEM constants and types, shared between Bridge driver and DSP API. | ||
7 | * | ||
8 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef MEMDEFS_ | ||
20 | #define MEMDEFS_ | ||
21 | |||
22 | /* | ||
23 | * MEM_VIRTUALSEGID is used by Node & Strm to access virtual address space in | ||
24 | * the correct client process context. | ||
25 | */ | ||
26 | #define MEM_SETVIRTUALSEGID 0x10000000 | ||
27 | #define MEM_GETVIRTUALSEGID 0x20000000 | ||
28 | #define MEM_MASKVIRTUALSEGID (MEM_SETVIRTUALSEGID | MEM_GETVIRTUALSEGID) | ||
29 | |||
30 | #endif /* MEMDEFS_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/mgr.h b/drivers/staging/tidspbridge/include/dspbridge/mgr.h deleted file mode 100644 index 47b0318430e1..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/mgr.h +++ /dev/null | |||
@@ -1,205 +0,0 @@ | |||
1 | /* | ||
2 | * mgr.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * This is the DSP API RM module interface. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef MGR_ | ||
20 | #define MGR_ | ||
21 | |||
22 | #include <dspbridge/mgrpriv.h> | ||
23 | |||
24 | #define MAX_EVENTS 32 | ||
25 | |||
26 | /* | ||
27 | * ======== mgr_wait_for_bridge_events ======== | ||
28 | * Purpose: | ||
29 | * Block on any Bridge event(s) | ||
30 | * Parameters: | ||
31 | * anotifications : array of pointers to notification objects. | ||
32 | * count : number of elements in above array | ||
33 | * pu_index : index of signaled event object | ||
34 | * utimeout : timeout interval in milliseconds | ||
35 | * Returns: | ||
36 | * 0 : Success. | ||
37 | * -ETIME : Wait timed out. *pu_index is undetermined. | ||
38 | * Details: | ||
39 | */ | ||
40 | |||
41 | int mgr_wait_for_bridge_events(struct dsp_notification | ||
42 | **anotifications, | ||
43 | u32 count, u32 *pu_index, | ||
44 | u32 utimeout); | ||
45 | |||
46 | /* | ||
47 | * ======== mgr_create ======== | ||
48 | * Purpose: | ||
49 | * Creates the Manager Object. This is done during the driver loading. | ||
50 | * There is only one Manager Object in the DSP/BIOS Bridge. | ||
51 | * Parameters: | ||
52 | * mgr_obj: Location to store created MGR Object handle. | ||
53 | * dev_node_obj: Device object as known to the system. | ||
54 | * Returns: | ||
55 | * 0: Success | ||
56 | * -ENOMEM: Failed to Create the Object | ||
57 | * -EPERM: General Failure | ||
58 | * Requires: | ||
59 | * MGR Initialized (refs > 0 ) | ||
60 | * mgr_obj != NULL. | ||
61 | * Ensures: | ||
62 | * 0: *mgr_obj is a valid MGR interface to the device. | ||
63 | * MGR Object stores the DCD Manager Handle. | ||
64 | * MGR Object stored in the Registry. | ||
65 | * !0: MGR Object not created | ||
66 | * Details: | ||
67 | * DCD Dll is loaded and MGR Object stores the handle of the DLL. | ||
68 | */ | ||
69 | extern int mgr_create(struct mgr_object **mgr_obj, | ||
70 | struct cfg_devnode *dev_node_obj); | ||
71 | |||
72 | /* | ||
73 | * ======== mgr_destroy ======== | ||
74 | * Purpose: | ||
75 | * Destroys the MGR object. Called upon driver unloading. | ||
76 | * Parameters: | ||
77 | * hmgr_obj: Handle to Manager object. | ||
78 | * Returns: | ||
79 | * 0: Success. | ||
80 | * DCD Manager freed; MGR Object destroyed; | ||
81 | * MGR Object deleted from the Registry. | ||
82 | * -EPERM: Failed to destroy MGR Object | ||
83 | * Requires: | ||
84 | * MGR Initialized (refs > 0 ) | ||
85 | * hmgr_obj is a valid MGR handle. | ||
86 | * Ensures: | ||
87 | * 0: MGR Object destroyed and hmgr_obj is Invalid MGR | ||
88 | * Handle. | ||
89 | */ | ||
90 | extern int mgr_destroy(struct mgr_object *hmgr_obj); | ||
91 | |||
92 | /* | ||
93 | * ======== mgr_enum_node_info ======== | ||
94 | * Purpose: | ||
95 | * Enumerate and get configuration information about nodes configured | ||
96 | * in the node database. | ||
97 | * Parameters: | ||
98 | * node_id: The node index (base 0). | ||
99 | * pndb_props: Ptr to the dsp_ndbprops structure for output. | ||
100 | * undb_props_size: Size of the dsp_ndbprops structure. | ||
101 | * pu_num_nodes: Location where the number of nodes configured | ||
102 | * in the database will be returned. | ||
103 | * Returns: | ||
104 | * 0: Success. | ||
105 | * -EINVAL: Parameter node_id is greater than the number of | ||
106 | * nodes configured in the system. | ||
107 | * -EIDRM: During enumeration there has been a change in | ||
108 | * the number of nodes configured or in the | ||
109 | * properties of the enumerated nodes. | ||
110 | * -EPERM: Failed to query the node database. | ||
111 | * Requires: | ||
112 | * pNDBPROPS is not null | ||
113 | * undb_props_size >= sizeof(dsp_ndbprops) | ||
114 | * pu_num_nodes is not null | ||
115 | * MGR Initialized (refs > 0 ) | ||
116 | * Ensures: | ||
117 | * SUCCESS on successful retrieval of data and *pu_num_nodes > 0 OR | ||
118 | * DSP_FAILED && *pu_num_nodes == 0. | ||
119 | * Details: | ||
120 | */ | ||
121 | extern int mgr_enum_node_info(u32 node_id, | ||
122 | struct dsp_ndbprops *pndb_props, | ||
123 | u32 undb_props_size, | ||
124 | u32 *pu_num_nodes); | ||
125 | |||
126 | /* | ||
127 | * ======== mgr_enum_processor_info ======== | ||
128 | * Purpose: | ||
129 | * Enumerate and get configuration information about available DSP | ||
130 | * processors | ||
131 | * Parameters: | ||
132 | * processor_id: The processor index (zero-based). | ||
133 | * processor_info: Ptr to the dsp_processorinfo structure . | ||
134 | * processor_info_size: Size of dsp_processorinfo structure. | ||
135 | * pu_num_procs: Location where the number of DSPs configured | ||
136 | * in the database will be returned | ||
137 | * Returns: | ||
138 | * 0: Success. | ||
139 | * -EINVAL: Parameter processor_id is greater than the number | ||
140 | * of DSP processors in the system. | ||
141 | * -EPERM: Failed to query the node database. | ||
142 | * Requires: | ||
143 | * processor_info is not null | ||
144 | * pu_num_procs is not null | ||
145 | * processor_info_size >= sizeof(dsp_processorinfo) | ||
146 | * MGR Initialized (refs > 0 ) | ||
147 | * Ensures: | ||
148 | * SUCCESS on successful retrieval of data and *pu_num_procs > 0 OR | ||
149 | * DSP_FAILED && *pu_num_procs == 0. | ||
150 | * Details: | ||
151 | */ | ||
152 | extern int mgr_enum_processor_info(u32 processor_id, | ||
153 | struct dsp_processorinfo | ||
154 | *processor_info, | ||
155 | u32 processor_info_size, | ||
156 | u8 *pu_num_procs); | ||
157 | /* | ||
158 | * ======== mgr_exit ======== | ||
159 | * Purpose: | ||
160 | * Decrement reference count, and free resources when reference count is | ||
161 | * 0. | ||
162 | * Parameters: | ||
163 | * Returns: | ||
164 | * Requires: | ||
165 | * MGR is initialized. | ||
166 | * Ensures: | ||
167 | * When reference count == 0, MGR's private resources are freed. | ||
168 | */ | ||
169 | extern void mgr_exit(void); | ||
170 | |||
171 | /* | ||
172 | * ======== mgr_get_dcd_handle ======== | ||
173 | * Purpose: | ||
174 | * Retrieves the DCD manager handle. Accessor function. | ||
175 | * Parameters: | ||
176 | * mgr_handle: Handle to the Manager Object | ||
177 | * dcd_handle: Ptr to receive the DCD Handle. | ||
178 | * Returns: | ||
179 | * 0: Success | ||
180 | * -EPERM: Failure to get the Handle | ||
181 | * Requires: | ||
182 | * MGR is initialized. | ||
183 | * dcd_handle != NULL | ||
184 | * Ensures: | ||
185 | * 0 and *dcd_handle != NULL || | ||
186 | * -EPERM and *dcd_handle == NULL | ||
187 | */ | ||
188 | extern int mgr_get_dcd_handle(struct mgr_object | ||
189 | *mgr_handle, u32 *dcd_handle); | ||
190 | |||
191 | /* | ||
192 | * ======== mgr_init ======== | ||
193 | * Purpose: | ||
194 | * Initialize MGR's private state, keeping a reference count on each | ||
195 | * call. Initializes the DCD. | ||
196 | * Parameters: | ||
197 | * Returns: | ||
198 | * TRUE if initialized; FALSE if error occurred. | ||
199 | * Requires: | ||
200 | * Ensures: | ||
201 | * TRUE: A requirement for the other public MGR functions. | ||
202 | */ | ||
203 | extern bool mgr_init(void); | ||
204 | |||
205 | #endif /* MGR_ */ | ||
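A minimal sketch (not part of this patch) of the enumeration idiom mgr_enum_node_info() supports, assuming the manager has already been initialized and that struct dsp_ndbprops is in scope; the helper name and logging are illustrative.

static void list_configured_nodes(void)
{
	struct dsp_ndbprops props;
	u32 num_nodes = 0;
	u32 i = 0;

	/* Each successful call also reports the total configured node count. */
	while (!mgr_enum_node_info(i, &props, sizeof(props), &num_nodes)) {
		pr_debug("node %u of %u\n", i, num_nodes);
		if (++i >= num_nodes)
			break;
	}
}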
diff --git a/drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h b/drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h deleted file mode 100644 index 3a4e337c040d..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/mgrpriv.h +++ /dev/null | |||
@@ -1,45 +0,0 @@ | |||
1 | /* | ||
2 | * mgrpriv.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global MGR constants and types, shared by PROC, MGR, and DSP API. | ||
7 | * | ||
8 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef MGRPRIV_ | ||
20 | #define MGRPRIV_ | ||
21 | |||
22 | /* | ||
23 | * OMAP1510 specific | ||
24 | */ | ||
25 | #define MGR_MAXTLBENTRIES 32 | ||
26 | |||
27 | /* RM MGR Object */ | ||
28 | struct mgr_object; | ||
29 | |||
30 | struct mgr_tlbentry { | ||
31 | u32 dsp_virt; /* DSP virtual address */ | ||
32 | u32 gpp_phys; /* GPP physical address */ | ||
33 | }; | ||
34 | |||
35 | /* | ||
36 | * The DSP_PROCESSOREXTINFO structure describes additional extended | ||
37 | * capabilities of a DSP processor not exposed to user. | ||
38 | */ | ||
39 | struct mgr_processorextinfo { | ||
40 | struct dsp_processorinfo ty_basic; /* user processor info */ | ||
41 | /* private dsp mmu entries */ | ||
42 | struct mgr_tlbentry ty_tlb[MGR_MAXTLBENTRIES]; | ||
43 | }; | ||
44 | |||
45 | #endif /* MGRPRIV_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/msg.h b/drivers/staging/tidspbridge/include/dspbridge/msg.h deleted file mode 100644 index 2c8712c933fc..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/msg.h +++ /dev/null | |||
@@ -1,59 +0,0 @@ | |||
1 | /* | ||
2 | * msg.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge msg_ctrl Module. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef MSG_ | ||
20 | #define MSG_ | ||
21 | |||
22 | #include <dspbridge/devdefs.h> | ||
23 | #include <dspbridge/msgdefs.h> | ||
24 | |||
25 | /* | ||
26 | * ======== msg_create ======== | ||
27 | * Purpose: | ||
28 | * Create an object to manage message queues. Only one of these objects | ||
29 | * can exist per device object. The msg_ctrl manager must be created before | ||
30 | * the IO Manager. | ||
31 | * Parameters: | ||
32 | * msg_man: Location to store msg_ctrl manager handle on output. | ||
33 | * hdev_obj: The device object. | ||
34 | * msg_callback: Called whenever an RMS_EXIT message is received. | ||
35 | * Returns: | ||
36 | * Requires: | ||
37 | * msg_man != NULL. | ||
38 | * hdev_obj != NULL. | ||
39 | * msg_callback != NULL. | ||
40 | * Ensures: | ||
41 | */ | ||
42 | extern int msg_create(struct msg_mgr **msg_man, | ||
43 | struct dev_object *hdev_obj, | ||
44 | msg_onexit msg_callback); | ||
45 | |||
46 | /* | ||
47 | * ======== msg_delete ======== | ||
48 | * Purpose: | ||
49 | * Delete a msg_ctrl manager allocated in msg_create(). | ||
50 | * Parameters: | ||
51 | * hmsg_mgr: Handle returned from msg_create(). | ||
52 | * Returns: | ||
53 | * Requires: | ||
54 | * Valid hmsg_mgr. | ||
55 | * Ensures: | ||
56 | */ | ||
57 | extern void msg_delete(struct msg_mgr *hmsg_mgr); | ||
58 | |||
59 | #endif /* MSG_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/msgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/msgdefs.h deleted file mode 100644 index 80a3fa1a8a33..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/msgdefs.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | /* | ||
2 | * msgdefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global msg_ctrl constants and types. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef MSGDEFS_ | ||
20 | #define MSGDEFS_ | ||
21 | |||
22 | /* msg_ctrl Objects: */ | ||
23 | struct msg_mgr; | ||
24 | struct msg_queue; | ||
25 | |||
26 | /* Function prototype for callback to be called on RMS_EXIT message received */ | ||
27 | typedef void (*msg_onexit) (void *h, s32 node_status); | ||
28 | |||
29 | #endif /* MSGDEFS_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nldr.h b/drivers/staging/tidspbridge/include/dspbridge/nldr.h deleted file mode 100644 index c5e48ca6c548..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/nldr.h +++ /dev/null | |||
@@ -1,55 +0,0 @@ | |||
1 | /* | ||
2 | * nldr.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge dynamic loader interface. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #include <dspbridge/dbdefs.h> | ||
20 | #include <dspbridge/dbdcddef.h> | ||
21 | #include <dspbridge/dev.h> | ||
22 | #include <dspbridge/rmm.h> | ||
23 | #include <dspbridge/nldrdefs.h> | ||
24 | |||
25 | #ifndef NLDR_ | ||
26 | #define NLDR_ | ||
27 | |||
28 | extern int nldr_allocate(struct nldr_object *nldr_obj, | ||
29 | void *priv_ref, const struct dcd_nodeprops | ||
30 | *node_props, | ||
31 | struct nldr_nodeobject **nldr_nodeobj, | ||
32 | bool *pf_phase_split); | ||
33 | |||
34 | extern int nldr_create(struct nldr_object **nldr, | ||
35 | struct dev_object *hdev_obj, | ||
36 | const struct nldr_attrs *pattrs); | ||
37 | |||
38 | extern void nldr_delete(struct nldr_object *nldr_obj); | ||
39 | |||
40 | extern int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj, | ||
41 | char *str_fxn, u32 * addr); | ||
42 | |||
43 | extern int nldr_get_rmm_manager(struct nldr_object *nldr, | ||
44 | struct rmm_target_obj **rmm_mgr); | ||
45 | |||
46 | extern int nldr_load(struct nldr_nodeobject *nldr_node_obj, | ||
47 | enum nldr_phase phase); | ||
48 | extern int nldr_unload(struct nldr_nodeobject *nldr_node_obj, | ||
49 | enum nldr_phase phase); | ||
50 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
51 | int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr, | ||
52 | u32 offset_range, void *offset_output, char *sym_name); | ||
53 | #endif | ||
54 | |||
55 | #endif /* NLDR_ */ | ||
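A short sketch (not part of this patch) pairing nldr_load() and nldr_unload() for one phase; the helper name is hypothetical and error handling is reduced to the essentials.

static int run_execute_phase(struct nldr_nodeobject *nldr_node)
{
	int status;

	status = nldr_load(nldr_node, NLDR_EXECUTE);
	if (status)
		return status;	/* -ENOMEM, -ENXIO or -EILSEQ per nldrdefs.h */

	/* ... the node's execute-phase function would be dispatched here ... */

	return nldr_unload(nldr_node, NLDR_EXECUTE);
}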
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h b/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h deleted file mode 100644 index 7e3c7f58b496..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/nldrdefs.h +++ /dev/null | |||
@@ -1,259 +0,0 @@ | |||
1 | /* | ||
2 | * nldrdefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global Dynamic + static/overlay Node loader (NLDR) constants and types. | ||
7 | * | ||
8 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef NLDRDEFS_ | ||
20 | #define NLDRDEFS_ | ||
21 | |||
22 | #include <dspbridge/dbdcddef.h> | ||
23 | #include <dspbridge/devdefs.h> | ||
24 | |||
25 | #define NLDR_MAXPATHLENGTH 255 | ||
26 | /* NLDR Objects: */ | ||
27 | struct nldr_object; | ||
28 | struct nldr_nodeobject; | ||
29 | |||
30 | /* | ||
31 | * ======== nldr_loadtype ======== | ||
32 | * Load types for a node. Must match values in node.h55. | ||
33 | */ | ||
34 | enum nldr_loadtype { | ||
35 | NLDR_STATICLOAD, /* Linked in base image, not overlay */ | ||
36 | NLDR_DYNAMICLOAD, /* Dynamically loaded node */ | ||
37 | NLDR_OVLYLOAD /* Linked in base image, overlay node */ | ||
38 | }; | ||
39 | |||
40 | /* | ||
41 | * ======== nldr_ovlyfxn ======== | ||
42 | * Causes code or data to be copied from load address to run address. This | ||
43 | * is the "cod_writefxn" that gets passed to the DBLL_Library and is used as | ||
44 | * the ZL write function. | ||
45 | * | ||
46 | * Parameters: | ||
47 | * priv_ref: Handle to identify the node. | ||
48 | * dsp_run_addr: Run address of code or data. | ||
49 | * dsp_load_addr: Load address of code or data. | ||
50 | * ul_num_bytes: Number of (GPP) bytes to copy. | ||
51 | * mem_space: RMS_CODE or RMS_DATA. | ||
52 | * Returns: | ||
53 | * ul_num_bytes: Success. | ||
54 | * 0: Failure. | ||
55 | * Requires: | ||
56 | * Ensures: | ||
57 | */ | ||
58 | typedef u32(*nldr_ovlyfxn) (void *priv_ref, u32 dsp_run_addr, | ||
59 | u32 dsp_load_addr, u32 ul_num_bytes, u32 mem_space); | ||
60 | |||
61 | /* | ||
62 | * ======== nldr_writefxn ======== | ||
63 | * Write memory function. Used for dynamic load writes. | ||
64 | * Parameters: | ||
65 | * priv_ref: Handle to identify the node. | ||
66 | * dsp_add: Address of code or data. | ||
67 | * pbuf: Code or data to be written | ||
68 | * ul_num_bytes: Number of (GPP) bytes to write. | ||
69 | * mem_space: DBLL_DATA or DBLL_CODE. | ||
70 | * Returns: | ||
71 | * ul_num_bytes: Success. | ||
72 | * 0: Failure. | ||
73 | * Requires: | ||
74 | * Ensures: | ||
75 | */ | ||
76 | typedef u32(*nldr_writefxn) (void *priv_ref, | ||
77 | u32 dsp_add, void *pbuf, | ||
78 | u32 ul_num_bytes, u32 mem_space); | ||
79 | |||
80 | /* | ||
81 | * ======== nldr_attrs ======== | ||
82 | * Attributes passed to nldr_create function. | ||
83 | */ | ||
84 | struct nldr_attrs { | ||
85 | nldr_ovlyfxn ovly; | ||
86 | nldr_writefxn write; | ||
87 | u16 dsp_word_size; | ||
88 | u16 dsp_mau_size; | ||
89 | }; | ||
90 | |||
91 | /* | ||
92 | * ======== nldr_phase ======== | ||
93 | * Indicates node create, delete, or execute phase function. | ||
94 | */ | ||
95 | enum nldr_phase { | ||
96 | NLDR_CREATE, | ||
97 | NLDR_DELETE, | ||
98 | NLDR_EXECUTE, | ||
99 | NLDR_NOPHASE | ||
100 | }; | ||
101 | |||
102 | /* | ||
103 | * Typedefs of loader functions imported from a DLL, or defined in a | ||
104 | * function table. | ||
105 | */ | ||
106 | |||
107 | /* | ||
108 | * ======== nldr_allocate ======== | ||
109 | * Allocate resources to manage the loading of a node on the DSP. | ||
110 | * | ||
111 | * Parameters: | ||
112 | * nldr_obj: Handle of loader that will load the node. | ||
113 | * priv_ref: Handle to identify the node. | ||
114 | * node_props: Pointer to a dcd_nodeprops for the node. | ||
115 | * nldr_nodeobj: Location to store node handle on output. This handle | ||
116 | * will be passed to nldr_load/nldr_unload. | ||
117 | * pf_phase_split: pointer to the bool phase-split flag referenced in node.c | ||
118 | * Returns: | ||
119 | * 0: Success. | ||
120 | * -ENOMEM: Insufficient memory on GPP. | ||
121 | * Requires: | ||
122 | * Valid nldr_obj. | ||
123 | * node_props != NULL. | ||
124 | * nldr_nodeobj != NULL. | ||
125 | * Ensures: | ||
126 | * 0: IsValidNode(*nldr_nodeobj). | ||
127 | * error: *nldr_nodeobj == NULL. | ||
128 | */ | ||
129 | typedef int(*nldr_allocatefxn) (struct nldr_object *nldr_obj, | ||
130 | void *priv_ref, | ||
131 | const struct dcd_nodeprops | ||
132 | * node_props, | ||
133 | struct nldr_nodeobject | ||
134 | **nldr_nodeobj, | ||
135 | bool *pf_phase_split); | ||
136 | |||
137 | /* | ||
138 | * ======== nldr_create ======== | ||
139 | * Create a loader object. This object handles the loading and unloading of | ||
140 | * create, delete, and execute phase functions of nodes on the DSP target. | ||
141 | * | ||
142 | * Parameters: | ||
143 | * nldr: Location to store loader handle on output. | ||
144 | * hdev_obj: Device for this processor. | ||
145 | * pattrs: Loader attributes. | ||
146 | * Returns: | ||
147 | * 0: Success; | ||
148 | * -ENOMEM: Insufficient memory for requested resources. | ||
149 | * Requires: | ||
150 | * nldr != NULL. | ||
151 | * hdev_obj != NULL. | ||
152 | * pattrs != NULL. | ||
153 | * Ensures: | ||
154 | * 0: Valid *nldr. | ||
155 | * error: *nldr == NULL. | ||
156 | */ | ||
157 | typedef int(*nldr_createfxn) (struct nldr_object **nldr, | ||
158 | struct dev_object *hdev_obj, | ||
159 | const struct nldr_attrs *pattrs); | ||
160 | |||
161 | /* | ||
162 | * ======== nldr_delete ======== | ||
163 | * Delete the NLDR loader. | ||
164 | * | ||
165 | * Parameters: | ||
166 | * nldr_obj: Node manager object. | ||
167 | * Returns: | ||
168 | * Requires: | ||
169 | * Valid nldr_obj. | ||
170 | * Ensures: | ||
171 | * nldr_obj invalid | ||
172 | */ | ||
173 | typedef void (*nldr_deletefxn) (struct nldr_object *nldr_obj); | ||
174 | |||
175 | /* | ||
176 | * ======== NLDR_Free ======== | ||
177 | * Free resources allocated in nldr_allocate. | ||
178 | * | ||
179 | * Parameters: | ||
180 | * nldr_node_obj: Handle returned from nldr_allocate(). | ||
181 | * Returns: | ||
182 | * Requires: | ||
183 | * Valid nldr_node_obj. | ||
184 | * Ensures: | ||
185 | */ | ||
186 | typedef void (*nldr_freefxn) (struct nldr_nodeobject *nldr_node_obj); | ||
187 | |||
188 | /* | ||
189 | * ======== nldr_get_fxn_addr ======== | ||
190 | * Get address of create, delete, or execute phase function of a node on | ||
191 | * the DSP. | ||
192 | * | ||
193 | * Parameters: | ||
194 | * nldr_node_obj: Handle returned from nldr_allocate(). | ||
195 | * str_fxn: Name of function. | ||
196 | * addr: Location to store function address. | ||
197 | * Returns: | ||
198 | * 0: Success. | ||
199 | * -ESPIPE: Address of function not found. | ||
200 | * Requires: | ||
201 | * Valid nldr_node_obj. | ||
202 | * addr != NULL; | ||
203 | * str_fxn != NULL; | ||
204 | * Ensures: | ||
205 | */ | ||
206 | typedef int(*nldr_getfxnaddrfxn) (struct nldr_nodeobject | ||
207 | * nldr_node_obj, | ||
208 | char *str_fxn, u32 * addr); | ||
209 | |||
210 | /* | ||
211 | * ======== nldr_load ======== | ||
212 | * Load create, delete, or execute phase function of a node on the DSP. | ||
213 | * | ||
214 | * Parameters: | ||
215 | * nldr_node_obj: Handle returned from nldr_allocate(). | ||
216 | * phase: Type of function to load (create, delete, or execute). | ||
217 | * Returns: | ||
218 | * 0: Success. | ||
219 | * -ENOMEM: Insufficient memory on GPP. | ||
220 | * -ENXIO: Can't overlay phase because overlay memory | ||
221 | * is already in use. | ||
222 | * -EILSEQ: Failure in dynamic loader library. | ||
223 | * Requires: | ||
224 | * Valid nldr_node_obj. | ||
225 | * Ensures: | ||
226 | */ | ||
227 | typedef int(*nldr_loadfxn) (struct nldr_nodeobject *nldr_node_obj, | ||
228 | enum nldr_phase phase); | ||
229 | |||
230 | /* | ||
231 | * ======== nldr_unload ======== | ||
232 | * Unload create, delete, or execute phase function of a node on the DSP. | ||
233 | * | ||
234 | * Parameters: | ||
235 | * nldr_node_obj: Handle returned from nldr_allocate(). | ||
236 | * phase: Node function to unload (create, delete, or execute). | ||
237 | * Returns: | ||
238 | * 0: Success. | ||
239 | * -ENOMEM: Insufficient memory on GPP. | ||
240 | * Requires: | ||
241 | * Valid nldr_node_obj. | ||
242 | * Ensures: | ||
243 | */ | ||
244 | typedef int(*nldr_unloadfxn) (struct nldr_nodeobject *nldr_node_obj, | ||
245 | enum nldr_phase phase); | ||
246 | |||
247 | /* | ||
248 | * ======== node_ldr_fxns ======== | ||
249 | */ | ||
250 | struct node_ldr_fxns { | ||
251 | nldr_allocatefxn allocate; | ||
252 | nldr_createfxn create; | ||
253 | nldr_deletefxn delete; | ||
254 | nldr_getfxnaddrfxn get_fxn_addr; | ||
255 | nldr_loadfxn load; | ||
256 | nldr_unloadfxn unload; | ||
257 | }; | ||
258 | |||
259 | #endif /* NLDRDEFS_ */ | ||
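The node_ldr_fxns table above is essentially a vtable over entry points with the same signatures as those declared in nldr.h. A plausible population, assumed here purely from the matching prototypes and not taken from this patch, would look like:

static const struct node_ldr_fxns my_nldr_fxns = {
	.allocate     = nldr_allocate,
	.create       = nldr_create,
	.delete       = nldr_delete,
	.get_fxn_addr = nldr_get_fxn_addr,
	.load         = nldr_load,
	.unload       = nldr_unload,
};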
diff --git a/drivers/staging/tidspbridge/include/dspbridge/node.h b/drivers/staging/tidspbridge/include/dspbridge/node.h deleted file mode 100644 index 68ed74a86c95..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/node.h +++ /dev/null | |||
@@ -1,524 +0,0 @@ | |||
1 | /* | ||
2 | * node.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge Node Manager. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef NODE_ | ||
20 | #define NODE_ | ||
21 | |||
22 | #include <dspbridge/procpriv.h> | ||
23 | |||
24 | #include <dspbridge/nodedefs.h> | ||
25 | #include <dspbridge/disp.h> | ||
26 | #include <dspbridge/nldrdefs.h> | ||
27 | #include <dspbridge/drv.h> | ||
28 | |||
29 | /* | ||
30 | * ======== node_allocate ======== | ||
31 | * Purpose: | ||
32 | * Allocate GPP resources to manage a node on the DSP. | ||
33 | * Parameters: | ||
34 | * hprocessor: Handle of processor that is allocating the node. | ||
35 | * node_uuid: Pointer to a dsp_uuid for the node. | ||
36 | * pargs: Optional arguments to be passed to the node. | ||
37 | * attr_in: Optional pointer to node attributes (priority, | ||
38 | * timeout...) | ||
39 | * noderes: Location to store node resource info. | ||
40 | * Returns: | ||
41 | * 0: Success. | ||
42 | * -ENOMEM: Insufficient memory on GPP. | ||
43 | * -ENOKEY: Node UUID has not been registered. | ||
44 | * -ESPIPE: iAlg functions not found for a DAIS node. | ||
45 | * -EDOM: attr_in != NULL and attr_in->prio out of | ||
46 | * range. | ||
47 | * -EPERM: A failure occurred, unable to allocate node. | ||
48 | * -EBADR: Processor is not in the running state. | ||
49 | * Requires: | ||
50 | * hprocessor != NULL. | ||
51 | * node_uuid != NULL. | ||
52 | * noderes != NULL. | ||
53 | * Ensures: | ||
54 | * 0: *noderes contains a valid node resource handle. | ||
55 | * error: *noderes == NULL. | ||
56 | */ | ||
57 | extern int node_allocate(struct proc_object *hprocessor, | ||
58 | const struct dsp_uuid *node_uuid, | ||
59 | const struct dsp_cbdata | ||
60 | *pargs, const struct dsp_nodeattrin | ||
61 | *attr_in, | ||
62 | struct node_res_object **noderes, | ||
63 | struct process_context *pr_ctxt); | ||
64 | |||
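A hedged sketch of how a caller might use node_allocate() with default arguments and attributes; the processor object, UUID and process context are assumed to come from the surrounding driver code.

/* Sketch only: allocate a node with no optional arguments (pargs == NULL)
 * and default attributes (attr_in == NULL).
 */
static int example_alloc_node(struct proc_object *proc,
			      const struct dsp_uuid *uuid,
			      struct process_context *pr_ctxt,
			      struct node_res_object **node_res)
{
	int status;

	status = node_allocate(proc, uuid, NULL, NULL, node_res, pr_ctxt);
	if (status)
		pr_err("node_allocate failed: %d\n", status);

	return status;
}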
65 | /* | ||
66 | * ======== node_alloc_msg_buf ======== | ||
67 | * Purpose: | ||
68 | * Allocate and Prepare a buffer whose descriptor will be passed to a | ||
69 | * Node within a (dsp_msg)message | ||
70 | * Parameters: | ||
71 | * hnode: The node handle. | ||
72 | * usize: The size of the buffer to be allocated. | ||
73 | * pattr: Pointer to a dsp_bufferattr structure. | ||
74 | * pbuffer: Location to store the address of the allocated | ||
75 | * buffer on output. | ||
76 | * Returns: | ||
77 | * 0: Success. | ||
78 | * -EFAULT: Invalid node handle. | ||
79 | * -ENOMEM: Insufficient memory. | ||
80 | * -EPERM: General Failure. | ||
81 | * -EINVAL: Invalid Size. | ||
82 | * Requires: | ||
83 | * pbuffer != NULL. | ||
84 | * Ensures: | ||
85 | */ | ||
86 | extern int node_alloc_msg_buf(struct node_object *hnode, | ||
87 | u32 usize, struct dsp_bufferattr | ||
88 | *pattr, u8 **pbuffer); | ||
89 | |||
90 | /* | ||
91 | * ======== node_change_priority ======== | ||
92 | * Purpose: | ||
93 | * Change the priority of an allocated node. | ||
94 | * Parameters: | ||
95 | * hnode: Node handle returned from node_allocate. | ||
96 | * prio: New priority level to set node's priority to. | ||
97 | * Returns: | ||
98 | * 0: Success. | ||
99 | * -EFAULT: Invalid hnode. | ||
100 | * -EDOM: prio is out of range. | ||
101 | * -EPERM: The specified node is not a task node. | ||
102 | * Unable to change node's runtime priority level. | ||
103 | * -EBADR: Node is not in the NODE_ALLOCATED, NODE_PAUSED, | ||
104 | * or NODE_RUNNING state. | ||
105 | * -ETIME: A timeout occurred before the DSP responded. | ||
106 | * Requires: | ||
107 | * Ensures: | ||
108 | * 0 && (Node's current priority == prio) | ||
109 | */ | ||
110 | extern int node_change_priority(struct node_object *hnode, s32 prio); | ||
111 | |||
112 | /* | ||
113 | * ======== node_connect ======== | ||
114 | * Purpose: | ||
115 | * Connect two nodes on the DSP, or a node on the DSP to the GPP. In the | ||
116 | * case that the connection is being made between a node on the DSP and | ||
117 | * the GPP, one of the node handles (either node1 or node2) must be | ||
118 | * the constant NODE_HGPPNODE. | ||
119 | * Parameters: | ||
120 | * node1: Handle of first node to connect to second node. If | ||
121 | * this is a connection from the GPP to node2, node1 | ||
122 | * must be the constant NODE_HGPPNODE. Otherwise, node1 | ||
123 | * must be a node handle returned from a successful call | ||
124 | * to Node_Allocate(). | ||
125 | * node2: Handle of second node. Must be either NODE_HGPPNODE | ||
126 | * if this is a connection from DSP node to GPP, or a | ||
127 | * node handle returned from a successful call to | ||
128 | * node_allocate(). | ||
129 | * stream1: Output stream index on first node, to be connected | ||
130 | * to second node's input stream. Value must range from | ||
131 | * 0 <= stream1 < number of output streams. | ||
132 | * stream2: Input stream index on second node. Value must range | ||
133 | * from 0 <= stream2 < number of input streams. | ||
134 | * pattrs: Stream attributes (NULL ==> use defaults). | ||
135 | * conn_param: A pointer to a dsp_cbdata structure that defines | ||
136 | * connection parameter for device nodes to pass to DSP | ||
137 | * side. | ||
138 | * If the value of this parameter is NULL, then this API | ||
139 | * behaves like DSPNode_Connect. This parameter will have | ||
140 | * length of the string and the null terminated string in | ||
141 | * dsp_cbdata struct. This can be extended in the future to | ||
142 | * pass binary data. | ||
143 | * Returns: | ||
144 | * 0: Success. | ||
145 | * -EFAULT: Invalid node1 or node2. | ||
146 | * -ENOMEM: Insufficient host memory. | ||
147 | * -EINVAL: A stream index parameter is invalid. | ||
148 | * -EISCONN: A connection already exists for one of the | ||
149 | * indices stream1 or stream2. | ||
150 | * -EBADR: Either node1 or node2 is not in the | ||
151 | * NODE_ALLOCATED state. | ||
152 | * -ECONNREFUSED: No more connections available. | ||
153 | * -EPERM: Attempt to make an illegal connection (eg, | ||
154 | * Device node to device node, or device node to | ||
155 | * GPP), the two nodes are on different DSPs. | ||
156 | * Requires: | ||
157 | * Ensures: | ||
158 | */ | ||
159 | extern int node_connect(struct node_object *node1, | ||
160 | u32 stream1, | ||
161 | struct node_object *node2, | ||
162 | u32 stream2, | ||
163 | struct dsp_strmattr *pattrs, | ||
164 | struct dsp_cbdata | ||
165 | *conn_param); | ||
166 | |||
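To make the GPP connection case concrete, here is a minimal sketch that connects a DSP node's output stream 0 to the GPP, relying on the NODE_HGPPNODE constant described above, default stream attributes (pattrs == NULL) and no connection parameters.

/* Sketch only: DSP node output stream 0 -> GPP input stream 0. */
static int example_connect_to_gpp(struct node_object *dsp_node)
{
	return node_connect(dsp_node, 0, NODE_HGPPNODE, 0, NULL, NULL);
}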
167 | /* | ||
168 | * ======== node_create ======== | ||
169 | * Purpose: | ||
170 | * Create a node on the DSP by remotely calling the node's create | ||
171 | * function. If necessary, load code that contains the node's create | ||
172 | * function. | ||
173 | * Parameters: | ||
174 | * hnode: Node handle returned from node_allocate(). | ||
175 | * Returns: | ||
176 | * 0: Success. | ||
177 | * -EFAULT: Invalid hnode. | ||
178 | * -ESPIPE: Create function not found in the COFF file. | ||
179 | * -EBADR: Node is not in the NODE_ALLOCATED state. | ||
180 | * -ENOMEM: Memory allocation failure on the DSP. | ||
181 | * -ETIME: A timeout occurred before the DSP responded. | ||
182 | * -EPERM: A failure occurred, unable to create node. | ||
183 | * Requires: | ||
184 | * Ensures: | ||
185 | */ | ||
186 | extern int node_create(struct node_object *hnode); | ||
187 | |||
188 | /* | ||
189 | * ======== node_create_mgr ======== | ||
190 | * Purpose: | ||
191 | * Create a NODE Manager object. This object handles the creation, | ||
192 | * deletion, and execution of nodes on the DSP target. The NODE Manager | ||
193 | * also maintains a pipe map of used and available node connections. | ||
194 | * Each DEV object should have exactly one NODE Manager object. | ||
195 | * | ||
196 | * Parameters: | ||
197 | * node_man: Location to store node manager handle on output. | ||
198 | * hdev_obj: Device for this processor. | ||
199 | * Returns: | ||
200 | * 0: Success; | ||
201 | * -ENOMEM: Insufficient memory for requested resources. | ||
202 | * -EPERM: General failure. | ||
203 | * Requires: | ||
204 | * node_man != NULL. | ||
205 | * hdev_obj != NULL. | ||
206 | * Ensures: | ||
207 | * 0: Valid *node_man. | ||
208 | * error: *node_man == NULL. | ||
209 | */ | ||
210 | extern int node_create_mgr(struct node_mgr **node_man, | ||
211 | struct dev_object *hdev_obj); | ||
212 | |||
213 | /* | ||
214 | * ======== node_delete ======== | ||
215 | * Purpose: | ||
216 | * Delete resources allocated in node_allocate(). If the node was | ||
217 | * created, delete the node on the DSP by remotely calling the node's | ||
218 | * delete function. Loads the node's delete function if necessary. | ||
219 | * GPP side resources are freed after node's delete function returns. | ||
220 | * Parameters: | ||
221 | * noderes: Node resource info handle returned from | ||
222 | * node_allocate(). | ||
223 | * pr_ctxt: Pointer to process context data. | ||
224 | * Returns: | ||
225 | * 0: Success. | ||
226 | * -EFAULT: Invalid hnode. | ||
227 | * -ETIME: A timeout occurred before the DSP responded. | ||
228 | * -EPERM: A failure occurred in deleting the node. | ||
229 | * -ESPIPE: Delete function not found in the COFF file. | ||
230 | * Requires: | ||
231 | * Ensures: | ||
232 | * 0: hnode is invalid. | ||
233 | */ | ||
234 | extern int node_delete(struct node_res_object *noderes, | ||
235 | struct process_context *pr_ctxt); | ||
236 | |||
237 | /* | ||
238 | * ======== node_delete_mgr ======== | ||
239 | * Purpose: | ||
240 | * Delete the NODE Manager. | ||
241 | * Parameters: | ||
242 | * hnode_mgr: Node manager object. | ||
243 | * Returns: | ||
244 | * 0: Success. | ||
245 | * Requires: | ||
246 | * Valid hnode_mgr. | ||
247 | * Ensures: | ||
248 | */ | ||
249 | extern int node_delete_mgr(struct node_mgr *hnode_mgr); | ||
250 | |||
251 | /* | ||
252 | * ======== node_enum_nodes ======== | ||
253 | * Purpose: | ||
254 | * Enumerate the nodes currently allocated for the DSP. | ||
255 | * Parameters: | ||
256 | * hnode_mgr: Node manager returned from node_create_mgr(). | ||
257 | * node_tab: Array to copy node handles into. | ||
258 | * node_tab_size: Number of handles that can be written to node_tab. | ||
259 | * pu_num_nodes: Location where number of node handles written to | ||
260 | * node_tab will be written. | ||
261 | * pu_allocated: Location to write total number of allocated nodes. | ||
262 | * Returns: | ||
263 | * 0: Success. | ||
264 | * -EINVAL: node_tab is too small to hold all node handles. | ||
265 | * Requires: | ||
266 | * Valid hnode_mgr. | ||
267 | * node_tab != NULL || node_tab_size == 0. | ||
268 | * pu_num_nodes != NULL. | ||
269 | * pu_allocated != NULL. | ||
270 | * Ensures: | ||
271 | * - (-EINVAL && *pu_num_nodes == 0) | ||
272 | * - || (0 && *pu_num_nodes <= node_tab_size) && | ||
273 | * (*pu_allocated == *pu_num_nodes) | ||
274 | */ | ||
275 | extern int node_enum_nodes(struct node_mgr *hnode_mgr, | ||
276 | void **node_tab, | ||
277 | u32 node_tab_size, | ||
278 | u32 *pu_num_nodes, | ||
279 | u32 *pu_allocated); | ||
280 | |||
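A small sketch of the enumeration call; the table size of 16 is an arbitrary choice for the example, not a limit of the interface.

/* Sketch only: list up to 16 allocated nodes for a node manager. */
static int example_enum_nodes(struct node_mgr *node_mgr)
{
	void *node_tab[16];
	u32 num_nodes = 0, allocated = 0;
	int status;

	status = node_enum_nodes(node_mgr, node_tab, 16,
				 &num_nodes, &allocated);
	if (status == -EINVAL)
		pr_warn("node table too small for all allocated nodes\n");

	return status;
}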
281 | /* | ||
282 | * ======== node_free_msg_buf ======== | ||
283 | * Purpose: | ||
284 | * Free a message buffer previously allocated with node_alloc_msg_buf. | ||
285 | * Parameters: | ||
286 | * hnode: The node handle. | ||
287 | * pbuffer: (Address) Buffer allocated by node_alloc_msg_buf. | ||
288 | * pattr: Same buffer attributes passed to node_alloc_msg_buf. | ||
289 | * Returns: | ||
290 | * 0: Success. | ||
291 | * -EFAULT: Invalid node handle. | ||
292 | * -EPERM: Failure to free the buffer. | ||
293 | * Requires: | ||
294 | * pbuffer != NULL. | ||
295 | * Ensures: | ||
296 | */ | ||
297 | extern int node_free_msg_buf(struct node_object *hnode, | ||
298 | u8 *pbuffer, | ||
299 | struct dsp_bufferattr | ||
300 | *pattr); | ||
301 | |||
302 | /* | ||
303 | * ======== node_get_attr ======== | ||
304 | * Purpose: | ||
305 | * Copy the current attributes of the specified node into a dsp_nodeattr | ||
306 | * structure. | ||
307 | * Parameters: | ||
308 | * hnode: Node object allocated from node_allocate(). | ||
309 | * pattr: Pointer to dsp_nodeattr structure to copy node's | ||
310 | * attributes. | ||
311 | * attr_size: Size of pattr. | ||
312 | * Returns: | ||
313 | * 0: Success. | ||
314 | * -EFAULT: Invalid hnode. | ||
315 | * Requires: | ||
316 | * pattr != NULL. | ||
317 | * Ensures: | ||
318 | * 0: *pattrs contains the node's current attributes. | ||
319 | */ | ||
320 | extern int node_get_attr(struct node_object *hnode, | ||
321 | struct dsp_nodeattr *pattr, u32 attr_size); | ||
322 | |||
323 | /* | ||
324 | * ======== node_get_message ======== | ||
325 | * Purpose: | ||
326 | * Retrieve a message from a node on the DSP. The node must be either a | ||
327 | * message node, task node, or XDAIS socket node. | ||
328 | * If a message is not available, this function will block until a | ||
329 | * message is available, or the node's timeout value is reached. | ||
330 | * Parameters: | ||
331 | * hnode: Node handle returned from node_allocate(). | ||
332 | * message: Pointer to dsp_msg structure to copy the | ||
333 | * message into. | ||
334 | * utimeout: Timeout in milliseconds to wait for message. | ||
335 | * Returns: | ||
336 | * 0: Success. | ||
337 | * -EFAULT: Invalid hnode. | ||
338 | * -EPERM: Cannot retrieve messages from this type of node. | ||
339 | * Error occurred while trying to retrieve a message. | ||
340 | * -ETIME: Timeout occurred and no message is available. | ||
341 | * Requires: | ||
342 | * message != NULL. | ||
343 | * Ensures: | ||
344 | */ | ||
345 | extern int node_get_message(struct node_object *hnode, | ||
346 | struct dsp_msg *message, u32 utimeout); | ||
347 | |||
348 | /* | ||
349 | * ======== node_get_nldr_obj ======== | ||
350 | * Purpose: | ||
351 | * Retrieve the Nldr manager | ||
352 | * Parameters: | ||
353 | * hnode_mgr: Node Manager | ||
354 | * nldr_ovlyobj: Pointer to a Nldr manager handle | ||
355 | * Returns: | ||
356 | * 0: Success. | ||
357 | * -EFAULT: Invalid hnode. | ||
358 | * Ensures: | ||
359 | */ | ||
360 | extern int node_get_nldr_obj(struct node_mgr *hnode_mgr, | ||
361 | struct nldr_object **nldr_ovlyobj); | ||
362 | |||
363 | /* | ||
364 | * ======== node_on_exit ======== | ||
365 | * Purpose: | ||
366 | * Gets called when RMS_EXIT is received for a node. PROC needs to pass | ||
367 | * this function as a parameter to msg_create(). This function then gets | ||
368 | * called by the Bridge driver when an exit message for a node is received. | ||
369 | * Parameters: | ||
370 | * hnode: Handle of the node that the exit message is for. | ||
371 | * node_status: Return status of the node's execute phase. | ||
372 | * Returns: | ||
373 | * Ensures: | ||
374 | */ | ||
375 | void node_on_exit(struct node_object *hnode, s32 node_status); | ||
376 | |||
377 | /* | ||
378 | * ======== node_pause ======== | ||
379 | * Purpose: | ||
380 | * Suspend execution of a node currently running on the DSP. | ||
381 | * Parameters: | ||
382 | * hnode: Node object representing a node currently | ||
383 | * running on the DSP. | ||
384 | * Returns: | ||
385 | * 0: Success. | ||
386 | * -EFAULT: Invalid hnode. | ||
387 | * -EPERM: Node is not a task or socket node. | ||
388 | * Failed to pause node. | ||
389 | * -ETIME: A timeout occurred before the DSP responded. | ||
390 | * DSP_EWRONGSTATE: Node is not in NODE_RUNNING state. | ||
391 | * Requires: | ||
392 | * Ensures: | ||
393 | */ | ||
394 | extern int node_pause(struct node_object *hnode); | ||
395 | |||
396 | /* | ||
397 | * ======== node_put_message ======== | ||
398 | * Purpose: | ||
399 | * Send a message to a message node, task node, or XDAIS socket node. | ||
400 | * This function will block until the message stream can accommodate | ||
401 | * the message, or a timeout occurs. The message will be copied, so Msg | ||
402 | * can be re-used immediately after return. | ||
403 | * Parameters: | ||
404 | * hnode: Node handle returned by node_allocate(). | ||
405 | * pmsg: Location of message to be sent to the node. | ||
406 | * utimeout: Timeout in msecs to wait. | ||
407 | * Returns: | ||
408 | * 0: Success. | ||
409 | * -EFAULT: Invalid hnode. | ||
410 | * -EPERM: Messages can't be sent to this type of node. | ||
411 | * Unable to send message. | ||
412 | * -ETIME: Timeout occurred before message could be set. | ||
413 | * -EBADR: Node is in invalid state for sending messages. | ||
414 | * Requires: | ||
415 | * pmsg != NULL. | ||
416 | * Ensures: | ||
417 | */ | ||
418 | extern int node_put_message(struct node_object *hnode, | ||
419 | const struct dsp_msg *pmsg, u32 utimeout); | ||
420 | |||
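node_put_message() and node_get_message() naturally pair up into a request/response exchange; a sketch with a one-second timeout follows (message payload setup is elided, since it depends on the node).

/* Sketch only: send a command to a node and wait for its reply. */
static int example_msg_roundtrip(struct node_object *node,
				 const struct dsp_msg *cmd,
				 struct dsp_msg *reply)
{
	int status;

	status = node_put_message(node, cmd, 1000);	/* 1 s timeout */
	if (status)
		return status;

	return node_get_message(node, reply, 1000);
}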
421 | /* | ||
422 | * ======== node_register_notify ======== | ||
423 | * Purpose: | ||
424 | * Register to be notified on specific events for this node. | ||
425 | * Parameters: | ||
426 | * hnode: Node handle returned by node_allocate(). | ||
427 | * event_mask: Mask of types of events to be notified about. | ||
428 | * notify_type: Type of notification to be sent. | ||
429 | * hnotification: Handle to be used for notification. | ||
430 | * Returns: | ||
431 | * 0: Success. | ||
432 | * -EFAULT: Invalid hnode. | ||
433 | * -ENOMEM: Insufficient memory on GPP. | ||
434 | * -EINVAL: event_mask is invalid. | ||
435 | * -ENOSYS: Notification type specified by notify_type is not | ||
436 | * supported. | ||
437 | * Requires: | ||
438 | * hnotification != NULL. | ||
439 | * Ensures: | ||
440 | */ | ||
441 | extern int node_register_notify(struct node_object *hnode, | ||
442 | u32 event_mask, u32 notify_type, | ||
443 | struct dsp_notification | ||
444 | *hnotification); | ||
445 | |||
446 | /* | ||
447 | * ======== node_run ======== | ||
448 | * Purpose: | ||
449 | * Start execution of a node's execute phase, or resume execution of | ||
450 | * a node that has been suspended (via node_pause()) on the DSP. Load | ||
451 | * the node's execute function if necessary. | ||
452 | * Parameters: | ||
453 | * hnode: Node object representing a node currently | ||
454 | * running on the DSP. | ||
455 | * Returns: | ||
456 | * 0: Success. | ||
457 | * -EFAULT: Invalid hnode. | ||
458 | * -EPERM: hnode doesn't represent a message, task or dais socket node. | ||
459 | * Unable to start or resume execution. | ||
460 | * -ETIME: A timeout occurred before the DSP responded. | ||
461 | * DSP_EWRONGSTATE: Node is not in NODE_PAUSED or NODE_CREATED state. | ||
462 | * -ESPIPE: Execute function not found in the COFF file. | ||
463 | * Requires: | ||
464 | * Ensures: | ||
465 | */ | ||
466 | extern int node_run(struct node_object *hnode); | ||
467 | |||
468 | /* | ||
469 | * ======== node_terminate ======== | ||
470 | * Purpose: | ||
471 | * Signal a node running on the DSP that it should exit its execute | ||
472 | * phase function. | ||
473 | * Parameters: | ||
474 | * hnode: Node object representing a node currently | ||
475 | * running on the DSP. | ||
476 | * pstatus: Location to store execute-phase function return | ||
477 | * value. | ||
478 | * Returns: | ||
479 | * 0: Success. | ||
480 | * -EFAULT: Invalid hnode. | ||
481 | * -ETIME: A timeout occurred before the DSP responded. | ||
482 | * -EPERM: Type of node specified cannot be terminated. | ||
483 | * Unable to terminate the node. | ||
484 | * -EBADR: Operation not valid for the current node state. | ||
485 | * Requires: | ||
486 | * pstatus != NULL. | ||
487 | * Ensures: | ||
488 | */ | ||
489 | extern int node_terminate(struct node_object *hnode, | ||
490 | int *pstatus); | ||
491 | |||
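Taken together with node_create(), node_run() and node_delete() above, these calls describe a node's life cycle. A condensed sketch of that sequence follows, with error unwinding simplified and the node/resource handles assumed to come from node_allocate().

/* Sketch only: create, run, terminate, then free a previously
 * allocated node.
 */
static int example_node_lifecycle(struct node_object *hnode,
				  struct node_res_object *noderes,
				  struct process_context *pr_ctxt)
{
	int exit_status = 0;
	int status;

	status = node_create(hnode);
	if (status)
		goto out;

	status = node_run(hnode);
	if (status)
		goto out;

	/* ... exchange messages / stream data with the node ... */

	status = node_terminate(hnode, &exit_status);
out:
	node_delete(noderes, pr_ctxt);
	return status;
}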
492 | /* | ||
493 | * ======== node_get_uuid_props ======== | ||
494 | * Purpose: | ||
495 | * Fetch Node properties given the UUID | ||
496 | * Parameters: | ||
497 | * | ||
498 | */ | ||
499 | extern int node_get_uuid_props(void *hprocessor, | ||
500 | const struct dsp_uuid *node_uuid, | ||
501 | struct dsp_ndbprops | ||
502 | *node_props); | ||
503 | |||
504 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
505 | /** | ||
506 | * node_find_addr() - Find the closest symbol to the given address. | ||
507 | * | ||
508 | * @node_mgr: Node manager handle | ||
509 | * @sym_addr: Given address to find the closest symbol | ||
510 | * @offset_range: offset range to look for the closest symbol | ||
511 | * @sym_addr_output: Symbol Output address | ||
512 | * @sym_name: String with the symbol name of the closest symbol | ||
513 | * | ||
514 | * This function finds the closest symbol to the address where a MMU | ||
515 | * Fault occurred on the DSP side. | ||
516 | */ | ||
517 | int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr, | ||
518 | u32 offset_range, void *sym_addr_output, | ||
519 | char *sym_name); | ||
520 | |||
521 | enum node_state node_get_state(void *hnode); | ||
522 | #endif | ||
523 | |||
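When CONFIG_TIDSPBRIDGE_BACKTRACE is enabled, a fault path can translate a DSP address into the nearest symbol via node_find_addr(); in the sketch below the 64-byte name buffer and 0x1000 offset range are arbitrary example values.

#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
/* Sketch only: report the symbol closest to a faulting DSP address. */
static void example_report_fault(struct node_mgr *node_mgr, u32 fault_addr)
{
	char sym_name[64] = "";
	u32 sym_addr = 0;

	if (!node_find_addr(node_mgr, fault_addr, 0x1000,
			    &sym_addr, sym_name))
		pr_err("DSP MMU fault at 0x%x, near %s (0x%x)\n",
		       fault_addr, sym_name, sym_addr);
}
#endif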
524 | #endif /* NODE_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nodedefs.h b/drivers/staging/tidspbridge/include/dspbridge/nodedefs.h deleted file mode 100644 index fb9623d8a79a..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/nodedefs.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | /* | ||
2 | * nodedefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global NODE constants and types, shared by PROCESSOR, NODE, and DISP. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef NODEDEFS_ | ||
20 | #define NODEDEFS_ | ||
21 | |||
22 | #define NODE_SUSPENDEDPRI -1 | ||
23 | |||
24 | /* NODE Objects: */ | ||
25 | struct node_mgr; | ||
26 | struct node_object; | ||
27 | |||
28 | #endif /* NODEDEFS_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h b/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h deleted file mode 100644 index d5b54bb81e8e..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/nodepriv.h +++ /dev/null | |||
@@ -1,181 +0,0 @@ | |||
1 | /* | ||
2 | * nodepriv.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Private node header shared by NODE and DISP. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef NODEPRIV_ | ||
20 | #define NODEPRIV_ | ||
21 | |||
22 | #include <dspbridge/strmdefs.h> | ||
23 | #include <dspbridge/nodedefs.h> | ||
24 | #include <dspbridge/nldrdefs.h> | ||
25 | |||
26 | /* DSP address of node environment structure */ | ||
27 | typedef u32 nodeenv; | ||
28 | |||
29 | /* | ||
30 | * Node create structures | ||
31 | */ | ||
32 | |||
33 | /* Message node */ | ||
34 | struct node_msgargs { | ||
35 | u32 max_msgs; /* Max # of simultaneous messages for node */ | ||
36 | u32 seg_id; /* Segment for allocating message buffers */ | ||
37 | u32 notify_type; /* Notify type (SEM_post, SWI_post, etc.) */ | ||
38 | u32 arg_length; /* Length in 32-bit words of arg data block */ | ||
39 | u8 *pdata; /* Argument data for node */ | ||
40 | }; | ||
41 | |||
42 | struct node_strmdef { | ||
43 | u32 buf_size; /* Size of buffers for SIO stream */ | ||
44 | u32 num_bufs; /* max # of buffers in SIO stream at once */ | ||
45 | u32 seg_id; /* Memory segment id to allocate buffers */ | ||
46 | u32 timeout; /* Timeout for blocking SIO calls */ | ||
47 | u32 buf_alignment; /* Buffer alignment */ | ||
48 | char *sz_device; /* Device name for stream */ | ||
49 | }; | ||
50 | |||
51 | /* Task node */ | ||
52 | struct node_taskargs { | ||
53 | struct node_msgargs node_msg_args; | ||
54 | s32 prio; | ||
55 | u32 stack_size; | ||
56 | u32 sys_stack_size; | ||
57 | u32 stack_seg; | ||
58 | u32 dsp_heap_res_addr; /* DSP virtual heap address */ | ||
59 | u32 dsp_heap_addr; /* DSP virtual heap address */ | ||
60 | u32 heap_size; /* Heap size */ | ||
61 | u32 gpp_heap_addr; /* GPP virtual heap address */ | ||
62 | u32 profile_id; /* Profile ID */ | ||
63 | u32 num_inputs; | ||
64 | u32 num_outputs; | ||
65 | u32 dais_arg; /* Address of iAlg object */ | ||
66 | struct node_strmdef *strm_in_def; | ||
67 | struct node_strmdef *strm_out_def; | ||
68 | }; | ||
69 | |||
70 | /* | ||
71 | * ======== node_createargs ======== | ||
72 | */ | ||
73 | struct node_createargs { | ||
74 | union { | ||
75 | struct node_msgargs node_msg_args; | ||
76 | struct node_taskargs task_arg_obj; | ||
77 | } asa; | ||
78 | }; | ||
79 | |||
80 | /* | ||
81 | * ======== node_get_channel_id ======== | ||
82 | * Purpose: | ||
83 | * Get the channel index reserved for a stream connection between the | ||
84 | * host and a node. This index is reserved when node_connect() is called | ||
85 | * to connect the node with the host. This index should be passed to | ||
86 | * the CHNL_Open function when the stream is actually opened. | ||
87 | * Parameters: | ||
88 | * hnode: Node object allocated from node_allocate(). | ||
89 | * dir: Input (DSP_TONODE) or output (DSP_FROMNODE). | ||
90 | * index: Stream index. | ||
91 | * chan_id: Location to store channel index. | ||
92 | * Returns: | ||
93 | * 0: Success. | ||
94 | * -EFAULT: Invalid hnode. | ||
95 | * -EPERM: Not a task or DAIS socket node. | ||
96 | * -EINVAL: The node's stream corresponding to index and dir | ||
97 | * is not a stream to or from the host. | ||
98 | * Requires: | ||
99 | * Valid dir. | ||
100 | * chan_id != NULL. | ||
101 | * Ensures: | ||
102 | */ | ||
103 | extern int node_get_channel_id(struct node_object *hnode, | ||
104 | u32 dir, u32 index, u32 *chan_id); | ||
105 | |||
106 | /* | ||
107 | * ======== node_get_strm_mgr ======== | ||
108 | * Purpose: | ||
109 | * Get the STRM manager for a node. | ||
110 | * Parameters: | ||
111 | * hnode: Node allocated with node_allocate(). | ||
112 | * strm_man: Location to store STRM manager on output. | ||
113 | * Returns: | ||
114 | * 0: Success. | ||
115 | * -EFAULT: Invalid hnode. | ||
116 | * Requires: | ||
117 | * strm_man != NULL. | ||
118 | * Ensures: | ||
119 | */ | ||
120 | extern int node_get_strm_mgr(struct node_object *hnode, | ||
121 | struct strm_mgr **strm_man); | ||
122 | |||
123 | /* | ||
124 | * ======== node_get_timeout ======== | ||
125 | * Purpose: | ||
126 | * Get the timeout value of a node. | ||
127 | * Parameters: | ||
128 | * hnode: Node allocated with node_allocate(), or DSP_HGPPNODE. | ||
129 | * Returns: | ||
130 | * Node's timeout value. | ||
131 | * Requires: | ||
132 | * Valid hnode. | ||
133 | * Ensures: | ||
134 | */ | ||
135 | extern u32 node_get_timeout(struct node_object *hnode); | ||
136 | |||
137 | /* | ||
138 | * ======== node_get_type ======== | ||
139 | * Purpose: | ||
140 | * Get the type (device, message, task, or XDAIS socket) of a node. | ||
141 | * Parameters: | ||
142 | * hnode: Node allocated with node_allocate(), or DSP_HGPPNODE. | ||
143 | * Returns: | ||
144 | * Node type: NODE_DEVICE, NODE_TASK, NODE_XDAIS, or NODE_GPP. | ||
145 | * Requires: | ||
146 | * Valid hnode. | ||
147 | * Ensures: | ||
148 | */ | ||
149 | extern enum node_type node_get_type(struct node_object *hnode); | ||
150 | |||
151 | /* | ||
152 | * ======== get_node_info ======== | ||
153 | * Purpose: | ||
154 | * Get node information without holding semaphore. | ||
155 | * Parameters: | ||
156 | * hnode: Node allocated with node_allocate(), or DSP_HGPPNODE. | ||
157 | * Returns: | ||
158 | * Node info: priority, device owner, no. of streams, execution state | ||
159 | * NDB properties. | ||
160 | * Requires: | ||
161 | * Valid hnode. | ||
162 | * Ensures: | ||
163 | */ | ||
164 | extern void get_node_info(struct node_object *hnode, | ||
165 | struct dsp_nodeinfo *node_info); | ||
166 | |||
167 | /* | ||
168 | * ======== node_get_load_type ======== | ||
169 | * Purpose: | ||
170 | * Get the load type (dynamic, overlay, static) of a node. | ||
171 | * Parameters: | ||
172 | * hnode: Node allocated with node_allocate(), or DSP_HGPPNODE. | ||
173 | * Returns: | ||
174 | * Node type: NLDR_DYNAMICLOAD, NLDR_OVLYLOAD, NLDR_STATICLOAD | ||
175 | * Requires: | ||
176 | * Valid hnode. | ||
177 | * Ensures: | ||
178 | */ | ||
179 | extern enum nldr_loadtype node_get_load_type(struct node_object *hnode); | ||
180 | |||
181 | #endif /* NODEPRIV_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/ntfy.h b/drivers/staging/tidspbridge/include/dspbridge/ntfy.h deleted file mode 100644 index 6bb94d20e99a..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/ntfy.h +++ /dev/null | |||
@@ -1,217 +0,0 @@ | |||
1 | /* | ||
2 | * ntfy.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Manage lists of notification events. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef NTFY_ | ||
20 | #define NTFY_ | ||
21 | |||
22 | #include <dspbridge/host_os.h> | ||
23 | #include <dspbridge/dbdefs.h> | ||
24 | #include <dspbridge/sync.h> | ||
25 | |||
26 | /** | ||
27 | * ntfy_object - head structure to notify dspbridge events | ||
28 | * @head: List of notify objects | ||
29 | * @ntfy_lock: lock for list access. | ||
30 | * | ||
31 | */ | ||
32 | struct ntfy_object { | ||
33 | struct raw_notifier_head head;/* List of notifier objects */ | ||
34 | spinlock_t ntfy_lock; /* For critical sections */ | ||
35 | }; | ||
36 | |||
37 | /** | ||
38 | * ntfy_event - structure to store a specific event to be notified | ||
39 | * @noti_block: List of notify objects | ||
40 | * @event: event that it responds to | ||
41 | * @type: event type (only DSP_SIGNALEVENT supported) | ||
42 | * @sync_obj: sync_event used to set the event | ||
43 | * | ||
44 | */ | ||
45 | struct ntfy_event { | ||
46 | struct notifier_block noti_block; | ||
47 | u32 event; /* Events to be notified about */ | ||
48 | u32 type; /* Type of notification to be sent */ | ||
49 | struct sync_object sync_obj; | ||
50 | }; | ||
51 | |||
52 | |||
53 | /** | ||
54 | * dsp_notifier_event() - callback function to notify events | ||
55 | * @this: pointer to itself struct notifier_block | ||
56 | * @event: event to be notified. | ||
57 | * @data: Currently not used. | ||
58 | * | ||
59 | */ | ||
60 | int dsp_notifier_event(struct notifier_block *this, unsigned long event, | ||
61 | void *data); | ||
62 | |||
63 | /** | ||
64 | * ntfy_init() - Set the initial state of the ntfy_object structure. | ||
65 | * @no: pointer to ntfy_object structure. | ||
66 | * | ||
67 | * This function sets the initial state of the ntfy_object so that it | ||
68 | * can be used by the other ntfy functions. | ||
69 | */ | ||
70 | |||
71 | static inline void ntfy_init(struct ntfy_object *no) | ||
72 | { | ||
73 | spin_lock_init(&no->ntfy_lock); | ||
74 | RAW_INIT_NOTIFIER_HEAD(&no->head); | ||
75 | } | ||
76 | |||
77 | /** | ||
78 | * ntfy_delete() - delete the list of registered notify events. | ||
79 | * @ntfy_obj: Pointer to the ntfy object structure. | ||
80 | * | ||
81 | * This function removes all the registered notify events at once; | ||
82 | * no prior per-event unregister call is needed. To unregister a | ||
83 | * single ntfy_event, see ntfy_unregister(). | ||
84 | * | ||
85 | */ | ||
86 | static inline void ntfy_delete(struct ntfy_object *ntfy_obj) | ||
87 | { | ||
88 | struct ntfy_event *ne; | ||
89 | struct notifier_block *nb; | ||
90 | |||
91 | spin_lock_bh(&ntfy_obj->ntfy_lock); | ||
92 | nb = ntfy_obj->head.head; | ||
93 | while (nb) { | ||
94 | ne = container_of(nb, struct ntfy_event, noti_block); | ||
95 | nb = nb->next; | ||
96 | kfree(ne); | ||
97 | } | ||
98 | spin_unlock_bh(&ntfy_obj->ntfy_lock); | ||
99 | } | ||
100 | |||
101 | /** | ||
102 | * ntfy_notify() - notify all the clients registered for a specific event. | ||
103 | * @ntfy_obj: Pointer to the ntfy_object structure. | ||
104 | * @event: event to be notified. | ||
105 | * | ||
106 | * This function traverses all the registered ntfy events and | ||
107 | * sets the ones that match @event. | ||
108 | */ | ||
109 | static inline void ntfy_notify(struct ntfy_object *ntfy_obj, u32 event) | ||
110 | { | ||
111 | spin_lock_bh(&ntfy_obj->ntfy_lock); | ||
112 | raw_notifier_call_chain(&ntfy_obj->head, event, NULL); | ||
113 | spin_unlock_bh(&ntfy_obj->ntfy_lock); | ||
114 | } | ||
115 | |||
116 | |||
117 | |||
118 | /** | ||
119 | * ntfy_event_create() - Create and initialize a ntfy_event structure. | ||
120 | * @event: event that the ntfy event will respond to | ||
121 | * @type: event type (only DSP_SIGNALEVENT supported) | ||
122 | * | ||
123 | * This function creates a ntfy_event element and sets the event it will | ||
124 | * respond to, so that it can be used by the other ntfy functions. | ||
125 | * On success it returns a pointer to the newly created ntfy_event | ||
126 | * struct; otherwise it returns NULL. | ||
127 | */ | ||
128 | |||
129 | static inline struct ntfy_event *ntfy_event_create(u32 event, u32 type) | ||
130 | { | ||
131 | struct ntfy_event *ne; | ||
132 | ne = kmalloc(sizeof(struct ntfy_event), GFP_KERNEL); | ||
133 | if (ne) { | ||
134 | sync_init_event(&ne->sync_obj); | ||
135 | ne->noti_block.notifier_call = dsp_notifier_event; | ||
136 | ne->event = event; | ||
137 | ne->type = type; | ||
138 | } | ||
139 | return ne; | ||
140 | } | ||
141 | |||
142 | /** | ||
143 | * ntfy_register() - register new ntfy_event into a given ntfy_object | ||
144 | * @ntfy_obj: Pointer to the ntfy_object structure. | ||
145 | * @noti: Pointer to the handle to be returned to the user space. | ||
146 | * @event: event that the ntfy event will respond to | ||
147 | * @type: event type (only DSP_SIGNALEVENT supported) | ||
148 | * | ||
149 | * This function registers a new ntfy_event in the ntfy_object list, | ||
150 | * which will respond to the @event passed. | ||
151 | * It returns 0 on success, -EFAULT in case of bad pointers, | ||
152 | * -EINVAL for an invalid (zero) event, and -ENOMEM in case of | ||
153 | * no memory to create the ntfy_event. | ||
154 | */ | ||
155 | static inline int ntfy_register(struct ntfy_object *ntfy_obj, | ||
156 | struct dsp_notification *noti, | ||
157 | u32 event, u32 type) | ||
158 | { | ||
159 | struct ntfy_event *ne; | ||
160 | int status = 0; | ||
161 | |||
162 | if (!noti || !ntfy_obj) { | ||
163 | status = -EFAULT; | ||
164 | goto func_end; | ||
165 | } | ||
166 | if (!event) { | ||
167 | status = -EINVAL; | ||
168 | goto func_end; | ||
169 | } | ||
170 | ne = ntfy_event_create(event, type); | ||
171 | if (!ne) { | ||
172 | status = -ENOMEM; | ||
173 | goto func_end; | ||
174 | } | ||
175 | noti->handle = &ne->sync_obj; | ||
176 | |||
177 | spin_lock_bh(&ntfy_obj->ntfy_lock); | ||
178 | raw_notifier_chain_register(&ntfy_obj->head, &ne->noti_block); | ||
179 | spin_unlock_bh(&ntfy_obj->ntfy_lock); | ||
180 | func_end: | ||
181 | return status; | ||
182 | } | ||
183 | |||
184 | /** | ||
185 | * ntfy_unregister() - unregister a ntfy_event from a given ntfy_object | ||
186 | * @ntfy_obj: Pointer to the ntfy_object structure. | ||
187 | * @noti: Pointer to the event that will be removed. | ||
188 | * | ||
189 | * This function unregisters a ntfy_event from the ntfy_object list; | ||
190 | * @noti identifies the event to be removed. | ||
191 | * It returns 0 on success and | ||
192 | * -EFAULT in case of bad pointers | ||
193 | * (ntfy_obj or noti being NULL). | ||
194 | */ | ||
195 | static inline int ntfy_unregister(struct ntfy_object *ntfy_obj, | ||
196 | struct dsp_notification *noti) | ||
197 | { | ||
198 | int status = 0; | ||
199 | struct ntfy_event *ne; | ||
200 | |||
201 | if (!noti || !ntfy_obj) { | ||
202 | status = -EFAULT; | ||
203 | goto func_end; | ||
204 | } | ||
205 | |||
206 | ne = container_of((struct sync_object *)noti, struct ntfy_event, | ||
207 | sync_obj); | ||
208 | spin_lock_bh(&ntfy_obj->ntfy_lock); | ||
209 | raw_notifier_chain_unregister(&ntfy_obj->head, | ||
210 | &ne->noti_block); | ||
211 | kfree(ne); | ||
212 | spin_unlock_bh(&ntfy_obj->ntfy_lock); | ||
213 | func_end: | ||
214 | return status; | ||
215 | } | ||
216 | |||
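Putting the helpers above together: an owner embeds a ntfy_object in its own state, initializes it once, lets clients register with ntfy_register(), raises events with ntfy_notify(), and tears everything down with ntfy_delete(). In this sketch EXAMPLE_EVENT is a stand-in for a real DSP_* event mask and struct example_dev is hypothetical.

#define EXAMPLE_EVENT 0x1	/* stand-in for a real DSP_* event bit */

struct example_dev {
	struct ntfy_object ntfy;
	/* ... other driver state ... */
};

static void example_dev_setup(struct example_dev *dev)
{
	ntfy_init(&dev->ntfy);
}

static int example_dev_add_client(struct example_dev *dev,
				  struct dsp_notification *noti)
{
	return ntfy_register(&dev->ntfy, noti, EXAMPLE_EVENT,
			     DSP_SIGNALEVENT);
}

static void example_dev_raise_event(struct example_dev *dev)
{
	ntfy_notify(&dev->ntfy, EXAMPLE_EVENT);
}

static void example_dev_teardown(struct example_dev *dev)
{
	ntfy_delete(&dev->ntfy);
}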
217 | #endif /* NTFY_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h deleted file mode 100644 index 64c2457aae95..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/proc.h +++ /dev/null | |||
@@ -1,591 +0,0 @@ | |||
1 | /* | ||
2 | * proc.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * This is the DSP API RM module interface. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef PROC_ | ||
20 | #define PROC_ | ||
21 | |||
22 | #include <dspbridge/cfgdefs.h> | ||
23 | #include <dspbridge/devdefs.h> | ||
24 | #include <dspbridge/drv.h> | ||
25 | |||
26 | /* | ||
27 | * ======== proc_attach ======== | ||
28 | * Purpose: | ||
29 | * Prepare for communication with a particular DSP processor, and return | ||
30 | * a handle to the processor object. The PROC Object gets created | ||
31 | * Parameters: | ||
32 | * processor_id : The processor index (zero-based). | ||
33 | * hmgr_obj : Handle to the Manager Object | ||
34 | * attr_in : Ptr to the dsp_processorattrin structure. | ||
35 | * A NULL value means use default values. | ||
36 | * ph_processor : Ptr to location to store processor handle. | ||
37 | * Returns: | ||
38 | * 0 : Success. | ||
39 | * -EPERM : General failure. | ||
40 | * -EFAULT : Invalid processor handle. | ||
41 | * 0: Success; Processor already attached. | ||
42 | * Requires: | ||
43 | * ph_processor != NULL. | ||
44 | * PROC Initialized. | ||
45 | * Ensures: | ||
46 | * -EPERM, and *ph_processor == NULL, OR | ||
47 | * Success and *ph_processor is a Valid Processor handle OR | ||
48 | * 0 and *ph_processor is a Valid Processor. | ||
49 | * Details: | ||
50 | * When attr_in is NULL, the default timeout value is 10 seconds. | ||
51 | */ | ||
52 | extern int proc_attach(u32 processor_id, | ||
53 | const struct dsp_processorattrin | ||
54 | *attr_in, void **ph_processor, | ||
55 | struct process_context *pr_ctxt); | ||
56 | |||
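A minimal sketch of attaching to processor 0 with default attributes (attr_in == NULL); the process context is assumed to have been created by the driver's open path.

/* Sketch only: attach to the first DSP processor. */
static int example_attach(struct process_context *pr_ctxt, void **proc)
{
	int status;

	status = proc_attach(0, NULL, proc, pr_ctxt);
	if (status)
		pr_err("proc_attach failed: %d\n", status);

	return status;
}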
57 | /* | ||
58 | * ======== proc_auto_start ========= | ||
59 | * Purpose: | ||
60 | * A Particular device gets loaded with the default image | ||
61 | * if the AutoStart flag is set. | ||
62 | * Parameters: | ||
63 | * hdev_obj : Handle to the Device | ||
64 | * Returns: | ||
65 | * 0 : On Successful Loading | ||
66 | * -ENOENT : No DSP exec file found. | ||
67 | * -EPERM : General Failure | ||
68 | * Requires: | ||
69 | * hdev_obj != NULL. | ||
70 | * dev_node_obj != NULL. | ||
71 | * PROC Initialized. | ||
72 | * Ensures: | ||
73 | */ | ||
74 | extern int proc_auto_start(struct cfg_devnode *dev_node_obj, | ||
75 | struct dev_object *hdev_obj); | ||
76 | |||
77 | /* | ||
78 | * ======== proc_ctrl ======== | ||
79 | * Purpose: | ||
80 | * Pass control information to the GPP device driver managing the DSP | ||
81 | * processor. This will be an OEM-only function, and not part of the | ||
82 | * 'Bridge application developer's API. | ||
83 | * Parameters: | ||
84 | * hprocessor : The processor handle. | ||
85 | * dw_cmd : Private driver IOCTL cmd ID. | ||
86 | * pargs : Ptr to an driver defined argument structure. | ||
87 | * Returns: | ||
88 | * 0 : SUCCESS | ||
89 | * -EFAULT : Invalid processor handle. | ||
90 | * -ETIME: A Timeout Occurred before the Control information | ||
91 | * could be sent. | ||
92 | * -EPERM : General Failure. | ||
93 | * Requires: | ||
94 | * PROC Initialized. | ||
95 | * Ensures | ||
96 | * Details: | ||
97 | * This function Calls bridge_dev_ctrl. | ||
98 | */ | ||
99 | extern int proc_ctrl(void *hprocessor, | ||
100 | u32 dw_cmd, struct dsp_cbdata *arg); | ||
101 | |||
102 | /* | ||
103 | * ======== proc_detach ======== | ||
104 | * Purpose: | ||
105 | * Close a DSP processor and de-allocate all (GPP) resources reserved | ||
106 | * for it. The Processor Object is deleted. | ||
107 | * Parameters: | ||
108 | * pr_ctxt : The processor handle. | ||
109 | * Returns: | ||
110 | * 0 : Success. | ||
111 | * -EFAULT : InValid Handle. | ||
112 | * -EPERM : General failure. | ||
113 | * Requires: | ||
114 | * PROC Initialized. | ||
115 | * Ensures: | ||
116 | * PROC Object is destroyed. | ||
117 | */ | ||
118 | extern int proc_detach(struct process_context *pr_ctxt); | ||
119 | |||
120 | /* | ||
121 | * ======== proc_enum_nodes ======== | ||
122 | * Purpose: | ||
123 | * Enumerate the nodes currently allocated on a processor. | ||
124 | * Parameters: | ||
125 | * hprocessor : The processor handle. | ||
126 | * node_tab : The first Location of an array allocated for node | ||
127 | * handles. | ||
128 | * node_tab_size: The number of (DSP_HNODE) handles that can be held | ||
129 | * to the memory the client has allocated for node_tab | ||
130 | * pu_num_nodes : Location where DSPProcessor_EnumNodes will return | ||
131 | * the number of valid handles written to node_tab | ||
132 | * pu_allocated : Location where DSPProcessor_EnumNodes will return | ||
133 | * the number of nodes that are allocated on the DSP. | ||
134 | * Returns: | ||
135 | * 0 : Success. | ||
136 | * -EFAULT : Invalid processor handle. | ||
137 | * -EINVAL : The amount of memory allocated for node_tab is | ||
139 | * insufficient. That is the number of nodes actually | ||
139 | * allocated on the DSP is greater than the value | ||
140 | * specified for node_tab_size. | ||
141 | * -EPERM : Unable to get Resource Information. | ||
142 | * Details: | ||
143 | * Requires | ||
144 | * pu_num_nodes is not NULL. | ||
145 | * pu_allocated is not NULL. | ||
146 | * node_tab is not NULL. | ||
147 | * PROC Initialized. | ||
148 | * Ensures: | ||
149 | * Details: | ||
150 | */ | ||
151 | extern int proc_enum_nodes(void *hprocessor, | ||
152 | void **node_tab, | ||
153 | u32 node_tab_size, | ||
154 | u32 *pu_num_nodes, | ||
155 | u32 *pu_allocated); | ||
156 | |||
157 | /* | ||
158 | * ======== proc_get_resource_info ======== | ||
159 | * Purpose: | ||
160 | * Enumerate the resources currently available on a processor. | ||
161 | * Parameters: | ||
162 | * hprocessor : The processor handle. | ||
163 | * resource_type: Type of resource . | ||
164 | * resource_info: Ptr to the dsp_resourceinfo structure. | ||
165 | * resource_info_size: Size of the structure. | ||
166 | * Returns: | ||
167 | * 0 : Success. | ||
168 | * -EFAULT : Invalid processor handle. | ||
169 | * -EBADR: The processor is not in the PROC_RUNNING state. | ||
170 | * -ETIME: A timeout occurred before the DSP responded to the | ||
171 | * query. | ||
172 | * -EPERM : Unable to get Resource Information | ||
173 | * Requires: | ||
174 | * resource_info is not NULL. | ||
175 | * Parameter resource_type is Valid.[TBD] | ||
176 | * resource_info_size is >= sizeof dsp_resourceinfo struct. | ||
177 | * PROC Initialized. | ||
178 | * Ensures: | ||
179 | * Details: | ||
180 | * This function currently returns | ||
181 | * -ENOSYS, and does not write any data to the resource_info struct. | ||
182 | */ | ||
183 | extern int proc_get_resource_info(void *hprocessor, | ||
184 | u32 resource_type, | ||
185 | struct dsp_resourceinfo | ||
186 | *resource_info, | ||
187 | u32 resource_info_size); | ||
188 | |||
189 | /* | ||
190 | * ======== proc_get_dev_object ========= | ||
191 | * Purpose: | ||
192 | * Returns the DEV Handle for a given Processor handle | ||
193 | * Parameters: | ||
194 | * hprocessor : Processor Handle | ||
195 | * device_obj : Location to store the DEV Handle. | ||
196 | * Returns: | ||
197 | * 0 : Success; *device_obj has Dev handle | ||
198 | * -EPERM : Failure; *device_obj is zero. | ||
199 | * Requires: | ||
200 | * device_obj is not NULL | ||
201 | * PROC Initialized. | ||
202 | * Ensures: | ||
203 | * 0 : *device_obj is not NULL | ||
204 | * -EPERM : *device_obj is NULL. | ||
205 | */ | ||
206 | extern int proc_get_dev_object(void *hprocessor, | ||
207 | struct dev_object **device_obj); | ||
208 | |||
209 | /* | ||
210 | * ======== proc_get_state ======== | ||
211 | * Purpose: | ||
212 | * Report the state of the specified DSP processor. | ||
213 | * Parameters: | ||
214 | * hprocessor : The processor handle. | ||
215 | * proc_state_obj : Ptr to location to store the dsp_processorstate | ||
216 | * structure. | ||
217 | * state_info_size: Size of dsp_processorstate. | ||
218 | * Returns: | ||
219 | * 0 : Success. | ||
220 | * -EFAULT : Invalid processor handle. | ||
221 | * -EPERM : General failure while querying processor state. | ||
222 | * Requires: | ||
223 | * proc_state_obj is not NULL | ||
224 | * state_info_size is >= than the size of dsp_processorstate structure. | ||
225 | * PROC Initialized. | ||
226 | * Ensures: | ||
227 | * Details: | ||
228 | */ | ||
229 | extern int proc_get_state(void *hprocessor, struct dsp_processorstate | ||
230 | *proc_state_obj, u32 state_info_size); | ||
231 | |||
232 | /* | ||
233 | * ======== PROC_GetProcessorID ======== | ||
234 | * Purpose: | ||
235 | * Report the processor ID of the specified DSP processor. | ||
236 | * Parameters: | ||
237 | * hprocessor : The processor handle. | ||
238 | * proc_id : Processor ID | ||
239 | * | ||
240 | * Returns: | ||
241 | * 0 : Success. | ||
242 | * -EFAULT : Invalid processor handle. | ||
243 | * -EPERM : General failure while querying processor state. | ||
244 | * Requires: | ||
245 | * proc is a valid handle. | ||
246 | * proc_id is not NULL. | ||
247 | * PROC Initialized. | ||
248 | * Ensures: | ||
249 | * Details: | ||
250 | */ | ||
251 | extern int proc_get_processor_id(void *proc, u32 * proc_id); | ||
252 | |||
253 | /* | ||
254 | * ======== proc_get_trace ======== | ||
255 | * Purpose: | ||
256 | * Retrieve the trace buffer from the specified DSP processor. | ||
257 | * Parameters: | ||
258 | * hprocessor : The processor handle. | ||
259 | * pbuf : Ptr to buffer to hold trace output. | ||
260 | * max_size : Maximum size of the output buffer. | ||
261 | * Returns: | ||
262 | * 0 : Success. | ||
263 | * -EFAULT : Invalid processor handle. | ||
264 | * -EPERM : General failure while retrieving processor trace | ||
265 | * Buffer. | ||
266 | * Requires: | ||
267 | * pbuf is not NULL | ||
268 | * max_size is > 0. | ||
269 | * PROC Initialized. | ||
270 | * Ensures: | ||
271 | * Details: | ||
272 | */ | ||
273 | extern int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size); | ||
274 | |||
275 | /* | ||
276 | * ======== proc_load ======== | ||
277 | * Purpose: | ||
278 | * Reset a processor and load a new base program image. | ||
279 | * This will be an OEM-only function. | ||
280 | * Parameters: | ||
281 | * hprocessor: The processor handle. | ||
282 | * argc_index: The number of Arguments (strings) in user_args[] | ||
283 | * user_args: An Array of Arguments(Unicode Strings) | ||
284 | * user_envp: An Array of Environment settings(Unicode Strings) | ||
285 | * Returns: | ||
286 | * 0: Success. | ||
287 | * -ENOENT: The DSP Executable was not found. | ||
288 | * -EFAULT: Invalid processor handle. | ||
289 | * -EPERM : Unable to Load the Processor | ||
290 | * Requires: | ||
291 | * user_args is not NULL | ||
292 | * argc_index is > 0 | ||
293 | * PROC Initialized. | ||
294 | * Ensures: | ||
295 | * Success and ProcState == PROC_LOADED | ||
296 | * or DSP_FAILED status. | ||
297 | * Details: | ||
298 | * Does not implement access rights to control which GPP application | ||
299 | * can load the processor. | ||
300 | */ | ||
301 | extern int proc_load(void *hprocessor, | ||
302 | const s32 argc_index, const char **user_args, | ||
303 | const char **user_envp); | ||
304 | |||
305 | /* | ||
306 | * ======== proc_register_notify ======== | ||
307 | * Purpose: | ||
308 | * Register to be notified of specific processor events | ||
309 | * Parameters: | ||
310 | * hprocessor : The processor handle. | ||
311 | * event_mask : Mask of types of events to be notified about. | ||
312 | * notify_type : Type of notification to be sent. | ||
313 | * hnotification: Handle to be used for notification. | ||
314 | * Returns: | ||
315 | * 0 : Success. | ||
316 | * -EFAULT : Invalid processor handle or hnotification. | ||
317 | * -EINVAL : Parameter event_mask is Invalid | ||
318 | * DSP_ENOTIMP : The notification type specified in uNotifyMask | ||
319 | * is not supported. | ||
320 | * -EPERM : Unable to register for notification. | ||
321 | * Requires: | ||
322 | * hnotification is not NULL | ||
323 | * PROC Initialized. | ||
324 | * Ensures: | ||
325 | * Details: | ||
326 | */ | ||
327 | extern int proc_register_notify(void *hprocessor, | ||
328 | u32 event_mask, u32 notify_type, | ||
329 | struct dsp_notification | ||
330 | *hnotification); | ||
331 | |||
332 | /* | ||
333 | * ======== proc_notify_clients ======== | ||
334 | * Purpose: | ||
335 | * Notify the Processor Clients | ||
336 | * Parameters: | ||
337 | * proc : The processor handle. | ||
338 | * events : Event to be notified about. | ||
339 | * Returns: | ||
340 | * 0 : Success. | ||
341 | * -EFAULT : Invalid processor handle. | ||
342 | * -EPERM : Failure to Set or Reset the Event | ||
343 | * Requires: | ||
344 | * events is Supported or Valid type of Event | ||
345 | * proc is a valid handle | ||
346 | * PROC Initialized. | ||
347 | * Ensures: | ||
348 | */ | ||
349 | extern int proc_notify_clients(void *proc, u32 events); | ||
350 | |||
351 | /* | ||
352 | * ======== proc_notify_all_clients ======== | ||
353 | * Purpose: | ||
354 | * Notify the Processor Clients | ||
355 | * Parameters: | ||
356 | * proc : The processor handle. | ||
357 | * events : Event to be notified about. | ||
358 | * Returns: | ||
359 | * 0 : Success. | ||
360 | * -EFAULT : Invalid processor handle. | ||
361 | * -EPERM : Failure to Set or Reset the Event | ||
362 | * Requires: | ||
363 | * events is Supported or Valid type of Event | ||
364 | * proc is a valid handle | ||
365 | * PROC Initialized. | ||
366 | * Ensures: | ||
367 | * Details: | ||
368 | * NODE And STRM would use this function to notify their clients | ||
369 | * about the state changes in NODE or STRM. | ||
370 | */ | ||
371 | extern int proc_notify_all_clients(void *proc, u32 events); | ||
372 | |||
373 | /* | ||
374 | * ======== proc_start ======== | ||
375 | * Purpose: | ||
376 | * Start a processor running. | ||
377 | * Processor must be in PROC_LOADED state. | ||
378 | * This will be an OEM-only function, and not part of the 'Bridge | ||
379 | * application developer's API. | ||
380 | * Parameters: | ||
381 | * hprocessor : The processor handle. | ||
382 | * Returns: | ||
383 | * 0 : Success. | ||
384 | * -EFAULT : Invalid processor handle. | ||
385 | * -EBADR: Processor is not in PROC_LOADED state. | ||
386 | * -EPERM : Unable to start the processor. | ||
387 | * Requires: | ||
388 | * PROC Initialized. | ||
389 | * Ensures: | ||
390 | * Success and ProcState == PROC_RUNNING or DSP_FAILED status. | ||
391 | * Details: | ||
392 | */ | ||
393 | extern int proc_start(void *hprocessor); | ||
394 | |||
395 | /* | ||
396 | * ======== proc_stop ======== | ||
397 | * Purpose: | ||
398 | * Stop a running processor. | ||
399 | * Processor must be in PROC_LOADED state. | ||
400 | * This will be an OEM-only function, and not part of the 'Bridge | ||
401 | * application developer's API. | ||
402 | * Parameters: | ||
403 | * hprocessor : The processor handle. | ||
404 | * Returns: | ||
405 | * 0 : Success. | ||
406 | * -EFAULT : Invalid processor handle. | ||
407 | * -EBADR: Processor is not in PROC_LOADED state. | ||
408 | * -EPERM : Unable to stop the processor. | ||
409 | * Requires: | ||
410 | * PROC Initialized. | ||
411 | * Ensures: | ||
412 | * Success and ProcState == PROC_STOPPED or DSP_FAILED status. | ||
413 | * Details: | ||
414 | */ | ||
415 | extern int proc_stop(void *hprocessor); | ||
416 | |||
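proc_load(), proc_start() and proc_stop() combine as in the sketch below; the base-image path is a placeholder, not a documented default, and the argument array follows the argc/argv convention described for proc_load().

/* Sketch only: load a base image, start the DSP, later stop it. */
static int example_boot_dsp(void *hprocessor)
{
	const char *argv[] = { "/lib/dsp/baseimage.dof", NULL };
	int status;

	status = proc_load(hprocessor, 1, argv, NULL);
	if (status)
		return status;

	status = proc_start(hprocessor);
	if (status)
		return status;

	/* ... DSP is running; nodes are created and executed here ... */

	return proc_stop(hprocessor);
}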
417 | /* | ||
418 | * ======== proc_end_dma ======== | ||
419 | * Purpose: | ||
420 | * End a DMA transfer begun with proc_begin_dma() | ||
421 | * Parameters: | ||
422 | * hprocessor : The processor handle. | ||
423 | * pmpu_addr : Buffer start address | ||
424 | * ul_size : Buffer size | ||
425 | * dir : The direction of the transfer | ||
426 | * Requires: | ||
427 | * Memory was previously mapped. | ||
428 | */ | ||
429 | extern int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, | ||
430 | enum dma_data_direction dir); | ||
431 | /* | ||
432 | * ======== proc_begin_dma ======== | ||
433 | * Purpose: | ||
434 | * Begin a DMA transfer | ||
435 | * Parameters: | ||
436 | * hprocessor : The processor handle. | ||
437 | * pmpu_addr : Buffer start address | ||
438 | * ul_size : Buffer size | ||
439 | * dir : The direction of the transfer | ||
440 | * Requires: | ||
441 | * Memory was previously mapped. | ||
442 | */ | ||
443 | extern int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, | ||
444 | enum dma_data_direction dir); | ||
445 | |||
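The two DMA calls above bracket a transfer on a buffer that was previously mapped with proc_map(); a sketch for a GPP-to-DSP transfer follows, with the direction chosen purely for the example.

/* Sketch only: make a mapped buffer coherent around a DSP-bound transfer. */
static int example_dma_to_dsp(void *hprocessor, void *buf, u32 size)
{
	int status;

	status = proc_begin_dma(hprocessor, buf, size, DMA_TO_DEVICE);
	if (status)
		return status;

	/* ... notify the DSP that the buffer is ready and wait for it ... */

	return proc_end_dma(hprocessor, buf, size, DMA_TO_DEVICE);
}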
446 | /* | ||
447 | * ======== proc_flush_memory ======== | ||
448 | * Purpose: | ||
449 | * Flushes a buffer from the MPU data cache. | ||
450 | * Parameters: | ||
451 | * hprocessor : The processor handle. | ||
452 | * pmpu_addr : Buffer start address | ||
453 | * ul_size : Buffer size | ||
454 | * ul_flags : Reserved. | ||
455 | * Returns: | ||
456 | * 0 : Success. | ||
457 | * -EFAULT : Invalid processor handle. | ||
458 | * -EPERM : General failure. | ||
459 | * Requires: | ||
460 | * PROC Initialized. | ||
461 | * Ensures: | ||
462 | * Details: | ||
463 | * All the arguments are currently ignored. | ||
464 | */ | ||
465 | extern int proc_flush_memory(void *hprocessor, | ||
466 | void *pmpu_addr, u32 ul_size, u32 ul_flags); | ||
467 | |||
468 | /* | ||
469 | * ======== proc_invalidate_memory ======== | ||
470 | * Purpose: | ||
471 | * Invalidates a buffer from the MPU data cache. | ||
472 | * Parameters: | ||
473 | * hprocessor : The processor handle. | ||
474 | * pmpu_addr : Buffer start address | ||
475 | * ul_size : Buffer size | ||
476 | * Returns: | ||
477 | * 0 : Success. | ||
478 | * -EFAULT : Invalid processor handle. | ||
479 | * -EPERM : General failure. | ||
480 | * Requires: | ||
481 | * PROC Initialized. | ||
482 | * Ensures: | ||
483 | * Details: | ||
484 | * All the arguments are currently ignored. | ||
485 | */ | ||
486 | extern int proc_invalidate_memory(void *hprocessor, | ||
487 | void *pmpu_addr, u32 ul_size); | ||
488 | |||
489 | /* | ||
490 | * ======== proc_map ======== | ||
491 | * Purpose: | ||
492 | * Maps a MPU buffer to DSP address space. | ||
493 | * Parameters: | ||
494 | * hprocessor : The processor handle. | ||
495 | * pmpu_addr : Starting address of the memory region to map. | ||
496 | * ul_size : Size of the memory region to map. | ||
497 | * req_addr : Requested DSP start address. Offset-adjusted actual | ||
498 | * mapped address is in the last argument. | ||
499 | * pp_map_addr : Ptr to DSP side mapped u8 address. | ||
500 | * ul_map_attr : Optional endianness attributes, virt to phys flag. | ||
501 | * Returns: | ||
502 | * 0 : Success. | ||
503 | * -EFAULT : Invalid processor handle. | ||
504 | * -EPERM : General failure. | ||
505 | * -ENOMEM : MPU side memory allocation error. | ||
506 | * -ENOENT : Cannot find a reserved region starting with this | ||
507 | * : address. | ||
508 | * Requires: | ||
509 | * pmpu_addr is not NULL | ||
510 | * ul_size is not zero | ||
511 | * pp_map_addr is not NULL | ||
512 | * PROC Initialized. | ||
513 | * Ensures: | ||
514 | * Details: | ||
515 | */ | ||
516 | extern int proc_map(void *hprocessor, | ||
517 | void *pmpu_addr, | ||
518 | u32 ul_size, | ||
519 | void *req_addr, | ||
520 | void **pp_map_addr, u32 ul_map_attr, | ||
521 | struct process_context *pr_ctxt); | ||
522 | |||
523 | /* | ||
524 | * ======== proc_reserve_memory ======== | ||
525 | * Purpose: | ||
526 | * Reserve a virtually contiguous region of DSP address space. | ||
527 | * Parameters: | ||
528 | * hprocessor : The processor handle. | ||
529 | * ul_size : Size of the address space to reserve. | ||
530 | * pp_rsv_addr : Ptr to DSP side reserved u8 address. | ||
531 | * Returns: | ||
532 | * 0 : Success. | ||
533 | * -EFAULT : Invalid processor handle. | ||
534 | * -EPERM : General failure. | ||
535 | * -ENOMEM : Cannot reserve chunk of this size. | ||
536 | * Requires: | ||
537 | * pp_rsv_addr is not NULL | ||
538 | * PROC Initialized. | ||
539 | * Ensures: | ||
540 | * Details: | ||
541 | */ | ||
542 | extern int proc_reserve_memory(void *hprocessor, | ||
543 | u32 ul_size, void **pp_rsv_addr, | ||
544 | struct process_context *pr_ctxt); | ||
545 | |||
546 | /* | ||
547 | * ======== proc_un_map ======== | ||
548 | * Purpose: | ||
549 | * Removes a MPU buffer mapping from the DSP address space. | ||
550 | * Parameters: | ||
551 | * hprocessor : The processor handle. | ||
552 | * map_addr : Starting address of the mapped memory region. | ||
553 | * Returns: | ||
554 | * 0 : Success. | ||
555 | * -EFAULT : Invalid processor handle. | ||
556 | * -EPERM : General failure. | ||
557 | * -ENOENT : Cannot find a mapped region starting with this | ||
558 | * : address. | ||
559 | * Requires: | ||
560 | * map_addr is not NULL | ||
561 | * PROC Initialized. | ||
562 | * Ensures: | ||
563 | * Details: | ||
564 | */ | ||
565 | extern int proc_un_map(void *hprocessor, void *map_addr, | ||
566 | struct process_context *pr_ctxt); | ||
567 | |||
568 | /* | ||
569 | * ======== proc_un_reserve_memory ======== | ||
570 | * Purpose: | ||
571 | * Frees a previously reserved region of DSP address space. | ||
572 | * Parameters: | ||
573 | * hprocessor : The processor handle. | ||
574 | * prsv_addr : Ptr to DSP side reserved u8 address. | ||
575 | * Returns: | ||
576 | * 0 : Success. | ||
577 | * -EFAULT : Invalid processor handle. | ||
578 | * -EPERM : General failure. | ||
579 | * -ENOENT : Cannot find a reserved region starting with this | ||
580 | * : address. | ||
581 | * Requires: | ||
582 | * prsv_addr is not NULL | ||
583 | * PROC Initialized. | ||
584 | * Ensures: | ||
585 | * Details: | ||
586 | */ | ||
587 | extern int proc_un_reserve_memory(void *hprocessor, | ||
588 | void *prsv_addr, | ||
589 | struct process_context *pr_ctxt); | ||
590 | |||
591 | #endif /* PROC_ */ | ||
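
Taken together, the reserve/map/un_map/un_reserve entry points above bracket each other: a DSP virtual range is reserved, an MPU buffer is mapped into it, and both steps are undone on teardown. A minimal sketch of that call sequence follows; the helper name, the 0 passed for ul_map_attr (default attributes), the caller-supplied handle, context and buffer are all illustrative assumptions, not taken from the driver.

#include <dspbridge/proc.h>

/* Illustrative only: reserve DSP address space, map an MPU buffer into
 * it, and tear both down again. Error handling is abbreviated. */
static int example_map_buffer(void *hproc, struct process_context *ctxt,
                              void *buf, u32 size)
{
        void *rsv_addr = NULL;
        void *map_addr = NULL;
        int status;

        status = proc_reserve_memory(hproc, size, &rsv_addr, ctxt);
        if (status)
                return status;

        /* 0 selects default mapping attributes (assumption). */
        status = proc_map(hproc, buf, size, rsv_addr, &map_addr, 0, ctxt);
        if (status)
                goto out_unreserve;

        /* ... the DSP may now access the buffer at map_addr ... */

        proc_un_map(hproc, map_addr, ctxt);
out_unreserve:
        proc_un_reserve_memory(hproc, rsv_addr, ctxt);
        return status;
}
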
diff --git a/drivers/staging/tidspbridge/include/dspbridge/procpriv.h b/drivers/staging/tidspbridge/include/dspbridge/procpriv.h deleted file mode 100644 index 77d1f0ef95c3..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/procpriv.h +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | /* | ||
2 | * procpriv.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global PROC constants and types, shared by PROC, MGR and DSP API. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef PROCPRIV_ | ||
20 | #define PROCPRIV_ | ||
21 | |||
22 | /* RM PROC Object */ | ||
23 | struct proc_object; | ||
24 | |||
25 | #endif /* PROCPRIV_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/pwr.h b/drivers/staging/tidspbridge/include/dspbridge/pwr.h deleted file mode 100644 index 0fb066488da9..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/pwr.h +++ /dev/null | |||
@@ -1,113 +0,0 @@ | |||
1 | /* | ||
2 | * pwr.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #ifndef PWR_ | ||
18 | #define PWR_ | ||
19 | |||
20 | #include <dspbridge/dbdefs.h> | ||
21 | #include <dspbridge/mbx_sh.h> | ||
22 | |||
23 | /* valid sleep command codes that can be sent by GPP via mailbox: */ | ||
24 | #define PWR_DEEPSLEEP MBX_PM_DSPIDLE | ||
25 | #define PWR_EMERGENCYDEEPSLEEP MBX_PM_EMERGENCYSLEEP | ||
26 | #define PWR_WAKEUP MBX_PM_DSPWAKEUP | ||
27 | |||
28 | |||
29 | /* | ||
30 | * ======== pwr_sleep_dsp ======== | ||
31 | * Signal the DSP to go to sleep. | ||
32 | * | ||
33 | * Parameters: | ||
34 | * sleep_code: New sleep state for DSP. (Initially, valid codes | ||
35 | * are PWR_DEEPSLEEP or PWR_EMERGENCYDEEPSLEEP; both of | ||
36 | * these codes will simply put the DSP in deep sleep.) | ||
37 | * | ||
38 | * timeout: Maximum time (msec) that PWR should wait for | ||
39 | * confirmation that the DSP sleep state has been | ||
40 | * reached. If PWR should simply send the command to | ||
41 | * the DSP to go to sleep and then return (i.e., | ||
42 | * asynchronous sleep), the timeout should be | ||
43 | * specified as zero. | ||
44 | * | ||
45 | * Returns: | ||
46 | * 0: Success. | ||
47 | * 0: Success, but the DSP was already asleep. | ||
48 | * -EINVAL: The specified sleep_code is not supported. | ||
49 | * -ETIME: A timeout occurred while waiting for DSP sleep | ||
50 | * confirmation. | ||
51 | * -EPERM: General failure, unable to send sleep command to | ||
52 | * the DSP. | ||
53 | */ | ||
54 | extern int pwr_sleep_dsp(const u32 sleep_code, const u32 timeout); | ||
55 | |||
56 | /* | ||
57 | * ======== pwr_wake_dsp ======== | ||
58 | * Signal the DSP to wake from sleep. | ||
59 | * | ||
60 | * Parameters: | ||
61 | * timeout: Maximum time (msec) that PWR should wait for | ||
62 | * confirmation that the DSP is awake. If PWR should | ||
63 | * simply send a command to the DSP to wake and then | ||
64 | * return (i.e., asynchronous wake), timeout should | ||
65 | * be specified as zero. | ||
66 | * | ||
67 | * Returns: | ||
68 | * 0: Success. | ||
69 | * 0: Success, but the DSP was already awake. | ||
70 | * -ETIME: A timeout occurred while waiting for wake | ||
71 | * confirmation. | ||
72 | * -EPERM: General failure, unable to send wake command to | ||
73 | * the DSP. | ||
74 | */ | ||
75 | extern int pwr_wake_dsp(const u32 timeout); | ||
76 | |||
77 | /* | ||
78 | * ======== pwr_pm_pre_scale ======== | ||
79 | * Prescale notification to DSP. | ||
80 | * | ||
81 | * Parameters: | ||
82 | * voltage_domain: The voltage domain for which notification is sent | ||
83 | * level: The level of voltage domain | ||
84 | * | ||
85 | * Returns: | ||
86 | * 0: Success. | ||
87 | * 0: Success, but the DSP was already awake. | ||
88 | * -ETIME: A timeout occurred while waiting for wake | ||
89 | * confirmation. | ||
90 | * -EPERM: General failure, unable to send wake command to | ||
91 | * the DSP. | ||
92 | */ | ||
93 | extern int pwr_pm_pre_scale(u16 voltage_domain, u32 level); | ||
94 | |||
95 | /* | ||
96 | * ======== pwr_pm_post_scale ======== | ||
97 | * PostScale notification to DSP. | ||
98 | * | ||
99 | * Parameters: | ||
100 | * voltage_domain: The voltage domain for which notification is sent | ||
101 | * level: The level of voltage domain | ||
102 | * | ||
103 | * Returns: | ||
104 | * 0: Success. | ||
105 | * 0: Success, but the DSP was already awake. | ||
106 | * -ETIME: A timeout occurred while waiting for wake | ||
107 | * confirmation. | ||
108 | * -EPERM: General failure, unable to send wake command to | ||
109 | * the DSP. | ||
110 | */ | ||
111 | extern int pwr_pm_post_scale(u16 voltage_domain, u32 level); | ||
112 | |||
113 | #endif /* PWR_ */ | ||
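
A short usage sketch of the sleep/wake pair above; the 100 ms timeout is an arbitrary illustration, and the zero timeout on the wake call requests the asynchronous behaviour described in the comments. The function name is hypothetical.

#include <dspbridge/pwr.h>

/* Illustrative only: request deep sleep with confirmation, then wake
 * the DSP asynchronously. */
static int example_pwr_cycle(void)
{
        int err;

        /* Wait up to 100 ms for the DSP to confirm it is asleep. */
        err = pwr_sleep_dsp(PWR_DEEPSLEEP, 100);
        if (err)
                return err;

        /* Timeout of zero: send the wake command and return immediately. */
        return pwr_wake_dsp(0);
}
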
diff --git a/drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h b/drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h deleted file mode 100644 index 8c9c902a0432..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/resourcecleanup.h +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | /* | ||
2 | * resourcecleanup.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | #include <dspbridge/nodepriv.h> | ||
18 | #include <dspbridge/drv.h> | ||
19 | |||
20 | extern int drv_remove_all_dmm_res_elements(void *process_ctxt); | ||
21 | |||
22 | extern int drv_remove_all_node_res_elements(void *process_ctxt); | ||
23 | |||
24 | extern int drv_remove_all_resources(void *process_ctxt); | ||
25 | |||
26 | extern int drv_insert_node_res_element(void *hnode, void *node_resource, | ||
27 | void *process_ctxt); | ||
28 | |||
29 | extern void drv_proc_node_update_heap_status(void *node_resource, s32 status); | ||
30 | |||
31 | extern void drv_proc_node_update_status(void *node_resource, s32 status); | ||
32 | |||
33 | extern int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources); | ||
34 | |||
35 | extern int drv_proc_insert_strm_res_element(void *stream_obj, | ||
36 | void *strm_res, | ||
37 | void *process_ctxt); | ||
38 | |||
39 | extern int drv_remove_all_strm_res_elements(void *process_ctxt); | ||
40 | |||
41 | extern enum node_state node_get_state(void *hnode); | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/rmm.h b/drivers/staging/tidspbridge/include/dspbridge/rmm.h deleted file mode 100644 index f7a4dc8ecb4f..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/rmm.h +++ /dev/null | |||
@@ -1,156 +0,0 @@ | |||
1 | /* | ||
2 | * rmm.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * This memory manager provides general heap management and arbitrary | ||
7 | * alignment for any number of memory segments, and management of overlay | ||
8 | * memory. | ||
9 | * | ||
10 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
11 | * | ||
12 | * This package is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | * | ||
16 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
17 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
18 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
19 | */ | ||
20 | |||
21 | #ifndef RMM_ | ||
22 | #define RMM_ | ||
23 | |||
24 | /* | ||
25 | * ======== rmm_addr ======== | ||
26 | * DSP address + segid | ||
27 | */ | ||
28 | struct rmm_addr { | ||
29 | u32 addr; | ||
30 | s32 segid; | ||
31 | }; | ||
32 | |||
33 | /* | ||
34 | * ======== rmm_segment ======== | ||
35 | * Memory segment on the DSP available for remote allocations. | ||
36 | */ | ||
37 | struct rmm_segment { | ||
38 | u32 base; /* Base of the segment */ | ||
39 | u32 length; /* Size of the segment (target MAUs) */ | ||
40 | s32 space; /* Code or data */ | ||
41 | u32 number; /* Number of Allocated Blocks */ | ||
42 | }; | ||
43 | |||
44 | /* | ||
45 | * ======== RMM_Target ======== | ||
46 | */ | ||
47 | struct rmm_target_obj; | ||
48 | |||
49 | /* | ||
50 | * ======== rmm_alloc ======== | ||
51 | * | ||
52 | * rmm_alloc is used to remotely allocate or reserve memory on the DSP. | ||
53 | * | ||
54 | * Parameters: | ||
55 | * target - Target returned from rmm_create(). | ||
56 | * segid - Memory segment to allocate from. | ||
57 | * size - Size (target MAUS) to allocate. | ||
58 | * align - alignment. | ||
59 | * dsp_address - If reserve is FALSE, the location to store allocated | ||
60 | * address on output, otherwise, the DSP address to | ||
61 | * reserve. | ||
62 | * reserve - If TRUE, reserve the memory specified by dsp_address. | ||
63 | * Returns: | ||
64 | * 0: Success. | ||
65 | * -ENOMEM: Memory allocation on GPP failed. | ||
66 | * -ENXIO: Cannot "allocate" overlay memory because it's | ||
67 | * already in use. | ||
68 | * Requires: | ||
69 | * RMM initialized. | ||
70 | * Valid target. | ||
71 | * dsp_address != NULL. | ||
72 | * size > 0 | ||
73 | * reserve || target->num_segs > 0. | ||
74 | * Ensures: | ||
75 | */ | ||
76 | extern int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size, | ||
77 | u32 align, u32 *dsp_address, bool reserve); | ||
78 | |||
79 | /* | ||
80 | * ======== rmm_create ======== | ||
81 | * Create a target object with memory segments for remote allocation. If | ||
82 | * seg_tab == NULL or num_segs == 0, memory can only be reserved through | ||
83 | * rmm_alloc(). | ||
84 | * | ||
85 | * Parameters: | ||
86 | * target_obj: - Location to store target on output. | ||
87 | * seg_tab: - Table of memory segments. | ||
88 | * num_segs: - Number of memory segments. | ||
89 | * Returns: | ||
90 | * 0: Success. | ||
91 | * -ENOMEM: Memory allocation failed. | ||
92 | * Requires: | ||
93 | * RMM initialized. | ||
94 | * target_obj != NULL. | ||
95 | * num_segs == 0 || seg_tab != NULL. | ||
96 | * Ensures: | ||
97 | * Success: Valid *target_obj. | ||
98 | * Failure: *target_obj == NULL. | ||
99 | */ | ||
100 | extern int rmm_create(struct rmm_target_obj **target_obj, | ||
101 | struct rmm_segment seg_tab[], u32 num_segs); | ||
102 | |||
103 | /* | ||
104 | * ======== rmm_delete ======== | ||
105 | * Delete target allocated in rmm_create(). | ||
106 | * | ||
107 | * Parameters: | ||
108 | * target - Target returned from rmm_create(). | ||
109 | * Returns: | ||
110 | * Requires: | ||
111 | * RMM initialized. | ||
112 | * Valid target. | ||
113 | * Ensures: | ||
114 | */ | ||
115 | extern void rmm_delete(struct rmm_target_obj *target); | ||
116 | |||
117 | /* | ||
118 | * ======== rmm_free ======== | ||
119 | * Free or unreserve memory allocated through rmm_alloc(). | ||
120 | * | ||
121 | * Parameters: | ||
122 | * target: - Target returned from rmm_create(). | ||
123 | * segid: - Segment of memory to free. | ||
124 | * dsp_address: - Address to free or unreserve. | ||
125 | * size: - Size of memory to free or unreserve. | ||
126 | * reserved: - TRUE if memory was reserved only, otherwise FALSE. | ||
127 | * Returns: | ||
128 | * Requires: | ||
129 | * RMM initialized. | ||
130 | * Valid target. | ||
131 | * reserved || segid < target->num_segs. | ||
132 | * reserved || [dsp_address, dsp_address + size] is a valid memory range. | ||
133 | * Ensures: | ||
134 | */ | ||
135 | extern bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, | ||
136 | u32 size, bool reserved); | ||
137 | |||
138 | /* | ||
139 | * ======== rmm_stat ======== | ||
140 | * Obtain memory segment status | ||
141 | * | ||
142 | * Parameters: | ||
143 | * segid: Segment ID of the dynamic loading segment. | ||
144 | * mem_stat_buf: Pointer to allocated buffer into which memory stats are | ||
145 | * placed. | ||
146 | * Returns: | ||
147 | * TRUE: Success. | ||
148 | * FALSE: Failure. | ||
149 | * Requires: | ||
150 | * segid < target->num_segs | ||
151 | * Ensures: | ||
152 | */ | ||
153 | extern bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid, | ||
154 | struct dsp_memstat *mem_stat_buf); | ||
155 | |||
156 | #endif /* RMM_ */ | ||
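
A sketch of the create/alloc/free/delete lifecycle documented above. The segment base, length and space values are made-up placeholders, and the function name is hypothetical; only the call order and the reserve=false path come from the comments.

#include <dspbridge/rmm.h>

/* Illustrative only: one data segment, one allocation, then cleanup. */
static int example_rmm_lifecycle(void)
{
        struct rmm_target_obj *target;
        struct rmm_segment seg = {
                .base = 0x20000000,     /* placeholder DSP address */
                .length = 0x10000,      /* placeholder size in target MAUs */
                .space = 1,             /* data space (assumption) */
                .number = 0,
        };
        u32 dsp_addr;
        int status;

        status = rmm_create(&target, &seg, 1);
        if (status)
                return status;

        /* Allocate 256 MAUs from segment 0 with 32-MAU alignment. */
        status = rmm_alloc(target, 0, 256, 32, &dsp_addr, false);
        if (!status)
                rmm_free(target, 0, dsp_addr, 256, false);

        rmm_delete(target);
        return status;
}
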
diff --git a/drivers/staging/tidspbridge/include/dspbridge/rms_sh.h b/drivers/staging/tidspbridge/include/dspbridge/rms_sh.h deleted file mode 100644 index ba7f47845673..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/rms_sh.h +++ /dev/null | |||
@@ -1,86 +0,0 @@ | |||
1 | /* | ||
2 | * rms_sh.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge Resource Manager Server shared definitions (used on both | ||
7 | * GPP and DSP sides). | ||
8 | * | ||
9 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef RMS_SH_ | ||
21 | #define RMS_SH_ | ||
22 | |||
23 | #include <dspbridge/rmstypes.h> | ||
24 | |||
25 | /* Memory Types: */ | ||
26 | #define RMS_CODE 0 /* Program space */ | ||
27 | #define RMS_DATA 1 /* Data space */ | ||
28 | |||
29 | /* RM Server Command and Response Buffer Sizes: */ | ||
30 | #define RMS_COMMANDBUFSIZE 256 /* Size of command buffer */ | ||
31 | |||
32 | /* Pre-Defined Command/Response Codes: */ | ||
33 | #define RMS_EXIT 0x80000000 /* GPP->Node: shutdown */ | ||
34 | #define RMS_EXITACK 0x40000000 /* Node->GPP: ack shutdown */ | ||
35 | #define RMS_BUFDESC 0x20000000 /* Arg1 SM buf, Arg2 SM size */ | ||
36 | #define RMS_KILLTASK 0x10000000 /* GPP->Node: Kill Task */ | ||
37 | |||
38 | /* RM Server RPC Command Structure: */ | ||
39 | struct rms_command { | ||
40 | rms_word fxn; /* Server function address */ | ||
41 | rms_word arg1; /* First argument */ | ||
42 | rms_word arg2; /* Second argument */ | ||
43 | rms_word data; /* Function-specific data array */ | ||
44 | }; | ||
45 | |||
46 | /* | ||
47 | * The rms_strm_def structure defines the parameters for both input and output | ||
48 | * streams, and is passed to a node's create function. | ||
49 | */ | ||
50 | struct rms_strm_def { | ||
51 | rms_word bufsize; /* Buffer size (in DSP words) */ | ||
52 | rms_word nbufs; /* Max number of bufs in stream */ | ||
53 | rms_word segid; /* Segment to allocate buffers */ | ||
54 | rms_word align; /* Alignment for allocated buffers */ | ||
55 | rms_word timeout; /* Timeout (msec) for blocking calls */ | ||
56 | char name[1]; /* Device Name (terminated by '\0') */ | ||
57 | }; | ||
58 | |||
59 | /* Message node create args structure: */ | ||
60 | struct rms_msg_args { | ||
61 | rms_word max_msgs; /* Max # simultaneous msgs to node */ | ||
62 | rms_word segid; /* Mem segment for NODE_allocMsgBuf */ | ||
63 | rms_word notify_type; /* Type of message notification */ | ||
64 | rms_word arg_length; /* Length (in DSP chars) of arg data */ | ||
65 | rms_word arg_data; /* Arg data for node */ | ||
66 | }; | ||
67 | |||
68 | /* Partial task create args structure */ | ||
69 | struct rms_more_task_args { | ||
70 | rms_word priority; /* Task's runtime priority level */ | ||
71 | rms_word stack_size; /* Task's stack size */ | ||
72 | rms_word sysstack_size; /* Task's system stack size (55x) */ | ||
73 | rms_word stack_seg; /* Memory segment for task's stack */ | ||
74 | rms_word heap_addr; /* base address of the node memory heap in | ||
75 | * external memory (DSP virtual address) */ | ||
76 | rms_word heap_size; /* size in MAUs of the node memory heap in | ||
77 | * external memory */ | ||
78 | rms_word misc; /* Misc field. Not used for 'normal' | ||
79 | * task nodes; for xDAIS socket nodes | ||
80 | * specifies the IALG_Fxn pointer. | ||
81 | */ | ||
82 | /* # input STRM definition structures */ | ||
83 | rms_word num_input_streams; | ||
84 | }; | ||
85 | |||
86 | #endif /* RMS_SH_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/rmstypes.h b/drivers/staging/tidspbridge/include/dspbridge/rmstypes.h deleted file mode 100644 index 83c0f1d9619e..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/rmstypes.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | /* | ||
2 | * rmstypes.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge Resource Manager Server shared data type definitions. | ||
7 | * | ||
8 | * Copyright (C) 2008 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef RMSTYPES_ | ||
20 | #define RMSTYPES_ | ||
21 | #include <linux/types.h> | ||
22 | typedef u32 rms_word; | ||
23 | |||
24 | #endif /* RMSTYPES_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/strm.h b/drivers/staging/tidspbridge/include/dspbridge/strm.h deleted file mode 100644 index 97aee4c63d24..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/strm.h +++ /dev/null | |||
@@ -1,306 +0,0 @@ | |||
1 | /* | ||
2 | * strm.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSPBridge Stream Manager. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef STRM_ | ||
20 | #define STRM_ | ||
21 | |||
22 | #include <dspbridge/dev.h> | ||
23 | |||
24 | #include <dspbridge/strmdefs.h> | ||
25 | #include <dspbridge/proc.h> | ||
26 | |||
27 | /* | ||
28 | * ======== strm_allocate_buffer ======== | ||
29 | * Purpose: | ||
30 | * Allocate data buffer(s) for use with a stream. | ||
31 | * Parameter: | ||
32 | * strmres: Stream resource info handle returned from strm_open(). | ||
33 | * usize: Size (GPP bytes) of the buffer(s). | ||
34 | * num_bufs: Number of buffers to allocate. | ||
35 | * ap_buffer: Array to hold buffer addresses. | ||
36 | * Returns: | ||
37 | * 0: Success. | ||
38 | * -EFAULT: Invalid stream_obj. | ||
39 | * -ENOMEM: Insufficient memory. | ||
40 | * -EPERM: Failure occurred, unable to allocate buffers. | ||
41 | * -EINVAL: usize must be > 0 bytes. | ||
42 | * Requires: | ||
43 | * ap_buffer != NULL. | ||
44 | * Ensures: | ||
45 | */ | ||
46 | extern int strm_allocate_buffer(struct strm_res_object *strmres, | ||
47 | u32 usize, | ||
48 | u8 **ap_buffer, | ||
49 | u32 num_bufs, | ||
50 | struct process_context *pr_ctxt); | ||
51 | |||
52 | /* | ||
53 | * ======== strm_close ======== | ||
54 | * Purpose: | ||
55 | * Close a stream opened with strm_open(). | ||
56 | * Parameter: | ||
57 | * strmres: Stream resource info handle returned from strm_open(). | ||
58 | * Returns: | ||
59 | * 0: Success. | ||
60 | * -EFAULT: Invalid stream_obj. | ||
61 | * -EPIPE: Some data buffers issued to the stream have not | ||
62 | * been reclaimed. | ||
63 | * -EPERM: Failure to close stream. | ||
64 | * Requires: | ||
65 | * Ensures: | ||
66 | */ | ||
67 | extern int strm_close(struct strm_res_object *strmres, | ||
68 | struct process_context *pr_ctxt); | ||
69 | |||
70 | /* | ||
71 | * ======== strm_create ======== | ||
72 | * Purpose: | ||
73 | * Create a STRM manager object. This object holds information about the | ||
74 | * device needed to open streams. | ||
75 | * Parameters: | ||
76 | * strm_man: Location to store handle to STRM manager object on | ||
77 | * output. | ||
78 | * dev_obj: Device for this processor. | ||
79 | * Returns: | ||
80 | * 0: Success; | ||
81 | * -ENOMEM: Insufficient memory for requested resources. | ||
82 | * -EPERM: General failure. | ||
83 | * Requires: | ||
84 | * strm_man != NULL. | ||
85 | * dev_obj != NULL. | ||
86 | * Ensures: | ||
87 | * 0: Valid *strm_man. | ||
88 | * error: *strm_man == NULL. | ||
89 | */ | ||
90 | extern int strm_create(struct strm_mgr **strm_man, | ||
91 | struct dev_object *dev_obj); | ||
92 | |||
93 | /* | ||
94 | * ======== strm_delete ======== | ||
95 | * Purpose: | ||
96 | * Delete the STRM Object. | ||
97 | * Parameters: | ||
98 | * strm_mgr_obj: Handle to STRM manager object from strm_create. | ||
99 | * Returns: | ||
100 | * Requires: | ||
101 | * Valid strm_mgr_obj. | ||
102 | * Ensures: | ||
103 | * strm_mgr_obj is not valid. | ||
104 | */ | ||
105 | extern void strm_delete(struct strm_mgr *strm_mgr_obj); | ||
106 | |||
107 | /* | ||
108 | * ======== strm_free_buffer ======== | ||
109 | * Purpose: | ||
110 | * Free buffer(s) allocated with strm_allocate_buffer. | ||
111 | * Parameter: | ||
112 | * strmres: Stream resource info handle returned from strm_open(). | ||
113 | * ap_buffer: Array containing buffer addresses. | ||
114 | * num_bufs: Number of buffers to be freed. | ||
115 | * Returns: | ||
116 | * 0: Success. | ||
117 | * -EFAULT: Invalid stream handle. | ||
118 | * -EPERM: Failure occurred, unable to free buffers. | ||
119 | * Requires: | ||
120 | * ap_buffer != NULL. | ||
121 | * Ensures: | ||
122 | */ | ||
123 | extern int strm_free_buffer(struct strm_res_object *strmres, | ||
124 | u8 **ap_buffer, u32 num_bufs, | ||
125 | struct process_context *pr_ctxt); | ||
126 | |||
127 | /* | ||
128 | * ======== strm_get_info ======== | ||
129 | * Purpose: | ||
130 | * Get information about a stream. User's dsp_streaminfo is contained | ||
131 | * in stream_info struct. stream_info also contains Bridge private info. | ||
132 | * Parameters: | ||
133 | * stream_obj: Stream handle returned from strm_open(). | ||
134 | * stream_info: Location to store stream info on output. | ||
135 | * stream_info_size: Size of user's dsp_streaminfo structure. | ||
136 | * Returns: | ||
137 | * 0: Success. | ||
138 | * -EFAULT: Invalid stream_obj. | ||
139 | * -EINVAL: stream_info_size < sizeof(dsp_streaminfo). | ||
140 | * -EPERM: Unable to get stream info. | ||
141 | * Requires: | ||
142 | * stream_info != NULL. | ||
143 | * Ensures: | ||
144 | */ | ||
145 | extern int strm_get_info(struct strm_object *stream_obj, | ||
146 | struct stream_info *stream_info, | ||
147 | u32 stream_info_size); | ||
148 | |||
149 | /* | ||
150 | * ======== strm_idle ======== | ||
151 | * Purpose: | ||
152 | * Idle a stream and optionally flush output data buffers. | ||
153 | * If this is an output stream and flush_data is TRUE, all data currently | ||
154 | * enqueued will be discarded. | ||
155 | * If this is an output stream and flush_data is FALSE, this function | ||
156 | * will block until all currently buffered data is output, or the timeout | ||
157 | * specified has been reached. | ||
158 | * After a successful call to strm_idle(), all buffers can immediately | ||
159 | * be reclaimed. | ||
160 | * Parameters: | ||
161 | * stream_obj: Stream handle returned from strm_open(). | ||
162 | * flush_data: If TRUE, discard output buffers. | ||
163 | * Returns: | ||
164 | * 0: Success. | ||
165 | * -EFAULT: Invalid stream_obj. | ||
166 | * -ETIME: A timeout occurred before the stream could be idled. | ||
167 | * -EPERM: Unable to idle stream. | ||
168 | * Requires: | ||
169 | * Ensures: | ||
170 | */ | ||
171 | extern int strm_idle(struct strm_object *stream_obj, bool flush_data); | ||
172 | |||
173 | /* | ||
174 | * ======== strm_issue ======== | ||
175 | * Purpose: | ||
176 | * Send a buffer of data to a stream. | ||
177 | * Parameters: | ||
178 | * stream_obj: Stream handle returned from strm_open(). | ||
179 | * pbuf: Pointer to buffer of data to be sent to the stream. | ||
180 | * ul_bytes: Number of bytes of data in the buffer. | ||
181 | * ul_buf_size: Actual buffer size in bytes. | ||
182 | * dw_arg: A user argument that travels with the buffer. | ||
183 | * Returns: | ||
184 | * 0: Success. | ||
185 | * -EFAULT: Invalid stream_obj. | ||
186 | * -ENOSR: The stream is full. | ||
187 | * -EPERM: Failure occurred, unable to issue buffer. | ||
188 | * Requires: | ||
189 | * pbuf != NULL. | ||
190 | * Ensures: | ||
191 | */ | ||
192 | extern int strm_issue(struct strm_object *stream_obj, u8 * pbuf, | ||
193 | u32 ul_bytes, u32 ul_buf_size, u32 dw_arg); | ||
194 | |||
195 | /* | ||
196 | * ======== strm_open ======== | ||
197 | * Purpose: | ||
198 | * Open a stream for sending/receiving data buffers to/from a task or | ||
199 | * DAIS socket node on the DSP. | ||
200 | * Parameters: | ||
201 | * hnode: Node handle returned from node_allocate(). | ||
202 | * dir: DSP_TONODE or DSP_FROMNODE. | ||
203 | * index: Stream index. | ||
204 | * pattr: Pointer to structure containing attributes to be | ||
205 | * applied to stream. Cannot be NULL. | ||
206 | * strmres: Location to store stream resource info handle on output. | ||
207 | * Returns: | ||
208 | * 0: Success. | ||
209 | * -EFAULT: Invalid hnode. | ||
210 | * -EPERM: Invalid direction. | ||
211 | * hnode is not a task or DAIS socket node. | ||
212 | * Unable to open stream. | ||
213 | * -EINVAL: Invalid index. | ||
214 | * Requires: | ||
215 | * strmres != NULL. | ||
216 | * pattr != NULL. | ||
217 | * Ensures: | ||
218 | * 0: *strmres is valid. | ||
219 | * error: *strmres == NULL. | ||
220 | */ | ||
221 | extern int strm_open(struct node_object *hnode, u32 dir, | ||
222 | u32 index, struct strm_attr *pattr, | ||
223 | struct strm_res_object **strmres, | ||
224 | struct process_context *pr_ctxt); | ||
225 | |||
226 | /* | ||
227 | * ======== strm_reclaim ======== | ||
228 | * Purpose: | ||
229 | * Request a buffer back from a stream. | ||
230 | * Parameters: | ||
231 | * stream_obj: Stream handle returned from strm_open(). | ||
232 | * buf_ptr: Location to store pointer to reclaimed buffer. | ||
233 | * nbytes: Location where number of bytes of data in the | ||
234 | * buffer will be written. | ||
235 | * buff_size: Location where actual buffer size will be written. | ||
236 | * pdw_arg: Location where user argument that travels with | ||
237 | * the buffer will be written. | ||
238 | * Returns: | ||
239 | * 0: Success. | ||
240 | * -EFAULT: Invalid stream_obj. | ||
241 | * -ETIME: A timeout occurred before a buffer could be | ||
242 | * retrieved. | ||
243 | * -EPERM: Failure occurred, unable to reclaim buffer. | ||
244 | * Requires: | ||
245 | * buf_ptr != NULL. | ||
246 | * nbytes != NULL. | ||
247 | * pdw_arg != NULL. | ||
248 | * Ensures: | ||
249 | */ | ||
250 | extern int strm_reclaim(struct strm_object *stream_obj, | ||
251 | u8 **buf_ptr, u32 * nbytes, | ||
252 | u32 *buff_size, u32 *pdw_arg); | ||
253 | |||
254 | /* | ||
255 | * ======== strm_register_notify ======== | ||
256 | * Purpose: | ||
257 | * Register to be notified on specific events for this stream. | ||
258 | * Parameters: | ||
259 | * stream_obj: Stream handle returned by strm_open(). | ||
260 | * event_mask: Mask of types of events to be notified about. | ||
261 | * notify_type: Type of notification to be sent. | ||
262 | * hnotification: Handle to be used for notification. | ||
263 | * Returns: | ||
264 | * 0: Success. | ||
265 | * -EFAULT: Invalid stream_obj. | ||
266 | * -ENOMEM: Insufficient memory on GPP. | ||
267 | * -EINVAL: event_mask is invalid. | ||
268 | * -ENOSYS: Notification type specified by notify_type is not | ||
269 | * supported. | ||
270 | * Requires: | ||
271 | * hnotification != NULL. | ||
272 | * Ensures: | ||
273 | */ | ||
274 | extern int strm_register_notify(struct strm_object *stream_obj, | ||
275 | u32 event_mask, u32 notify_type, | ||
276 | struct dsp_notification | ||
277 | *hnotification); | ||
278 | |||
279 | /* | ||
280 | * ======== strm_select ======== | ||
281 | * Purpose: | ||
282 | * Select a ready stream. | ||
283 | * Parameters: | ||
284 | * strm_tab: Array of stream handles returned from strm_open(). | ||
285 | * strms: Number of stream handles in array. | ||
286 | * pmask: Location to store mask of ready streams on output. | ||
287 | * utimeout: Timeout value (milliseconds). | ||
288 | * Returns: | ||
289 | * 0: Success. | ||
290 | * -EDOM: strms out of range. | ||
291 | |||
292 | * -EFAULT: Invalid stream handle in array. | ||
293 | * -ETIME: A timeout occurred before a stream became ready. | ||
294 | * -EPERM: Failure occurred, unable to select a stream. | ||
295 | * Requires: | ||
296 | * strm_tab != NULL. | ||
297 | * strms > 0. | ||
298 | * pmask != NULL. | ||
299 | * Ensures: | ||
300 | * 0: *pmask != 0 || utimeout == 0. | ||
301 | * Error: *pmask == 0. | ||
302 | */ | ||
303 | extern int strm_select(struct strm_object **strm_tab, | ||
304 | u32 strms, u32 *pmask, u32 utimeout); | ||
305 | |||
306 | #endif /* STRM_ */ | ||
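
The issue/reclaim pair above implements the usual streaming handshake: a buffer is handed to the DSP with strm_issue() and comes back through strm_reclaim(). A minimal round-trip sketch, assuming a stream and buffer already set up via strm_open() and strm_allocate_buffer(); the function name and the dw_arg of 0 are illustrative.

#include <dspbridge/strm.h>

/* Illustrative only: send one buffer downstream and wait for it to be
 * returned. dw_arg of 0 is an arbitrary user tag. */
static int example_strm_roundtrip(struct strm_object *stream,
                                  u8 *buf, u32 buf_size)
{
        u8 *reclaimed;
        u32 nbytes, rsize, arg;
        int status;

        status = strm_issue(stream, buf, buf_size, buf_size, 0);
        if (status)
                return status;

        return strm_reclaim(stream, &reclaimed, &nbytes, &rsize, &arg);
}
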
diff --git a/drivers/staging/tidspbridge/include/dspbridge/strmdefs.h b/drivers/staging/tidspbridge/include/dspbridge/strmdefs.h deleted file mode 100644 index 4f90e6ba69ef..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/strmdefs.h +++ /dev/null | |||
@@ -1,44 +0,0 @@ | |||
1 | /* | ||
2 | * strmdefs.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Global STRM constants and types. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef STRMDEFS_ | ||
20 | #define STRMDEFS_ | ||
21 | |||
22 | struct strm_mgr; | ||
23 | |||
24 | struct strm_object; | ||
25 | |||
26 | struct strm_attr { | ||
27 | void *user_event; | ||
28 | char *str_event_name; | ||
29 | void *virt_base; /* Process virtual base address of | ||
30 | * mapped SM */ | ||
31 | u32 virt_size; /* Size of virtual space in bytes */ | ||
32 | struct dsp_streamattrin *stream_attr_in; | ||
33 | }; | ||
34 | |||
35 | struct stream_info { | ||
36 | enum dsp_strmmode strm_mode; /* transport mode of | ||
37 | * stream(DMA, ZEROCOPY..) */ | ||
38 | u32 segment_id; /* Segment strm allocs from. 0 is local mem */ | ||
39 | void *virt_base; /* Stream's process virtual base address */ | ||
40 | struct dsp_streaminfo *user_strm; /* User's stream information | ||
41 | * returned */ | ||
42 | }; | ||
43 | |||
44 | #endif /* STRMDEFS_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/sync.h b/drivers/staging/tidspbridge/include/dspbridge/sync.h deleted file mode 100644 index fc19b9707087..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/sync.h +++ /dev/null | |||
@@ -1,119 +0,0 @@ | |||
1 | /* | ||
2 | * sync.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Provide synchronization services. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef _SYNC_H | ||
20 | #define _SYNC_H | ||
21 | |||
22 | #include <dspbridge/dbdefs.h> | ||
23 | #include <dspbridge/host_os.h> | ||
24 | |||
25 | |||
26 | /* Special timeout value indicating an infinite wait: */ | ||
27 | #define SYNC_INFINITE 0xffffffff | ||
28 | |||
29 | /** | ||
30 | * struct sync_object - the basic sync_object structure | ||
31 | * @comp: completion used to signal events | ||
32 | * @multi_comp: completions used to signal multiple events. | ||
33 | * | ||
34 | */ | ||
35 | struct sync_object{ | ||
36 | struct completion comp; | ||
37 | struct completion *multi_comp; | ||
38 | }; | ||
39 | |||
40 | /** | ||
41 | * sync_init_event() - set initial state for a sync_event element | ||
42 | * @event: event to be initialized. | ||
43 | * | ||
44 | * Set the initial state for a sync_event element. | ||
45 | */ | ||
46 | |||
47 | static inline void sync_init_event(struct sync_object *event) | ||
48 | { | ||
49 | init_completion(&event->comp); | ||
50 | event->multi_comp = NULL; | ||
51 | } | ||
52 | |||
53 | /** | ||
54 | * sync_reset_event() - reset a sync_event element | ||
55 | * @event: event to be reset. | ||
56 | * | ||
57 | * This function resets @event to its initial state. | ||
58 | */ | ||
59 | |||
60 | static inline void sync_reset_event(struct sync_object *event) | ||
61 | { | ||
62 | reinit_completion(&event->comp); | ||
63 | event->multi_comp = NULL; | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * sync_set_event() - set or signal the specified event | ||
68 | * @event: event to be set. | ||
69 | * | ||
70 | * Set @event; if a thread is waiting for the event it will be | ||
71 | * woken up. This function wakes only one thread. | ||
72 | */ | ||
73 | |||
74 | void sync_set_event(struct sync_object *event); | ||
75 | |||
76 | /** | ||
77 | * sync_wait_on_event() - wait for an event to be set. | ||
78 | * @event: event to wait for. | ||
79 | * @timeout: timeout (msec) for waiting on the event. | ||
80 | * | ||
81 | * This function waits until @event is set or until the timeout expires. | ||
82 | * On success the function returns 0, | ||
83 | * on timeout it returns -ETIME, | ||
84 | * and if interrupted by a signal it returns -ERESTARTSYS. | ||
85 | */ | ||
86 | |||
87 | static inline int sync_wait_on_event(struct sync_object *event, | ||
88 | unsigned timeout) | ||
89 | { | ||
90 | int res; | ||
91 | |||
92 | res = wait_for_completion_interruptible_timeout(&event->comp, | ||
93 | msecs_to_jiffies(timeout)); | ||
94 | if (!res) | ||
95 | res = -ETIME; | ||
96 | else if (res > 0) | ||
97 | res = 0; | ||
98 | |||
99 | return res; | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * sync_wait_on_multiple_events() - waits for multiple events to be set. | ||
104 | * @events: array of events to wait for. | ||
105 | * @count: number of elements in the array. | ||
106 | * @timeout: timeout (msec) for waiting on the events. | ||
107 | * @index: location to store the index of the event that was set. | ||
108 | * | ||
109 | * This function waits until any element of the array is set or until | ||
110 | * the timeout expires. On success the function returns 0 and | ||
111 | * @index holds the index of the array element that was set; in case | ||
112 | * of timeout the function returns -ETIME. | ||
113 | */ | ||
114 | |||
115 | int sync_wait_on_multiple_events(struct sync_object **events, | ||
116 | unsigned count, unsigned timeout, | ||
117 | unsigned *index); | ||
118 | |||
119 | #endif /* _SYNC_H */ | ||
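
A small sketch of how the completion-based sync_object above is meant to be used: one side initializes and waits, the other signals. The 500 ms timeout and the function names are illustrative assumptions.

#include <dspbridge/sync.h>

static struct sync_object example_event;

/* Called once before either side touches the event. */
static void example_setup(void)
{
        sync_init_event(&example_event);
}

/* Producer side: wakes one thread blocked in sync_wait_on_event(). */
static void example_signal(void)
{
        sync_set_event(&example_event);
}

/* Consumer side: 0 on wake-up, -ETIME on timeout, -ERESTARTSYS if
 * interrupted by a signal. */
static int example_wait(void)
{
        return sync_wait_on_event(&example_event, 500);
}
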
diff --git a/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h b/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h deleted file mode 100644 index b4951a1381e7..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/uuidutil.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | /* | ||
2 | * uuidutil.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * This file contains the specification of UUID helper functions. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #ifndef UUIDUTIL_ | ||
20 | #define UUIDUTIL_ | ||
21 | |||
22 | #define MAXUUIDLEN 37 | ||
23 | |||
24 | #endif /* UUIDUTIL_ */ | ||
diff --git a/drivers/staging/tidspbridge/include/dspbridge/wdt.h b/drivers/staging/tidspbridge/include/dspbridge/wdt.h deleted file mode 100644 index 36193db2e9a3..000000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/wdt.h +++ /dev/null | |||
@@ -1,79 +0,0 @@ | |||
1 | /* | ||
2 | * wdt.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Watchdog timer (WDT3) support for the DSP. | ||
7 | * | ||
8 | * Copyright (C) 2010 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | #ifndef __DSP_WDT3_H_ | ||
19 | #define __DSP_WDT3_H_ | ||
20 | |||
21 | /* WDT defines */ | ||
22 | #define OMAP3_WDT3_ISR_OFFSET 0x0018 | ||
23 | |||
24 | |||
25 | /** | ||
26 | * struct dsp_wdt_setting - the basic dsp_wdt_setting structure | ||
27 | * @reg_base: pointer to the base of the wdt registers | ||
28 | * @sm_wdt: pointer to flags in shared memory | ||
29 | * @wdt3_tasklet: tasklet to manage wdt events | ||
30 | * @fclk: handle to wdt3 functional clock | ||
31 | * @iclk: handle to wdt3 interface clock | ||
32 | * | ||
33 | * This struct is used by the functions that manage wdt3. | ||
34 | */ | ||
35 | |||
36 | struct dsp_wdt_setting { | ||
37 | void __iomem *reg_base; | ||
38 | struct shm *sm_wdt; | ||
39 | struct tasklet_struct wdt3_tasklet; | ||
40 | struct clk *fclk; | ||
41 | struct clk *iclk; | ||
42 | }; | ||
43 | |||
44 | /** | ||
45 | * dsp_wdt_init() - initialize wdt3 module. | ||
46 | * | ||
47 | * This function initializes the wdt3 module, so that | ||
48 | * other wdt3 functions can be used. | ||
49 | */ | ||
50 | int dsp_wdt_init(void); | ||
51 | |||
52 | /** | ||
53 | * dsp_wdt_exit() - shut down the wdt3 module. | ||
54 | * | ||
55 | * This function frees all resources allocated for wdt3 module. | ||
56 | */ | ||
57 | void dsp_wdt_exit(void); | ||
58 | |||
59 | /** | ||
60 | * dsp_wdt_enable() - enable/disable wdt3 | ||
61 | * @enable: bool value to enable/disable wdt3 | ||
62 | * | ||
63 | * This function enables or disables wdt3 based on the @enable value. | ||
64 | * | ||
65 | */ | ||
66 | void dsp_wdt_enable(bool enable); | ||
67 | |||
68 | /** | ||
69 | * dsp_wdt_sm_set() - store pointer to the shared memory | ||
70 | * @data: pointer to dspbridge share memory | ||
71 | * | ||
72 | * This function is used to pass a valid pointer to shared memory, | ||
73 | * so that the flags can be set where the DSP side can read them. | ||
74 | * | ||
75 | */ | ||
76 | void dsp_wdt_sm_set(void *data); | ||
77 | |||
78 | #endif | ||
79 | |||
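
The expected bring-up and tear-down order for the WDT3 helpers above, as a hedged sketch; 'shm' stands for the driver's shared-memory area and the function name is an assumption, not something defined in this header.

#include <dspbridge/wdt.h>

/* Illustrative only: initialize WDT3, expose the shared-memory flags to
 * the DSP, run with the watchdog armed, then shut everything down. */
static int example_wdt_session(void *shm)
{
        int err = dsp_wdt_init();

        if (err)
                return err;

        dsp_wdt_sm_set(shm);            /* DSP side can now see the flags */
        dsp_wdt_enable(true);

        /* ... DSP image runs here ... */

        dsp_wdt_enable(false);
        dsp_wdt_exit();
        return 0;
}
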
diff --git a/drivers/staging/tidspbridge/pmgr/chnl.c b/drivers/staging/tidspbridge/pmgr/chnl.c deleted file mode 100644 index e03c32679aa5..000000000000 --- a/drivers/staging/tidspbridge/pmgr/chnl.c +++ /dev/null | |||
@@ -1,116 +0,0 @@ | |||
1 | /* | ||
2 | * chnl.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP API channel interface: multiplexes data streams through the single | ||
7 | * physical link managed by a Bridge driver. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | /* ----------------------------------- Host OS */ | ||
22 | #include <dspbridge/host_os.h> | ||
23 | |||
24 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
25 | #include <dspbridge/dbdefs.h> | ||
26 | |||
27 | /* ----------------------------------- OS Adaptation Layer */ | ||
28 | #include <dspbridge/sync.h> | ||
29 | |||
30 | /* ----------------------------------- Platform Manager */ | ||
31 | #include <dspbridge/proc.h> | ||
32 | #include <dspbridge/dev.h> | ||
33 | |||
34 | /* ----------------------------------- Others */ | ||
35 | #include <dspbridge/chnlpriv.h> | ||
36 | #include <chnlobj.h> | ||
37 | |||
38 | /* ----------------------------------- This */ | ||
39 | #include <dspbridge/chnl.h> | ||
40 | |||
41 | /* | ||
42 | * ======== chnl_create ======== | ||
43 | * Purpose: | ||
44 | * Create a channel manager object, responsible for opening new channels | ||
45 | * and closing old ones for a given 'Bridge board. | ||
46 | */ | ||
47 | int chnl_create(struct chnl_mgr **channel_mgr, | ||
48 | struct dev_object *hdev_obj, | ||
49 | const struct chnl_mgrattrs *mgr_attrts) | ||
50 | { | ||
51 | int status; | ||
52 | struct chnl_mgr *hchnl_mgr; | ||
53 | struct chnl_mgr_ *chnl_mgr_obj = NULL; | ||
54 | |||
55 | *channel_mgr = NULL; | ||
56 | |||
57 | /* Validate args: */ | ||
58 | if ((0 < mgr_attrts->max_channels) && | ||
59 | (mgr_attrts->max_channels <= CHNL_MAXCHANNELS)) | ||
60 | status = 0; | ||
61 | else if (mgr_attrts->max_channels == 0) | ||
62 | status = -EINVAL; | ||
63 | else | ||
64 | status = -ECHRNG; | ||
65 | |||
66 | if (mgr_attrts->word_size == 0) | ||
67 | status = -EINVAL; | ||
68 | |||
69 | if (!status) { | ||
70 | status = dev_get_chnl_mgr(hdev_obj, &hchnl_mgr); | ||
71 | if (!status && hchnl_mgr != NULL) | ||
72 | status = -EEXIST; | ||
73 | |||
74 | } | ||
75 | |||
76 | if (!status) { | ||
77 | struct bridge_drv_interface *intf_fxns; | ||
78 | |||
79 | dev_get_intf_fxns(hdev_obj, &intf_fxns); | ||
80 | /* Let Bridge channel module finish the create: */ | ||
81 | status = (*intf_fxns->chnl_create) (&hchnl_mgr, hdev_obj, | ||
82 | mgr_attrts); | ||
83 | if (!status) { | ||
84 | /* Fill in DSP API channel module's fields of the | ||
85 | * chnl_mgr structure */ | ||
86 | chnl_mgr_obj = (struct chnl_mgr_ *)hchnl_mgr; | ||
87 | chnl_mgr_obj->intf_fxns = intf_fxns; | ||
88 | /* Finally, return the new channel manager handle: */ | ||
89 | *channel_mgr = hchnl_mgr; | ||
90 | } | ||
91 | } | ||
92 | |||
93 | return status; | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * ======== chnl_destroy ======== | ||
98 | * Purpose: | ||
99 | * Close all open channels, and destroy the channel manager. | ||
100 | */ | ||
101 | int chnl_destroy(struct chnl_mgr *hchnl_mgr) | ||
102 | { | ||
103 | struct chnl_mgr_ *chnl_mgr_obj = (struct chnl_mgr_ *)hchnl_mgr; | ||
104 | struct bridge_drv_interface *intf_fxns; | ||
105 | int status; | ||
106 | |||
107 | if (chnl_mgr_obj) { | ||
108 | intf_fxns = chnl_mgr_obj->intf_fxns; | ||
109 | /* Let Bridge channel module destroy the chnl_mgr: */ | ||
110 | status = (*intf_fxns->chnl_destroy) (hchnl_mgr); | ||
111 | } else { | ||
112 | status = -EFAULT; | ||
113 | } | ||
114 | |||
115 | return status; | ||
116 | } | ||
diff --git a/drivers/staging/tidspbridge/pmgr/chnlobj.h b/drivers/staging/tidspbridge/pmgr/chnlobj.h deleted file mode 100644 index 6795e0aa8fd6..000000000000 --- a/drivers/staging/tidspbridge/pmgr/chnlobj.h +++ /dev/null | |||
@@ -1,46 +0,0 @@ | |||
1 | /* | ||
2 | * chnlobj.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Structure subcomponents of channel class library channel objects which | ||
7 | * are exposed to DSP API from Bridge driver. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef CHNLOBJ_ | ||
21 | #define CHNLOBJ_ | ||
22 | |||
23 | #include <dspbridge/chnldefs.h> | ||
24 | #include <dspbridge/dspdefs.h> | ||
25 | |||
26 | /* | ||
27 | * This struct is the first field in a chnl_mgr struct. Other, implementation | ||
28 | * specific fields follow this structure in memory. | ||
29 | */ | ||
30 | struct chnl_mgr_ { | ||
31 | /* These must be the first fields in a chnl_mgr struct: */ | ||
32 | |||
33 | /* Function interface to Bridge driver. */ | ||
34 | struct bridge_drv_interface *intf_fxns; | ||
35 | }; | ||
36 | |||
37 | /* | ||
38 | * This struct is the first field in a chnl_object struct. Other, | ||
39 | * implementation specific fields follow this structure in memory. | ||
40 | */ | ||
41 | struct chnl_object_ { | ||
42 | /* These must be the first fields in a chnl_object struct: */ | ||
43 | struct chnl_mgr_ *chnl_mgr_obj; /* Pointer back to channel manager. */ | ||
44 | }; | ||
45 | |||
46 | #endif /* CHNLOBJ_ */ | ||
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c deleted file mode 100644 index f961e0ec9da8..000000000000 --- a/drivers/staging/tidspbridge/pmgr/cmm.c +++ /dev/null | |||
@@ -1,915 +0,0 @@ | |||
1 | /* | ||
2 | * cmm.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * The Communication(Shared) Memory Management(CMM) module provides | ||
7 | * shared memory management services for DSP/BIOS Bridge data streaming | ||
8 | * and messaging. | ||
9 | * | ||
10 | * Multiple shared memory segments can be registered with CMM. | ||
11 | * Each registered SM segment is represented by a SM "allocator" that | ||
12 | * describes a block of physically contiguous shared memory used for | ||
13 | * future allocations by CMM. | ||
14 | * | ||
15 | * Memory is coalesced back to the appropriate heap when a buffer is | ||
16 | * freed. | ||
17 | * | ||
18 | * Notes: | ||
19 | * Va: Virtual address. | ||
20 | * Pa: Physical or kernel system address. | ||
21 | * | ||
22 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
23 | * | ||
24 | * This package is free software; you can redistribute it and/or modify | ||
25 | * it under the terms of the GNU General Public License version 2 as | ||
26 | * published by the Free Software Foundation. | ||
27 | * | ||
28 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
29 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
30 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
31 | */ | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/list.h> | ||
34 | |||
35 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
36 | #include <dspbridge/dbdefs.h> | ||
37 | |||
38 | /* ----------------------------------- OS Adaptation Layer */ | ||
39 | #include <dspbridge/sync.h> | ||
40 | |||
41 | /* ----------------------------------- Platform Manager */ | ||
42 | #include <dspbridge/dev.h> | ||
43 | #include <dspbridge/proc.h> | ||
44 | |||
45 | /* ----------------------------------- This */ | ||
46 | #include <dspbridge/cmm.h> | ||
47 | |||
48 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | ||
49 | #define NEXT_PA(pnode) (pnode->pa + pnode->size) | ||
50 | |||
51 | /* Other bus/platform translations */ | ||
52 | #define DSPPA2GPPPA(base, x, y) ((x)+(y)) | ||
53 | #define GPPPA2DSPPA(base, x, y) ((x)-(y)) | ||
54 | |||
55 | /* | ||
56 | * Allocators define a block of contiguous memory used for future allocations. | ||
57 | * | ||
58 | * sma - shared memory allocator. | ||
59 | * vma - virtual memory allocator.(not used). | ||
60 | */ | ||
61 | struct cmm_allocator { /* sma */ | ||
62 | unsigned int shm_base; /* Start of physical SM block */ | ||
63 | u32 sm_size; /* Size of SM block in bytes */ | ||
64 | unsigned int vm_base; /* Start of VM block. (Dev driver | ||
65 | * context for 'sma') */ | ||
66 | u32 dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this | ||
67 | * SM space */ | ||
68 | s8 c_factor; /* DSPPa to GPPPa Conversion Factor */ | ||
69 | unsigned int dsp_base; /* DSP virt base byte address */ | ||
70 | u32 dsp_size; /* DSP seg size in bytes */ | ||
71 | struct cmm_object *cmm_mgr; /* back ref to parent mgr */ | ||
72 | /* node list of available memory */ | ||
73 | struct list_head free_list; | ||
74 | /* node list of memory in use */ | ||
75 | struct list_head in_use_list; | ||
76 | }; | ||
77 | |||
78 | struct cmm_xlator { /* Pa<->Va translator object */ | ||
79 | /* CMM object this translator associated */ | ||
80 | struct cmm_object *cmm_mgr; | ||
81 | /* | ||
82 | * Client process virtual base address that corresponds to phys SM | ||
83 | * base address for translator's seg_id. | ||
84 | * Only 1 segment ID currently supported. | ||
85 | */ | ||
86 | unsigned int virt_base; /* virtual base address */ | ||
87 | u32 virt_size; /* size of virt space in bytes */ | ||
88 | u32 seg_id; /* Segment Id */ | ||
89 | }; | ||
90 | |||
91 | /* CMM Mgr */ | ||
92 | struct cmm_object { | ||
93 | /* | ||
94 | * The CMM lock is used to serialize multi-threaded access to the memory manager. | ||
95 | */ | ||
96 | struct mutex cmm_lock; /* Lock to access cmm mgr */ | ||
97 | struct list_head node_free_list; /* Free list of memory nodes */ | ||
98 | u32 min_block_size; /* Min SM block; default 16 bytes */ | ||
99 | u32 page_size; /* Memory Page size (1k/4k) */ | ||
100 | /* GPP SM segment ptrs */ | ||
101 | struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS]; | ||
102 | }; | ||
103 | |||
104 | /* Default CMM Mgr attributes */ | ||
105 | static struct cmm_mgrattrs cmm_dfltmgrattrs = { | ||
106 | /* min_block_size, min block size(bytes) allocated by cmm mgr */ | ||
107 | 16 | ||
108 | }; | ||
109 | |||
110 | /* Default allocation attributes */ | ||
111 | static struct cmm_attrs cmm_dfltalctattrs = { | ||
112 | 1 /* seg_id, default segment Id for allocator */ | ||
113 | }; | ||
114 | |||
115 | /* Address translator default attrs */ | ||
116 | static struct cmm_xlatorattrs cmm_dfltxlatorattrs = { | ||
117 | /* seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */ | ||
118 | 1, | ||
119 | 0, /* dsp_bufs */ | ||
120 | 0, /* dsp_buf_size */ | ||
121 | NULL, /* vm_base */ | ||
122 | 0, /* vm_size */ | ||
123 | }; | ||
124 | |||
125 | /* SM node representing a block of memory. */ | ||
126 | struct cmm_mnode { | ||
127 | struct list_head link; /* must be 1st element */ | ||
128 | u32 pa; /* Phys addr */ | ||
129 | u32 va; /* Virtual address in device process context */ | ||
130 | u32 size; /* SM block size in bytes */ | ||
131 | u32 client_proc; /* Process that allocated this mem block */ | ||
132 | }; | ||
133 | |||
134 | /* ----------------------------------- Function Prototypes */ | ||
135 | static void add_to_free_list(struct cmm_allocator *allocator, | ||
136 | struct cmm_mnode *pnode); | ||
137 | static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj, | ||
138 | u32 ul_seg_id); | ||
139 | static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator, | ||
140 | u32 usize); | ||
141 | static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa, | ||
142 | u32 dw_va, u32 ul_size); | ||
143 | /* get available slot for new allocator */ | ||
144 | static s32 get_slot(struct cmm_object *cmm_mgr_obj); | ||
145 | static void un_register_gppsm_seg(struct cmm_allocator *psma); | ||
146 | |||
147 | /* | ||
148 | * ======== cmm_calloc_buf ======== | ||
149 | * Purpose: | ||
150 | * Allocate a SM buffer, zero contents, and return the physical address | ||
151 | * and optional driver context virtual address(pp_buf_va). | ||
152 | * | ||
153 | * The freelist is sorted in increasing size order. Get the first | ||
154 | * block that satisfies the request and, if the remainder is large | ||
155 | * enough, put it back on the freelist. The kept block is placed on | ||
156 | * the in_use_list. | ||
157 | */ | ||
158 | void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize, | ||
159 | struct cmm_attrs *pattrs, void **pp_buf_va) | ||
160 | { | ||
161 | struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; | ||
162 | void *buf_pa = NULL; | ||
163 | struct cmm_mnode *pnode = NULL; | ||
164 | struct cmm_mnode *new_node = NULL; | ||
165 | struct cmm_allocator *allocator = NULL; | ||
166 | u32 delta_size; | ||
167 | u8 *pbyte = NULL; | ||
168 | s32 cnt; | ||
169 | |||
170 | if (pattrs == NULL) | ||
171 | pattrs = &cmm_dfltalctattrs; | ||
172 | |||
173 | if (pp_buf_va != NULL) | ||
174 | *pp_buf_va = NULL; | ||
175 | |||
176 | if (cmm_mgr_obj && (usize != 0)) { | ||
177 | if (pattrs->seg_id > 0) { | ||
178 | /* SegId > 0 is SM */ | ||
179 | /* get the allocator object for this segment id */ | ||
180 | allocator = | ||
181 | get_allocator(cmm_mgr_obj, pattrs->seg_id); | ||
182 | /* keep block size a multiple of min_block_size */ | ||
183 | usize = | ||
184 | ((usize - 1) & ~(cmm_mgr_obj->min_block_size - | ||
185 | 1)) | ||
186 | + cmm_mgr_obj->min_block_size; | ||
187 | mutex_lock(&cmm_mgr_obj->cmm_lock); | ||
188 | pnode = get_free_block(allocator, usize); | ||
189 | } | ||
190 | if (pnode) { | ||
191 | delta_size = (pnode->size - usize); | ||
192 | if (delta_size >= cmm_mgr_obj->min_block_size) { | ||
193 | /* create a new block with the leftovers and | ||
194 | * add to freelist */ | ||
195 | new_node = | ||
196 | get_node(cmm_mgr_obj, pnode->pa + usize, | ||
197 | pnode->va + usize, | ||
198 | (u32) delta_size); | ||
199 | /* leftovers go free */ | ||
200 | add_to_free_list(allocator, new_node); | ||
201 | /* adjust our node's size */ | ||
202 | pnode->size = usize; | ||
203 | } | ||
204 | /* Tag node with the client process requesting the allocation. | ||
205 | * We'll need to free up a process's allocated SM if the | ||
206 | * client process goes away. | ||
207 | */ | ||
208 | /* Return TGID instead of process handle */ | ||
209 | pnode->client_proc = current->tgid; | ||
210 | |||
211 | /* put our node on InUse list */ | ||
212 | list_add_tail(&pnode->link, &allocator->in_use_list); | ||
213 | buf_pa = (void *)pnode->pa; /* physical address */ | ||
214 | /* clear mem */ | ||
215 | pbyte = (u8 *) pnode->va; | ||
216 | for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++) | ||
217 | *pbyte = 0; | ||
218 | |||
219 | if (pp_buf_va != NULL) { | ||
220 | /* Virtual address */ | ||
221 | *pp_buf_va = (void *)pnode->va; | ||
222 | } | ||
223 | } | ||
224 | mutex_unlock(&cmm_mgr_obj->cmm_lock); | ||
225 | } | ||
226 | return buf_pa; | ||
227 | } | ||
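The size adjustment near the top of cmm_calloc_buf() rounds the request up to the next multiple of min_block_size, which is assumed to be a power of two (the default is 16), and any remainder of at least min_block_size is split off and returned to the free list. The rounding step in isolation:

/* Round usize up to a multiple of min_block_size (assumed power of two).
 * With min_block_size = 16: 1..16 -> 16, 17..32 -> 32, and so on.
 */
static u32 round_to_min_block(u32 usize, u32 min_block_size)
{
	return ((usize - 1) & ~(min_block_size - 1)) + min_block_size;
}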
228 | |||
229 | /* | ||
230 | * ======== cmm_create ======== | ||
231 | * Purpose: | ||
232 | * Create a communication memory manager object. | ||
233 | */ | ||
234 | int cmm_create(struct cmm_object **ph_cmm_mgr, | ||
235 | struct dev_object *hdev_obj, | ||
236 | const struct cmm_mgrattrs *mgr_attrts) | ||
237 | { | ||
238 | struct cmm_object *cmm_obj = NULL; | ||
239 | int status = 0; | ||
240 | |||
241 | *ph_cmm_mgr = NULL; | ||
242 | /* create, zero, and tag a cmm mgr object */ | ||
243 | cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL); | ||
244 | if (!cmm_obj) | ||
245 | return -ENOMEM; | ||
246 | |||
247 | if (mgr_attrts == NULL) | ||
248 | mgr_attrts = &cmm_dfltmgrattrs; /* set defaults */ | ||
249 | |||
250 | /* save away smallest block allocation for this cmm mgr */ | ||
251 | cmm_obj->min_block_size = mgr_attrts->min_block_size; | ||
252 | cmm_obj->page_size = PAGE_SIZE; | ||
253 | |||
254 | /* create node free list */ | ||
255 | INIT_LIST_HEAD(&cmm_obj->node_free_list); | ||
256 | mutex_init(&cmm_obj->cmm_lock); | ||
257 | *ph_cmm_mgr = cmm_obj; | ||
258 | |||
259 | return status; | ||
260 | } | ||
261 | |||
262 | /* | ||
263 | * ======== cmm_destroy ======== | ||
264 | * Purpose: | ||
265 | * Release the communication memory manager resources. | ||
266 | */ | ||
267 | int cmm_destroy(struct cmm_object *hcmm_mgr, bool force) | ||
268 | { | ||
269 | struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; | ||
270 | struct cmm_info temp_info; | ||
271 | int status = 0; | ||
272 | s32 slot_seg; | ||
273 | struct cmm_mnode *node, *tmp; | ||
274 | |||
275 | if (!hcmm_mgr) { | ||
276 | status = -EFAULT; | ||
277 | return status; | ||
278 | } | ||
279 | mutex_lock(&cmm_mgr_obj->cmm_lock); | ||
280 | /* If not force then fail if outstanding allocations exist */ | ||
281 | if (!force) { | ||
282 | /* Check for outstanding memory allocations */ | ||
283 | status = cmm_get_info(hcmm_mgr, &temp_info); | ||
284 | if (!status) { | ||
285 | if (temp_info.total_in_use_cnt > 0) { | ||
286 | /* outstanding allocations */ | ||
287 | status = -EPERM; | ||
288 | } | ||
289 | } | ||
290 | } | ||
291 | if (!status) { | ||
292 | /* UnRegister SM allocator */ | ||
293 | for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) { | ||
294 | if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) { | ||
295 | un_register_gppsm_seg | ||
296 | (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]); | ||
297 | /* Set slot to NULL for future reuse */ | ||
298 | cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL; | ||
299 | } | ||
300 | } | ||
301 | } | ||
302 | list_for_each_entry_safe(node, tmp, &cmm_mgr_obj->node_free_list, | ||
303 | link) { | ||
304 | list_del(&node->link); | ||
305 | kfree(node); | ||
306 | } | ||
307 | mutex_unlock(&cmm_mgr_obj->cmm_lock); | ||
308 | if (!status) { | ||
309 | /* destroy the lock and free the cmm mgr object */ | ||
310 | mutex_destroy(&cmm_mgr_obj->cmm_lock); | ||
311 | kfree(cmm_mgr_obj); | ||
312 | } | ||
313 | return status; | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * ======== cmm_free_buf ======== | ||
318 | * Purpose: | ||
319 | * Free the given buffer. | ||
320 | */ | ||
321 | int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id) | ||
322 | { | ||
323 | struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; | ||
324 | int status = -EFAULT; | ||
325 | struct cmm_mnode *curr, *tmp; | ||
326 | struct cmm_allocator *allocator; | ||
327 | struct cmm_attrs *pattrs; | ||
328 | |||
329 | if (ul_seg_id == 0) { | ||
330 | pattrs = &cmm_dfltalctattrs; | ||
331 | ul_seg_id = pattrs->seg_id; | ||
332 | } | ||
333 | if (!hcmm_mgr || !(ul_seg_id > 0)) { | ||
334 | status = -EFAULT; | ||
335 | return status; | ||
336 | } | ||
337 | |||
338 | allocator = get_allocator(cmm_mgr_obj, ul_seg_id); | ||
339 | if (!allocator) | ||
340 | return status; | ||
341 | |||
342 | mutex_lock(&cmm_mgr_obj->cmm_lock); | ||
343 | list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) { | ||
344 | if (curr->pa == (u32) buf_pa) { | ||
345 | list_del(&curr->link); | ||
346 | add_to_free_list(allocator, curr); | ||
347 | status = 0; | ||
348 | break; | ||
349 | } | ||
350 | } | ||
351 | mutex_unlock(&cmm_mgr_obj->cmm_lock); | ||
352 | |||
353 | return status; | ||
354 | } | ||
355 | |||
356 | /* | ||
357 | * ======== cmm_get_handle ======== | ||
358 | * Purpose: | ||
359 | * Return the communication memory manager object for this device. | ||
360 | * This is typically called from the client process. | ||
361 | */ | ||
362 | int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr) | ||
363 | { | ||
364 | int status = 0; | ||
365 | struct dev_object *hdev_obj; | ||
366 | |||
367 | if (hprocessor != NULL) | ||
368 | status = proc_get_dev_object(hprocessor, &hdev_obj); | ||
369 | else | ||
370 | hdev_obj = dev_get_first(); /* default */ | ||
371 | |||
372 | if (!status) | ||
373 | status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr); | ||
374 | |||
375 | return status; | ||
376 | } | ||
377 | |||
378 | /* | ||
379 | * ======== cmm_get_info ======== | ||
380 | * Purpose: | ||
381 | * Return the current memory utilization information. | ||
382 | */ | ||
383 | int cmm_get_info(struct cmm_object *hcmm_mgr, | ||
384 | struct cmm_info *cmm_info_obj) | ||
385 | { | ||
386 | struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; | ||
387 | u32 ul_seg; | ||
388 | int status = 0; | ||
389 | struct cmm_allocator *altr; | ||
390 | struct cmm_mnode *curr; | ||
391 | |||
392 | if (!hcmm_mgr) { | ||
393 | status = -EFAULT; | ||
394 | return status; | ||
395 | } | ||
396 | mutex_lock(&cmm_mgr_obj->cmm_lock); | ||
397 | cmm_info_obj->num_gppsm_segs = 0; /* # of SM segments */ | ||
398 | /* Total # of outstanding alloc */ | ||
399 | cmm_info_obj->total_in_use_cnt = 0; | ||
400 | /* min block size */ | ||
401 | cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size; | ||
402 | /* check SM memory segments */ | ||
403 | for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) { | ||
404 | /* get the allocator object for this segment id */ | ||
405 | altr = get_allocator(cmm_mgr_obj, ul_seg); | ||
406 | if (!altr) | ||
407 | continue; | ||
408 | cmm_info_obj->num_gppsm_segs++; | ||
409 | cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa = | ||
410 | altr->shm_base - altr->dsp_size; | ||
411 | cmm_info_obj->seg_info[ul_seg - 1].total_seg_size = | ||
412 | altr->dsp_size + altr->sm_size; | ||
413 | cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa = | ||
414 | altr->shm_base; | ||
415 | cmm_info_obj->seg_info[ul_seg - 1].gpp_size = | ||
416 | altr->sm_size; | ||
417 | cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va = | ||
418 | altr->dsp_base; | ||
419 | cmm_info_obj->seg_info[ul_seg - 1].dsp_size = | ||
420 | altr->dsp_size; | ||
421 | cmm_info_obj->seg_info[ul_seg - 1].seg_base_va = | ||
422 | altr->vm_base - altr->dsp_size; | ||
423 | cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0; | ||
424 | |||
425 | list_for_each_entry(curr, &altr->in_use_list, link) { | ||
426 | cmm_info_obj->total_in_use_cnt++; | ||
427 | cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++; | ||
428 | } | ||
429 | } | ||
430 | mutex_unlock(&cmm_mgr_obj->cmm_lock); | ||
431 | return status; | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * ======== cmm_register_gppsm_seg ======== | ||
436 | * Purpose: | ||
437 | * Register a block of SM with the CMM to be used for later GPP SM | ||
438 | * allocations. | ||
439 | */ | ||
440 | int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr, | ||
441 | u32 dw_gpp_base_pa, u32 ul_size, | ||
442 | u32 dsp_addr_offset, s8 c_factor, | ||
443 | u32 dw_dsp_base, u32 ul_dsp_size, | ||
444 | u32 *sgmt_id, u32 gpp_base_va) | ||
445 | { | ||
446 | struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; | ||
447 | struct cmm_allocator *psma = NULL; | ||
448 | int status = 0; | ||
449 | struct cmm_mnode *new_node; | ||
450 | s32 slot_seg; | ||
451 | |||
452 | dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n", | ||
453 | __func__, dw_gpp_base_pa, ul_size, dsp_addr_offset, | ||
454 | dw_dsp_base, ul_dsp_size, gpp_base_va); | ||
455 | |||
456 | if (!hcmm_mgr) | ||
457 | return -EFAULT; | ||
458 | |||
459 | /* make sure we have room for another allocator */ | ||
460 | mutex_lock(&cmm_mgr_obj->cmm_lock); | ||
461 | |||
462 | slot_seg = get_slot(cmm_mgr_obj); | ||
463 | if (slot_seg < 0) { | ||
464 | status = -EPERM; | ||
465 | goto func_end; | ||
466 | } | ||
467 | |||
468 | /* Check if input ul_size is big enough to alloc at least one block */ | ||
469 | if (ul_size < cmm_mgr_obj->min_block_size) { | ||
470 | status = -EINVAL; | ||
471 | goto func_end; | ||
472 | } | ||
473 | |||
474 | /* create, zero, and tag an SM allocator object */ | ||
475 | psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL); | ||
476 | if (!psma) { | ||
477 | status = -ENOMEM; | ||
478 | goto func_end; | ||
479 | } | ||
480 | |||
481 | psma->cmm_mgr = hcmm_mgr; /* ref to parent */ | ||
482 | psma->shm_base = dw_gpp_base_pa; /* SM Base phys */ | ||
483 | psma->sm_size = ul_size; /* SM segment size in bytes */ | ||
484 | psma->vm_base = gpp_base_va; | ||
485 | psma->dsp_phys_addr_offset = dsp_addr_offset; | ||
486 | psma->c_factor = c_factor; | ||
487 | psma->dsp_base = dw_dsp_base; | ||
488 | psma->dsp_size = ul_dsp_size; | ||
489 | if (psma->vm_base == 0) { | ||
490 | status = -EPERM; | ||
491 | goto func_end; | ||
492 | } | ||
493 | /* return the actual segment identifier */ | ||
494 | *sgmt_id = (u32) slot_seg + 1; | ||
495 | |||
496 | INIT_LIST_HEAD(&psma->free_list); | ||
497 | INIT_LIST_HEAD(&psma->in_use_list); | ||
498 | |||
499 | /* Get a mem node for this hunk-o-memory */ | ||
500 | new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa, | ||
501 | psma->vm_base, ul_size); | ||
502 | /* Place node on the SM allocator's free list */ | ||
503 | if (new_node) { | ||
504 | list_add_tail(&new_node->link, &psma->free_list); | ||
505 | } else { | ||
506 | status = -ENOMEM; | ||
507 | goto func_end; | ||
508 | } | ||
509 | /* make entry */ | ||
510 | cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma; | ||
511 | |||
512 | func_end: | ||
513 | /* Cleanup allocator */ | ||
514 | if (status && psma) | ||
515 | un_register_gppsm_seg(psma); | ||
516 | mutex_unlock(&cmm_mgr_obj->cmm_lock); | ||
517 | |||
518 | return status; | ||
519 | } | ||
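For context, a plausible end-to-end use of the manager would look roughly like the sketch below: create the manager, register one shared-memory segment, allocate and free a buffer, then tear everything down. The device object, the mapped kernel virtual address, and every numeric value are hypothetical placeholders, and error checking is omitted for brevity.

extern struct dev_object *hdev_obj;	/* assumed to exist */
extern void *sm_kernel_va;		/* assumed GPP VA mapping of the segment */

static void example_cmm_usage(void)
{
	struct cmm_object *mgr;
	u32 seg_id;
	void *pa, *va;

	cmm_create(&mgr, hdev_obj, NULL);	/* defaults: 16-byte min block */
	cmm_register_gppsm_seg(mgr, 0x87000000, 0x100000,	/* GPP PA base, 1 MiB */
			       0, 1, 0x11000000, 0x1000,	/* DSP offset/factor/base/size */
			       &seg_id, (u32)sm_kernel_va);
	pa = cmm_calloc_buf(mgr, 256, NULL, &va);	/* zeroed 256-byte SM buffer */
	cmm_free_buf(mgr, pa, seg_id);
	cmm_un_register_gppsm_seg(mgr, CMM_ALLSEGMENTS);
	cmm_destroy(mgr, false);
}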
520 | |||
521 | /* | ||
522 | * ======== cmm_un_register_gppsm_seg ======== | ||
523 | * Purpose: | ||
524 | * UnRegister GPP SM segments with the CMM. | ||
525 | */ | ||
526 | int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr, | ||
527 | u32 ul_seg_id) | ||
528 | { | ||
529 | struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr; | ||
530 | int status = 0; | ||
531 | struct cmm_allocator *psma; | ||
532 | u32 ul_id = ul_seg_id; | ||
533 | |||
534 | if (!hcmm_mgr) | ||
535 | return -EFAULT; | ||
536 | |||
537 | if (ul_seg_id == CMM_ALLSEGMENTS) | ||
538 | ul_id = 1; | ||
539 | |||
540 | if ((ul_id <= 0) || (ul_id > CMM_MAXGPPSEGS)) | ||
541 | return -EINVAL; | ||
542 | |||
543 | /* | ||
544 | * FIXME: CMM_MAXGPPSEGS == 1, so why use a while loop? It seems that | ||
545 | * ul_seg_id is not needed here; it must always be 1. | ||
546 | */ | ||
547 | while (ul_id <= CMM_MAXGPPSEGS) { | ||
548 | mutex_lock(&cmm_mgr_obj->cmm_lock); | ||
549 | /* slot = seg_id-1 */ | ||
550 | psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1]; | ||
551 | if (psma != NULL) { | ||
552 | un_register_gppsm_seg(psma); | ||
553 | /* Set alctr ptr to NULL for future reuse */ | ||
554 | cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL; | ||
555 | } else if (ul_seg_id != CMM_ALLSEGMENTS) { | ||
556 | status = -EPERM; | ||
557 | } | ||
558 | mutex_unlock(&cmm_mgr_obj->cmm_lock); | ||
559 | if (ul_seg_id != CMM_ALLSEGMENTS) | ||
560 | break; | ||
561 | |||
562 | ul_id++; | ||
563 | } /* end while */ | ||
564 | return status; | ||
565 | } | ||
566 | |||
567 | /* | ||
568 | * ======== un_register_gppsm_seg ======== | ||
569 | * Purpose: | ||
570 | * Unregister the SM allocator by freeing all of its resources. The | ||
571 | * caller clears the cmm mgr table entry. | ||
572 | * Note: | ||
573 | * This routine is always called within cmm lock crit sect. | ||
574 | */ | ||
575 | static void un_register_gppsm_seg(struct cmm_allocator *psma) | ||
576 | { | ||
577 | struct cmm_mnode *curr, *tmp; | ||
578 | |||
579 | /* free nodes on free list */ | ||
580 | list_for_each_entry_safe(curr, tmp, &psma->free_list, link) { | ||
581 | list_del(&curr->link); | ||
582 | kfree(curr); | ||
583 | } | ||
584 | |||
585 | /* free nodes on InUse list */ | ||
586 | list_for_each_entry_safe(curr, tmp, &psma->in_use_list, link) { | ||
587 | list_del(&curr->link); | ||
588 | kfree(curr); | ||
589 | } | ||
590 | |||
591 | if ((void *)psma->vm_base != NULL) | ||
592 | MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base); | ||
593 | |||
594 | /* Free allocator itself */ | ||
595 | kfree(psma); | ||
596 | } | ||
597 | |||
598 | /* | ||
599 | * ======== get_slot ======== | ||
600 | * Purpose: | ||
601 | * An available slot # is returned. Returns negative on failure. | ||
602 | */ | ||
603 | static s32 get_slot(struct cmm_object *cmm_mgr_obj) | ||
604 | { | ||
605 | s32 slot_seg = -1; /* neg on failure */ | ||
606 | /* get first available slot in cmm mgr SMSegTab[] */ | ||
607 | for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) { | ||
608 | if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL) | ||
609 | break; | ||
610 | |||
611 | } | ||
612 | if (slot_seg == CMM_MAXGPPSEGS) | ||
613 | slot_seg = -1; /* failed */ | ||
614 | |||
615 | return slot_seg; | ||
616 | } | ||
617 | |||
618 | /* | ||
619 | * ======== get_node ======== | ||
620 | * Purpose: | ||
621 | * Get a memory node from freelist or create a new one. | ||
622 | */ | ||
623 | static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa, | ||
624 | u32 dw_va, u32 ul_size) | ||
625 | { | ||
626 | struct cmm_mnode *pnode; | ||
627 | |||
628 | /* Check cmm mgr's node freelist */ | ||
629 | if (list_empty(&cmm_mgr_obj->node_free_list)) { | ||
630 | pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL); | ||
631 | if (!pnode) | ||
632 | return NULL; | ||
633 | } else { | ||
634 | /* surely a valid element */ | ||
635 | pnode = list_first_entry(&cmm_mgr_obj->node_free_list, | ||
636 | struct cmm_mnode, link); | ||
637 | list_del_init(&pnode->link); | ||
638 | } | ||
639 | |||
640 | pnode->pa = dw_pa; | ||
641 | pnode->va = dw_va; | ||
642 | pnode->size = ul_size; | ||
643 | |||
644 | return pnode; | ||
645 | } | ||
646 | |||
647 | /* | ||
648 | * ======== delete_node ======== | ||
649 | * Purpose: | ||
650 | * Put a memory node on the cmm nodelist for later use. | ||
651 | * Doesn't actually free the node, which avoids heap thrashing. | ||
652 | */ | ||
653 | static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode) | ||
654 | { | ||
655 | list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list); | ||
656 | } | ||
657 | |||
658 | /* | ||
659 | * ======== get_free_block ======== | ||
660 | * Purpose: | ||
661 | * Scan the free block list and return the first block that satisfies | ||
662 | * the size. | ||
663 | */ | ||
664 | static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator, | ||
665 | u32 usize) | ||
666 | { | ||
667 | struct cmm_mnode *node, *tmp; | ||
668 | |||
669 | if (!allocator) | ||
670 | return NULL; | ||
671 | |||
672 | list_for_each_entry_safe(node, tmp, &allocator->free_list, link) { | ||
673 | if (usize <= node->size) { | ||
674 | list_del(&node->link); | ||
675 | return node; | ||
676 | } | ||
677 | } | ||
678 | |||
679 | return NULL; | ||
680 | } | ||
681 | |||
682 | /* | ||
683 | * ======== add_to_free_list ======== | ||
684 | * Purpose: | ||
685 | * Coalesce node into the freelist in ascending size order. | ||
686 | */ | ||
687 | static void add_to_free_list(struct cmm_allocator *allocator, | ||
688 | struct cmm_mnode *node) | ||
689 | { | ||
690 | struct cmm_mnode *curr; | ||
691 | |||
692 | if (!node) { | ||
693 | pr_err("%s: failed - node is NULL\n", __func__); | ||
694 | return; | ||
695 | } | ||
696 | |||
697 | list_for_each_entry(curr, &allocator->free_list, link) { | ||
698 | if (NEXT_PA(curr) == node->pa) { | ||
699 | curr->size += node->size; | ||
700 | delete_node(allocator->cmm_mgr, node); | ||
701 | return; | ||
702 | } | ||
703 | if (curr->pa == NEXT_PA(node)) { | ||
704 | curr->pa = node->pa; | ||
705 | curr->va = node->va; | ||
706 | curr->size += node->size; | ||
707 | delete_node(allocator->cmm_mgr, node); | ||
708 | return; | ||
709 | } | ||
710 | } | ||
711 | list_for_each_entry(curr, &allocator->free_list, link) { | ||
712 | if (curr->size >= node->size) { | ||
713 | list_add_tail(&node->link, &curr->link); | ||
714 | return; | ||
715 | } | ||
716 | } | ||
717 | list_add_tail(&node->link, &allocator->free_list); | ||
718 | } | ||
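The first loop above coalesces the freed block with a physically adjacent free block, where NEXT_PA() is assumed (from its use here; the definition lives earlier in cmm.c) to evaluate to the address one past the end of a block; the second loop keeps the free list sorted by ascending size. The adjacency test, spelled out:

/* Two free blocks are mergeable when one ends exactly where the other
 * begins, i.e. left->pa + left->size == right->pa.  This is what the
 * NEXT_PA() comparisons above express (assumed macro definition).
 */
static bool blocks_adjacent(const struct cmm_mnode *left,
			    const struct cmm_mnode *right)
{
	return left->pa + left->size == right->pa;
}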
719 | |||
720 | /* | ||
721 | * ======== get_allocator ======== | ||
722 | * Purpose: | ||
723 | * Return the allocator for the given SM Segid. | ||
724 | * SegIds: 1,2,3..max. | ||
725 | */ | ||
726 | static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj, | ||
727 | u32 ul_seg_id) | ||
728 | { | ||
729 | return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1]; | ||
730 | } | ||
731 | |||
732 | /* | ||
733 | * The CMM_Xlator[xxx] routines below are used by Node and Stream | ||
734 | * to perform SM address translation to the client process address space. | ||
735 | * A "translator" object is created by a node/stream for each SM seg used. | ||
736 | */ | ||
737 | |||
738 | /* | ||
739 | * ======== cmm_xlator_create ======== | ||
740 | * Purpose: | ||
741 | * Create an address translator object. | ||
742 | */ | ||
743 | int cmm_xlator_create(struct cmm_xlatorobject **xlator, | ||
744 | struct cmm_object *hcmm_mgr, | ||
745 | struct cmm_xlatorattrs *xlator_attrs) | ||
746 | { | ||
747 | struct cmm_xlator *xlator_object = NULL; | ||
748 | int status = 0; | ||
749 | |||
750 | *xlator = NULL; | ||
751 | if (xlator_attrs == NULL) | ||
752 | xlator_attrs = &cmm_dfltxlatorattrs; /* set defaults */ | ||
753 | |||
754 | xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL); | ||
755 | if (xlator_object != NULL) { | ||
756 | xlator_object->cmm_mgr = hcmm_mgr; /* ref back to CMM */ | ||
757 | /* SM seg_id */ | ||
758 | xlator_object->seg_id = xlator_attrs->seg_id; | ||
759 | } else { | ||
760 | status = -ENOMEM; | ||
761 | } | ||
762 | if (!status) | ||
763 | *xlator = (struct cmm_xlatorobject *)xlator_object; | ||
764 | |||
765 | return status; | ||
766 | } | ||
767 | |||
768 | /* | ||
769 | * ======== cmm_xlator_alloc_buf ======== | ||
770 | */ | ||
771 | void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf, | ||
772 | u32 pa_size) | ||
773 | { | ||
774 | struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; | ||
775 | void *pbuf = NULL; | ||
776 | void *tmp_va_buff; | ||
777 | struct cmm_attrs attrs; | ||
778 | |||
779 | if (xlator_obj) { | ||
780 | attrs.seg_id = xlator_obj->seg_id; | ||
781 | __raw_writel(0, va_buf); | ||
782 | /* Alloc SM */ | ||
783 | pbuf = | ||
784 | cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs, NULL); | ||
785 | if (pbuf) { | ||
786 | /* convert to translator (node/strm) process virtual | ||
787 | * address */ | ||
788 | tmp_va_buff = cmm_xlator_translate(xlator, | ||
789 | pbuf, CMM_PA2VA); | ||
790 | __raw_writel((u32)tmp_va_buff, va_buf); | ||
791 | } | ||
792 | } | ||
793 | return pbuf; | ||
794 | } | ||
795 | |||
796 | /* | ||
797 | * ======== cmm_xlator_free_buf ======== | ||
798 | * Purpose: | ||
799 | * Free the given SM buffer and descriptor. | ||
800 | * Does not free virtual memory. | ||
801 | */ | ||
802 | int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va) | ||
803 | { | ||
804 | struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; | ||
805 | int status = -EPERM; | ||
806 | void *buf_pa = NULL; | ||
807 | |||
808 | if (xlator_obj) { | ||
809 | /* convert Va to Pa so we can free it. */ | ||
810 | buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA); | ||
811 | if (buf_pa) { | ||
812 | status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa, | ||
813 | xlator_obj->seg_id); | ||
814 | if (status) { | ||
815 | /* Uh oh, this shouldn't happen. Descriptor | ||
816 | * gone! */ | ||
817 | pr_err("%s, line %d: Assertion failed\n", | ||
818 | __FILE__, __LINE__); | ||
819 | } | ||
820 | } | ||
821 | } | ||
822 | return status; | ||
823 | } | ||
824 | |||
825 | /* | ||
826 | * ======== cmm_xlator_info ======== | ||
827 | * Purpose: | ||
828 | * Set/Get translator info. | ||
829 | */ | ||
830 | int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr, | ||
831 | u32 ul_size, u32 segm_id, bool set_info) | ||
832 | { | ||
833 | struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; | ||
834 | int status = 0; | ||
835 | |||
836 | if (xlator_obj) { | ||
837 | if (set_info) { | ||
838 | /* set translator's virtual address range */ | ||
839 | xlator_obj->virt_base = (u32) *paddr; | ||
840 | xlator_obj->virt_size = ul_size; | ||
841 | } else { /* return virt base address */ | ||
842 | *paddr = (u8 *) xlator_obj->virt_base; | ||
843 | } | ||
844 | } else { | ||
845 | status = -EFAULT; | ||
846 | } | ||
847 | return status; | ||
848 | } | ||
849 | |||
850 | /* | ||
851 | * ======== cmm_xlator_translate ======== | ||
852 | */ | ||
853 | void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr, | ||
854 | enum cmm_xlatetype xtype) | ||
855 | { | ||
856 | u32 dw_addr_xlate = 0; | ||
857 | struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator; | ||
858 | struct cmm_object *cmm_mgr_obj = NULL; | ||
859 | struct cmm_allocator *allocator = NULL; | ||
860 | u32 dw_offset = 0; | ||
861 | |||
862 | if (!xlator_obj) | ||
863 | goto loop_cont; | ||
864 | |||
865 | cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr; | ||
866 | /* get this translator's default SM allocator */ | ||
867 | allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1]; | ||
868 | if (!allocator) | ||
869 | goto loop_cont; | ||
870 | |||
871 | if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) || | ||
872 | (xtype == CMM_PA2VA)) { | ||
873 | if (xtype == CMM_PA2VA) { | ||
874 | /* Gpp Va = Va Base + offset */ | ||
875 | dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base - | ||
876 | allocator-> | ||
877 | dsp_size); | ||
878 | dw_addr_xlate = xlator_obj->virt_base + dw_offset; | ||
879 | /* Check if translated Va base is in range */ | ||
880 | if ((dw_addr_xlate < xlator_obj->virt_base) || | ||
881 | (dw_addr_xlate >= | ||
882 | (xlator_obj->virt_base + | ||
883 | xlator_obj->virt_size))) { | ||
884 | dw_addr_xlate = 0; /* bad address */ | ||
885 | } | ||
886 | } else { | ||
887 | /* Gpp PA = Gpp Base + offset */ | ||
888 | dw_offset = | ||
889 | (u8 *) paddr - (u8 *) xlator_obj->virt_base; | ||
890 | dw_addr_xlate = | ||
891 | allocator->shm_base - allocator->dsp_size + | ||
892 | dw_offset; | ||
893 | } | ||
894 | } else { | ||
895 | dw_addr_xlate = (u32) paddr; | ||
896 | } | ||
897 | /* Now convert address to proper target physical address if needed */ | ||
898 | if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) { | ||
899 | /* Got Gpp Pa now, convert to DSP Pa */ | ||
900 | dw_addr_xlate = | ||
901 | GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size), | ||
902 | dw_addr_xlate, | ||
903 | allocator->dsp_phys_addr_offset * | ||
904 | allocator->c_factor); | ||
905 | } else if (xtype == CMM_DSPPA2PA) { | ||
906 | /* Got DSP Pa, convert to GPP Pa */ | ||
907 | dw_addr_xlate = | ||
908 | DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size, | ||
909 | dw_addr_xlate, | ||
910 | allocator->dsp_phys_addr_offset * | ||
911 | allocator->c_factor); | ||
912 | } | ||
913 | loop_cont: | ||
914 | return (void *)dw_addr_xlate; | ||
915 | } | ||
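The GPP-side cases of cmm_xlator_translate() are pure offset arithmetic against the start of the combined segment (the DSP-reserved area plus the SM area). A stripped-down restatement with the relevant fields passed in explicitly; these helpers are illustrative, not part of the driver:

/* seg_base_pa == allocator->shm_base - allocator->dsp_size,
 * virt_base   == the translator's client VA mapping of that same address.
 */
static u32 example_pa_to_va(u32 pa, u32 seg_base_pa, u32 virt_base)
{
	return virt_base + (pa - seg_base_pa);		/* CMM_PA2VA */
}

static u32 example_va_to_pa(u32 va, u32 seg_base_pa, u32 virt_base)
{
	return seg_base_pa + (va - virt_base);		/* CMM_VA2PA */
}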
diff --git a/drivers/staging/tidspbridge/pmgr/cod.c b/drivers/staging/tidspbridge/pmgr/cod.c deleted file mode 100644 index 6c29379baf60..000000000000 --- a/drivers/staging/tidspbridge/pmgr/cod.c +++ /dev/null | |||
@@ -1,537 +0,0 @@ | |||
1 | /* | ||
2 | * cod.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * This module implements DSP code management for the DSP/BIOS Bridge | ||
7 | * environment. It is mostly a thin wrapper. | ||
8 | * | ||
9 | * This module provides an interface for loading both static and | ||
10 | * dynamic code objects onto DSP systems. | ||
11 | * | ||
12 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
13 | * | ||
14 | * This package is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License version 2 as | ||
16 | * published by the Free Software Foundation. | ||
17 | * | ||
18 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
19 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
20 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
21 | */ | ||
22 | |||
23 | #include <linux/types.h> | ||
24 | |||
25 | /* ----------------------------------- Host OS */ | ||
26 | #include <dspbridge/host_os.h> | ||
27 | #include <linux/fs.h> | ||
28 | #include <linux/uaccess.h> | ||
29 | |||
30 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
31 | #include <dspbridge/dbdefs.h> | ||
32 | |||
33 | /* ----------------------------------- Platform Manager */ | ||
34 | /* Include appropriate loader header file */ | ||
35 | #include <dspbridge/dbll.h> | ||
36 | |||
37 | /* ----------------------------------- This */ | ||
38 | #include <dspbridge/cod.h> | ||
39 | |||
40 | /* | ||
41 | * ======== cod_manager ======== | ||
42 | */ | ||
43 | struct cod_manager { | ||
44 | struct dbll_tar_obj *target; | ||
45 | struct dbll_library_obj *base_lib; | ||
46 | bool loaded; /* Base library loaded? */ | ||
47 | u32 entry; | ||
48 | struct dbll_fxns fxns; | ||
49 | struct dbll_attrs attrs; | ||
50 | char sz_zl_file[COD_MAXPATHLENGTH]; | ||
51 | }; | ||
52 | |||
53 | /* | ||
54 | * ======== cod_libraryobj ======== | ||
55 | */ | ||
56 | struct cod_libraryobj { | ||
57 | struct dbll_library_obj *dbll_lib; | ||
58 | struct cod_manager *cod_mgr; | ||
59 | }; | ||
60 | |||
61 | static struct dbll_fxns ldr_fxns = { | ||
62 | (dbll_close_fxn) dbll_close, | ||
63 | (dbll_create_fxn) dbll_create, | ||
64 | (dbll_delete_fxn) dbll_delete, | ||
65 | (dbll_exit_fxn) dbll_exit, | ||
66 | (dbll_get_attrs_fxn) dbll_get_attrs, | ||
67 | (dbll_get_addr_fxn) dbll_get_addr, | ||
68 | (dbll_get_c_addr_fxn) dbll_get_c_addr, | ||
69 | (dbll_get_sect_fxn) dbll_get_sect, | ||
70 | (dbll_init_fxn) dbll_init, | ||
71 | (dbll_load_fxn) dbll_load, | ||
72 | (dbll_open_fxn) dbll_open, | ||
73 | (dbll_read_sect_fxn) dbll_read_sect, | ||
74 | (dbll_unload_fxn) dbll_unload, | ||
75 | }; | ||
76 | |||
77 | static bool no_op(void); | ||
78 | |||
79 | /* | ||
80 | * File operations (originally were under kfile.c) | ||
81 | */ | ||
82 | static s32 cod_f_close(struct file *filp) | ||
83 | { | ||
84 | /* Check for valid handle */ | ||
85 | if (!filp) | ||
86 | return -EFAULT; | ||
87 | |||
88 | filp_close(filp, NULL); | ||
89 | |||
90 | /* success */ | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | static struct file *cod_f_open(const char *psz_file_name, const char *sz_mode) | ||
95 | { | ||
96 | mm_segment_t fs; | ||
97 | struct file *filp; | ||
98 | |||
99 | fs = get_fs(); | ||
100 | set_fs(get_ds()); | ||
101 | |||
102 | /* ignore given mode and open file as read-only */ | ||
103 | filp = filp_open(psz_file_name, O_RDONLY, 0); | ||
104 | |||
105 | if (IS_ERR(filp)) | ||
106 | filp = NULL; | ||
107 | |||
108 | set_fs(fs); | ||
109 | |||
110 | return filp; | ||
111 | } | ||
112 | |||
113 | static s32 cod_f_read(void __user *pbuffer, s32 size, s32 count, | ||
114 | struct file *filp) | ||
115 | { | ||
116 | /* check for valid file handle */ | ||
117 | if (!filp) | ||
118 | return -EFAULT; | ||
119 | |||
120 | if ((size > 0) && (count > 0) && pbuffer) { | ||
121 | u32 dw_bytes_read; | ||
122 | mm_segment_t fs; | ||
123 | |||
124 | /* read from file */ | ||
125 | fs = get_fs(); | ||
126 | set_fs(get_ds()); | ||
127 | dw_bytes_read = filp->f_op->read(filp, pbuffer, size * count, | ||
128 | &(filp->f_pos)); | ||
129 | set_fs(fs); | ||
130 | |||
131 | if (!dw_bytes_read) | ||
132 | return -EBADF; | ||
133 | |||
134 | return dw_bytes_read / size; | ||
135 | } | ||
136 | |||
137 | return -EINVAL; | ||
138 | } | ||
139 | |||
140 | static s32 cod_f_seek(struct file *filp, s32 offset, s32 origin) | ||
141 | { | ||
142 | loff_t dw_cur_pos; | ||
143 | |||
144 | /* check for valid file handle */ | ||
145 | if (!filp) | ||
146 | return -EFAULT; | ||
147 | |||
148 | /* based on the origin flag, move the internal pointer */ | ||
149 | dw_cur_pos = filp->f_op->llseek(filp, offset, origin); | ||
150 | |||
151 | if ((s32) dw_cur_pos < 0) | ||
152 | return -EPERM; | ||
153 | |||
154 | /* success */ | ||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | static s32 cod_f_tell(struct file *filp) | ||
159 | { | ||
160 | loff_t dw_cur_pos; | ||
161 | |||
162 | if (!filp) | ||
163 | return -EFAULT; | ||
164 | |||
165 | /* Get current position */ | ||
166 | dw_cur_pos = filp->f_op->llseek(filp, 0, SEEK_CUR); | ||
167 | |||
168 | if ((s32) dw_cur_pos < 0) | ||
169 | return -EPERM; | ||
170 | |||
171 | return dw_cur_pos; | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * ======== cod_close ======== | ||
176 | */ | ||
177 | void cod_close(struct cod_libraryobj *lib) | ||
178 | { | ||
179 | struct cod_manager *hmgr; | ||
180 | |||
181 | hmgr = lib->cod_mgr; | ||
182 | hmgr->fxns.close_fxn(lib->dbll_lib); | ||
183 | |||
184 | kfree(lib); | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * ======== cod_create ======== | ||
189 | * Purpose: | ||
190 | * Create an object to manage code on a DSP system. | ||
191 | * This object can be used to load an initial program image with | ||
192 | * arguments, and can later be extended with dynamically loaded | ||
193 | * object files. | ||
194 | * | ||
195 | */ | ||
196 | int cod_create(struct cod_manager **mgr, char *str_zl_file) | ||
197 | { | ||
198 | struct cod_manager *mgr_new; | ||
199 | struct dbll_attrs zl_attrs; | ||
200 | int status = 0; | ||
201 | |||
202 | /* assume failure */ | ||
203 | *mgr = NULL; | ||
204 | |||
205 | mgr_new = kzalloc(sizeof(struct cod_manager), GFP_KERNEL); | ||
206 | if (mgr_new == NULL) | ||
207 | return -ENOMEM; | ||
208 | |||
209 | /* Set up loader functions */ | ||
210 | mgr_new->fxns = ldr_fxns; | ||
211 | |||
212 | /* initialize the ZL module */ | ||
213 | mgr_new->fxns.init_fxn(); | ||
214 | |||
215 | zl_attrs.alloc = (dbll_alloc_fxn) no_op; | ||
216 | zl_attrs.free = (dbll_free_fxn) no_op; | ||
217 | zl_attrs.fread = (dbll_read_fxn) cod_f_read; | ||
218 | zl_attrs.fseek = (dbll_seek_fxn) cod_f_seek; | ||
219 | zl_attrs.ftell = (dbll_tell_fxn) cod_f_tell; | ||
220 | zl_attrs.fclose = (dbll_f_close_fxn) cod_f_close; | ||
221 | zl_attrs.fopen = (dbll_f_open_fxn) cod_f_open; | ||
222 | zl_attrs.sym_lookup = NULL; | ||
223 | zl_attrs.base_image = true; | ||
224 | zl_attrs.log_write = NULL; | ||
225 | zl_attrs.log_write_handle = NULL; | ||
226 | zl_attrs.write = NULL; | ||
227 | zl_attrs.rmm_handle = NULL; | ||
228 | zl_attrs.input_params = NULL; | ||
229 | zl_attrs.sym_handle = NULL; | ||
230 | zl_attrs.sym_arg = NULL; | ||
231 | |||
232 | mgr_new->attrs = zl_attrs; | ||
233 | |||
234 | status = mgr_new->fxns.create_fxn(&mgr_new->target, &zl_attrs); | ||
235 | |||
236 | if (status) { | ||
237 | cod_delete(mgr_new); | ||
238 | return -ESPIPE; | ||
239 | } | ||
240 | |||
241 | /* return the new manager */ | ||
242 | *mgr = mgr_new; | ||
243 | |||
244 | return 0; | ||
245 | } | ||
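Everything cod.c does with the loader goes through the dbll_fxns table copied from ldr_fxns above, so the rest of the module never calls dbll_* directly; swapping in a different loader would only mean filling the table with other functions. A tiny generic illustration of that indirection (names here are illustrative, not from the driver):

/* A caller that only knows the ops table, not the backend behind it. */
struct loader_ops {
	int (*open)(const char *path, void **handle);
	void (*close)(void *handle);
};

static int open_via_table(const struct loader_ops *ops, const char *path,
			  void **handle)
{
	return ops->open(path, handle);	/* backend chosen by whoever filled ops */
}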
246 | |||
247 | /* | ||
248 | * ======== cod_delete ======== | ||
249 | * Purpose: | ||
250 | * Delete a code manager object. | ||
251 | */ | ||
252 | void cod_delete(struct cod_manager *cod_mgr_obj) | ||
253 | { | ||
254 | if (cod_mgr_obj->base_lib) { | ||
255 | if (cod_mgr_obj->loaded) | ||
256 | cod_mgr_obj->fxns.unload_fxn(cod_mgr_obj->base_lib, | ||
257 | &cod_mgr_obj->attrs); | ||
258 | |||
259 | cod_mgr_obj->fxns.close_fxn(cod_mgr_obj->base_lib); | ||
260 | } | ||
261 | if (cod_mgr_obj->target) { | ||
262 | cod_mgr_obj->fxns.delete_fxn(cod_mgr_obj->target); | ||
263 | cod_mgr_obj->fxns.exit_fxn(); | ||
264 | } | ||
265 | kfree(cod_mgr_obj); | ||
266 | } | ||
267 | |||
268 | /* | ||
269 | * ======== cod_get_base_lib ======== | ||
270 | * Purpose: | ||
271 | * Get handle to the base image DBL library. | ||
272 | */ | ||
273 | int cod_get_base_lib(struct cod_manager *cod_mgr_obj, | ||
274 | struct dbll_library_obj **plib) | ||
275 | { | ||
276 | int status = 0; | ||
277 | |||
278 | *plib = (struct dbll_library_obj *)cod_mgr_obj->base_lib; | ||
279 | |||
280 | return status; | ||
281 | } | ||
282 | |||
283 | /* | ||
284 | * ======== cod_get_base_name ======== | ||
285 | */ | ||
286 | int cod_get_base_name(struct cod_manager *cod_mgr_obj, char *sz_name, | ||
287 | u32 usize) | ||
288 | { | ||
289 | int status = 0; | ||
290 | |||
291 | if (usize <= COD_MAXPATHLENGTH) | ||
292 | strlcpy(sz_name, cod_mgr_obj->sz_zl_file, usize); | ||
293 | else | ||
294 | status = -EPERM; | ||
295 | |||
296 | return status; | ||
297 | } | ||
298 | |||
299 | /* | ||
300 | * ======== cod_get_entry ======== | ||
301 | * Purpose: | ||
302 | * Retrieve the entry point of a loaded DSP program image | ||
303 | * | ||
304 | */ | ||
305 | int cod_get_entry(struct cod_manager *cod_mgr_obj, u32 *entry_pt) | ||
306 | { | ||
307 | *entry_pt = cod_mgr_obj->entry; | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * ======== cod_get_loader ======== | ||
314 | * Purpose: | ||
315 | * Get handle to the DBLL loader. | ||
316 | */ | ||
317 | int cod_get_loader(struct cod_manager *cod_mgr_obj, | ||
318 | struct dbll_tar_obj **loader) | ||
319 | { | ||
320 | int status = 0; | ||
321 | |||
322 | *loader = (struct dbll_tar_obj *)cod_mgr_obj->target; | ||
323 | |||
324 | return status; | ||
325 | } | ||
326 | |||
327 | /* | ||
328 | * ======== cod_get_section ======== | ||
329 | * Purpose: | ||
330 | * Retrieve the starting address and length of a section in the COFF file | ||
331 | * given the section name. | ||
332 | */ | ||
333 | int cod_get_section(struct cod_libraryobj *lib, char *str_sect, | ||
334 | u32 *addr, u32 *len) | ||
335 | { | ||
336 | struct cod_manager *cod_mgr_obj; | ||
337 | int status = 0; | ||
338 | |||
339 | *addr = 0; | ||
340 | *len = 0; | ||
341 | if (lib != NULL) { | ||
342 | cod_mgr_obj = lib->cod_mgr; | ||
343 | status = cod_mgr_obj->fxns.get_sect_fxn(lib->dbll_lib, str_sect, | ||
344 | addr, len); | ||
345 | } else { | ||
346 | status = -ESPIPE; | ||
347 | } | ||
348 | |||
349 | return status; | ||
350 | } | ||
351 | |||
352 | /* | ||
353 | * ======== cod_get_sym_value ======== | ||
354 | * Purpose: | ||
355 | * Retrieve the value for the specified symbol. The symbol is first | ||
356 | * searched for literally and then, if not found, searched for as a | ||
357 | * C symbol. | ||
358 | * | ||
359 | */ | ||
360 | int cod_get_sym_value(struct cod_manager *cod_mgr_obj, char *str_sym, | ||
361 | u32 *pul_value) | ||
362 | { | ||
363 | struct dbll_sym_val *dbll_sym; | ||
364 | |||
365 | dev_dbg(bridge, "%s: cod_mgr_obj: %p str_sym: %s pul_value: %p\n", | ||
366 | __func__, cod_mgr_obj, str_sym, pul_value); | ||
367 | if (cod_mgr_obj->base_lib) { | ||
368 | if (!cod_mgr_obj->fxns. | ||
369 | get_addr_fxn(cod_mgr_obj->base_lib, str_sym, &dbll_sym)) { | ||
370 | if (!cod_mgr_obj->fxns. | ||
371 | get_c_addr_fxn(cod_mgr_obj->base_lib, str_sym, | ||
372 | &dbll_sym)) | ||
373 | return -ESPIPE; | ||
374 | } | ||
375 | } else { | ||
376 | return -ESPIPE; | ||
377 | } | ||
378 | |||
379 | *pul_value = dbll_sym->value; | ||
380 | |||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | /* | ||
385 | * ======== cod_load_base ======== | ||
386 | * Purpose: | ||
387 | * Load the initial program image, optionally with command-line arguments, | ||
388 | * on the DSP system managed by the supplied handle. The program to be | ||
389 | * loaded must be the first element of the args array and must be a fully | ||
390 | * qualified pathname. | ||
391 | * Details: | ||
392 | * If num_argc doesn't match the number of arguments in the args array, | ||
393 | * the args array is searched for a NULL terminating entry, and num_argc | ||
394 | * is recalculated to reflect this. In this way, NULL-terminated args | ||
395 | * arrays are supported even when num_argc is very large. | ||
396 | */ | ||
397 | int cod_load_base(struct cod_manager *cod_mgr_obj, u32 num_argc, char *args[], | ||
398 | cod_writefxn pfn_write, void *arb, char *envp[]) | ||
399 | { | ||
400 | dbll_flags flags; | ||
401 | struct dbll_attrs save_attrs; | ||
402 | struct dbll_attrs new_attrs; | ||
403 | int status; | ||
404 | u32 i; | ||
405 | |||
406 | /* | ||
407 | * Make sure every argv[] entry counted by num_argc has a value, or shrink | ||
408 | * num_argc to the true number of entries in the NULL-terminated argv array. | ||
409 | */ | ||
410 | for (i = 0; i < num_argc; i++) { | ||
411 | if (args[i] == NULL) { | ||
412 | num_argc = i; | ||
413 | break; | ||
414 | } | ||
415 | } | ||
416 | |||
417 | /* set the write function for this operation */ | ||
418 | cod_mgr_obj->fxns.get_attrs_fxn(cod_mgr_obj->target, &save_attrs); | ||
419 | |||
420 | new_attrs = save_attrs; | ||
421 | new_attrs.write = (dbll_write_fxn) pfn_write; | ||
422 | new_attrs.input_params = arb; | ||
423 | new_attrs.alloc = (dbll_alloc_fxn) no_op; | ||
424 | new_attrs.free = (dbll_free_fxn) no_op; | ||
425 | new_attrs.log_write = NULL; | ||
426 | new_attrs.log_write_handle = NULL; | ||
427 | |||
428 | /* Load the image */ | ||
429 | flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB; | ||
430 | status = cod_mgr_obj->fxns.load_fxn(cod_mgr_obj->base_lib, flags, | ||
431 | &new_attrs, | ||
432 | &cod_mgr_obj->entry); | ||
433 | if (status) | ||
434 | cod_mgr_obj->fxns.close_fxn(cod_mgr_obj->base_lib); | ||
435 | |||
436 | if (!status) | ||
437 | cod_mgr_obj->loaded = true; | ||
438 | else | ||
439 | cod_mgr_obj->base_lib = NULL; | ||
440 | |||
441 | return status; | ||
442 | } | ||
443 | |||
444 | /* | ||
445 | * ======== cod_open ======== | ||
446 | * Open library for reading sections. | ||
447 | */ | ||
448 | int cod_open(struct cod_manager *hmgr, char *sz_coff_path, | ||
449 | u32 flags, struct cod_libraryobj **lib_obj) | ||
450 | { | ||
451 | int status = 0; | ||
452 | struct cod_libraryobj *lib = NULL; | ||
453 | |||
454 | *lib_obj = NULL; | ||
455 | |||
456 | lib = kzalloc(sizeof(struct cod_libraryobj), GFP_KERNEL); | ||
457 | if (lib == NULL) | ||
458 | status = -ENOMEM; | ||
459 | |||
460 | if (!status) { | ||
461 | lib->cod_mgr = hmgr; | ||
462 | status = hmgr->fxns.open_fxn(hmgr->target, sz_coff_path, flags, | ||
463 | &lib->dbll_lib); | ||
464 | if (!status) | ||
465 | *lib_obj = lib; | ||
466 | } | ||
467 | |||
468 | if (status) | ||
469 | pr_err("%s: error status 0x%x, sz_coff_path: %s flags: 0x%x\n", | ||
470 | __func__, status, sz_coff_path, flags); | ||
471 | return status; | ||
472 | } | ||
473 | |||
474 | /* | ||
475 | * ======== cod_open_base ======== | ||
476 | * Purpose: | ||
477 | * Open base image for reading sections. | ||
478 | */ | ||
479 | int cod_open_base(struct cod_manager *hmgr, char *sz_coff_path, | ||
480 | dbll_flags flags) | ||
481 | { | ||
482 | int status = 0; | ||
483 | struct dbll_library_obj *lib; | ||
484 | |||
485 | /* if we previously opened a base image, close it now */ | ||
486 | if (hmgr->base_lib) { | ||
487 | if (hmgr->loaded) { | ||
488 | hmgr->fxns.unload_fxn(hmgr->base_lib, &hmgr->attrs); | ||
489 | hmgr->loaded = false; | ||
490 | } | ||
491 | hmgr->fxns.close_fxn(hmgr->base_lib); | ||
492 | hmgr->base_lib = NULL; | ||
493 | } | ||
494 | status = hmgr->fxns.open_fxn(hmgr->target, sz_coff_path, flags, &lib); | ||
495 | if (!status) { | ||
496 | /* hang onto the library for subsequent sym table usage */ | ||
497 | hmgr->base_lib = lib; | ||
498 | strncpy(hmgr->sz_zl_file, sz_coff_path, COD_MAXPATHLENGTH - 1); | ||
499 | hmgr->sz_zl_file[COD_MAXPATHLENGTH - 1] = '\0'; | ||
500 | } | ||
501 | |||
502 | if (status) | ||
503 | pr_err("%s: error status 0x%x sz_coff_path: %s\n", __func__, | ||
504 | status, sz_coff_path); | ||
505 | return status; | ||
506 | } | ||
507 | |||
508 | /* | ||
509 | * ======== cod_read_section ======== | ||
510 | * Purpose: | ||
511 | * Retrieve the content of a code section given the section name. | ||
512 | */ | ||
513 | int cod_read_section(struct cod_libraryobj *lib, char *str_sect, | ||
514 | char *str_content, u32 content_size) | ||
515 | { | ||
516 | int status = 0; | ||
517 | |||
518 | if (lib != NULL) | ||
519 | status = | ||
520 | lib->cod_mgr->fxns.read_sect_fxn(lib->dbll_lib, str_sect, | ||
521 | str_content, content_size); | ||
522 | else | ||
523 | status = -ESPIPE; | ||
524 | |||
525 | return status; | ||
526 | } | ||
527 | |||
528 | /* | ||
529 | * ======== no_op ======== | ||
530 | * Purpose: | ||
531 | * No Operation. | ||
532 | * | ||
533 | */ | ||
534 | static bool no_op(void) | ||
535 | { | ||
536 | return true; | ||
537 | } | ||
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c deleted file mode 100644 index 8e21d1e47c9c..000000000000 --- a/drivers/staging/tidspbridge/pmgr/dbll.c +++ /dev/null | |||
@@ -1,1421 +0,0 @@ | |||
1 | /* | ||
2 | * dbll.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | #include <linux/types.h> | ||
17 | |||
18 | /* ----------------------------------- Host OS */ | ||
19 | #include <dspbridge/host_os.h> | ||
20 | |||
21 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
22 | #include <dspbridge/dbdefs.h> | ||
23 | |||
24 | #include <dspbridge/gh.h> | ||
25 | |||
26 | /* ----------------------------------- OS Adaptation Layer */ | ||
27 | |||
28 | /* Dynamic loader library interface */ | ||
29 | #include <dspbridge/dynamic_loader.h> | ||
30 | #include <dspbridge/getsection.h> | ||
31 | |||
32 | /* ----------------------------------- This */ | ||
33 | #include <dspbridge/dbll.h> | ||
34 | #include <dspbridge/rmm.h> | ||
35 | |||
36 | /* Max buffer length */ | ||
37 | #define MAXEXPR 128 | ||
38 | |||
39 | #define DOFF_ALIGN(x) (((x) + 3) & ~3UL) | ||
40 | |||
41 | /* | ||
42 | * ======== struct dbll_tar_obj* ======== | ||
43 | * A target may have one or more libraries of symbols/code/data loaded | ||
44 | * onto it, where a library is simply the symbols/code/data contained | ||
45 | * in a DOFF file. | ||
46 | */ | ||
47 | /* | ||
48 | * ======== dbll_tar_obj ======== | ||
49 | */ | ||
50 | struct dbll_tar_obj { | ||
51 | struct dbll_attrs attrs; | ||
52 | struct dbll_library_obj *head; /* List of all opened libraries */ | ||
53 | }; | ||
54 | |||
55 | /* | ||
56 | * The following 4 typedefs are "super classes" of the dynamic loader | ||
57 | * library types used in dynamic loader functions (dynamic_loader.h). | ||
58 | */ | ||
59 | /* | ||
60 | * ======== dbll_stream ======== | ||
61 | * Contains dynamic_loader_stream | ||
62 | */ | ||
63 | struct dbll_stream { | ||
64 | struct dynamic_loader_stream dl_stream; | ||
65 | struct dbll_library_obj *lib; | ||
66 | }; | ||
67 | |||
68 | /* | ||
69 | * ======== ldr_symbol ======== | ||
70 | */ | ||
71 | struct ldr_symbol { | ||
72 | struct dynamic_loader_sym dl_symbol; | ||
73 | struct dbll_library_obj *lib; | ||
74 | }; | ||
75 | |||
76 | /* | ||
77 | * ======== dbll_alloc ======== | ||
78 | */ | ||
79 | struct dbll_alloc { | ||
80 | struct dynamic_loader_allocate dl_alloc; | ||
81 | struct dbll_library_obj *lib; | ||
82 | }; | ||
83 | |||
84 | /* | ||
85 | * ======== dbll_init_obj ======== | ||
86 | */ | ||
87 | struct dbll_init_obj { | ||
88 | struct dynamic_loader_initialize dl_init; | ||
89 | struct dbll_library_obj *lib; | ||
90 | }; | ||
91 | |||
92 | /* | ||
93 | * ======== DBLL_Library ======== | ||
94 | * A library handle is returned by DBLL_Open() and is passed to dbll_load() | ||
95 | * to load symbols/code/data, and to dbll_unload(), to remove the | ||
96 | * symbols/code/data loaded by dbll_load(). | ||
97 | */ | ||
98 | |||
99 | /* | ||
100 | * ======== dbll_library_obj ======== | ||
101 | */ | ||
102 | struct dbll_library_obj { | ||
103 | struct dbll_library_obj *next; /* Next library in target's list */ | ||
104 | struct dbll_library_obj *prev; /* Previous in the list */ | ||
105 | struct dbll_tar_obj *target_obj; /* target for this library */ | ||
106 | |||
107 | /* Objects needed by dynamic loader */ | ||
108 | struct dbll_stream stream; | ||
109 | struct ldr_symbol symbol; | ||
110 | struct dbll_alloc allocate; | ||
111 | struct dbll_init_obj init; | ||
112 | void *dload_mod_obj; | ||
113 | |||
114 | char *file_name; /* COFF file name */ | ||
115 | void *fp; /* Opaque file handle */ | ||
116 | u32 entry; /* Entry point */ | ||
117 | void *desc; /* desc of DOFF file loaded */ | ||
118 | u32 open_ref; /* Number of times opened */ | ||
119 | u32 load_ref; /* Number of times loaded */ | ||
120 | struct gh_t_hash_tab *sym_tab; /* Hash table of symbols */ | ||
121 | u32 pos; | ||
122 | }; | ||
123 | |||
124 | /* | ||
125 | * ======== dbll_symbol ======== | ||
126 | */ | ||
127 | struct dbll_symbol { | ||
128 | struct dbll_sym_val value; | ||
129 | char *name; | ||
130 | }; | ||
131 | |||
132 | static void dof_close(struct dbll_library_obj *zl_lib); | ||
133 | static int dof_open(struct dbll_library_obj *zl_lib); | ||
134 | static s32 no_op(struct dynamic_loader_initialize *thisptr, void *bufr, | ||
135 | ldr_addr locn, struct ldr_section_info *info, | ||
136 | unsigned bytsize); | ||
137 | |||
138 | /* | ||
139 | * Functions called by dynamic loader | ||
140 | * | ||
141 | */ | ||
142 | /* dynamic_loader_stream */ | ||
143 | static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer, | ||
144 | unsigned bufsize); | ||
145 | static int dbll_set_file_posn(struct dynamic_loader_stream *this, | ||
146 | unsigned int pos); | ||
147 | /* dynamic_loader_sym */ | ||
148 | static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this, | ||
149 | const char *name); | ||
150 | static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym | ||
151 | *this, const char *name, | ||
152 | unsigned module_id); | ||
153 | static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym | ||
154 | *this, const char *name, | ||
155 | unsigned moduleid); | ||
156 | static void dbll_purge_symbol_table(struct dynamic_loader_sym *this, | ||
157 | unsigned module_id); | ||
158 | static void *allocate(struct dynamic_loader_sym *this, unsigned memsize); | ||
159 | static void deallocate(struct dynamic_loader_sym *this, void *mem_ptr); | ||
160 | static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr, | ||
161 | va_list args); | ||
162 | /* dynamic_loader_allocate */ | ||
163 | static int dbll_rmm_alloc(struct dynamic_loader_allocate *this, | ||
164 | struct ldr_section_info *info, unsigned align); | ||
165 | static void rmm_dealloc(struct dynamic_loader_allocate *this, | ||
166 | struct ldr_section_info *info); | ||
167 | |||
168 | /* dynamic_loader_initialize */ | ||
169 | static int connect(struct dynamic_loader_initialize *this); | ||
170 | static int read_mem(struct dynamic_loader_initialize *this, void *buf, | ||
171 | ldr_addr addr, struct ldr_section_info *info, | ||
172 | unsigned bytes); | ||
173 | static int write_mem(struct dynamic_loader_initialize *this, void *buf, | ||
174 | ldr_addr addr, struct ldr_section_info *info, | ||
175 | unsigned nbytes); | ||
176 | static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr, | ||
177 | struct ldr_section_info *info, unsigned bytes, | ||
178 | unsigned val); | ||
179 | static int execute(struct dynamic_loader_initialize *this, ldr_addr start); | ||
180 | static void release(struct dynamic_loader_initialize *this); | ||
181 | |||
182 | /* symbol table hash functions */ | ||
183 | static u32 name_hash(const void *key); | ||
184 | static bool name_match(const void *key, const void *sp); | ||
185 | static void sym_delete(void *value); | ||
186 | |||
187 | /* Symbol Redefinition */ | ||
188 | static int redefined_symbol; | ||
189 | static int gbl_search = 1; | ||
190 | |||
191 | /* | ||
192 | * ======== dbll_close ======== | ||
193 | */ | ||
194 | void dbll_close(struct dbll_library_obj *zl_lib) | ||
195 | { | ||
196 | struct dbll_tar_obj *zl_target; | ||
197 | |||
198 | zl_target = zl_lib->target_obj; | ||
199 | zl_lib->open_ref--; | ||
200 | if (zl_lib->open_ref == 0) { | ||
201 | /* Remove library from list */ | ||
202 | if (zl_target->head == zl_lib) | ||
203 | zl_target->head = zl_lib->next; | ||
204 | |||
205 | if (zl_lib->prev) | ||
206 | (zl_lib->prev)->next = zl_lib->next; | ||
207 | |||
208 | if (zl_lib->next) | ||
209 | (zl_lib->next)->prev = zl_lib->prev; | ||
210 | |||
211 | /* Free DOF resources */ | ||
212 | dof_close(zl_lib); | ||
213 | kfree(zl_lib->file_name); | ||
214 | |||
215 | /* remove symbols from symbol table */ | ||
216 | if (zl_lib->sym_tab) | ||
217 | gh_delete(zl_lib->sym_tab); | ||
218 | |||
219 | /* remove the library object itself */ | ||
220 | kfree(zl_lib); | ||
221 | zl_lib = NULL; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | /* | ||
226 | * ======== dbll_create ======== | ||
227 | */ | ||
228 | int dbll_create(struct dbll_tar_obj **target_obj, | ||
229 | struct dbll_attrs *pattrs) | ||
230 | { | ||
231 | struct dbll_tar_obj *pzl_target; | ||
232 | int status = 0; | ||
233 | |||
234 | /* Allocate DBL target object */ | ||
235 | pzl_target = kzalloc(sizeof(struct dbll_tar_obj), GFP_KERNEL); | ||
236 | if (target_obj != NULL) { | ||
237 | if (pzl_target == NULL) { | ||
238 | *target_obj = NULL; | ||
239 | status = -ENOMEM; | ||
240 | } else { | ||
241 | pzl_target->attrs = *pattrs; | ||
242 | *target_obj = (struct dbll_tar_obj *)pzl_target; | ||
243 | } | ||
244 | } | ||
245 | |||
246 | return status; | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * ======== dbll_delete ======== | ||
251 | */ | ||
252 | void dbll_delete(struct dbll_tar_obj *target) | ||
253 | { | ||
254 | struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target; | ||
255 | |||
256 | kfree(zl_target); | ||
257 | |||
258 | } | ||
259 | |||
260 | /* | ||
261 | * ======== dbll_exit ======== | ||
262 | * Discontinue usage of DBL module. | ||
263 | */ | ||
264 | void dbll_exit(void) | ||
265 | { | ||
266 | /* do nothing */ | ||
267 | } | ||
268 | |||
269 | /* | ||
270 | * ======== dbll_get_addr ======== | ||
271 | * Get address of name in the specified library. | ||
272 | */ | ||
273 | bool dbll_get_addr(struct dbll_library_obj *zl_lib, char *name, | ||
274 | struct dbll_sym_val **sym_val) | ||
275 | { | ||
276 | struct dbll_symbol *sym; | ||
277 | |||
278 | sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, name); | ||
279 | if (IS_ERR(sym)) | ||
280 | return false; | ||
281 | |||
282 | *sym_val = &sym->value; | ||
283 | |||
284 | dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p\n", | ||
285 | __func__, zl_lib, name, sym_val); | ||
286 | return true; | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * ======== dbll_get_attrs ======== | ||
291 | * Retrieve the attributes of the target. | ||
292 | */ | ||
293 | void dbll_get_attrs(struct dbll_tar_obj *target, struct dbll_attrs *pattrs) | ||
294 | { | ||
295 | struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target; | ||
296 | |||
297 | if ((pattrs != NULL) && (zl_target != NULL)) | ||
298 | *pattrs = zl_target->attrs; | ||
299 | |||
300 | } | ||
301 | |||
302 | /* | ||
303 | * ======== dbll_get_c_addr ======== | ||
304 | * Get address of a "C" name in the specified library. | ||
305 | */ | ||
306 | bool dbll_get_c_addr(struct dbll_library_obj *zl_lib, char *name, | ||
307 | struct dbll_sym_val **sym_val) | ||
308 | { | ||
309 | struct dbll_symbol *sym; | ||
310 | char cname[MAXEXPR + 1]; | ||
311 | |||
312 | cname[0] = '_'; | ||
313 | |||
314 | strncpy(cname + 1, name, sizeof(cname) - 2); | ||
315 | cname[MAXEXPR] = '\0'; /* ensure '\0' string termination */ | ||
316 | |||
317 | /* Check for C name, if not found */ | ||
318 | sym = (struct dbll_symbol *)gh_find(zl_lib->sym_tab, cname); | ||
319 | if (IS_ERR(sym)) | ||
320 | return false; | ||
321 | |||
322 | *sym_val = &sym->value; | ||
323 | |||
324 | return true; | ||
325 | } | ||
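dbll_get_c_addr() simply retries the lookup with a leading underscore, the form under which C symbols appear in the symbol table. The name construction on its own, assuming the same fixed-size buffer:

/* Build the "C" form of a symbol name: "_name", always NUL-terminated. */
static void make_c_name(char cname[MAXEXPR + 1], const char *name)
{
	cname[0] = '_';
	strncpy(cname + 1, name, MAXEXPR - 1);
	cname[MAXEXPR] = '\0';
}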
326 | |||
327 | /* | ||
328 | * ======== dbll_get_sect ======== | ||
329 | * Get the base address and size (in bytes) of a COFF section. | ||
330 | */ | ||
331 | int dbll_get_sect(struct dbll_library_obj *lib, char *name, u32 *paddr, | ||
332 | u32 *psize) | ||
333 | { | ||
334 | u32 byte_size; | ||
335 | bool opened_doff = false; | ||
336 | const struct ldr_section_info *sect = NULL; | ||
337 | struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib; | ||
338 | int status = 0; | ||
339 | |||
340 | /* If DOFF file is not open, we open it. */ | ||
341 | if (zl_lib != NULL) { | ||
342 | if (zl_lib->fp == NULL) { | ||
343 | status = dof_open(zl_lib); | ||
344 | if (!status) | ||
345 | opened_doff = true; | ||
346 | |||
347 | } else { | ||
348 | (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, | ||
349 | zl_lib->pos, | ||
350 | SEEK_SET); | ||
351 | } | ||
352 | } else { | ||
353 | status = -EFAULT; | ||
354 | } | ||
355 | if (!status) { | ||
356 | byte_size = 1; | ||
357 | if (dload_get_section_info(zl_lib->desc, name, §)) { | ||
358 | *paddr = sect->load_addr; | ||
359 | *psize = sect->size * byte_size; | ||
360 | /* Make sure size is even for good swap */ | ||
361 | if (*psize % 2) | ||
362 | (*psize)++; | ||
363 | |||
364 | /* Align size */ | ||
365 | *psize = DOFF_ALIGN(*psize); | ||
366 | } else { | ||
367 | status = -ENXIO; | ||
368 | } | ||
369 | } | ||
370 | if (opened_doff) { | ||
371 | dof_close(zl_lib); | ||
372 | opened_doff = false; | ||
373 | } | ||
374 | |||
375 | dev_dbg(bridge, "%s: lib: %p name: %s paddr: %p psize: %p, status 0x%x\n", | ||
376 | __func__, lib, name, paddr, psize, status); | ||
377 | |||
378 | return status; | ||
379 | } | ||
380 | |||
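A brief sketch (not in the original file) of querying a COFF section with
dbll_get_sect(); the section name used here is made up for illustration, and
the declarations from this file are assumed to be in scope.

	static int example_query_section(struct dbll_library_obj *lib)
	{
		u32 addr, size;
		int status;

		/* ".dspbridge_trace" is a hypothetical section name */
		status = dbll_get_sect(lib, ".dspbridge_trace", &addr, &size);
		if (!status)
			pr_info("section at DSP address 0x%x, %u bytes (padded)\n",
				addr, size);
		return status;
	}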
381 | /* | ||
382 | * ======== dbll_init ======== | ||
383 | */ | ||
384 | bool dbll_init(void) | ||
385 | { | ||
386 | /* do nothing */ | ||
387 | |||
388 | return true; | ||
389 | } | ||
390 | |||
391 | /* | ||
392 | * ======== dbll_load ======== | ||
393 | */ | ||
394 | int dbll_load(struct dbll_library_obj *lib, dbll_flags flags, | ||
395 | struct dbll_attrs *attrs, u32 *entry) | ||
396 | { | ||
397 | struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib; | ||
398 | struct dbll_tar_obj *dbzl; | ||
399 | bool got_symbols = true; | ||
400 | s32 err; | ||
401 | int status = 0; | ||
402 | bool opened_doff = false; | ||
403 | |||
404 | /* | ||
405 | * Load if not already loaded. | ||
406 | */ | ||
407 | if (zl_lib->load_ref == 0 || !(flags & DBLL_DYNAMIC)) { | ||
408 | dbzl = zl_lib->target_obj; | ||
409 | dbzl->attrs = *attrs; | ||
410 | /* Create a hash table for symbols if not already created */ | ||
411 | if (zl_lib->sym_tab == NULL) { | ||
412 | got_symbols = false; | ||
413 | zl_lib->sym_tab = gh_create(sizeof(struct dbll_symbol), | ||
414 | name_hash, | ||
415 | name_match, sym_delete); | ||
416 | if (IS_ERR(zl_lib->sym_tab)) { | ||
417 | status = PTR_ERR(zl_lib->sym_tab); | ||
418 | zl_lib->sym_tab = NULL; | ||
419 | } | ||
420 | |||
421 | } | ||
422 | /* | ||
423 | * Set up objects needed by the dynamic loader | ||
424 | */ | ||
425 | /* Stream */ | ||
426 | zl_lib->stream.dl_stream.read_buffer = dbll_read_buffer; | ||
427 | zl_lib->stream.dl_stream.set_file_posn = dbll_set_file_posn; | ||
428 | zl_lib->stream.lib = zl_lib; | ||
429 | /* Symbol */ | ||
430 | zl_lib->symbol.dl_symbol.find_matching_symbol = | ||
431 | dbll_find_symbol; | ||
432 | if (got_symbols) { | ||
433 | zl_lib->symbol.dl_symbol.add_to_symbol_table = | ||
434 | find_in_symbol_table; | ||
435 | } else { | ||
436 | zl_lib->symbol.dl_symbol.add_to_symbol_table = | ||
437 | dbll_add_to_symbol_table; | ||
438 | } | ||
439 | zl_lib->symbol.dl_symbol.purge_symbol_table = | ||
440 | dbll_purge_symbol_table; | ||
441 | zl_lib->symbol.dl_symbol.dload_allocate = allocate; | ||
442 | zl_lib->symbol.dl_symbol.dload_deallocate = deallocate; | ||
443 | zl_lib->symbol.dl_symbol.error_report = dbll_err_report; | ||
444 | zl_lib->symbol.lib = zl_lib; | ||
445 | /* Allocate */ | ||
446 | zl_lib->allocate.dl_alloc.dload_allocate = dbll_rmm_alloc; | ||
447 | zl_lib->allocate.dl_alloc.dload_deallocate = rmm_dealloc; | ||
448 | zl_lib->allocate.lib = zl_lib; | ||
449 | /* Init */ | ||
450 | zl_lib->init.dl_init.connect = connect; | ||
451 | zl_lib->init.dl_init.readmem = read_mem; | ||
452 | zl_lib->init.dl_init.writemem = write_mem; | ||
453 | zl_lib->init.dl_init.fillmem = fill_mem; | ||
454 | zl_lib->init.dl_init.execute = execute; | ||
455 | zl_lib->init.dl_init.release = release; | ||
456 | zl_lib->init.lib = zl_lib; | ||
457 | /* If COFF file is not open, we open it. */ | ||
458 | if (zl_lib->fp == NULL) { | ||
459 | status = dof_open(zl_lib); | ||
460 | if (!status) | ||
461 | opened_doff = true; | ||
462 | |||
463 | } | ||
464 | if (!status) { | ||
465 | zl_lib->pos = (*(zl_lib->target_obj->attrs.ftell)) | ||
466 | (zl_lib->fp); | ||
467 | /* Reset file cursor */ | ||
468 | (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, | ||
469 | (long)0, | ||
470 | SEEK_SET); | ||
471 | symbols_reloaded = true; | ||
472 | /* The 5th argument, DLOAD_INITBSS, tells the DLL | ||
473 | * module to zero-init all BSS sections. In general, | ||
474 | * this is not necessary and also increases load time. | ||
475 | * We may want to make this configurable by the user */ | ||
476 | err = dynamic_load_module(&zl_lib->stream.dl_stream, | ||
477 | &zl_lib->symbol.dl_symbol, | ||
478 | &zl_lib->allocate.dl_alloc, | ||
479 | &zl_lib->init.dl_init, | ||
480 | DLOAD_INITBSS, | ||
481 | &zl_lib->dload_mod_obj); | ||
482 | |||
483 | if (err != 0) { | ||
484 | status = -EILSEQ; | ||
485 | } else if (redefined_symbol) { | ||
486 | zl_lib->load_ref++; | ||
487 | dbll_unload(zl_lib, (struct dbll_attrs *)attrs); | ||
488 | redefined_symbol = false; | ||
489 | status = -EILSEQ; | ||
490 | } else { | ||
491 | *entry = zl_lib->entry; | ||
492 | } | ||
493 | } | ||
494 | } | ||
495 | if (!status) | ||
496 | zl_lib->load_ref++; | ||
497 | |||
498 | /* Clean up DOFF resources */ | ||
499 | if (opened_doff) | ||
500 | dof_close(zl_lib); | ||
501 | |||
502 | dev_dbg(bridge, "%s: lib: %p flags: 0x%x entry: %p, status 0x%x\n", | ||
503 | __func__, lib, flags, entry, status); | ||
504 | |||
505 | return status; | ||
506 | } | ||
507 | |||
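A minimal end-to-end sketch of the loader API declared in this file (not part
of the original source). The image path and the flag choice are illustrative
only, and the dbll_attrs structure is assumed to have been filled in by the
caller with its platform's file, allocation and write callbacks; dbll_close()
is the counterpart of dbll_open() defined earlier in this file.

	static int example_boot_dsp_image(struct dbll_attrs *attrs, u32 *entry)
	{
		struct dbll_tar_obj *target;
		struct dbll_library_obj *lib;
		int status;

		status = dbll_create(&target, attrs);
		if (status)
			return status;

		/* DBLL_SYMB asks dbll_open() to build the symbol table up front */
		status = dbll_open(target, "/lib/dsp/baseimage.dof", DBLL_SYMB, &lib);
		if (!status) {
			status = dbll_load(lib, DBLL_SYMB, attrs, entry);
			if (!status) {
				/* ... start the DSP at *entry, run, then shut down ... */
				dbll_unload(lib, attrs);
			}
			dbll_close(lib);
		}
		dbll_delete(target);
		return status;
	}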
508 | /* | ||
509 | * ======== dbll_open ======== | ||
510 | */ | ||
511 | int dbll_open(struct dbll_tar_obj *target, char *file, dbll_flags flags, | ||
512 | struct dbll_library_obj **lib_obj) | ||
513 | { | ||
514 | struct dbll_tar_obj *zl_target = (struct dbll_tar_obj *)target; | ||
515 | struct dbll_library_obj *zl_lib = NULL; | ||
516 | s32 err; | ||
517 | int status = 0; | ||
518 | |||
519 | zl_lib = zl_target->head; | ||
520 | while (zl_lib != NULL) { | ||
521 | if (strcmp(zl_lib->file_name, file) == 0) { | ||
522 | /* Library is already opened */ | ||
523 | zl_lib->open_ref++; | ||
524 | break; | ||
525 | } | ||
526 | zl_lib = zl_lib->next; | ||
527 | } | ||
528 | if (zl_lib == NULL) { | ||
529 | /* Allocate DBL library object */ | ||
530 | zl_lib = kzalloc(sizeof(struct dbll_library_obj), GFP_KERNEL); | ||
531 | if (zl_lib == NULL) { | ||
532 | status = -ENOMEM; | ||
533 | } else { | ||
534 | zl_lib->pos = 0; | ||
535 | /* Increment ref count to allow close on failure | ||
536 | * later on */ | ||
537 | zl_lib->open_ref++; | ||
538 | zl_lib->target_obj = zl_target; | ||
539 | /* Keep a copy of the file name */ | ||
540 | zl_lib->file_name = kzalloc(strlen(file) + 1, | ||
541 | GFP_KERNEL); | ||
542 | if (zl_lib->file_name == NULL) { | ||
543 | status = -ENOMEM; | ||
544 | } else { | ||
545 | strncpy(zl_lib->file_name, file, | ||
546 | strlen(file) + 1); | ||
547 | } | ||
548 | zl_lib->sym_tab = NULL; | ||
549 | } | ||
550 | } | ||
551 | /* | ||
552 | * Set up objects needed by the dynamic loader | ||
553 | */ | ||
554 | if (status) | ||
555 | goto func_cont; | ||
556 | |||
557 | /* Stream */ | ||
558 | zl_lib->stream.dl_stream.read_buffer = dbll_read_buffer; | ||
559 | zl_lib->stream.dl_stream.set_file_posn = dbll_set_file_posn; | ||
560 | zl_lib->stream.lib = zl_lib; | ||
561 | /* Symbol */ | ||
562 | zl_lib->symbol.dl_symbol.add_to_symbol_table = dbll_add_to_symbol_table; | ||
563 | zl_lib->symbol.dl_symbol.find_matching_symbol = dbll_find_symbol; | ||
564 | zl_lib->symbol.dl_symbol.purge_symbol_table = dbll_purge_symbol_table; | ||
565 | zl_lib->symbol.dl_symbol.dload_allocate = allocate; | ||
566 | zl_lib->symbol.dl_symbol.dload_deallocate = deallocate; | ||
567 | zl_lib->symbol.dl_symbol.error_report = dbll_err_report; | ||
568 | zl_lib->symbol.lib = zl_lib; | ||
569 | /* Allocate */ | ||
570 | zl_lib->allocate.dl_alloc.dload_allocate = dbll_rmm_alloc; | ||
571 | zl_lib->allocate.dl_alloc.dload_deallocate = rmm_dealloc; | ||
572 | zl_lib->allocate.lib = zl_lib; | ||
573 | /* Init */ | ||
574 | zl_lib->init.dl_init.connect = connect; | ||
575 | zl_lib->init.dl_init.readmem = read_mem; | ||
576 | zl_lib->init.dl_init.writemem = write_mem; | ||
577 | zl_lib->init.dl_init.fillmem = fill_mem; | ||
578 | zl_lib->init.dl_init.execute = execute; | ||
579 | zl_lib->init.dl_init.release = release; | ||
580 | zl_lib->init.lib = zl_lib; | ||
581 | if (!status && zl_lib->fp == NULL) | ||
582 | status = dof_open(zl_lib); | ||
583 | |||
584 | zl_lib->pos = (*(zl_lib->target_obj->attrs.ftell)) (zl_lib->fp); | ||
585 | (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, SEEK_SET); | ||
586 | /* Create a hash table for symbols if flag is set */ | ||
587 | if (zl_lib->sym_tab != NULL || !(flags & DBLL_SYMB)) | ||
588 | goto func_cont; | ||
589 | |||
590 | zl_lib->sym_tab = | ||
591 | gh_create(sizeof(struct dbll_symbol), name_hash, name_match, | ||
592 | sym_delete); | ||
593 | if (IS_ERR(zl_lib->sym_tab)) { | ||
594 | status = PTR_ERR(zl_lib->sym_tab); | ||
595 | zl_lib->sym_tab = NULL; | ||
596 | } else { | ||
597 | /* Do a fake load to get symbols - set write func to no_op */ | ||
598 | zl_lib->init.dl_init.writemem = no_op; | ||
599 | err = dynamic_open_module(&zl_lib->stream.dl_stream, | ||
600 | &zl_lib->symbol.dl_symbol, | ||
601 | &zl_lib->allocate.dl_alloc, | ||
602 | &zl_lib->init.dl_init, 0, | ||
603 | &zl_lib->dload_mod_obj); | ||
604 | if (err != 0) { | ||
605 | status = -EILSEQ; | ||
606 | } else { | ||
607 | /* Now that we have the symbol table, we can unload */ | ||
608 | err = dynamic_unload_module(zl_lib->dload_mod_obj, | ||
609 | &zl_lib->symbol.dl_symbol, | ||
610 | &zl_lib->allocate.dl_alloc, | ||
611 | &zl_lib->init.dl_init); | ||
612 | if (err != 0) | ||
613 | status = -EILSEQ; | ||
614 | |||
615 | zl_lib->dload_mod_obj = NULL; | ||
616 | } | ||
617 | } | ||
618 | func_cont: | ||
619 | if (!status) { | ||
620 | if (zl_lib->open_ref == 1) { | ||
621 | /* First time opened - insert in list */ | ||
622 | if (zl_target->head) | ||
623 | (zl_target->head)->prev = zl_lib; | ||
624 | |||
625 | zl_lib->prev = NULL; | ||
626 | zl_lib->next = zl_target->head; | ||
627 | zl_target->head = zl_lib; | ||
628 | } | ||
629 | *lib_obj = (struct dbll_library_obj *)zl_lib; | ||
630 | } else { | ||
631 | *lib_obj = NULL; | ||
632 | if (zl_lib != NULL) | ||
633 | dbll_close((struct dbll_library_obj *)zl_lib); | ||
634 | |||
635 | } | ||
636 | |||
637 | dev_dbg(bridge, "%s: target: %p file: %s lib_obj: %p, status 0x%x\n", | ||
638 | __func__, target, file, lib_obj, status); | ||
639 | |||
640 | return status; | ||
641 | } | ||
642 | |||
643 | /* | ||
644 | * ======== dbll_read_sect ======== | ||
645 | * Get the content of a COFF section. | ||
646 | */ | ||
647 | int dbll_read_sect(struct dbll_library_obj *lib, char *name, | ||
648 | char *buf, u32 size) | ||
649 | { | ||
650 | struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib; | ||
651 | bool opened_doff = false; | ||
652 | u32 byte_size; /* size of bytes */ | ||
653 | u32 ul_sect_size; /* size of section */ | ||
654 | const struct ldr_section_info *sect = NULL; | ||
655 | int status = 0; | ||
656 | |||
657 | /* If DOFF file is not open, we open it. */ | ||
658 | if (zl_lib != NULL) { | ||
659 | if (zl_lib->fp == NULL) { | ||
660 | status = dof_open(zl_lib); | ||
661 | if (!status) | ||
662 | opened_doff = true; | ||
663 | |||
664 | } else { | ||
665 | (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, | ||
666 | zl_lib->pos, | ||
667 | SEEK_SET); | ||
668 | } | ||
669 | } else { | ||
670 | status = -EFAULT; | ||
671 | } | ||
672 | if (status) | ||
673 | goto func_cont; | ||
674 | |||
675 | byte_size = 1; | ||
676 | if (!dload_get_section_info(zl_lib->desc, name, §)) { | ||
677 | status = -ENXIO; | ||
678 | goto func_cont; | ||
679 | } | ||
680 | /* | ||
681 | * Ensure the supplied buffer size is sufficient to store | ||
682 | * the section data to be read. | ||
683 | */ | ||
684 | ul_sect_size = sect->size * byte_size; | ||
685 | /* Make sure size is even for good swap */ | ||
686 | if (ul_sect_size % 2) | ||
687 | ul_sect_size++; | ||
688 | |||
689 | /* Align size */ | ||
690 | ul_sect_size = DOFF_ALIGN(ul_sect_size); | ||
691 | if (ul_sect_size > size) { | ||
692 | status = -EPERM; | ||
693 | } else { | ||
694 | if (!dload_get_section(zl_lib->desc, sect, buf)) | ||
695 | status = -EBADF; | ||
696 | |||
697 | } | ||
698 | func_cont: | ||
699 | if (opened_doff) { | ||
700 | dof_close(zl_lib); | ||
701 | opened_doff = false; | ||
702 | } | ||
703 | |||
704 | dev_dbg(bridge, "%s: lib: %p name: %s buf: %p size: 0x%x, status 0x%x\n", | ||
705 | __func__, lib, name, buf, size, status); | ||
706 | return status; | ||
707 | } | ||
708 | |||
709 | /* | ||
710 | * ======== dbll_unload ======== | ||
711 | */ | ||
712 | void dbll_unload(struct dbll_library_obj *lib, struct dbll_attrs *attrs) | ||
713 | { | ||
714 | struct dbll_library_obj *zl_lib = (struct dbll_library_obj *)lib; | ||
715 | s32 err = 0; | ||
716 | |||
717 | dev_dbg(bridge, "%s: lib: %p\n", __func__, lib); | ||
718 | zl_lib->load_ref--; | ||
719 | /* Unload only if reference count is 0 */ | ||
720 | if (zl_lib->load_ref != 0) | ||
721 | return; | ||
722 | |||
723 | zl_lib->target_obj->attrs = *attrs; | ||
724 | if (zl_lib->dload_mod_obj) { | ||
725 | err = dynamic_unload_module(zl_lib->dload_mod_obj, | ||
726 | &zl_lib->symbol.dl_symbol, | ||
727 | &zl_lib->allocate.dl_alloc, | ||
728 | &zl_lib->init.dl_init); | ||
729 | if (err != 0) | ||
730 | dev_dbg(bridge, "%s: failed: 0x%x\n", __func__, err); | ||
731 | } | ||
732 | /* remove symbols from symbol table */ | ||
733 | if (zl_lib->sym_tab != NULL) { | ||
734 | gh_delete(zl_lib->sym_tab); | ||
735 | zl_lib->sym_tab = NULL; | ||
736 | } | ||
737 | /* delete DOFF desc since it holds *lots* of host OS | ||
738 | * resources */ | ||
739 | dof_close(zl_lib); | ||
740 | } | ||
741 | |||
742 | /* | ||
743 | * ======== dof_close ======== | ||
744 | */ | ||
745 | static void dof_close(struct dbll_library_obj *zl_lib) | ||
746 | { | ||
747 | if (zl_lib->desc) { | ||
748 | dload_module_close(zl_lib->desc); | ||
749 | zl_lib->desc = NULL; | ||
750 | } | ||
751 | /* close file */ | ||
752 | if (zl_lib->fp) { | ||
753 | (zl_lib->target_obj->attrs.fclose) (zl_lib->fp); | ||
754 | zl_lib->fp = NULL; | ||
755 | } | ||
756 | } | ||
757 | |||
758 | /* | ||
759 | * ======== dof_open ======== | ||
760 | */ | ||
761 | static int dof_open(struct dbll_library_obj *zl_lib) | ||
762 | { | ||
763 | void *open = *(zl_lib->target_obj->attrs.fopen); | ||
764 | int status = 0; | ||
765 | |||
766 | /* First open the file for the dynamic loader, then open the DOFF module */ | ||
767 | zl_lib->fp = | ||
768 | (void *)((dbll_f_open_fxn) (open)) (zl_lib->file_name, "rb"); | ||
769 | |||
770 | /* Open DOFF module */ | ||
771 | if (zl_lib->fp && zl_lib->desc == NULL) { | ||
772 | (*(zl_lib->target_obj->attrs.fseek)) (zl_lib->fp, (long)0, | ||
773 | SEEK_SET); | ||
774 | zl_lib->desc = | ||
775 | dload_module_open(&zl_lib->stream.dl_stream, | ||
776 | &zl_lib->symbol.dl_symbol); | ||
777 | if (zl_lib->desc == NULL) { | ||
778 | (zl_lib->target_obj->attrs.fclose) (zl_lib->fp); | ||
779 | zl_lib->fp = NULL; | ||
780 | status = -EBADF; | ||
781 | } | ||
782 | } else { | ||
783 | status = -EBADF; | ||
784 | } | ||
785 | |||
786 | return status; | ||
787 | } | ||
788 | |||
789 | /* | ||
790 | * ======== name_hash ======== | ||
791 | */ | ||
792 | static u32 name_hash(const void *key) | ||
793 | { | ||
794 | u32 hash; | ||
795 | const char *name = key; | ||
796 | |||
797 | hash = 0; | ||
798 | |||
799 | while (*name) { | ||
800 | hash <<= 1; | ||
801 | hash ^= *name++; | ||
802 | } | ||
803 | |||
804 | return hash; | ||
805 | } | ||
806 | |||
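The hash above is a plain shift-and-XOR over the bytes of the key; a worked
example (not in the original file) for the two-character name "ab":

	/*
	 * name_hash("ab"):
	 *   start       hash = 0
	 *   'a' (0x61): hash = (0    << 1) ^ 0x61 = 0x61
	 *   'b' (0x62): hash = (0x61 << 1) ^ 0x62 = 0xc2 ^ 0x62 = 0xa0
	 * so the function returns 0xa0 (160); the generic hash table code
	 * (gh_*) is assumed to reduce this value modulo its bucket count.
	 */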
807 | /* | ||
808 | * ======== name_match ======== | ||
809 | */ | ||
810 | static bool name_match(const void *key, const void *sp) | ||
811 | { | ||
812 | if ((key != NULL) && (sp != NULL)) { | ||
813 | if (strcmp(key, ((struct dbll_symbol *)sp)->name) == 0) | ||
814 | return true; | ||
815 | } | ||
816 | return false; | ||
817 | } | ||
818 | |||
819 | /* | ||
820 | * ======== no_op ======== | ||
821 | */ | ||
822 | static int no_op(struct dynamic_loader_initialize *thisptr, void *bufr, | ||
823 | ldr_addr locn, struct ldr_section_info *info, unsigned bytsize) | ||
824 | { | ||
825 | return 1; | ||
826 | } | ||
827 | |||
828 | /* | ||
829 | * ======== sym_delete ======== | ||
830 | */ | ||
831 | static void sym_delete(void *value) | ||
832 | { | ||
833 | struct dbll_symbol *sp = (struct dbll_symbol *)value; | ||
834 | |||
835 | kfree(sp->name); | ||
836 | } | ||
837 | |||
838 | /* | ||
839 | * Dynamic Loader Functions | ||
840 | */ | ||
841 | |||
842 | /* dynamic_loader_stream */ | ||
843 | /* | ||
844 | * ======== dbll_read_buffer ======== | ||
845 | */ | ||
846 | static int dbll_read_buffer(struct dynamic_loader_stream *this, void *buffer, | ||
847 | unsigned bufsize) | ||
848 | { | ||
849 | struct dbll_stream *pstream = (struct dbll_stream *)this; | ||
850 | struct dbll_library_obj *lib; | ||
851 | int bytes_read = 0; | ||
852 | |||
853 | lib = pstream->lib; | ||
854 | if (lib != NULL) { | ||
855 | bytes_read = | ||
856 | (*(lib->target_obj->attrs.fread)) (buffer, 1, bufsize, | ||
857 | lib->fp); | ||
858 | } | ||
859 | return bytes_read; | ||
860 | } | ||
861 | |||
862 | /* | ||
863 | * ======== dbll_set_file_posn ======== | ||
864 | */ | ||
865 | static int dbll_set_file_posn(struct dynamic_loader_stream *this, | ||
866 | unsigned int pos) | ||
867 | { | ||
868 | struct dbll_stream *pstream = (struct dbll_stream *)this; | ||
869 | struct dbll_library_obj *lib; | ||
870 | int status = 0; /* Success */ | ||
871 | |||
872 | lib = pstream->lib; | ||
873 | if (lib != NULL) { | ||
874 | status = (*(lib->target_obj->attrs.fseek)) (lib->fp, (long)pos, | ||
875 | SEEK_SET); | ||
876 | } | ||
877 | |||
878 | return status; | ||
879 | } | ||
880 | |||
881 | /* dynamic_loader_sym */ | ||
882 | |||
883 | /* | ||
884 | * ======== dbll_find_symbol ======== | ||
885 | */ | ||
886 | static struct dynload_symbol *dbll_find_symbol(struct dynamic_loader_sym *this, | ||
887 | const char *name) | ||
888 | { | ||
889 | struct dynload_symbol *ret_sym; | ||
890 | struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; | ||
891 | struct dbll_library_obj *lib; | ||
892 | struct dbll_sym_val *dbll_sym = NULL; | ||
893 | bool status = false; /* Symbol not found yet */ | ||
894 | |||
895 | lib = ldr_sym->lib; | ||
896 | if (lib != NULL) { | ||
897 | if (lib->target_obj->attrs.sym_lookup) { | ||
898 | /* Check current lib + base lib + dep lib + | ||
899 | * persistent lib */ | ||
900 | status = (*(lib->target_obj->attrs.sym_lookup)) | ||
901 | (lib->target_obj->attrs.sym_handle, | ||
902 | lib->target_obj->attrs.sym_arg, | ||
903 | lib->target_obj->attrs.rmm_handle, name, | ||
904 | &dbll_sym); | ||
905 | } else { | ||
906 | /* Just check current lib for symbol */ | ||
907 | status = dbll_get_addr((struct dbll_library_obj *)lib, | ||
908 | (char *)name, &dbll_sym); | ||
909 | if (!status) { | ||
910 | status = dbll_get_c_addr( | ||
911 | (struct dbll_library_obj *) | ||
912 | lib, (char *)name, | ||
913 | &dbll_sym); | ||
914 | } | ||
915 | } | ||
916 | } | ||
917 | |||
918 | if (!status && gbl_search) | ||
919 | dev_dbg(bridge, "%s: Symbol not found: %s\n", __func__, name); | ||
920 | |||
921 | ret_sym = (struct dynload_symbol *)dbll_sym; | ||
922 | return ret_sym; | ||
923 | } | ||
924 | |||
925 | /* | ||
926 | * ======== find_in_symbol_table ======== | ||
927 | */ | ||
928 | static struct dynload_symbol *find_in_symbol_table(struct dynamic_loader_sym | ||
929 | *this, const char *name, | ||
930 | unsigned moduleid) | ||
931 | { | ||
932 | struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; | ||
933 | struct dbll_library_obj *lib; | ||
934 | struct dbll_symbol *sym; | ||
935 | |||
936 | lib = ldr_sym->lib; | ||
937 | sym = (struct dbll_symbol *)gh_find(lib->sym_tab, (char *)name); | ||
938 | |||
939 | if (IS_ERR(sym)) | ||
940 | return NULL; | ||
941 | |||
942 | return (struct dynload_symbol *)&sym->value; | ||
943 | } | ||
944 | |||
945 | /* | ||
946 | * ======== dbll_add_to_symbol_table ======== | ||
947 | */ | ||
948 | static struct dynload_symbol *dbll_add_to_symbol_table(struct dynamic_loader_sym | ||
949 | *this, const char *name, | ||
950 | unsigned module_id) | ||
951 | { | ||
952 | struct dbll_symbol *sym_ptr = NULL; | ||
953 | struct dbll_symbol symbol; | ||
954 | struct dynload_symbol *dbll_sym = NULL; | ||
955 | struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; | ||
956 | struct dbll_library_obj *lib; | ||
957 | struct dynload_symbol *ret; | ||
958 | |||
959 | lib = ldr_sym->lib; | ||
960 | |||
961 | /* Check to see if symbol is already defined in symbol table */ | ||
962 | if (!(lib->target_obj->attrs.base_image)) { | ||
963 | gbl_search = false; | ||
964 | dbll_sym = dbll_find_symbol(this, name); | ||
965 | gbl_search = true; | ||
966 | if (dbll_sym) { | ||
967 | redefined_symbol = true; | ||
968 | dev_dbg(bridge, "%s already defined in symbol table\n", | ||
969 | name); | ||
970 | return NULL; | ||
971 | } | ||
972 | } | ||
973 | /* Allocate string to copy symbol name */ | ||
974 | symbol.name = kzalloc(strlen((char *const)name) + 1, GFP_KERNEL); | ||
975 | if (symbol.name == NULL) | ||
976 | return NULL; | ||
977 | |||
978 | if (symbol.name != NULL) { | ||
979 | /* Just copy name (value will be filled in by dynamic loader) */ | ||
980 | strncpy(symbol.name, (char *const)name, | ||
981 | strlen((char *const)name) + 1); | ||
982 | |||
983 | /* Add symbol to symbol table */ | ||
984 | sym_ptr = | ||
985 | (struct dbll_symbol *)gh_insert(lib->sym_tab, (void *)name, | ||
986 | (void *)&symbol); | ||
987 | if (IS_ERR(sym_ptr)) { | ||
988 | kfree(symbol.name); | ||
989 | sym_ptr = NULL; | ||
990 | } | ||
991 | |||
992 | } | ||
993 | if (sym_ptr != NULL) | ||
994 | ret = (struct dynload_symbol *)&sym_ptr->value; | ||
995 | else | ||
996 | ret = NULL; | ||
997 | |||
998 | return ret; | ||
999 | } | ||
1000 | |||
1001 | /* | ||
1002 | * ======== dbll_purge_symbol_table ======== | ||
1003 | */ | ||
1004 | static void dbll_purge_symbol_table(struct dynamic_loader_sym *this, | ||
1005 | unsigned module_id) | ||
1006 | { | ||
1007 | struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; | ||
1008 | struct dbll_library_obj *lib; | ||
1009 | |||
1010 | lib = ldr_sym->lib; | ||
1011 | /* May not need to do anything */ | ||
1012 | } | ||
1013 | |||
1014 | /* | ||
1015 | * ======== allocate ======== | ||
1016 | */ | ||
1017 | static void *allocate(struct dynamic_loader_sym *this, unsigned memsize) | ||
1018 | { | ||
1019 | struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; | ||
1020 | struct dbll_library_obj *lib; | ||
1021 | void *buf; | ||
1022 | |||
1023 | lib = ldr_sym->lib; | ||
1024 | |||
1025 | buf = kzalloc(memsize, GFP_KERNEL); | ||
1026 | |||
1027 | return buf; | ||
1028 | } | ||
1029 | |||
1030 | /* | ||
1031 | * ======== deallocate ======== | ||
1032 | */ | ||
1033 | static void deallocate(struct dynamic_loader_sym *this, void *mem_ptr) | ||
1034 | { | ||
1035 | struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; | ||
1036 | struct dbll_library_obj *lib; | ||
1037 | |||
1038 | lib = ldr_sym->lib; | ||
1039 | |||
1040 | kfree(mem_ptr); | ||
1041 | } | ||
1042 | |||
1043 | /* | ||
1044 | * ======== dbll_err_report ======== | ||
1045 | */ | ||
1046 | static void dbll_err_report(struct dynamic_loader_sym *this, const char *errstr, | ||
1047 | va_list args) | ||
1048 | { | ||
1049 | struct ldr_symbol *ldr_sym = (struct ldr_symbol *)this; | ||
1050 | struct dbll_library_obj *lib; | ||
1051 | char temp_buf[MAXEXPR]; | ||
1052 | |||
1053 | lib = ldr_sym->lib; | ||
1054 | vsnprintf((char *)temp_buf, MAXEXPR, (char *)errstr, args); | ||
1055 | dev_dbg(bridge, "%s\n", temp_buf); | ||
1056 | } | ||
1057 | |||
1058 | /* dynamic_loader_allocate */ | ||
1059 | |||
1060 | /* | ||
1061 | * ======== dbll_rmm_alloc ======== | ||
1062 | */ | ||
1063 | static int dbll_rmm_alloc(struct dynamic_loader_allocate *this, | ||
1064 | struct ldr_section_info *info, unsigned align) | ||
1065 | { | ||
1066 | struct dbll_alloc *dbll_alloc_obj = (struct dbll_alloc *)this; | ||
1067 | struct dbll_library_obj *lib; | ||
1068 | int status = 0; | ||
1069 | u32 mem_sect_type; | ||
1070 | struct rmm_addr rmm_addr_obj; | ||
1071 | s32 ret = true; | ||
1072 | unsigned stype = DLOAD_SECTION_TYPE(info->type); | ||
1073 | char *token = NULL; | ||
1074 | char *sz_sec_last_token = NULL; | ||
1075 | char *sz_last_token = NULL; | ||
1076 | char *sz_sect_name = NULL; | ||
1077 | char *psz_cur; | ||
1078 | s32 token_len = 0; | ||
1079 | s32 seg_id = -1; | ||
1080 | s32 req = -1; | ||
1081 | s32 count = 0; | ||
1082 | u32 alloc_size = 0; | ||
1083 | u32 run_addr_flag = 0; | ||
1084 | |||
1085 | lib = dbll_alloc_obj->lib; | ||
1086 | |||
1087 | mem_sect_type = | ||
1088 | (stype == DLOAD_TEXT) ? DBLL_CODE : (stype == | ||
1089 | DLOAD_BSS) ? DBLL_BSS : | ||
1090 | DBLL_DATA; | ||
1091 | |||
1092 | /* Attempt to extract the segment ID and requirement information from | ||
1093 | the name of the section */ | ||
1094 | token_len = strlen((char *)(info->name)) + 1; | ||
1095 | |||
1096 | sz_sect_name = kzalloc(token_len, GFP_KERNEL); | ||
1097 | sz_last_token = kzalloc(token_len, GFP_KERNEL); | ||
1098 | sz_sec_last_token = kzalloc(token_len, GFP_KERNEL); | ||
1099 | |||
1100 | if (sz_sect_name == NULL || sz_sec_last_token == NULL || | ||
1101 | sz_last_token == NULL) { | ||
1102 | status = -ENOMEM; | ||
1103 | goto func_cont; | ||
1104 | } | ||
1105 | strncpy(sz_sect_name, (char *)(info->name), token_len); | ||
1106 | psz_cur = sz_sect_name; | ||
1107 | while ((token = strsep(&psz_cur, ":")) && *token != '\0') { | ||
1108 | strncpy(sz_sec_last_token, sz_last_token, | ||
1109 | strlen(sz_last_token) + 1); | ||
1110 | strncpy(sz_last_token, token, strlen(token) + 1); | ||
1111 | token = strsep(&psz_cur, ":"); | ||
1112 | count++; /* one pass of the loop consumes two tokens */ | ||
1113 | } | ||
1114 | /* If req is 0 or 1, and sz_sec_last_token is DYN_DARAM, DYN_SARAM | ||
1115 | or DYN_EXTERNAL, then memory granularity information is present | ||
1116 | within the section name - only parse it if the token loop above | ||
1117 | made at least three passes (just a minor optimization) */ | ||
1118 | if (count >= 3) { | ||
1119 | status = kstrtos32(sz_last_token, 10, &req); | ||
1120 | if (status) | ||
1121 | goto func_cont; | ||
1122 | } | ||
1123 | |||
1124 | if ((req == 0) || (req == 1)) { | ||
1125 | if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) { | ||
1126 | seg_id = 0; | ||
1127 | } else { | ||
1128 | if (strcmp(sz_sec_last_token, "DYN_SARAM") == 0) { | ||
1129 | seg_id = 1; | ||
1130 | } else { | ||
1131 | if (strcmp(sz_sec_last_token, | ||
1132 | "DYN_EXTERNAL") == 0) | ||
1133 | seg_id = 2; | ||
1134 | } | ||
1135 | } | ||
1136 | } | ||
1137 | func_cont: | ||
1138 | kfree(sz_sect_name); | ||
1139 | sz_sect_name = NULL; | ||
1140 | kfree(sz_last_token); | ||
1141 | sz_last_token = NULL; | ||
1142 | kfree(sz_sec_last_token); | ||
1143 | sz_sec_last_token = NULL; | ||
1144 | |||
1145 | if (mem_sect_type == DBLL_CODE) | ||
1146 | alloc_size = info->size + GEM_L1P_PREFETCH_SIZE; | ||
1147 | else | ||
1148 | alloc_size = info->size; | ||
1149 | |||
1150 | if (info->load_addr != info->run_addr) | ||
1151 | run_addr_flag = 1; | ||
1152 | /* TODO - ideally, we can pass the alignment requirement also | ||
1153 | * from here */ | ||
1154 | if (lib != NULL) { | ||
1155 | status = | ||
1156 | (lib->target_obj->attrs.alloc) (lib->target_obj->attrs. | ||
1157 | rmm_handle, mem_sect_type, | ||
1158 | alloc_size, align, | ||
1159 | (u32 *) &rmm_addr_obj, | ||
1160 | seg_id, req, false); | ||
1161 | } | ||
1162 | if (status) { | ||
1163 | ret = false; | ||
1164 | } else { | ||
1165 | /* RMM gives word address. Need to convert to byte address */ | ||
1166 | info->load_addr = rmm_addr_obj.addr * DSPWORDSIZE; | ||
1167 | if (!run_addr_flag) | ||
1168 | info->run_addr = info->load_addr; | ||
1169 | info->context = (u32) rmm_addr_obj.segid; | ||
1170 | dev_dbg(bridge, "%s: %s base = 0x%x len = 0x%x, info->run_addr 0x%x, info->load_addr 0x%x\n", | ||
1171 | __func__, info->name, info->load_addr / DSPWORDSIZE, | ||
1172 | info->size / DSPWORDSIZE, info->run_addr, | ||
1173 | info->load_addr); | ||
1174 | } | ||
1175 | return ret; | ||
1176 | } | ||
1177 | |||
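The token loop in dbll_rmm_alloc() is easy to misread because strsep() is
called twice per pass, so each pass consumes two ':'-separated tokens and
count grows once per pair. A worked trace (the section name is purely
hypothetical; only the parsing behaviour is taken from the code above):

	/*
	 * name = ".text:A:DYN_DARAM:B:0"
	 *   pass 1: condition token ".text"     -> sz_last_token = ".text",     body token "A"
	 *   pass 2: condition token "DYN_DARAM" -> sz_sec_last_token = ".text",
	 *                                          sz_last_token = "DYN_DARAM", body token "B"
	 *   pass 3: condition token "0"         -> sz_sec_last_token = "DYN_DARAM",
	 *                                          sz_last_token = "0",          body token NULL
	 * count == 3, kstrtos32("0") yields req == 0 and the DYN_DARAM match
	 * sets seg_id == 0.  A plain name such as ".text" never reaches
	 * count >= 3 and keeps seg_id == -1, req == -1, so the RMM allocator
	 * is free to place the section anywhere.  The allocator returns a DSP
	 * word address, which is converted to a byte address by multiplying
	 * with DSPWORDSIZE before it is stored in info->load_addr.
	 */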
1178 | /* | ||
1179 | * ======== rmm_dealloc ======== | ||
1180 | */ | ||
1181 | static void rmm_dealloc(struct dynamic_loader_allocate *this, | ||
1182 | struct ldr_section_info *info) | ||
1183 | { | ||
1184 | struct dbll_alloc *dbll_alloc_obj = (struct dbll_alloc *)this; | ||
1185 | struct dbll_library_obj *lib; | ||
1186 | u32 segid; | ||
1187 | int status = 0; | ||
1188 | unsigned stype = DLOAD_SECTION_TYPE(info->type); | ||
1189 | u32 mem_sect_type; | ||
1190 | u32 free_size = 0; | ||
1191 | |||
1192 | mem_sect_type = | ||
1193 | (stype == DLOAD_TEXT) ? DBLL_CODE : (stype == | ||
1194 | DLOAD_BSS) ? DBLL_BSS : | ||
1195 | DBLL_DATA; | ||
1196 | lib = dbll_alloc_obj->lib; | ||
1197 | /* segid was set by alloc function */ | ||
1198 | segid = (u32) info->context; | ||
1199 | if (mem_sect_type == DBLL_CODE) | ||
1200 | free_size = info->size + GEM_L1P_PREFETCH_SIZE; | ||
1201 | else | ||
1202 | free_size = info->size; | ||
1203 | if (lib != NULL) { | ||
1204 | status = | ||
1205 | (lib->target_obj->attrs.free) (lib->target_obj->attrs. | ||
1206 | sym_handle, segid, | ||
1207 | info->load_addr / | ||
1208 | DSPWORDSIZE, free_size, | ||
1209 | false); | ||
1210 | } | ||
1211 | } | ||
1212 | |||
1213 | /* dynamic_loader_initialize */ | ||
1214 | /* | ||
1215 | * ======== connect ======== | ||
1216 | */ | ||
1217 | static int connect(struct dynamic_loader_initialize *this) | ||
1218 | { | ||
1219 | return true; | ||
1220 | } | ||
1221 | |||
1222 | /* | ||
1223 | * ======== read_mem ======== | ||
1224 | * This function does not need to be implemented. | ||
1225 | */ | ||
1226 | static int read_mem(struct dynamic_loader_initialize *this, void *buf, | ||
1227 | ldr_addr addr, struct ldr_section_info *info, | ||
1228 | unsigned nbytes) | ||
1229 | { | ||
1230 | struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this; | ||
1231 | struct dbll_library_obj *lib; | ||
1232 | int bytes_read = 0; | ||
1233 | |||
1234 | lib = init_obj->lib; | ||
1235 | /* Need bridge_brd_read function */ | ||
1236 | return bytes_read; | ||
1237 | } | ||
1238 | |||
1239 | /* | ||
1240 | * ======== write_mem ======== | ||
1241 | */ | ||
1242 | static int write_mem(struct dynamic_loader_initialize *this, void *buf, | ||
1243 | ldr_addr addr, struct ldr_section_info *info, | ||
1244 | unsigned bytes) | ||
1245 | { | ||
1246 | struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this; | ||
1247 | struct dbll_library_obj *lib; | ||
1248 | struct dbll_tar_obj *target_obj; | ||
1249 | struct dbll_sect_info sect_info; | ||
1250 | u32 mem_sect_type; | ||
1251 | bool ret = true; | ||
1252 | |||
1253 | lib = init_obj->lib; | ||
1254 | if (!lib) | ||
1255 | return false; | ||
1256 | |||
1257 | target_obj = lib->target_obj; | ||
1258 | |||
1259 | mem_sect_type = | ||
1260 | (DLOAD_SECTION_TYPE(info->type) == | ||
1261 | DLOAD_TEXT) ? DBLL_CODE : DBLL_DATA; | ||
1262 | if (target_obj && target_obj->attrs.write) { | ||
1263 | ret = | ||
1264 | (*target_obj->attrs.write) (target_obj->attrs.input_params, | ||
1265 | addr, buf, bytes, | ||
1266 | mem_sect_type); | ||
1267 | |||
1268 | if (target_obj->attrs.log_write) { | ||
1269 | sect_info.name = info->name; | ||
1270 | sect_info.sect_run_addr = info->run_addr; | ||
1271 | sect_info.sect_load_addr = info->load_addr; | ||
1272 | sect_info.size = info->size; | ||
1273 | sect_info.type = mem_sect_type; | ||
1274 | /* Pass the information about what we've written to | ||
1275 | * another module */ | ||
1276 | (*target_obj->attrs.log_write) (target_obj->attrs. | ||
1277 | log_write_handle, | ||
1278 | §_info, addr, | ||
1279 | bytes); | ||
1280 | } | ||
1281 | } | ||
1282 | return ret; | ||
1283 | } | ||
1284 | |||
1285 | /* | ||
1286 | * ======== fill_mem ======== | ||
1287 | * Fill bytes of DSP memory at a given address with a given value. | ||
1288 | * This implementation obtains a host-side pointer to the target | ||
1289 | * shared-memory region via write_mem() and fills it with memset(). | ||
1290 | */ | ||
1291 | static int fill_mem(struct dynamic_loader_initialize *this, ldr_addr addr, | ||
1292 | struct ldr_section_info *info, unsigned bytes, unsigned val) | ||
1293 | { | ||
1294 | bool ret = true; | ||
1295 | char *pbuf; | ||
1296 | struct dbll_library_obj *lib; | ||
1297 | struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this; | ||
1298 | |||
1299 | lib = init_obj->lib; | ||
1300 | pbuf = NULL; | ||
1301 | /* Pass write_mem() a pointer to a NULL buffer with a length of zero; | ||
1302 | this is a trick to obtain the host-side start address of shared | ||
1303 | memory - no data is actually written by this call. | ||
1304 | */ | ||
1305 | if ((lib->target_obj->attrs.write) != (dbll_write_fxn) no_op) | ||
1306 | write_mem(this, &pbuf, addr, info, 0); | ||
1307 | if (pbuf) | ||
1308 | memset(pbuf, val, bytes); | ||
1309 | |||
1310 | return ret; | ||
1311 | } | ||
1312 | |||
1313 | /* | ||
1314 | * ======== execute ======== | ||
1315 | */ | ||
1316 | static int execute(struct dynamic_loader_initialize *this, ldr_addr start) | ||
1317 | { | ||
1318 | struct dbll_init_obj *init_obj = (struct dbll_init_obj *)this; | ||
1319 | struct dbll_library_obj *lib; | ||
1320 | bool ret = true; | ||
1321 | |||
1322 | lib = init_obj->lib; | ||
1323 | /* Save entry point */ | ||
1324 | if (lib != NULL) | ||
1325 | lib->entry = (u32) start; | ||
1326 | |||
1327 | return ret; | ||
1328 | } | ||
1329 | |||
1330 | /* | ||
1331 | * ======== release ======== | ||
1332 | */ | ||
1333 | static void release(struct dynamic_loader_initialize *this) | ||
1334 | { | ||
1335 | } | ||
1336 | |||
1337 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
1338 | /** | ||
1339 | * find_symbol_context - Basic symbol context structure | ||
1340 | * @address: Address to resolve to a DSP symbol | ||
1341 | * @offset_range: Maximum offset below @address within which to search | ||
1342 | * for the DSP symbol | ||
1343 | * @cur_best_offset: Smallest offset found so far during the search | ||
1344 | * @sym_addr: Address of the DSP symbol | ||
1345 | * @name: Symbol name | ||
1346 | * | ||
1347 | */ | ||
1348 | struct find_symbol_context { | ||
1349 | /* input */ | ||
1350 | u32 address; | ||
1351 | u32 offset_range; | ||
1352 | /* state */ | ||
1353 | u32 cur_best_offset; | ||
1354 | /* output */ | ||
1355 | u32 sym_addr; | ||
1356 | char name[120]; | ||
1357 | }; | ||
1358 | |||
1359 | /** | ||
1360 | * find_symbol_callback() - Validates symbol address and copies the symbol name | ||
1361 | * to the user data. | ||
1362 | * @elem: Symbol table entry (struct dbll_symbol) being visited | ||
1363 | * @user_data: Find symbol context | ||
1364 | * | ||
1365 | */ | ||
1366 | void find_symbol_callback(void *elem, void *user_data) | ||
1367 | { | ||
1368 | struct dbll_symbol *symbol = elem; | ||
1369 | struct find_symbol_context *context = user_data; | ||
1370 | u32 symbol_addr = symbol->value.value; | ||
1371 | u32 offset = context->address - symbol_addr; | ||
1372 | |||
1373 | /* | ||
1374 | * Address given should be greater than symbol address, | ||
1375 | * symbol address should be within specified range | ||
1376 | * and the offset should be better than previous one | ||
1377 | */ | ||
1378 | if (context->address >= symbol_addr && symbol_addr < (u32)-1 && | ||
1379 | offset < context->cur_best_offset) { | ||
1380 | context->cur_best_offset = offset; | ||
1381 | context->sym_addr = symbol_addr; | ||
1382 | strlcpy(context->name, symbol->name, sizeof(context->name)); | ||
1383 | } | ||
1384 | |||
1385 | return; | ||
1386 | } | ||
1387 | |||
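A numeric illustration (all addresses hypothetical) of the selection rule in
find_symbol_callback() above:

	/*
	 * offset_range = cur_best_offset = 0x1000, lookup address = 0x10250,
	 * symbols in the table at 0x10000, 0x10200 and 0x10300:
	 *
	 *   0x10000: offset 0x250 < 0x1000 -> best so far
	 *   0x10200: offset 0x050 < 0x250  -> new best
	 *   0x10300: fails the context->address >= symbol_addr test, ignored
	 *
	 * so the iteration ends with sym_addr = 0x10200 and that symbol's
	 * name copied into context->name.
	 */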
1388 | /** | ||
1389 | * dbll_find_dsp_symbol() - Find the DSP symbol nearest to a given address. | ||
1390 | * @zl_lib: DSP library object to search | ||
1391 | * @address: Address for which to find the nearest DSP symbol | ||
1392 | * @offset_range: Maximum offset below @address to search | ||
1393 | * @sym_addr_output: Output: address of the symbol found | ||
1394 | * @name_output: Output: name of the symbol found | ||
1395 | * | ||
1396 | * Looks up the symbol nearest below @address (within @offset_range) in the library's symbol table. | ||
1397 | */ | ||
1398 | bool dbll_find_dsp_symbol(struct dbll_library_obj *zl_lib, u32 address, | ||
1399 | u32 offset_range, u32 *sym_addr_output, | ||
1400 | char *name_output) | ||
1401 | { | ||
1402 | bool status = false; | ||
1403 | struct find_symbol_context context; | ||
1404 | |||
1405 | context.address = address; | ||
1406 | context.offset_range = offset_range; | ||
1407 | context.cur_best_offset = offset_range; | ||
1408 | context.sym_addr = 0; | ||
1409 | context.name[0] = '\0'; | ||
1410 | |||
1411 | gh_iterate(zl_lib->sym_tab, find_symbol_callback, &context); | ||
1412 | |||
1413 | if (context.name[0]) { | ||
1414 | status = true; | ||
1415 | strcpy(name_output, context.name); | ||
1416 | *sym_addr_output = context.sym_addr; | ||
1417 | } | ||
1418 | |||
1419 | return status; | ||
1420 | } | ||
1421 | #endif | ||
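A minimal sketch (not from the original file) of how a backtrace consumer
might call dbll_find_dsp_symbol(); the library pointer, program counter and
search window are hypothetical.

	static void example_symbolicate(struct dbll_library_obj *lib, u32 dsp_pc)
	{
		u32 sym_addr;
		char name[120];		/* same size as find_symbol_context.name */

		/* search up to 0x1000 DSP addresses below the faulting PC */
		if (dbll_find_dsp_symbol(lib, dsp_pc, 0x1000, &sym_addr, name))
			pr_info("DSP PC 0x%x is %s+0x%x\n",
				dsp_pc, name, dsp_pc - sym_addr);
		else
			pr_info("DSP PC 0x%x: no symbol within range\n", dsp_pc);
	}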
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c deleted file mode 100644 index 616dc1f63070..000000000000 --- a/drivers/staging/tidspbridge/pmgr/dev.c +++ /dev/null | |||
@@ -1,969 +0,0 @@ | |||
1 | /* | ||
2 | * dev.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Implementation of Bridge driver device operations. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/list.h> | ||
20 | |||
21 | /* ----------------------------------- Host OS */ | ||
22 | #include <dspbridge/host_os.h> | ||
23 | |||
24 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
25 | #include <dspbridge/dbdefs.h> | ||
26 | |||
27 | /* ----------------------------------- Platform Manager */ | ||
28 | #include <dspbridge/cod.h> | ||
29 | #include <dspbridge/drv.h> | ||
30 | #include <dspbridge/proc.h> | ||
31 | #include <dspbridge/dmm.h> | ||
32 | |||
33 | /* ----------------------------------- Resource Manager */ | ||
34 | #include <dspbridge/mgr.h> | ||
35 | #include <dspbridge/node.h> | ||
36 | |||
37 | /* ----------------------------------- Others */ | ||
38 | #include <dspbridge/dspapi.h> /* DSP API version info. */ | ||
39 | |||
40 | #include <dspbridge/chnl.h> | ||
41 | #include <dspbridge/io.h> | ||
42 | #include <dspbridge/msg.h> | ||
43 | #include <dspbridge/cmm.h> | ||
44 | #include <dspbridge/dspdeh.h> | ||
45 | |||
46 | /* ----------------------------------- This */ | ||
47 | #include <dspbridge/dev.h> | ||
48 | |||
49 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | ||
50 | |||
51 | #define MAKEVERSION(major, minor) (major * 10 + minor) | ||
52 | #define BRD_API_VERSION MAKEVERSION(BRD_API_MAJOR_VERSION, \ | ||
53 | BRD_API_MINOR_VERSION) | ||
54 | |||
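The packing is a simple decimal encoding; a worked example (the values are
illustrative, not the driver's actual version numbers):

	/*
	 * MAKEVERSION(2, 5) == 2 * 10 + 5 == 25, so BRD_API_VERSION for a
	 * 2.5 API is 25 and plain integer comparison orders versions
	 * correctly as long as the minor number stays below 10.
	 */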
55 | /* The Bridge device object: */ | ||
56 | struct dev_object { | ||
57 | struct list_head link; /* Link to next dev_object. */ | ||
58 | u8 dev_type; /* Device Type */ | ||
59 | struct cfg_devnode *dev_node_obj; /* Platform specific dev id */ | ||
60 | /* Bridge Context Handle */ | ||
61 | struct bridge_dev_context *bridge_context; | ||
62 | /* Function interface to Bridge driver. */ | ||
63 | struct bridge_drv_interface bridge_interface; | ||
64 | struct brd_object *lock_owner; /* Client with exclusive access. */ | ||
65 | struct cod_manager *cod_mgr; /* Code manager handle. */ | ||
66 | struct chnl_mgr *chnl_mgr; /* Channel manager. */ | ||
67 | struct deh_mgr *deh_mgr; /* DEH manager. */ | ||
68 | struct msg_mgr *msg_mgr; /* Message manager. */ | ||
69 | struct io_mgr *iomgr; /* IO manager (CHNL, msg_ctrl) */ | ||
70 | struct cmm_object *cmm_mgr; /* SM memory manager. */ | ||
71 | struct dmm_object *dmm_mgr; /* Dynamic memory manager. */ | ||
72 | u32 word_size; /* DSP word size: quick access. */ | ||
73 | struct drv_object *drv_obj; /* Driver Object */ | ||
74 | /* List of Processors attached to this device */ | ||
75 | struct list_head proc_list; | ||
76 | struct node_mgr *node_mgr; | ||
77 | }; | ||
78 | |||
79 | struct drv_ext { | ||
80 | struct list_head link; | ||
81 | char sz_string[MAXREGPATHLENGTH]; | ||
82 | }; | ||
83 | |||
84 | /* ----------------------------------- Function Prototypes */ | ||
85 | static int fxn_not_implemented(int arg, ...); | ||
86 | static int init_cod_mgr(struct dev_object *dev_obj); | ||
87 | static void store_interface_fxns(struct bridge_drv_interface *drv_fxns, | ||
88 | struct bridge_drv_interface *intf_fxns); | ||
89 | /* | ||
90 | * ======== dev_brd_write_fxn ======== | ||
91 | * Purpose: | ||
92 | * Exported function to be used as the COD write function. This function | ||
93 | * is passed a handle to a DEV_hObject, then calls the | ||
94 | * device's bridge_brd_write() function. | ||
95 | */ | ||
96 | u32 dev_brd_write_fxn(void *arb, u32 dsp_add, void *host_buf, | ||
97 | u32 ul_num_bytes, u32 mem_space) | ||
98 | { | ||
99 | struct dev_object *dev_obj = (struct dev_object *)arb; | ||
100 | u32 ul_written = 0; | ||
101 | int status; | ||
102 | |||
103 | if (dev_obj) { | ||
104 | /* Require of BrdWrite() */ | ||
105 | status = (*dev_obj->bridge_interface.brd_write) ( | ||
106 | dev_obj->bridge_context, host_buf, | ||
107 | dsp_add, ul_num_bytes, mem_space); | ||
108 | /* Special case of getting the address only */ | ||
109 | if (ul_num_bytes == 0) | ||
110 | ul_num_bytes = 1; | ||
111 | if (!status) | ||
112 | ul_written = ul_num_bytes; | ||
113 | |||
114 | } | ||
115 | return ul_written; | ||
116 | } | ||
117 | |||
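A minimal sketch (not part of the original file) of the write-callback
contract described above: the COD loader is handed dev_brd_write_fxn()
together with the dev_object as its opaque argument and invokes it like an
fwrite()-style writer. The buffer, DSP address and memory space below are
made up for illustration.

	static void example_cod_write(struct dev_object *hdev_obj)
	{
		u8 boot_code[64] = { 0 };	/* hypothetical image fragment */
		u32 written;

		/* write 64 bytes to a hypothetical DSP address in space 0 */
		written = dev_brd_write_fxn(hdev_obj, 0x20000000, boot_code,
					    sizeof(boot_code), 0);
		if (written != sizeof(boot_code))
			pr_err("example: DSP write failed\n");
	}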
118 | /* | ||
119 | * ======== dev_create_device ======== | ||
120 | * Purpose: | ||
121 | * Called by the operating system to load the PM Bridge Driver for a | ||
122 | * PM board (device). | ||
123 | */ | ||
124 | int dev_create_device(struct dev_object **device_obj, | ||
125 | const char *driver_file_name, | ||
126 | struct cfg_devnode *dev_node_obj) | ||
127 | { | ||
128 | struct cfg_hostres *host_res; | ||
129 | struct bridge_drv_interface *drv_fxns = NULL; | ||
130 | struct dev_object *dev_obj = NULL; | ||
131 | struct chnl_mgrattrs mgr_attrs; | ||
132 | struct io_attrs io_mgr_attrs; | ||
133 | u32 num_windows; | ||
134 | struct drv_object *hdrv_obj = NULL; | ||
135 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
136 | int status = 0; | ||
137 | |||
138 | status = drv_request_bridge_res_dsp((void *)&host_res); | ||
139 | |||
140 | if (status) { | ||
141 | dev_dbg(bridge, "%s: Failed to reserve bridge resources\n", | ||
142 | __func__); | ||
143 | goto leave; | ||
144 | } | ||
145 | |||
146 | /* Get the Bridge driver interface functions */ | ||
147 | bridge_drv_entry(&drv_fxns, driver_file_name); | ||
148 | |||
149 | /* Retrieve the Object handle from the driver data */ | ||
150 | if (drv_datap && drv_datap->drv_object) { | ||
151 | hdrv_obj = drv_datap->drv_object; | ||
152 | } else { | ||
153 | status = -EPERM; | ||
154 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | ||
155 | } | ||
156 | |||
157 | /* Create the device object, and pass a handle to the Bridge driver for | ||
158 | * storage. */ | ||
159 | if (!status) { | ||
160 | dev_obj = kzalloc(sizeof(struct dev_object), GFP_KERNEL); | ||
161 | if (dev_obj) { | ||
162 | /* Fill out the rest of the Dev Object structure: */ | ||
163 | dev_obj->dev_node_obj = dev_node_obj; | ||
164 | dev_obj->cod_mgr = NULL; | ||
165 | dev_obj->chnl_mgr = NULL; | ||
166 | dev_obj->deh_mgr = NULL; | ||
167 | dev_obj->lock_owner = NULL; | ||
168 | dev_obj->word_size = DSPWORDSIZE; | ||
169 | dev_obj->drv_obj = hdrv_obj; | ||
170 | dev_obj->dev_type = DSP_UNIT; | ||
171 | /* Store this Bridge's interface functions, based on its | ||
172 | * version. */ | ||
173 | store_interface_fxns(drv_fxns, | ||
174 | &dev_obj->bridge_interface); | ||
175 | |||
176 | /* Call fxn_dev_create() to get the Bridge's device | ||
177 | * context handle. */ | ||
178 | status = (dev_obj->bridge_interface.dev_create) | ||
179 | (&dev_obj->bridge_context, dev_obj, | ||
180 | host_res); | ||
181 | } else { | ||
182 | status = -ENOMEM; | ||
183 | } | ||
184 | } | ||
185 | /* Attempt to create the COD manager for this device: */ | ||
186 | if (!status) | ||
187 | status = init_cod_mgr(dev_obj); | ||
188 | |||
189 | /* Attempt to create the channel manager for this device: */ | ||
190 | if (!status) { | ||
191 | mgr_attrs.max_channels = CHNL_MAXCHANNELS; | ||
192 | io_mgr_attrs.birq = host_res->birq_registers; | ||
193 | io_mgr_attrs.irq_shared = | ||
194 | (host_res->birq_attrib & CFG_IRQSHARED); | ||
195 | io_mgr_attrs.word_size = DSPWORDSIZE; | ||
196 | mgr_attrs.word_size = DSPWORDSIZE; | ||
197 | num_windows = host_res->num_mem_windows; | ||
198 | if (num_windows) { | ||
199 | /* Assume last memory window is for CHNL */ | ||
200 | io_mgr_attrs.shm_base = host_res->mem_base[1] + | ||
201 | host_res->offset_for_monitor; | ||
202 | io_mgr_attrs.sm_length = | ||
203 | host_res->mem_length[1] - | ||
204 | host_res->offset_for_monitor; | ||
205 | } else { | ||
206 | io_mgr_attrs.shm_base = 0; | ||
207 | io_mgr_attrs.sm_length = 0; | ||
208 | pr_err("%s: No memory reserved for shared structures\n", | ||
209 | __func__); | ||
210 | } | ||
211 | status = chnl_create(&dev_obj->chnl_mgr, dev_obj, &mgr_attrs); | ||
212 | if (status == -ENOSYS) { | ||
213 | /* It's OK for a device not to have a channel | ||
214 | * manager: */ | ||
215 | status = 0; | ||
216 | } | ||
217 | /* Create CMM mgr even if Msg Mgr not impl. */ | ||
218 | status = cmm_create(&dev_obj->cmm_mgr, | ||
219 | (struct dev_object *)dev_obj, NULL); | ||
220 | /* Only create IO manager if we have a channel manager */ | ||
221 | if (!status && dev_obj->chnl_mgr) { | ||
222 | status = io_create(&dev_obj->iomgr, dev_obj, | ||
223 | &io_mgr_attrs); | ||
224 | } | ||
225 | /* Only create DEH manager if we have an IO manager */ | ||
226 | if (!status) { | ||
227 | /* Instantiate the DEH module */ | ||
228 | status = bridge_deh_create(&dev_obj->deh_mgr, dev_obj); | ||
229 | } | ||
230 | /* Create DMM mgr. */ | ||
231 | status = dmm_create(&dev_obj->dmm_mgr, | ||
232 | (struct dev_object *)dev_obj, NULL); | ||
233 | } | ||
234 | /* Add the new DEV_Object to the global list: */ | ||
235 | if (!status) | ||
236 | status = drv_insert_dev_object(hdrv_obj, dev_obj); | ||
237 | |||
238 | /* Create the Processor List */ | ||
239 | if (!status) | ||
240 | INIT_LIST_HEAD(&dev_obj->proc_list); | ||
241 | leave: | ||
242 | /* If all went well, return a handle to the dev object; | ||
243 | * else, cleanup and return NULL in the OUT parameter. */ | ||
244 | if (!status) { | ||
245 | *device_obj = dev_obj; | ||
246 | } else { | ||
247 | if (dev_obj) { | ||
248 | if (dev_obj->cod_mgr) | ||
249 | cod_delete(dev_obj->cod_mgr); | ||
250 | if (dev_obj->dmm_mgr) | ||
251 | dmm_destroy(dev_obj->dmm_mgr); | ||
252 | kfree(dev_obj); | ||
253 | } | ||
254 | |||
255 | *device_obj = NULL; | ||
256 | } | ||
257 | |||
258 | return status; | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * ======== dev_create2 ======== | ||
263 | * Purpose: | ||
264 | * After successful loading of the image from api_init_complete2 | ||
265 | * (PROC Auto_Start) or proc_load this fxn is called. This creates | ||
266 | * the Node Manager and updates the DEV Object. | ||
267 | */ | ||
268 | int dev_create2(struct dev_object *hdev_obj) | ||
269 | { | ||
270 | int status = 0; | ||
271 | struct dev_object *dev_obj = hdev_obj; | ||
272 | |||
273 | /* There can be only one Node Manager per DEV object */ | ||
274 | status = node_create_mgr(&dev_obj->node_mgr, hdev_obj); | ||
275 | if (status) | ||
276 | dev_obj->node_mgr = NULL; | ||
277 | |||
278 | return status; | ||
279 | } | ||
280 | |||
281 | /* | ||
282 | * ======== dev_destroy2 ======== | ||
283 | * Purpose: | ||
284 | * Destroys the Node manager for this device. | ||
285 | */ | ||
286 | int dev_destroy2(struct dev_object *hdev_obj) | ||
287 | { | ||
288 | int status = 0; | ||
289 | struct dev_object *dev_obj = hdev_obj; | ||
290 | |||
291 | if (dev_obj->node_mgr) { | ||
292 | if (node_delete_mgr(dev_obj->node_mgr)) | ||
293 | status = -EPERM; | ||
294 | else | ||
295 | dev_obj->node_mgr = NULL; | ||
296 | |||
297 | } | ||
298 | |||
299 | return status; | ||
300 | } | ||
301 | |||
302 | /* | ||
303 | * ======== dev_destroy_device ======== | ||
304 | * Purpose: | ||
305 | * Destroys the channel manager for this device, if any, calls | ||
306 | * bridge_dev_destroy(), and then attempts to unload the Bridge module. | ||
307 | */ | ||
308 | int dev_destroy_device(struct dev_object *hdev_obj) | ||
309 | { | ||
310 | int status = 0; | ||
311 | struct dev_object *dev_obj = hdev_obj; | ||
312 | |||
313 | if (hdev_obj) { | ||
314 | if (dev_obj->cod_mgr) { | ||
315 | cod_delete(dev_obj->cod_mgr); | ||
316 | dev_obj->cod_mgr = NULL; | ||
317 | } | ||
318 | |||
319 | if (dev_obj->node_mgr) { | ||
320 | node_delete_mgr(dev_obj->node_mgr); | ||
321 | dev_obj->node_mgr = NULL; | ||
322 | } | ||
323 | |||
324 | /* Free the io, channel, and message managers for this board: */ | ||
325 | if (dev_obj->iomgr) { | ||
326 | io_destroy(dev_obj->iomgr); | ||
327 | dev_obj->iomgr = NULL; | ||
328 | } | ||
329 | if (dev_obj->chnl_mgr) { | ||
330 | chnl_destroy(dev_obj->chnl_mgr); | ||
331 | dev_obj->chnl_mgr = NULL; | ||
332 | } | ||
333 | if (dev_obj->msg_mgr) { | ||
334 | msg_delete(dev_obj->msg_mgr); | ||
335 | dev_obj->msg_mgr = NULL; | ||
336 | } | ||
337 | |||
338 | if (dev_obj->deh_mgr) { | ||
339 | /* Uninitialize DEH module. */ | ||
340 | bridge_deh_destroy(dev_obj->deh_mgr); | ||
341 | dev_obj->deh_mgr = NULL; | ||
342 | } | ||
343 | if (dev_obj->cmm_mgr) { | ||
344 | cmm_destroy(dev_obj->cmm_mgr, true); | ||
345 | dev_obj->cmm_mgr = NULL; | ||
346 | } | ||
347 | |||
348 | if (dev_obj->dmm_mgr) { | ||
349 | dmm_destroy(dev_obj->dmm_mgr); | ||
350 | dev_obj->dmm_mgr = NULL; | ||
351 | } | ||
352 | |||
353 | /* Call the driver's bridge_dev_destroy() function: */ | ||
354 | /* Require of DevDestroy */ | ||
355 | if (dev_obj->bridge_context) { | ||
356 | status = (*dev_obj->bridge_interface.dev_destroy) | ||
357 | (dev_obj->bridge_context); | ||
358 | dev_obj->bridge_context = NULL; | ||
359 | } else | ||
360 | status = -EPERM; | ||
361 | if (!status) { | ||
362 | /* Remove this DEV_Object from the global list: */ | ||
363 | drv_remove_dev_object(dev_obj->drv_obj, dev_obj); | ||
364 | /* Free The library * LDR_FreeModule | ||
365 | * (dev_obj->module_obj); */ | ||
366 | /* Free this dev object: */ | ||
367 | kfree(dev_obj); | ||
368 | dev_obj = NULL; | ||
369 | } | ||
370 | } else { | ||
371 | status = -EFAULT; | ||
372 | } | ||
373 | |||
374 | return status; | ||
375 | } | ||
376 | |||
377 | /* | ||
378 | * ======== dev_get_chnl_mgr ======== | ||
379 | * Purpose: | ||
380 | * Retrieve the handle to the channel manager handle created for this | ||
381 | * device. | ||
382 | */ | ||
383 | int dev_get_chnl_mgr(struct dev_object *hdev_obj, | ||
384 | struct chnl_mgr **mgr) | ||
385 | { | ||
386 | int status = 0; | ||
387 | struct dev_object *dev_obj = hdev_obj; | ||
388 | |||
389 | if (hdev_obj) { | ||
390 | *mgr = dev_obj->chnl_mgr; | ||
391 | } else { | ||
392 | *mgr = NULL; | ||
393 | status = -EFAULT; | ||
394 | } | ||
395 | |||
396 | return status; | ||
397 | } | ||
398 | |||
399 | /* | ||
400 | * ======== dev_get_cmm_mgr ======== | ||
401 | * Purpose: | ||
402 | * Retrieve the handle to the shared memory manager created for this | ||
403 | * device. | ||
404 | */ | ||
405 | int dev_get_cmm_mgr(struct dev_object *hdev_obj, | ||
406 | struct cmm_object **mgr) | ||
407 | { | ||
408 | int status = 0; | ||
409 | struct dev_object *dev_obj = hdev_obj; | ||
410 | |||
411 | if (hdev_obj) { | ||
412 | *mgr = dev_obj->cmm_mgr; | ||
413 | } else { | ||
414 | *mgr = NULL; | ||
415 | status = -EFAULT; | ||
416 | } | ||
417 | |||
418 | return status; | ||
419 | } | ||
420 | |||
421 | /* | ||
422 | * ======== dev_get_dmm_mgr ======== | ||
423 | * Purpose: | ||
424 | * Retrieve the handle to the dynamic memory manager created for this | ||
425 | * device. | ||
426 | */ | ||
427 | int dev_get_dmm_mgr(struct dev_object *hdev_obj, | ||
428 | struct dmm_object **mgr) | ||
429 | { | ||
430 | int status = 0; | ||
431 | struct dev_object *dev_obj = hdev_obj; | ||
432 | |||
433 | if (hdev_obj) { | ||
434 | *mgr = dev_obj->dmm_mgr; | ||
435 | } else { | ||
436 | *mgr = NULL; | ||
437 | status = -EFAULT; | ||
438 | } | ||
439 | |||
440 | return status; | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | * ======== dev_get_cod_mgr ======== | ||
445 | * Purpose: | ||
446 | * Retrieve the COD manager created for this device. | ||
447 | */ | ||
448 | int dev_get_cod_mgr(struct dev_object *hdev_obj, | ||
449 | struct cod_manager **cod_mgr) | ||
450 | { | ||
451 | int status = 0; | ||
452 | struct dev_object *dev_obj = hdev_obj; | ||
453 | |||
454 | if (hdev_obj) { | ||
455 | *cod_mgr = dev_obj->cod_mgr; | ||
456 | } else { | ||
457 | *cod_mgr = NULL; | ||
458 | status = -EFAULT; | ||
459 | } | ||
460 | |||
461 | return status; | ||
462 | } | ||
463 | |||
464 | /* | ||
465 | * ========= dev_get_deh_mgr ======== | ||
466 | */ | ||
467 | int dev_get_deh_mgr(struct dev_object *hdev_obj, | ||
468 | struct deh_mgr **deh_manager) | ||
469 | { | ||
470 | int status = 0; | ||
471 | |||
472 | if (hdev_obj) { | ||
473 | *deh_manager = hdev_obj->deh_mgr; | ||
474 | } else { | ||
475 | *deh_manager = NULL; | ||
476 | status = -EFAULT; | ||
477 | } | ||
478 | return status; | ||
479 | } | ||
480 | |||
481 | /* | ||
482 | * ======== dev_get_dev_node ======== | ||
483 | * Purpose: | ||
484 | * Retrieve the platform specific device ID for this device. | ||
485 | */ | ||
486 | int dev_get_dev_node(struct dev_object *hdev_obj, | ||
487 | struct cfg_devnode **dev_nde) | ||
488 | { | ||
489 | int status = 0; | ||
490 | struct dev_object *dev_obj = hdev_obj; | ||
491 | |||
492 | if (hdev_obj) { | ||
493 | *dev_nde = dev_obj->dev_node_obj; | ||
494 | } else { | ||
495 | *dev_nde = NULL; | ||
496 | status = -EFAULT; | ||
497 | } | ||
498 | |||
499 | return status; | ||
500 | } | ||
501 | |||
502 | /* | ||
503 | * ======== dev_get_first ======== | ||
504 | * Purpose: | ||
505 | * Retrieve the first Device Object handle from an internal linked list | ||
506 | * of DEV_OBJECTs maintained by DEV. | ||
507 | */ | ||
508 | struct dev_object *dev_get_first(void) | ||
509 | { | ||
510 | struct dev_object *dev_obj = NULL; | ||
511 | |||
512 | dev_obj = (struct dev_object *)drv_get_first_dev_object(); | ||
513 | |||
514 | return dev_obj; | ||
515 | } | ||
516 | |||
517 | /* | ||
518 | * ======== dev_get_intf_fxns ======== | ||
519 | * Purpose: | ||
520 | * Retrieve the Bridge interface function structure for the loaded driver. | ||
521 | * Requires if_fxns != NULL. | ||
522 | */ | ||
523 | int dev_get_intf_fxns(struct dev_object *hdev_obj, | ||
524 | struct bridge_drv_interface **if_fxns) | ||
525 | { | ||
526 | int status = 0; | ||
527 | struct dev_object *dev_obj = hdev_obj; | ||
528 | |||
529 | if (hdev_obj) { | ||
530 | *if_fxns = &dev_obj->bridge_interface; | ||
531 | } else { | ||
532 | *if_fxns = NULL; | ||
533 | status = -EFAULT; | ||
534 | } | ||
535 | |||
536 | return status; | ||
537 | } | ||
538 | |||
539 | /* | ||
540 | * ========= dev_get_io_mgr ======== | ||
541 | */ | ||
542 | int dev_get_io_mgr(struct dev_object *hdev_obj, | ||
543 | struct io_mgr **io_man) | ||
544 | { | ||
545 | int status = 0; | ||
546 | |||
547 | if (hdev_obj) { | ||
548 | *io_man = hdev_obj->iomgr; | ||
549 | } else { | ||
550 | *io_man = NULL; | ||
551 | status = -EFAULT; | ||
552 | } | ||
553 | |||
554 | return status; | ||
555 | } | ||
556 | |||
557 | /* | ||
558 | * ======== dev_get_next ======== | ||
559 | * Purpose: | ||
560 | * Retrieve the next Device Object handle from an internal linked list | ||
561 | * of DEV_OBJECTs maintained by DEV, after having previously called | ||
562 | * dev_get_first() and zero or more dev_get_next() calls. | ||
563 | */ | ||
564 | struct dev_object *dev_get_next(struct dev_object *hdev_obj) | ||
565 | { | ||
566 | struct dev_object *next_dev_object = NULL; | ||
567 | |||
568 | if (hdev_obj) { | ||
569 | next_dev_object = (struct dev_object *) | ||
570 | drv_get_next_dev_object((u32) hdev_obj); | ||
571 | } | ||
572 | |||
573 | return next_dev_object; | ||
574 | } | ||
575 | |||
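dev_get_first() and dev_get_next() are meant to be used together to walk DEV's internal device list; a minimal sketch of that iteration pattern (the same loop shape api_init_complete2() in dspapi.c uses further below):

	struct dev_object *hdev_obj;

	for (hdev_obj = dev_get_first(); hdev_obj != NULL;
	     hdev_obj = dev_get_next(hdev_obj)) {
		/* operate on each registered device object here */
	}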
576 | /* | ||
577 | * ======== dev_get_msg_mgr ======== | ||
578 | */ | ||
579 | void dev_get_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr **msg_man) | ||
580 | { | ||
581 | *msg_man = hdev_obj->msg_mgr; | ||
582 | } | ||
583 | |||
584 | /* | ||
585 | * ======== dev_get_node_manager ======== | ||
586 | * Purpose: | ||
587 | * Retrieve the Node Manager Handle | ||
588 | */ | ||
589 | int dev_get_node_manager(struct dev_object *hdev_obj, | ||
590 | struct node_mgr **node_man) | ||
591 | { | ||
592 | int status = 0; | ||
593 | struct dev_object *dev_obj = hdev_obj; | ||
594 | |||
595 | if (hdev_obj) { | ||
596 | *node_man = dev_obj->node_mgr; | ||
597 | } else { | ||
598 | *node_man = NULL; | ||
599 | status = -EFAULT; | ||
600 | } | ||
601 | |||
602 | return status; | ||
603 | } | ||
604 | |||
605 | /* | ||
606 | * ======== dev_get_symbol ======== | ||
607 | */ | ||
608 | int dev_get_symbol(struct dev_object *hdev_obj, | ||
609 | const char *str_sym, u32 *pul_value) | ||
610 | { | ||
611 | int status = 0; | ||
612 | struct cod_manager *cod_mgr; | ||
613 | |||
614 | if (hdev_obj) { | ||
615 | status = dev_get_cod_mgr(hdev_obj, &cod_mgr); | ||
616 | if (cod_mgr) | ||
617 | status = cod_get_sym_value(cod_mgr, (char *)str_sym, | ||
618 | pul_value); | ||
619 | else | ||
620 | status = -EFAULT; | ||
621 | } | ||
622 | |||
623 | return status; | ||
624 | } | ||
625 | |||
626 | /* | ||
627 | * ======== dev_get_bridge_context ======== | ||
628 | * Purpose: | ||
629 | * Retrieve the Bridge Context handle, as returned by the | ||
630 | * bridge_dev_create fxn. | ||
631 | */ | ||
632 | int dev_get_bridge_context(struct dev_object *hdev_obj, | ||
633 | struct bridge_dev_context **phbridge_context) | ||
634 | { | ||
635 | int status = 0; | ||
636 | struct dev_object *dev_obj = hdev_obj; | ||
637 | |||
638 | if (hdev_obj) { | ||
639 | *phbridge_context = dev_obj->bridge_context; | ||
640 | } else { | ||
641 | *phbridge_context = NULL; | ||
642 | status = -EFAULT; | ||
643 | } | ||
644 | |||
645 | return status; | ||
646 | } | ||
647 | |||
648 | /* | ||
649 | * ======== dev_notify_clients ======== | ||
650 | * Purpose: | ||
651 | * Notify all clients of this device of a change in device status. | ||
652 | */ | ||
653 | int dev_notify_clients(struct dev_object *dev_obj, u32 ret) | ||
654 | { | ||
655 | struct list_head *curr; | ||
656 | |||
657 | /* | ||
658 | * FIXME: this code needs struct proc_object to have a list_head | ||
659 | * at the beginning. If not, this can go horribly wrong. | ||
660 | */ | ||
661 | list_for_each(curr, &dev_obj->proc_list) | ||
662 | proc_notify_clients((void *)curr, ret); | ||
663 | |||
664 | return 0; | ||
665 | } | ||
666 | |||
667 | /* | ||
668 | * ======== dev_remove_device ======== | ||
669 | */ | ||
670 | int dev_remove_device(struct cfg_devnode *dev_node_obj) | ||
671 | { | ||
672 | struct dev_object *hdev_obj; /* handle to device object */ | ||
673 | int status = 0; | ||
674 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
675 | |||
676 | if (!drv_datap) | ||
677 | status = -ENODATA; | ||
678 | |||
679 | if (!dev_node_obj) | ||
680 | status = -EFAULT; | ||
681 | |||
682 | /* Retrieve the device object handle originally stored with | ||
683 | * the dev_node: */ | ||
684 | if (!status) { | ||
685 | /* check the device string and then retrieve the dev object */ | ||
686 | if (!strcmp((char *)((struct drv_ext *)dev_node_obj)->sz_string, | ||
687 | "TIOMAP1510")) { | ||
688 | hdev_obj = drv_datap->dev_object; | ||
689 | /* Destroy the device object. */ | ||
690 | status = dev_destroy_device(hdev_obj); | ||
691 | } else { | ||
692 | status = -EPERM; | ||
693 | } | ||
694 | } | ||
695 | |||
696 | if (status) | ||
697 | pr_err("%s: Failed, status 0x%x\n", __func__, status); | ||
698 | |||
699 | return status; | ||
700 | } | ||
701 | |||
702 | /* | ||
703 | * ======== dev_set_chnl_mgr ======== | ||
704 | * Purpose: | ||
705 | * Set the channel manager for this device. | ||
706 | */ | ||
707 | int dev_set_chnl_mgr(struct dev_object *hdev_obj, | ||
708 | struct chnl_mgr *hmgr) | ||
709 | { | ||
710 | int status = 0; | ||
711 | struct dev_object *dev_obj = hdev_obj; | ||
712 | |||
713 | if (hdev_obj) | ||
714 | dev_obj->chnl_mgr = hmgr; | ||
715 | else | ||
716 | status = -EFAULT; | ||
717 | |||
718 | return status; | ||
719 | } | ||
720 | |||
721 | /* | ||
722 | * ======== dev_set_msg_mgr ======== | ||
723 | * Purpose: | ||
724 | * Set the message manager for this device. | ||
725 | */ | ||
726 | void dev_set_msg_mgr(struct dev_object *hdev_obj, struct msg_mgr *hmgr) | ||
727 | { | ||
728 | hdev_obj->msg_mgr = hmgr; | ||
729 | } | ||
730 | |||
731 | /* | ||
732 | * ======== dev_start_device ======== | ||
733 | * Purpose: | ||
734 | * Initializes the new device with the BRIDGE environment. | ||
735 | */ | ||
736 | int dev_start_device(struct cfg_devnode *dev_node_obj) | ||
737 | { | ||
738 | struct dev_object *hdev_obj = NULL; /* handle to Bridge Device */ | ||
739 | /* Bridge driver filename */ | ||
740 | char *bridge_file_name = "UMA"; | ||
741 | int status; | ||
742 | struct mgr_object *hmgr_obj = NULL; | ||
743 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
744 | |||
745 | /* Given all resources, create a device object. */ | ||
746 | status = dev_create_device(&hdev_obj, bridge_file_name, | ||
747 | dev_node_obj); | ||
748 | if (!status) { | ||
749 | /* Store away the hdev_obj with the DEVNODE */ | ||
750 | if (!drv_datap || !dev_node_obj) { | ||
751 | status = -EFAULT; | ||
752 | pr_err("%s: Failed, status 0x%x\n", __func__, status); | ||
753 | } else if (!(strcmp((char *)dev_node_obj, "TIOMAP1510"))) { | ||
754 | drv_datap->dev_object = (void *) hdev_obj; | ||
755 | } | ||
756 | if (!status) { | ||
757 | /* Create the Manager Object */ | ||
758 | status = mgr_create(&hmgr_obj, dev_node_obj); | ||
759 | if (status && !(strcmp((char *)dev_node_obj, | ||
760 | "TIOMAP1510"))) { | ||
761 | /* Ensure the device extension is NULL */ | ||
762 | drv_datap->dev_object = NULL; | ||
763 | } | ||
764 | } | ||
765 | if (status) { | ||
766 | /* Clean up */ | ||
767 | dev_destroy_device(hdev_obj); | ||
768 | hdev_obj = NULL; | ||
769 | } | ||
770 | } | ||
771 | |||
772 | return status; | ||
773 | } | ||
774 | |||
775 | /* | ||
776 | * ======== fxn_not_implemented ======== | ||
777 | * Purpose: | ||
778 | * Takes the place of a Bridge Null Function. | ||
779 | * Parameters: | ||
780 | * Multiple, optional. | ||
781 | * Returns: | ||
782 | * -ENOSYS: Always. | ||
783 | */ | ||
784 | static int fxn_not_implemented(int arg, ...) | ||
785 | { | ||
786 | return -ENOSYS; | ||
787 | } | ||
788 | |||
789 | /* | ||
790 | * ======== init_cod_mgr ======== | ||
791 | * Purpose: | ||
792 | * Create a COD manager for this device. | ||
793 | * Parameters: | ||
794 | * dev_obj: Pointer to device object created with | ||
795 | * dev_create_device() | ||
796 | * Returns: | ||
797 | * 0: Success. | ||
798 | * -EFAULT: Invalid hdev_obj. | ||
799 | * Requires: | ||
800 | * Should only be called once by dev_create_device() for a given DevObject. | ||
801 | * Ensures: | ||
802 | */ | ||
803 | static int init_cod_mgr(struct dev_object *dev_obj) | ||
804 | { | ||
805 | int status = 0; | ||
806 | char *sz_dummy_file = "dummy"; | ||
807 | |||
808 | status = cod_create(&dev_obj->cod_mgr, sz_dummy_file); | ||
809 | |||
810 | return status; | ||
811 | } | ||
812 | |||
813 | /* | ||
814 | * ======== dev_insert_proc_object ======== | ||
815 | * Purpose: | ||
816 | * Insert a ProcObject into the list maintained by DEV. | ||
817 | * Parameters: | ||
818 | * p_proc_object: Ptr to ProcObject to insert. | ||
819 | * dev_obj: Ptr to Dev Object where the list is. | ||
820 | * already_attached: Ptr to return the bool | ||
821 | * Returns: | ||
822 | * 0: If successful. | ||
823 | * Requires: | ||
824 | * List Exists | ||
825 | * hdev_obj is Valid handle | ||
826 | * DEV Initialized | ||
827 | * already_attached != NULL | ||
828 | * proc_obj != 0 | ||
829 | * Ensures: | ||
830 | * 0 and List is not Empty. | ||
831 | */ | ||
832 | int dev_insert_proc_object(struct dev_object *hdev_obj, | ||
833 | u32 proc_obj, bool *already_attached) | ||
834 | { | ||
835 | struct dev_object *dev_obj = (struct dev_object *)hdev_obj; | ||
836 | |||
837 | if (!list_empty(&dev_obj->proc_list)) | ||
838 | *already_attached = true; | ||
839 | |||
840 | /* Add DevObject to tail. */ | ||
841 | /* | ||
842 | * FIXME: this code needs struct proc_object to have a list_head | ||
843 | * at the beginning. If not, this can go horribly wrong. | ||
844 | */ | ||
845 | list_add_tail((struct list_head *)proc_obj, &dev_obj->proc_list); | ||
846 | |||
847 | return 0; | ||
848 | } | ||
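The cast in list_add_tail() above only works if the FIXME's assumption holds, i.e. the processor object begins with a list head. A hypothetical layout satisfying that assumption (not the actual proc_object definition, which lives elsewhere in the driver):

	struct proc_object {
		struct list_head link;	/* must be the first member for the cast */
		/* ... remaining per-processor state ... */
	};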
849 | |||
850 | /* | ||
851 | * ======== dev_remove_proc_object ======== | ||
852 | * Purpose: | ||
853 | * Search for and remove a Proc object from the given list maintained | ||
854 | * by the DEV | ||
855 | * Parameters: | ||
856 | * p_proc_object: Ptr to ProcObject to remove. | ||
857 | * dev_obj: Ptr to Dev Object where the list is. | ||
858 | * Returns: | ||
859 | * 0: If successful. | ||
860 | * Requires: | ||
861 | * List exists and is not empty | ||
862 | * proc_obj != 0 | ||
863 | * hdev_obj is a valid Dev handle. | ||
864 | * Ensures: | ||
865 | * Details: | ||
866 | * List will be deleted when the DEV is destroyed. | ||
867 | */ | ||
868 | int dev_remove_proc_object(struct dev_object *hdev_obj, u32 proc_obj) | ||
869 | { | ||
870 | int status = -EPERM; | ||
871 | struct list_head *cur_elem; | ||
872 | struct dev_object *dev_obj = (struct dev_object *)hdev_obj; | ||
873 | |||
874 | /* Search list for dev_obj: */ | ||
875 | list_for_each(cur_elem, &dev_obj->proc_list) { | ||
876 | if ((u32) cur_elem == proc_obj) { | ||
877 | list_del(cur_elem); | ||
878 | status = 0; | ||
879 | break; | ||
880 | } | ||
881 | } | ||
882 | |||
883 | return status; | ||
884 | } | ||
885 | |||
886 | int dev_get_dev_type(struct dev_object *dev_obj, u8 *dev_type) | ||
887 | { | ||
888 | *dev_type = dev_obj->dev_type; | ||
889 | return 0; | ||
890 | } | ||
891 | |||
892 | /* | ||
893 | * ======== store_interface_fxns ======== | ||
894 | * Purpose: | ||
895 | * Copy the Bridge's interface functions into the device object, | ||
896 | * ensuring that fxn_not_implemented() is set for: | ||
897 | * | ||
898 | * 1. All Bridge function pointers which are NULL; and | ||
899 | * 2. All function slots in the struct dev_object structure which have no | ||
900 | * corresponding slots in the Bridge's interface, because the Bridge | ||
901 | * is of an *older* version. | ||
902 | * Parameters: | ||
903 | * intf_fxns: Interface fxn Structure of the Bridge's Dev Object. | ||
904 | * drv_fxns: Interface Fxns offered by the Bridge during DEV_Create(). | ||
905 | * Returns: | ||
906 | * Requires: | ||
907 | * Input pointers are valid. | ||
908 | * Bridge driver is *not* written for a newer DSP API. | ||
909 | * Ensures: | ||
910 | * All function pointers in the dev object's fxn interface are not NULL. | ||
911 | */ | ||
912 | static void store_interface_fxns(struct bridge_drv_interface *drv_fxns, | ||
913 | struct bridge_drv_interface *intf_fxns) | ||
914 | { | ||
915 | u32 bridge_version; | ||
916 | |||
917 | /* Local helper macro: */ | ||
918 | #define STORE_FXN(cast, pfn) \ | ||
919 | (intf_fxns->pfn = ((drv_fxns->pfn != NULL) ? drv_fxns->pfn : \ | ||
920 | (cast)fxn_not_implemented)) | ||
921 | |||
922 | bridge_version = MAKEVERSION(drv_fxns->brd_api_major_version, | ||
923 | drv_fxns->brd_api_minor_version); | ||
924 | intf_fxns->brd_api_major_version = drv_fxns->brd_api_major_version; | ||
925 | intf_fxns->brd_api_minor_version = drv_fxns->brd_api_minor_version; | ||
926 | /* Install functions up to DSP API version .80 (first alpha): */ | ||
927 | if (bridge_version > 0) { | ||
928 | STORE_FXN(fxn_dev_create, dev_create); | ||
929 | STORE_FXN(fxn_dev_destroy, dev_destroy); | ||
930 | STORE_FXN(fxn_dev_ctrl, dev_cntrl); | ||
931 | STORE_FXN(fxn_brd_monitor, brd_monitor); | ||
932 | STORE_FXN(fxn_brd_start, brd_start); | ||
933 | STORE_FXN(fxn_brd_stop, brd_stop); | ||
934 | STORE_FXN(fxn_brd_status, brd_status); | ||
935 | STORE_FXN(fxn_brd_read, brd_read); | ||
936 | STORE_FXN(fxn_brd_write, brd_write); | ||
937 | STORE_FXN(fxn_brd_setstate, brd_set_state); | ||
938 | STORE_FXN(fxn_brd_memcopy, brd_mem_copy); | ||
939 | STORE_FXN(fxn_brd_memwrite, brd_mem_write); | ||
940 | STORE_FXN(fxn_brd_memmap, brd_mem_map); | ||
941 | STORE_FXN(fxn_brd_memunmap, brd_mem_un_map); | ||
942 | STORE_FXN(fxn_chnl_create, chnl_create); | ||
943 | STORE_FXN(fxn_chnl_destroy, chnl_destroy); | ||
944 | STORE_FXN(fxn_chnl_open, chnl_open); | ||
945 | STORE_FXN(fxn_chnl_close, chnl_close); | ||
946 | STORE_FXN(fxn_chnl_addioreq, chnl_add_io_req); | ||
947 | STORE_FXN(fxn_chnl_getioc, chnl_get_ioc); | ||
948 | STORE_FXN(fxn_chnl_cancelio, chnl_cancel_io); | ||
949 | STORE_FXN(fxn_chnl_flushio, chnl_flush_io); | ||
950 | STORE_FXN(fxn_chnl_getinfo, chnl_get_info); | ||
951 | STORE_FXN(fxn_chnl_getmgrinfo, chnl_get_mgr_info); | ||
952 | STORE_FXN(fxn_chnl_idle, chnl_idle); | ||
953 | STORE_FXN(fxn_chnl_registernotify, chnl_register_notify); | ||
954 | STORE_FXN(fxn_io_create, io_create); | ||
955 | STORE_FXN(fxn_io_destroy, io_destroy); | ||
956 | STORE_FXN(fxn_io_onloaded, io_on_loaded); | ||
957 | STORE_FXN(fxn_io_getprocload, io_get_proc_load); | ||
958 | STORE_FXN(fxn_msg_create, msg_create); | ||
959 | STORE_FXN(fxn_msg_createqueue, msg_create_queue); | ||
960 | STORE_FXN(fxn_msg_delete, msg_delete); | ||
961 | STORE_FXN(fxn_msg_deletequeue, msg_delete_queue); | ||
962 | STORE_FXN(fxn_msg_get, msg_get); | ||
963 | STORE_FXN(fxn_msg_put, msg_put); | ||
964 | STORE_FXN(fxn_msg_registernotify, msg_register_notify); | ||
965 | STORE_FXN(fxn_msg_setqueueid, msg_set_queue_id); | ||
966 | } | ||
967 | /* Add code for any additional functions in newer Bridge versions here */ | ||
968 | #undef STORE_FXN | ||
969 | } | ||
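For a single slot, the STORE_FXN() helper above expands to a plain NULL check plus fallback; shown here for the chnl_create entry (fxn_chnl_create being the matching typedef from the Bridge interface header):

	intf_fxns->chnl_create = (drv_fxns->chnl_create != NULL) ?
			drv_fxns->chnl_create :
			(fxn_chnl_create)fxn_not_implemented;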
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c deleted file mode 100644 index fcf564aa566d..000000000000 --- a/drivers/staging/tidspbridge/pmgr/dmm.c +++ /dev/null | |||
@@ -1,487 +0,0 @@ | |||
1 | /* | ||
2 | * dmm.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address | ||
7 | * space that can be directly mapped to any MPU buffer or memory region | ||
8 | * | ||
9 | * Notes: | ||
10 | * Region: Generic memory entity having a start address and a size | ||
11 | * Chunk: Reserved region | ||
12 | * | ||
13 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
14 | * | ||
15 | * This package is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License version 2 as | ||
17 | * published by the Free Software Foundation. | ||
18 | * | ||
19 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
20 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
21 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | */ | ||
23 | #include <linux/types.h> | ||
24 | |||
25 | /* ----------------------------------- Host OS */ | ||
26 | #include <dspbridge/host_os.h> | ||
27 | |||
28 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
29 | #include <dspbridge/dbdefs.h> | ||
30 | |||
31 | /* ----------------------------------- OS Adaptation Layer */ | ||
32 | #include <dspbridge/sync.h> | ||
33 | |||
34 | /* ----------------------------------- Platform Manager */ | ||
35 | #include <dspbridge/dev.h> | ||
36 | #include <dspbridge/proc.h> | ||
37 | |||
38 | /* ----------------------------------- This */ | ||
39 | #include <dspbridge/dmm.h> | ||
40 | |||
41 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | ||
42 | #define DMM_ADDR_VIRTUAL(a) \ | ||
43 | (((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\ | ||
44 | dyn_mem_map_beg) | ||
45 | #define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K) | ||
46 | |||
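The two macros above define an index <-> DSP virtual address mapping over virtual_mapping_table: entry i describes the 4 KiB page starting at dyn_mem_map_beg + i * PG_SIZE4K, and the two conversions are inverses of each other. A small sketch, assuming i is a valid table index:

	u32 dsp_va = dyn_mem_map_beg + i * PG_SIZE4K;

	/* DMM_ADDR_TO_INDEX(dsp_va) yields i, and
	 * DMM_ADDR_VIRTUAL(&virtual_mapping_table[i]) yields dsp_va. */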
47 | /* DMM Mgr */ | ||
48 | struct dmm_object { | ||
49 | /* Dmm Lock is used to serialize access to the memory manager | ||
50 | * across multiple threads. */ | ||
51 | spinlock_t dmm_lock; /* Lock to access dmm mgr */ | ||
52 | }; | ||
53 | |||
54 | struct map_page { | ||
55 | u32 region_size:15; | ||
56 | u32 mapped_size:15; | ||
57 | u32 reserved:1; | ||
58 | u32 mapped:1; | ||
59 | }; | ||
60 | |||
61 | /* Create the free list */ | ||
62 | static struct map_page *virtual_mapping_table; | ||
63 | static u32 free_region; /* The index of free region */ | ||
64 | static u32 free_size; | ||
65 | static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */ | ||
66 | static u32 table_size; /* The size of virt and phys pages tables */ | ||
67 | |||
68 | /* ----------------------------------- Function Prototypes */ | ||
69 | static struct map_page *get_region(u32 addr); | ||
70 | static struct map_page *get_free_region(u32 len); | ||
71 | static struct map_page *get_mapped_region(u32 addrs); | ||
72 | |||
73 | /* ======== dmm_create_tables ======== | ||
74 | * Purpose: | ||
75 | * Create the table that holds the physical address information for | ||
76 | * the buffer pages passed in by the user, and the table that holds | ||
77 | * the information about the virtual memory that is reserved | ||
78 | * for the DSP. | ||
79 | */ | ||
80 | int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size) | ||
81 | { | ||
82 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
83 | int status = 0; | ||
84 | |||
85 | status = dmm_delete_tables(dmm_obj); | ||
86 | if (!status) { | ||
87 | dyn_mem_map_beg = addr; | ||
88 | table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K; | ||
89 | /* Create the free list */ | ||
90 | virtual_mapping_table = __vmalloc(table_size * | ||
91 | sizeof(struct map_page), GFP_KERNEL | | ||
92 | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); | ||
93 | if (virtual_mapping_table == NULL) | ||
94 | status = -ENOMEM; | ||
95 | else { | ||
96 | /* On successful allocation, | ||
97 | * all entries are zero ('free') */ | ||
98 | free_region = 0; | ||
99 | free_size = table_size * PG_SIZE4K; | ||
100 | virtual_mapping_table[0].region_size = table_size; | ||
101 | } | ||
102 | } | ||
103 | |||
104 | if (status) | ||
105 | pr_err("%s: failure, status 0x%x\n", __func__, status); | ||
106 | |||
107 | return status; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * ======== dmm_create ======== | ||
112 | * Purpose: | ||
113 | * Create a dynamic memory manager object. | ||
114 | */ | ||
115 | int dmm_create(struct dmm_object **dmm_manager, | ||
116 | struct dev_object *hdev_obj, | ||
117 | const struct dmm_mgrattrs *mgr_attrts) | ||
118 | { | ||
119 | struct dmm_object *dmm_obj = NULL; | ||
120 | int status = 0; | ||
121 | |||
122 | *dmm_manager = NULL; | ||
123 | /* create, zero, and tag a dmm mgr object */ | ||
124 | dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL); | ||
125 | if (dmm_obj != NULL) { | ||
126 | spin_lock_init(&dmm_obj->dmm_lock); | ||
127 | *dmm_manager = dmm_obj; | ||
128 | } else { | ||
129 | status = -ENOMEM; | ||
130 | } | ||
131 | |||
132 | return status; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * ======== dmm_destroy ======== | ||
137 | * Purpose: | ||
138 | * Release the dynamic memory manager resources. | ||
139 | */ | ||
140 | int dmm_destroy(struct dmm_object *dmm_mgr) | ||
141 | { | ||
142 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
143 | int status = 0; | ||
144 | |||
145 | if (dmm_mgr) { | ||
146 | status = dmm_delete_tables(dmm_obj); | ||
147 | if (!status) | ||
148 | kfree(dmm_obj); | ||
149 | } else | ||
150 | status = -EFAULT; | ||
151 | |||
152 | return status; | ||
153 | } | ||
154 | |||
155 | /* | ||
156 | * ======== dmm_delete_tables ======== | ||
157 | * Purpose: | ||
158 | * Delete DMM Tables. | ||
159 | */ | ||
160 | int dmm_delete_tables(struct dmm_object *dmm_mgr) | ||
161 | { | ||
162 | int status = 0; | ||
163 | |||
164 | /* Delete all DMM tables */ | ||
165 | if (dmm_mgr) | ||
166 | vfree(virtual_mapping_table); | ||
167 | else | ||
168 | status = -EFAULT; | ||
169 | return status; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * ======== dmm_get_handle ======== | ||
174 | * Purpose: | ||
175 | * Return the dynamic memory manager object for this device. | ||
176 | * This is typically called from the client process. | ||
177 | */ | ||
178 | int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager) | ||
179 | { | ||
180 | int status = 0; | ||
181 | struct dev_object *hdev_obj; | ||
182 | |||
183 | if (hprocessor != NULL) | ||
184 | status = proc_get_dev_object(hprocessor, &hdev_obj); | ||
185 | else | ||
186 | hdev_obj = dev_get_first(); /* default */ | ||
187 | |||
188 | if (!status) | ||
189 | status = dev_get_dmm_mgr(hdev_obj, dmm_manager); | ||
190 | |||
191 | return status; | ||
192 | } | ||
193 | |||
194 | /* | ||
195 | * ======== dmm_map_memory ======== | ||
196 | * Purpose: | ||
197 | * Add a mapping block to the reserved chunk. DMM assumes that this block | ||
198 | * will be mapped in the DSP/IVA's address space. DMM returns an error if a | ||
199 | * mapping overlaps another one. This function stores the info that will be | ||
200 | * required later while unmapping the block. | ||
201 | */ | ||
202 | int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size) | ||
203 | { | ||
204 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
205 | struct map_page *chunk; | ||
206 | int status = 0; | ||
207 | |||
208 | spin_lock(&dmm_obj->dmm_lock); | ||
209 | /* Find the Reserved memory chunk containing the DSP block to | ||
210 | * be mapped */ | ||
211 | chunk = (struct map_page *)get_region(addr); | ||
212 | if (chunk != NULL) { | ||
213 | /* Mark the region 'mapped', leave the 'reserved' info as-is */ | ||
214 | chunk->mapped = true; | ||
215 | chunk->mapped_size = (size / PG_SIZE4K); | ||
216 | } else | ||
217 | status = -ENOENT; | ||
218 | spin_unlock(&dmm_obj->dmm_lock); | ||
219 | |||
220 | dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, chunk %p", | ||
221 | __func__, dmm_mgr, addr, size, status, chunk); | ||
222 | |||
223 | return status; | ||
224 | } | ||
225 | |||
226 | /* | ||
227 | * ======== dmm_reserve_memory ======== | ||
228 | * Purpose: | ||
229 | * Reserve a chunk of virtually contiguous DSP/IVA address space. | ||
230 | */ | ||
231 | int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size, | ||
232 | u32 *prsv_addr) | ||
233 | { | ||
234 | int status = 0; | ||
235 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
236 | struct map_page *node; | ||
237 | u32 rsv_addr = 0; | ||
238 | u32 rsv_size = 0; | ||
239 | |||
240 | spin_lock(&dmm_obj->dmm_lock); | ||
241 | |||
242 | /* Try to get a DSP chunk from the free list */ | ||
243 | node = get_free_region(size); | ||
244 | if (node != NULL) { | ||
245 | /* DSP chunk of given size is available. */ | ||
246 | rsv_addr = DMM_ADDR_VIRTUAL(node); | ||
247 | /* Calculate the number entries to use */ | ||
248 | rsv_size = size / PG_SIZE4K; | ||
249 | if (rsv_size < node->region_size) { | ||
250 | /* Mark remainder of free region */ | ||
251 | node[rsv_size].mapped = false; | ||
252 | node[rsv_size].reserved = false; | ||
253 | node[rsv_size].region_size = | ||
254 | node->region_size - rsv_size; | ||
255 | node[rsv_size].mapped_size = 0; | ||
256 | } | ||
257 | /* get_free_region() returns a first-fit chunk, but we only use | ||
258 | what was requested. */ | ||
259 | node->mapped = false; | ||
260 | node->reserved = true; | ||
261 | node->region_size = rsv_size; | ||
262 | node->mapped_size = 0; | ||
263 | /* Return the chunk's starting address */ | ||
264 | *prsv_addr = rsv_addr; | ||
265 | } else | ||
266 | /* DSP chunk of given size is not available */ | ||
267 | status = -ENOMEM; | ||
268 | |||
269 | spin_unlock(&dmm_obj->dmm_lock); | ||
270 | |||
271 | dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, rsv_addr %x, rsv_size %x\n", | ||
272 | __func__, dmm_mgr, size, | ||
273 | prsv_addr, status, rsv_addr, rsv_size); | ||
274 | |||
275 | return status; | ||
276 | } | ||
277 | |||
278 | /* | ||
279 | * ======== dmm_un_map_memory ======== | ||
280 | * Purpose: | ||
281 | * Remove the mapped block from the reserved chunk. | ||
282 | */ | ||
283 | int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize) | ||
284 | { | ||
285 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
286 | struct map_page *chunk; | ||
287 | int status = 0; | ||
288 | |||
289 | spin_lock(&dmm_obj->dmm_lock); | ||
290 | chunk = get_mapped_region(addr); | ||
291 | if (chunk == NULL) | ||
292 | status = -ENOENT; | ||
293 | |||
294 | if (!status) { | ||
295 | /* Unmap the region */ | ||
296 | *psize = chunk->mapped_size * PG_SIZE4K; | ||
297 | chunk->mapped = false; | ||
298 | chunk->mapped_size = 0; | ||
299 | } | ||
300 | spin_unlock(&dmm_obj->dmm_lock); | ||
301 | |||
302 | dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, chunk %p\n", | ||
303 | __func__, dmm_mgr, addr, psize, status, chunk); | ||
304 | |||
305 | return status; | ||
306 | } | ||
307 | |||
308 | /* | ||
309 | * ======== dmm_un_reserve_memory ======== | ||
310 | * Purpose: | ||
311 | * Free a chunk of reserved DSP/IVA address space. | ||
312 | */ | ||
313 | int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr) | ||
314 | { | ||
315 | struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; | ||
316 | struct map_page *chunk; | ||
317 | u32 i; | ||
318 | int status = 0; | ||
319 | u32 chunk_size; | ||
320 | |||
321 | spin_lock(&dmm_obj->dmm_lock); | ||
322 | |||
323 | /* Find the chunk containing the reserved address */ | ||
324 | chunk = get_mapped_region(rsv_addr); | ||
325 | if (chunk == NULL) | ||
326 | status = -ENOENT; | ||
327 | |||
328 | if (!status) { | ||
329 | /* Free all the mapped pages for this reserved region */ | ||
330 | i = 0; | ||
331 | while (i < chunk->region_size) { | ||
332 | if (chunk[i].mapped) { | ||
333 | /* Remove mapping from the page tables. */ | ||
334 | chunk_size = chunk[i].mapped_size; | ||
335 | /* Clear the mapping flags */ | ||
336 | chunk[i].mapped = false; | ||
337 | chunk[i].mapped_size = 0; | ||
338 | i += chunk_size; | ||
339 | } else | ||
340 | i++; | ||
341 | } | ||
342 | /* Clear the flags (mark the region 'free') */ | ||
343 | chunk->reserved = false; | ||
344 | /* NOTE: We do NOT coalesce free regions here. | ||
345 | * Free regions are coalesced in get_free_region(), as it | ||
346 | * traverses the whole mapping table. | ||
347 | */ | ||
348 | } | ||
349 | spin_unlock(&dmm_obj->dmm_lock); | ||
350 | |||
351 | dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p", | ||
352 | __func__, dmm_mgr, rsv_addr, status, chunk); | ||
353 | |||
354 | return status; | ||
355 | } | ||
356 | |||
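Taken together, the entry points above implement a reserve/map/unmap/unreserve life cycle for a block of DSP virtual address space. A minimal usage sketch against a manager obtained from dmm_get_handle(), with error handling omitted:

	u32 rsv_addr, unmap_size;

	dmm_reserve_memory(dmm_mgr, size, &rsv_addr);	/* carve out DSP VA */
	dmm_map_memory(dmm_mgr, rsv_addr, size);	/* record the mapping */
	/* ... block is in use by the DSP ... */
	dmm_un_map_memory(dmm_mgr, rsv_addr, &unmap_size);
	dmm_un_reserve_memory(dmm_mgr, rsv_addr);	/* release the DSP VA */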
357 | /* | ||
358 | * ======== get_region ======== | ||
359 | * Purpose: | ||
360 | * Returns the mapping-table region that contains the specified address | ||
361 | */ | ||
362 | static struct map_page *get_region(u32 addr) | ||
363 | { | ||
364 | struct map_page *curr_region = NULL; | ||
365 | u32 i = 0; | ||
366 | |||
367 | if (virtual_mapping_table != NULL) { | ||
368 | /* find page mapped by this address */ | ||
369 | i = DMM_ADDR_TO_INDEX(addr); | ||
370 | if (i < table_size) | ||
371 | curr_region = virtual_mapping_table + i; | ||
372 | } | ||
373 | |||
374 | dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n", | ||
375 | __func__, curr_region, free_region, free_size); | ||
376 | return curr_region; | ||
377 | } | ||
378 | |||
379 | /* | ||
380 | * ======== get_free_region ======== | ||
381 | * Purpose: | ||
382 | * Returns the requested free region | ||
383 | */ | ||
384 | static struct map_page *get_free_region(u32 len) | ||
385 | { | ||
386 | struct map_page *curr_region = NULL; | ||
387 | u32 i = 0; | ||
388 | u32 region_size = 0; | ||
389 | u32 next_i = 0; | ||
390 | |||
391 | if (virtual_mapping_table == NULL) | ||
392 | return curr_region; | ||
393 | if (len > free_size) { | ||
394 | /* Find the largest free region | ||
395 | * (coalesce during the traversal) */ | ||
396 | while (i < table_size) { | ||
397 | region_size = virtual_mapping_table[i].region_size; | ||
398 | next_i = i + region_size; | ||
399 | if (virtual_mapping_table[i].reserved == false) { | ||
400 | /* Coalesce, if possible */ | ||
401 | if (next_i < table_size && | ||
402 | virtual_mapping_table[next_i].reserved | ||
403 | == false) { | ||
404 | virtual_mapping_table[i].region_size += | ||
405 | virtual_mapping_table | ||
406 | [next_i].region_size; | ||
407 | continue; | ||
408 | } | ||
409 | region_size *= PG_SIZE4K; | ||
410 | if (region_size > free_size) { | ||
411 | free_region = i; | ||
412 | free_size = region_size; | ||
413 | } | ||
414 | } | ||
415 | i = next_i; | ||
416 | } | ||
417 | } | ||
418 | if (len <= free_size) { | ||
419 | curr_region = virtual_mapping_table + free_region; | ||
420 | free_region += (len / PG_SIZE4K); | ||
421 | free_size -= len; | ||
422 | } | ||
423 | return curr_region; | ||
424 | } | ||
425 | |||
426 | /* | ||
427 | * ======== get_mapped_region ======== | ||
428 | * Purpose: | ||
429 | * Returns the requested mapped region | ||
430 | */ | ||
431 | static struct map_page *get_mapped_region(u32 addrs) | ||
432 | { | ||
433 | u32 i = 0; | ||
434 | struct map_page *curr_region = NULL; | ||
435 | |||
436 | if (virtual_mapping_table == NULL) | ||
437 | return curr_region; | ||
438 | |||
439 | i = DMM_ADDR_TO_INDEX(addrs); | ||
440 | if (i < table_size && (virtual_mapping_table[i].mapped || | ||
441 | virtual_mapping_table[i].reserved)) | ||
442 | curr_region = virtual_mapping_table + i; | ||
443 | return curr_region; | ||
444 | } | ||
445 | |||
446 | #ifdef DSP_DMM_DEBUG | ||
447 | u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr) | ||
448 | { | ||
449 | struct map_page *curr_node = NULL; | ||
450 | u32 i; | ||
451 | u32 freemem = 0; | ||
452 | u32 bigsize = 0; | ||
453 | |||
454 | spin_lock(&dmm_mgr->dmm_lock); | ||
455 | |||
456 | if (virtual_mapping_table != NULL) { | ||
457 | for (i = 0; i < table_size; i += | ||
458 | virtual_mapping_table[i].region_size) { | ||
459 | curr_node = virtual_mapping_table + i; | ||
460 | if (curr_node->reserved) { | ||
461 | /*printk("RESERVED size = 0x%x, " | ||
462 | "Map size = 0x%x\n", | ||
463 | (curr_node->region_size * PG_SIZE4K), | ||
464 | (curr_node->mapped == false) ? 0 : | ||
465 | (curr_node->mapped_size * PG_SIZE4K)); | ||
466 | */ | ||
467 | } else { | ||
468 | /* printk("UNRESERVED size = 0x%x\n", | ||
469 | (curr_node->region_size * PG_SIZE4K)); | ||
470 | */ | ||
471 | freemem += (curr_node->region_size * PG_SIZE4K); | ||
472 | if (curr_node->region_size > bigsize) | ||
473 | bigsize = curr_node->region_size; | ||
474 | } | ||
475 | } | ||
476 | } | ||
477 | spin_unlock(&dmm_mgr->dmm_lock); | ||
478 | dev_info(bridge, "Total DSP VA FREE memory = %d Mbytes\n", | ||
479 | freemem / (1024 * 1024)); | ||
480 | dev_info(bridge, "Total DSP VA USED memory = %d Mbytes\n", | ||
481 | (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024)); | ||
482 | dev_info(bridge, "DSP VA - Biggest FREE block = %d Mbytes\n", | ||
483 | (bigsize * PG_SIZE4K / (1024 * 1024))); | ||
484 | |||
485 | return 0; | ||
486 | } | ||
487 | #endif | ||
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c deleted file mode 100644 index c4ccf17d21c0..000000000000 --- a/drivers/staging/tidspbridge/pmgr/dspapi.c +++ /dev/null | |||
@@ -1,1841 +0,0 @@ | |||
1 | /* | ||
2 | * dspapi.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Common DSP API functions, also includes the wrapper | ||
7 | * functions called directly by the DeviceIOControl interface. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | #include <linux/types.h> | ||
20 | |||
21 | /* ----------------------------------- Host OS */ | ||
22 | #include <dspbridge/host_os.h> | ||
23 | |||
24 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
25 | #include <dspbridge/dbdefs.h> | ||
26 | |||
27 | /* ----------------------------------- OS Adaptation Layer */ | ||
28 | #include <dspbridge/ntfy.h> | ||
29 | |||
30 | /* ----------------------------------- Platform Manager */ | ||
31 | #include <dspbridge/chnl.h> | ||
32 | #include <dspbridge/dev.h> | ||
33 | #include <dspbridge/drv.h> | ||
34 | |||
35 | #include <dspbridge/proc.h> | ||
36 | #include <dspbridge/strm.h> | ||
37 | |||
38 | /* ----------------------------------- Resource Manager */ | ||
39 | #include <dspbridge/disp.h> | ||
40 | #include <dspbridge/mgr.h> | ||
41 | #include <dspbridge/node.h> | ||
42 | #include <dspbridge/rmm.h> | ||
43 | |||
44 | /* ----------------------------------- Others */ | ||
45 | #include <dspbridge/msg.h> | ||
46 | #include <dspbridge/cmm.h> | ||
47 | #include <dspbridge/io.h> | ||
48 | |||
49 | /* ----------------------------------- This */ | ||
50 | #include <dspbridge/dspapi.h> | ||
51 | #include <dspbridge/dbdcd.h> | ||
52 | |||
53 | #include <dspbridge/resourcecleanup.h> | ||
54 | |||
55 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | ||
56 | #define MAX_TRACEBUFLEN 255 | ||
57 | #define MAX_LOADARGS 16 | ||
58 | #define MAX_NODES 64 | ||
59 | #define MAX_STREAMS 16 | ||
60 | #define MAX_BUFS 64 | ||
61 | |||
62 | /* Used to get dspbridge ioctl table */ | ||
63 | #define DB_GET_IOC_TABLE(cmd) (DB_GET_MODULE(cmd) >> DB_MODULE_SHIFT) | ||
64 | |||
65 | /* Device IOCtl function pointer */ | ||
66 | struct api_cmd { | ||
67 | u32(*fxn) (union trapped_args *args, void *pr_ctxt); | ||
68 | u32 index; | ||
69 | }; | ||
70 | |||
71 | /* ----------------------------------- Globals */ | ||
72 | static u32 api_c_refs; | ||
73 | |||
74 | /* | ||
75 | * Function tables. | ||
76 | * The order of these functions MUST be the same as the order of the command | ||
77 | * numbers defined in dspapi-ioctl.h. This is how an IOCTL number in user mode | ||
78 | * turns into a function call in kernel mode. | ||
79 | */ | ||
80 | |||
81 | /* MGR wrapper functions */ | ||
82 | static struct api_cmd mgr_cmd[] = { | ||
83 | {mgrwrap_enum_node_info}, /* MGR_ENUMNODE_INFO */ | ||
84 | {mgrwrap_enum_proc_info}, /* MGR_ENUMPROC_INFO */ | ||
85 | {mgrwrap_register_object}, /* MGR_REGISTEROBJECT */ | ||
86 | {mgrwrap_unregister_object}, /* MGR_UNREGISTEROBJECT */ | ||
87 | {mgrwrap_wait_for_bridge_events}, /* MGR_WAIT */ | ||
88 | {mgrwrap_get_process_resources_info}, /* MGR_GET_PROC_RES */ | ||
89 | }; | ||
90 | |||
91 | /* PROC wrapper functions */ | ||
92 | static struct api_cmd proc_cmd[] = { | ||
93 | {procwrap_attach}, /* PROC_ATTACH */ | ||
94 | {procwrap_ctrl}, /* PROC_CTRL */ | ||
95 | {procwrap_detach}, /* PROC_DETACH */ | ||
96 | {procwrap_enum_node_info}, /* PROC_ENUMNODE */ | ||
97 | {procwrap_enum_resources}, /* PROC_ENUMRESOURCES */ | ||
98 | {procwrap_get_state}, /* PROC_GET_STATE */ | ||
99 | {procwrap_get_trace}, /* PROC_GET_TRACE */ | ||
100 | {procwrap_load}, /* PROC_LOAD */ | ||
101 | {procwrap_register_notify}, /* PROC_REGISTERNOTIFY */ | ||
102 | {procwrap_start}, /* PROC_START */ | ||
103 | {procwrap_reserve_memory}, /* PROC_RSVMEM */ | ||
104 | {procwrap_un_reserve_memory}, /* PROC_UNRSVMEM */ | ||
105 | {procwrap_map}, /* PROC_MAPMEM */ | ||
106 | {procwrap_un_map}, /* PROC_UNMAPMEM */ | ||
107 | {procwrap_flush_memory}, /* PROC_FLUSHMEMORY */ | ||
108 | {procwrap_stop}, /* PROC_STOP */ | ||
109 | {procwrap_invalidate_memory}, /* PROC_INVALIDATEMEMORY */ | ||
110 | {procwrap_begin_dma}, /* PROC_BEGINDMA */ | ||
111 | {procwrap_end_dma}, /* PROC_ENDDMA */ | ||
112 | }; | ||
113 | |||
114 | /* NODE wrapper functions */ | ||
115 | static struct api_cmd node_cmd[] = { | ||
116 | {nodewrap_allocate}, /* NODE_ALLOCATE */ | ||
117 | {nodewrap_alloc_msg_buf}, /* NODE_ALLOCMSGBUF */ | ||
118 | {nodewrap_change_priority}, /* NODE_CHANGEPRIORITY */ | ||
119 | {nodewrap_connect}, /* NODE_CONNECT */ | ||
120 | {nodewrap_create}, /* NODE_CREATE */ | ||
121 | {nodewrap_delete}, /* NODE_DELETE */ | ||
122 | {nodewrap_free_msg_buf}, /* NODE_FREEMSGBUF */ | ||
123 | {nodewrap_get_attr}, /* NODE_GETATTR */ | ||
124 | {nodewrap_get_message}, /* NODE_GETMESSAGE */ | ||
125 | {nodewrap_pause}, /* NODE_PAUSE */ | ||
126 | {nodewrap_put_message}, /* NODE_PUTMESSAGE */ | ||
127 | {nodewrap_register_notify}, /* NODE_REGISTERNOTIFY */ | ||
128 | {nodewrap_run}, /* NODE_RUN */ | ||
129 | {nodewrap_terminate}, /* NODE_TERMINATE */ | ||
130 | {nodewrap_get_uuid_props}, /* NODE_GETUUIDPROPS */ | ||
131 | }; | ||
132 | |||
133 | /* STRM wrapper functions */ | ||
134 | static struct api_cmd strm_cmd[] = { | ||
135 | {strmwrap_allocate_buffer}, /* STRM_ALLOCATEBUFFER */ | ||
136 | {strmwrap_close}, /* STRM_CLOSE */ | ||
137 | {strmwrap_free_buffer}, /* STRM_FREEBUFFER */ | ||
138 | {strmwrap_get_event_handle}, /* STRM_GETEVENTHANDLE */ | ||
139 | {strmwrap_get_info}, /* STRM_GETINFO */ | ||
140 | {strmwrap_idle}, /* STRM_IDLE */ | ||
141 | {strmwrap_issue}, /* STRM_ISSUE */ | ||
142 | {strmwrap_open}, /* STRM_OPEN */ | ||
143 | {strmwrap_reclaim}, /* STRM_RECLAIM */ | ||
144 | {strmwrap_register_notify}, /* STRM_REGISTERNOTIFY */ | ||
145 | {strmwrap_select}, /* STRM_SELECT */ | ||
146 | }; | ||
147 | |||
148 | /* CMM wrapper functions */ | ||
149 | static struct api_cmd cmm_cmd[] = { | ||
150 | {cmmwrap_calloc_buf}, /* CMM_ALLOCBUF */ | ||
151 | {cmmwrap_free_buf}, /* CMM_FREEBUF */ | ||
152 | {cmmwrap_get_handle}, /* CMM_GETHANDLE */ | ||
153 | {cmmwrap_get_info}, /* CMM_GETINFO */ | ||
154 | }; | ||
155 | |||
156 | /* Array used to store ioctl table sizes. It can hold up to 8 entries */ | ||
157 | static u8 size_cmd[] = { | ||
158 | ARRAY_SIZE(mgr_cmd), | ||
159 | ARRAY_SIZE(proc_cmd), | ||
160 | ARRAY_SIZE(node_cmd), | ||
161 | ARRAY_SIZE(strm_cmd), | ||
162 | ARRAY_SIZE(cmm_cmd), | ||
163 | }; | ||
164 | |||
165 | static inline void _cp_fm_usr(void *to, const void __user *from, | ||
166 | int *err, unsigned long bytes) | ||
167 | { | ||
168 | if (*err) | ||
169 | return; | ||
170 | |||
171 | if (unlikely(!from)) { | ||
172 | *err = -EFAULT; | ||
173 | return; | ||
174 | } | ||
175 | |||
176 | if (unlikely(copy_from_user(to, from, bytes))) | ||
177 | *err = -EFAULT; | ||
178 | } | ||
179 | |||
180 | #define CP_FM_USR(to, from, err, n) \ | ||
181 | _cp_fm_usr(to, from, &(err), (n) * sizeof(*(to))) | ||
182 | |||
183 | static inline void _cp_to_usr(void __user *to, const void *from, | ||
184 | int *err, unsigned long bytes) | ||
185 | { | ||
186 | if (*err) | ||
187 | return; | ||
188 | |||
189 | if (unlikely(!to)) { | ||
190 | *err = -EFAULT; | ||
191 | return; | ||
192 | } | ||
193 | |||
194 | if (unlikely(copy_to_user(to, from, bytes))) | ||
195 | *err = -EFAULT; | ||
196 | } | ||
197 | |||
198 | #define CP_TO_USR(to, from, err, n) \ | ||
199 | _cp_to_usr(to, from, &(err), (n) * sizeof(*(from))) | ||
200 | |||
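Both helpers accumulate failures in *err, so a wrapper can chain several copies and check the status only once; a minimal sketch following the pattern of mgrwrap_register_object() below (the local variables are illustrative):

	struct dsp_uuid uuid_obj;
	int status = 0;

	CP_FM_USR(&uuid_obj, args->args_mgr_registerobject.uuid_obj, status, 1);
	/* any later CP_FM_USR/CP_TO_USR call is a no-op once status != 0 */
	if (status)
		return status;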
201 | /* | ||
202 | * ======== api_call_dev_ioctl ======== | ||
203 | * Purpose: | ||
204 | * Call the (wrapper) function for the corresponding API IOCTL. | ||
205 | */ | ||
206 | inline int api_call_dev_ioctl(u32 cmd, union trapped_args *args, | ||
207 | u32 *result, void *pr_ctxt) | ||
208 | { | ||
209 | u32(*ioctl_cmd) (union trapped_args *args, void *pr_ctxt) = NULL; | ||
210 | int i; | ||
211 | |||
212 | if (_IOC_TYPE(cmd) != DB) { | ||
213 | pr_err("%s: Incompatible dspbridge ioctl number\n", __func__); | ||
214 | goto err; | ||
215 | } | ||
216 | |||
217 | if (DB_GET_IOC_TABLE(cmd) >= ARRAY_SIZE(size_cmd)) { | ||
218 | pr_err("%s: undefined ioctl module\n", __func__); | ||
219 | goto err; | ||
220 | } | ||
221 | |||
222 | /* Check the size of the required cmd table */ | ||
223 | i = DB_GET_IOC(cmd); | ||
224 | if (i >= size_cmd[DB_GET_IOC_TABLE(cmd)]) { | ||
225 | pr_err("%s: requested ioctl %d out of bounds for table %d\n", | ||
226 | __func__, i, DB_GET_IOC_TABLE(cmd)); | ||
227 | goto err; | ||
228 | } | ||
229 | |||
230 | switch (DB_GET_MODULE(cmd)) { | ||
231 | case DB_MGR: | ||
232 | ioctl_cmd = mgr_cmd[i].fxn; | ||
233 | break; | ||
234 | case DB_PROC: | ||
235 | ioctl_cmd = proc_cmd[i].fxn; | ||
236 | break; | ||
237 | case DB_NODE: | ||
238 | ioctl_cmd = node_cmd[i].fxn; | ||
239 | break; | ||
240 | case DB_STRM: | ||
241 | ioctl_cmd = strm_cmd[i].fxn; | ||
242 | break; | ||
243 | case DB_CMM: | ||
244 | ioctl_cmd = cmm_cmd[i].fxn; | ||
245 | break; | ||
246 | } | ||
247 | |||
248 | if (!ioctl_cmd) { | ||
249 | pr_err("%s: requested ioctl not defined\n", __func__); | ||
250 | goto err; | ||
251 | } else { | ||
252 | *result = (*ioctl_cmd) (args, pr_ctxt); | ||
253 | } | ||
254 | |||
255 | return 0; | ||
256 | |||
257 | err: | ||
258 | return -EINVAL; | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * ======== api_exit ======== | ||
263 | */ | ||
264 | void api_exit(void) | ||
265 | { | ||
266 | api_c_refs--; | ||
267 | |||
268 | if (api_c_refs == 0) | ||
269 | mgr_exit(); | ||
270 | } | ||
271 | |||
272 | /* | ||
273 | * ======== api_init ======== | ||
274 | * Purpose: | ||
275 | * Module initialization used by Bridge API. | ||
276 | */ | ||
277 | bool api_init(void) | ||
278 | { | ||
279 | bool ret = true; | ||
280 | |||
281 | if (api_c_refs == 0) | ||
282 | ret = mgr_init(); | ||
283 | |||
284 | if (ret) | ||
285 | api_c_refs++; | ||
286 | |||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | /* | ||
291 | * ======== api_init_complete2 ======== | ||
292 | * Purpose: | ||
293 | * Perform any required bridge initialization which cannot | ||
294 | * be performed in api_init() or dev_start_device() because | ||
295 | * some of the services are not yet | ||
296 | * completely initialized. | ||
297 | * Parameters: | ||
298 | * Returns: | ||
299 | * 0: Allow this device to load | ||
300 | * -EPERM: Failure. | ||
301 | * Requires: | ||
302 | * Bridge API initialized. | ||
303 | * Ensures: | ||
304 | */ | ||
305 | int api_init_complete2(void) | ||
306 | { | ||
307 | int status = 0; | ||
308 | struct cfg_devnode *dev_node; | ||
309 | struct dev_object *hdev_obj; | ||
310 | struct drv_data *drv_datap; | ||
311 | u8 dev_type; | ||
312 | |||
313 | /* Walk the list of DevObjects, get each devnode, and attempt to | ||
314 | * autostart the board. Note that this requires COF loading, which | ||
315 | * requires KFILE. */ | ||
316 | for (hdev_obj = dev_get_first(); hdev_obj != NULL; | ||
317 | hdev_obj = dev_get_next(hdev_obj)) { | ||
318 | if (dev_get_dev_node(hdev_obj, &dev_node)) | ||
319 | continue; | ||
320 | |||
321 | if (dev_get_dev_type(hdev_obj, &dev_type)) | ||
322 | continue; | ||
323 | |||
324 | if ((dev_type == DSP_UNIT) || (dev_type == IVA_UNIT)) { | ||
325 | drv_datap = dev_get_drvdata(bridge); | ||
326 | |||
327 | if (drv_datap && drv_datap->base_img) | ||
328 | proc_auto_start(dev_node, hdev_obj); | ||
329 | } | ||
330 | } | ||
331 | |||
332 | return status; | ||
333 | } | ||
334 | |||
335 | /* TODO: Remove deprecated and not implemented ioctl wrappers */ | ||
336 | |||
337 | /* | ||
338 | * ======== mgrwrap_enum_node_info ======== | ||
339 | */ | ||
340 | u32 mgrwrap_enum_node_info(union trapped_args *args, void *pr_ctxt) | ||
341 | { | ||
342 | u8 *pndb_props; | ||
343 | u32 num_nodes = 0; | ||
344 | int status; | ||
345 | u32 size = args->args_mgr_enumnode_info.ndb_props_size; | ||
346 | |||
347 | if (size < sizeof(struct dsp_ndbprops)) | ||
348 | return -EINVAL; | ||
349 | size = sizeof(struct dsp_ndbprops); | ||
350 | |||
351 | pndb_props = kmalloc(size, GFP_KERNEL); | ||
352 | if (pndb_props == NULL) | ||
353 | return -ENOMEM; | ||
354 | |||
355 | status = | ||
356 | mgr_enum_node_info(args->args_mgr_enumnode_info.node_id, | ||
357 | (struct dsp_ndbprops *)pndb_props, size, | ||
358 | &num_nodes); | ||
359 | |||
360 | CP_TO_USR(args->args_mgr_enumnode_info.ndb_props, pndb_props, status, | ||
361 | size); | ||
362 | CP_TO_USR(args->args_mgr_enumnode_info.num_nodes, &num_nodes, status, | ||
363 | 1); | ||
364 | kfree(pndb_props); | ||
365 | |||
366 | return status; | ||
367 | } | ||
368 | |||
369 | /* | ||
370 | * ======== mgrwrap_enum_proc_info ======== | ||
371 | */ | ||
372 | u32 mgrwrap_enum_proc_info(union trapped_args *args, void *pr_ctxt) | ||
373 | { | ||
374 | u8 *processor_info; | ||
375 | u8 num_procs = 0; | ||
376 | int status; | ||
377 | u32 size = args->args_mgr_enumproc_info.processor_info_size; | ||
378 | |||
379 | if (size < sizeof(struct dsp_processorinfo)) | ||
380 | return -EINVAL; | ||
381 | |||
382 | if (size > sizeof(struct mgr_processorextinfo)) | ||
383 | size = sizeof(struct mgr_processorextinfo); | ||
384 | |||
385 | processor_info = kzalloc(size, GFP_KERNEL); | ||
386 | if (processor_info == NULL) | ||
387 | return -ENOMEM; | ||
388 | |||
389 | status = | ||
390 | mgr_enum_processor_info(args->args_mgr_enumproc_info. | ||
391 | processor_id, | ||
392 | (struct dsp_processorinfo *) | ||
393 | processor_info, size, &num_procs); | ||
394 | |||
395 | CP_TO_USR(args->args_mgr_enumproc_info.processor_info, processor_info, | ||
396 | status, size); | ||
397 | CP_TO_USR(args->args_mgr_enumproc_info.num_procs, &num_procs, | ||
398 | status, 1); | ||
399 | kfree(processor_info); | ||
400 | |||
401 | return status; | ||
402 | } | ||
403 | |||
404 | #define WRAP_MAP2CALLER(x) x | ||
405 | /* | ||
406 | * ======== mgrwrap_register_object ======== | ||
407 | */ | ||
408 | u32 mgrwrap_register_object(union trapped_args *args, void *pr_ctxt) | ||
409 | { | ||
410 | u32 ret; | ||
411 | struct dsp_uuid uuid_obj; | ||
412 | u32 path_size = 0; | ||
413 | char *psz_path_name = NULL; | ||
414 | int status = 0; | ||
415 | |||
416 | CP_FM_USR(&uuid_obj, args->args_mgr_registerobject.uuid_obj, status, 1); | ||
417 | if (status) | ||
418 | goto func_end; | ||
419 | path_size = strlen_user((char *) | ||
420 | args->args_mgr_registerobject.sz_path_name); | ||
421 | if (!path_size) { | ||
422 | status = -EINVAL; | ||
423 | goto func_end; | ||
424 | } | ||
425 | |||
426 | psz_path_name = kmalloc(path_size, GFP_KERNEL); | ||
427 | if (!psz_path_name) { | ||
428 | status = -ENOMEM; | ||
429 | goto func_end; | ||
430 | } | ||
431 | ret = strncpy_from_user(psz_path_name, | ||
432 | (char *)args->args_mgr_registerobject. | ||
433 | sz_path_name, path_size); | ||
434 | if (!ret) { | ||
435 | status = -EFAULT; | ||
436 | goto func_end; | ||
437 | } | ||
438 | |||
439 | if (args->args_mgr_registerobject.obj_type >= DSP_DCDMAXOBJTYPE) { | ||
440 | status = -EINVAL; | ||
441 | goto func_end; | ||
442 | } | ||
443 | |||
444 | status = dcd_register_object(&uuid_obj, | ||
445 | args->args_mgr_registerobject.obj_type, | ||
446 | (char *)psz_path_name); | ||
447 | func_end: | ||
448 | kfree(psz_path_name); | ||
449 | return status; | ||
450 | } | ||
451 | |||
452 | /* | ||
453 | * ======== mgrwrap_unregister_object ======== | ||
454 | */ | ||
455 | u32 mgrwrap_unregister_object(union trapped_args *args, void *pr_ctxt) | ||
456 | { | ||
457 | int status = 0; | ||
458 | struct dsp_uuid uuid_obj; | ||
459 | |||
460 | CP_FM_USR(&uuid_obj, args->args_mgr_registerobject.uuid_obj, status, 1); | ||
461 | if (status) | ||
462 | goto func_end; | ||
463 | |||
464 | status = dcd_unregister_object(&uuid_obj, | ||
465 | args->args_mgr_unregisterobject. | ||
466 | obj_type); | ||
467 | func_end: | ||
468 | return status; | ||
469 | |||
470 | } | ||
471 | |||
472 | /* | ||
473 | * ======== mgrwrap_wait_for_bridge_events ======== | ||
474 | */ | ||
475 | u32 mgrwrap_wait_for_bridge_events(union trapped_args *args, void *pr_ctxt) | ||
476 | { | ||
477 | int status = 0; | ||
478 | struct dsp_notification *anotifications[MAX_EVENTS]; | ||
479 | struct dsp_notification notifications[MAX_EVENTS]; | ||
480 | u32 index = 0, i; | ||
481 | u32 count = args->args_mgr_wait.count; | ||
482 | |||
483 | if (count > MAX_EVENTS) | ||
484 | return -EINVAL; | ||
485 | |||
486 | /* get the array of pointers to user structures */ | ||
487 | CP_FM_USR(anotifications, args->args_mgr_wait.anotifications, | ||
488 | status, count); | ||
489 | /* get the events */ | ||
490 | for (i = 0; i < count; i++) { | ||
491 | CP_FM_USR(¬ifications[i], anotifications[i], status, 1); | ||
492 | if (status || !notifications[i].handle) | ||
493 | return -EINVAL; | ||
494 | /* set the array of pointers to kernel structures */ | ||
495 | anotifications[i] = ¬ifications[i]; | ||
496 | } | ||
497 | status = mgr_wait_for_bridge_events(anotifications, count, | ||
498 | &index, | ||
499 | args->args_mgr_wait. | ||
500 | timeout); | ||
501 | CP_TO_USR(args->args_mgr_wait.index, &index, status, 1); | ||
502 | return status; | ||
503 | } | ||
504 | |||
505 | /* | ||
506 | * ======== mgrwrap_get_process_resources_info ======== | ||
507 | */ | ||
508 | u32 __deprecated mgrwrap_get_process_resources_info(union trapped_args *args, | ||
509 | void *pr_ctxt) | ||
510 | { | ||
511 | pr_err("%s: deprecated dspbridge ioctl\n", __func__); | ||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | /* | ||
516 | * ======== procwrap_attach ======== | ||
517 | */ | ||
518 | u32 procwrap_attach(union trapped_args *args, void *pr_ctxt) | ||
519 | { | ||
520 | void *processor; | ||
521 | int status = 0; | ||
522 | struct dsp_processorattrin proc_attr_in, *attr_in = NULL; | ||
523 | |||
524 | /* Optional argument */ | ||
525 | if (args->args_proc_attach.attr_in) { | ||
526 | CP_FM_USR(&proc_attr_in, args->args_proc_attach.attr_in, status, | ||
527 | 1); | ||
528 | if (!status) | ||
529 | attr_in = &proc_attr_in; | ||
530 | else | ||
531 | goto func_end; | ||
532 | |||
533 | } | ||
534 | status = proc_attach(args->args_proc_attach.processor_id, attr_in, | ||
535 | &processor, pr_ctxt); | ||
536 | CP_TO_USR(args->args_proc_attach.ph_processor, &processor, status, 1); | ||
537 | func_end: | ||
538 | return status; | ||
539 | } | ||
540 | |||
541 | /* | ||
542 | * ======== procwrap_ctrl ======== | ||
543 | */ | ||
544 | u32 procwrap_ctrl(union trapped_args *args, void *pr_ctxt) | ||
545 | { | ||
546 | u32 cb_data_size, __user * psize = (u32 __user *) | ||
547 | args->args_proc_ctrl.args; | ||
548 | u8 *pargs = NULL; | ||
549 | int status = 0; | ||
550 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
551 | |||
552 | if (psize) { | ||
553 | if (get_user(cb_data_size, psize)) { | ||
554 | status = -EPERM; | ||
555 | goto func_end; | ||
556 | } | ||
557 | cb_data_size += sizeof(u32); | ||
558 | pargs = kmalloc(cb_data_size, GFP_KERNEL); | ||
559 | if (pargs == NULL) { | ||
560 | status = -ENOMEM; | ||
561 | goto func_end; | ||
562 | } | ||
563 | |||
564 | CP_FM_USR(pargs, args->args_proc_ctrl.args, status, | ||
565 | cb_data_size); | ||
566 | } | ||
567 | if (!status) { | ||
568 | status = proc_ctrl(hprocessor, | ||
569 | args->args_proc_ctrl.cmd, | ||
570 | (struct dsp_cbdata *)pargs); | ||
571 | } | ||
572 | |||
573 | /* CP_TO_USR(args->args_proc_ctrl.args, pargs, status, 1); */ | ||
574 | kfree(pargs); | ||
575 | func_end: | ||
576 | return status; | ||
577 | } | ||
578 | |||
579 | /* | ||
580 | * ======== procwrap_detach ======== | ||
581 | */ | ||
582 | u32 __deprecated procwrap_detach(union trapped_args *args, void *pr_ctxt) | ||
583 | { | ||
584 | /* proc_detach called at bridge_release only */ | ||
585 | pr_err("%s: deprecated dspbridge ioctl\n", __func__); | ||
586 | return 0; | ||
587 | } | ||
588 | |||
589 | /* | ||
590 | * ======== procwrap_enum_node_info ======== | ||
591 | */ | ||
592 | u32 procwrap_enum_node_info(union trapped_args *args, void *pr_ctxt) | ||
593 | { | ||
594 | int status; | ||
595 | void *node_tab[MAX_NODES]; | ||
596 | u32 num_nodes; | ||
597 | u32 alloc_cnt; | ||
598 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
599 | |||
600 | if (!args->args_proc_enumnode_info.node_tab_size) | ||
601 | return -EINVAL; | ||
602 | |||
603 | status = proc_enum_nodes(hprocessor, | ||
604 | node_tab, | ||
605 | args->args_proc_enumnode_info.node_tab_size, | ||
606 | &num_nodes, &alloc_cnt); | ||
607 | CP_TO_USR(args->args_proc_enumnode_info.node_tab, node_tab, status, | ||
608 | num_nodes); | ||
609 | CP_TO_USR(args->args_proc_enumnode_info.num_nodes, &num_nodes, | ||
610 | status, 1); | ||
611 | CP_TO_USR(args->args_proc_enumnode_info.allocated, &alloc_cnt, | ||
612 | status, 1); | ||
613 | return status; | ||
614 | } | ||
615 | |||
616 | u32 procwrap_end_dma(union trapped_args *args, void *pr_ctxt) | ||
617 | { | ||
618 | int status; | ||
619 | |||
620 | if (args->args_proc_dma.dir >= DMA_NONE) | ||
621 | return -EINVAL; | ||
622 | |||
623 | status = proc_end_dma(pr_ctxt, | ||
624 | args->args_proc_dma.mpu_addr, | ||
625 | args->args_proc_dma.size, | ||
626 | args->args_proc_dma.dir); | ||
627 | return status; | ||
628 | } | ||
629 | |||
630 | u32 procwrap_begin_dma(union trapped_args *args, void *pr_ctxt) | ||
631 | { | ||
632 | int status; | ||
633 | |||
634 | if (args->args_proc_dma.dir >= DMA_NONE) | ||
635 | return -EINVAL; | ||
636 | |||
637 | status = proc_begin_dma(pr_ctxt, | ||
638 | args->args_proc_dma.mpu_addr, | ||
639 | args->args_proc_dma.size, | ||
640 | args->args_proc_dma.dir); | ||
641 | return status; | ||
642 | } | ||
643 | |||
644 | /* | ||
645 | * ======== procwrap_flush_memory ======== | ||
646 | */ | ||
647 | u32 procwrap_flush_memory(union trapped_args *args, void *pr_ctxt) | ||
648 | { | ||
649 | int status; | ||
650 | |||
651 | if (args->args_proc_flushmemory.flags > | ||
652 | PROC_WRITEBACK_INVALIDATE_MEM) | ||
653 | return -EINVAL; | ||
654 | |||
655 | status = proc_flush_memory(pr_ctxt, | ||
656 | args->args_proc_flushmemory.mpu_addr, | ||
657 | args->args_proc_flushmemory.size, | ||
658 | args->args_proc_flushmemory.flags); | ||
659 | return status; | ||
660 | } | ||
661 | |||
662 | /* | ||
663 | * ======== procwrap_invalidate_memory ======== | ||
664 | */ | ||
665 | u32 procwrap_invalidate_memory(union trapped_args *args, void *pr_ctxt) | ||
666 | { | ||
667 | int status; | ||
668 | |||
669 | status = | ||
670 | proc_invalidate_memory(pr_ctxt, | ||
671 | args->args_proc_invalidatememory.mpu_addr, | ||
672 | args->args_proc_invalidatememory.size); | ||
673 | return status; | ||
674 | } | ||
675 | |||
676 | /* | ||
677 | * ======== procwrap_enum_resources ======== | ||
678 | */ | ||
679 | u32 procwrap_enum_resources(union trapped_args *args, void *pr_ctxt) | ||
680 | { | ||
681 | int status = 0; | ||
682 | struct dsp_resourceinfo resource_info; | ||
683 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
684 | |||
685 | if (args->args_proc_enumresources.resource_info_size < | ||
686 | sizeof(struct dsp_resourceinfo)) | ||
687 | return -EINVAL; | ||
688 | |||
689 | status = | ||
690 | proc_get_resource_info(hprocessor, | ||
691 | args->args_proc_enumresources.resource_type, | ||
692 | &resource_info, | ||
693 | args->args_proc_enumresources. | ||
694 | resource_info_size); | ||
695 | |||
696 | CP_TO_USR(args->args_proc_enumresources.resource_info, &resource_info, | ||
697 | status, 1); | ||
698 | |||
699 | return status; | ||
700 | |||
701 | } | ||
702 | |||
703 | /* | ||
704 | * ======== procwrap_get_state ======== | ||
705 | */ | ||
706 | u32 procwrap_get_state(union trapped_args *args, void *pr_ctxt) | ||
707 | { | ||
708 | int status; | ||
709 | struct dsp_processorstate proc_state; | ||
710 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
711 | |||
712 | if (args->args_proc_getstate.state_info_size < | ||
713 | sizeof(struct dsp_processorstate)) | ||
714 | return -EINVAL; | ||
715 | |||
716 | status = proc_get_state(hprocessor, &proc_state, | ||
717 | args->args_proc_getstate.state_info_size); | ||
718 | CP_TO_USR(args->args_proc_getstate.proc_state_obj, &proc_state, status, | ||
719 | 1); | ||
720 | return status; | ||
721 | |||
722 | } | ||
723 | |||
724 | /* | ||
725 | * ======== procwrap_get_trace ======== | ||
726 | */ | ||
727 | u32 procwrap_get_trace(union trapped_args *args, void *pr_ctxt) | ||
728 | { | ||
729 | int status; | ||
730 | u8 *pbuf; | ||
731 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
732 | |||
733 | if (args->args_proc_gettrace.max_size > MAX_TRACEBUFLEN) | ||
734 | return -EINVAL; | ||
735 | |||
736 | pbuf = kzalloc(args->args_proc_gettrace.max_size, GFP_KERNEL); | ||
737 | if (pbuf != NULL) { | ||
738 | status = proc_get_trace(hprocessor, pbuf, | ||
739 | args->args_proc_gettrace.max_size); | ||
740 | } else { | ||
741 | status = -ENOMEM; | ||
742 | } | ||
743 | CP_TO_USR(args->args_proc_gettrace.buf, pbuf, status, | ||
744 | args->args_proc_gettrace.max_size); | ||
745 | kfree(pbuf); | ||
746 | |||
747 | return status; | ||
748 | } | ||
749 | |||
750 | /* | ||
751 | * ======== procwrap_load ======== | ||
752 | */ | ||
753 | u32 procwrap_load(union trapped_args *args, void *pr_ctxt) | ||
754 | { | ||
755 | s32 i, len; | ||
756 | int status = 0; | ||
757 | char *temp; | ||
758 | s32 count = args->args_proc_load.argc_index; | ||
759 | u8 **argv = NULL, **envp = NULL; | ||
760 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
761 | |||
762 | if (count <= 0 || count > MAX_LOADARGS) { | ||
763 | status = -EINVAL; | ||
764 | goto func_cont; | ||
765 | } | ||
766 | |||
767 | argv = kmalloc(count * sizeof(u8 *), GFP_KERNEL); | ||
768 | if (!argv) { | ||
769 | status = -ENOMEM; | ||
770 | goto func_cont; | ||
771 | } | ||
772 | |||
773 | CP_FM_USR(argv, args->args_proc_load.user_args, status, count); | ||
774 | if (status) { | ||
775 | kfree(argv); | ||
776 | argv = NULL; | ||
777 | goto func_cont; | ||
778 | } | ||
779 | |||
780 | for (i = 0; i < count; i++) { | ||
781 | if (argv[i]) { | ||
782 | /* User space pointer to argument */ | ||
783 | temp = (char *)argv[i]; | ||
784 | /* len is increased by 1 to accommodate NULL */ | ||
785 | len = strlen_user((char *)temp) + 1; | ||
786 | /* Kernel space pointer to argument */ | ||
787 | argv[i] = kmalloc(len, GFP_KERNEL); | ||
788 | if (argv[i]) { | ||
789 | CP_FM_USR(argv[i], temp, status, len); | ||
790 | if (status) { | ||
791 | kfree(argv[i]); | ||
792 | argv[i] = NULL; | ||
793 | goto func_cont; | ||
794 | } | ||
795 | } else { | ||
796 | status = -ENOMEM; | ||
797 | goto func_cont; | ||
798 | } | ||
799 | } | ||
800 | } | ||
801 | /* TODO: validate this */ | ||
802 | if (args->args_proc_load.user_envp) { | ||
803 | /* number of elements in the envp array including NULL */ | ||
804 | count = 0; | ||
805 | do { | ||
806 | if (get_user(temp, | ||
807 | args->args_proc_load.user_envp + count)) { | ||
808 | status = -EFAULT; | ||
809 | goto func_cont; | ||
810 | } | ||
811 | count++; | ||
812 | } while (temp); | ||
813 | envp = kmalloc(count * sizeof(u8 *), GFP_KERNEL); | ||
814 | if (!envp) { | ||
815 | status = -ENOMEM; | ||
816 | goto func_cont; | ||
817 | } | ||
818 | |||
819 | CP_FM_USR(envp, args->args_proc_load.user_envp, status, count); | ||
820 | if (status) { | ||
821 | kfree(envp); | ||
822 | envp = NULL; | ||
823 | goto func_cont; | ||
824 | } | ||
825 | for (i = 0; envp[i]; i++) { | ||
826 | /* User space pointer to argument */ | ||
827 | temp = (char *)envp[i]; | ||
828 | /* len is increased by 1 to accommodate NULL */ | ||
829 | len = strlen_user((char *)temp) + 1; | ||
830 | /* Kernel space pointer to argument */ | ||
831 | envp[i] = kmalloc(len, GFP_KERNEL); | ||
832 | if (envp[i]) { | ||
833 | CP_FM_USR(envp[i], temp, status, len); | ||
834 | if (status) { | ||
835 | kfree(envp[i]); | ||
836 | envp[i] = NULL; | ||
837 | goto func_cont; | ||
838 | } | ||
839 | } else { | ||
840 | status = -ENOMEM; | ||
841 | goto func_cont; | ||
842 | } | ||
843 | } | ||
844 | } | ||
845 | |||
846 | if (!status) { | ||
847 | status = proc_load(hprocessor, | ||
848 | args->args_proc_load.argc_index, | ||
849 | (const char **)argv, (const char **)envp); | ||
850 | } | ||
851 | func_cont: | ||
852 | if (envp) { | ||
853 | i = 0; | ||
854 | while (envp[i]) | ||
855 | kfree(envp[i++]); | ||
856 | |||
857 | kfree(envp); | ||
858 | } | ||
859 | |||
860 | if (argv) { | ||
861 | count = args->args_proc_load.argc_index; | ||
862 | for (i = 0; (i < count) && argv[i]; i++) | ||
863 | kfree(argv[i]); | ||
864 | |||
865 | kfree(argv); | ||
866 | } | ||
867 | |||
868 | return status; | ||
869 | } | ||
870 | |||
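procwrap_load above copies the argv and envp string vectors out of user space: it counts entries up to the terminating NULL, copies the pointer array, replaces each pointer with a kernel copy of its string, and unwinds everything on failure. The user-space sketch below mirrors that copy-and-unwind pattern with POSIX strdup standing in for the strlen_user/kmalloc/CP_FM_USR sequence; dup_strv is a hypothetical name used only for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate a NULL-terminated vector of strings. On any allocation
 * failure, free whatever was copied so far and return NULL. */
static char **dup_strv(char *const *src)
{
	size_t count = 0, i;
	char **dst;

	while (src[count])			/* count entries up to the NULL */
		count++;

	dst = calloc(count + 1, sizeof(*dst));	/* +1 keeps the NULL terminator */
	if (!dst)
		return NULL;

	for (i = 0; i < count; i++) {
		dst[i] = strdup(src[i]);	/* deep-copy each string */
		if (!dst[i]) {
			while (i--)
				free(dst[i]);	/* unwind on failure */
			free(dst);
			return NULL;
		}
	}
	return dst;
}

int main(void)
{
	char *argv_in[] = { "node.dof", "-v", NULL };
	char **copy = dup_strv(argv_in);

	if (copy) {
		for (size_t i = 0; copy[i]; i++) {
			printf("%s\n", copy[i]);
			free(copy[i]);
		}
		free(copy);
	}
	return 0;
}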
871 | /* | ||
872 | * ======== procwrap_map ======== | ||
873 | */ | ||
874 | u32 procwrap_map(union trapped_args *args, void *pr_ctxt) | ||
875 | { | ||
876 | int status; | ||
877 | void *map_addr; | ||
878 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
879 | |||
880 | if (!args->args_proc_mapmem.size) | ||
881 | return -EINVAL; | ||
882 | |||
883 | status = proc_map(args->args_proc_mapmem.processor, | ||
884 | args->args_proc_mapmem.mpu_addr, | ||
885 | args->args_proc_mapmem.size, | ||
886 | args->args_proc_mapmem.req_addr, &map_addr, | ||
887 | args->args_proc_mapmem.map_attr, pr_ctxt); | ||
888 | if (!status) { | ||
889 | if (put_user(map_addr, args->args_proc_mapmem.map_addr)) { | ||
890 | status = -EINVAL; | ||
891 | proc_un_map(hprocessor, map_addr, pr_ctxt); | ||
892 | } | ||
893 | |||
894 | } | ||
895 | return status; | ||
896 | } | ||
897 | |||
898 | /* | ||
899 | * ======== procwrap_register_notify ======== | ||
900 | */ | ||
901 | u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt) | ||
902 | { | ||
903 | int status; | ||
904 | struct dsp_notification notification; | ||
905 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
906 | |||
907 | /* Initialize the notification data structure */ | ||
908 | notification.name = NULL; | ||
909 | notification.handle = NULL; | ||
910 | |||
911 | status = proc_register_notify(hprocessor, | ||
912 | args->args_proc_register_notify.event_mask, | ||
913 | args->args_proc_register_notify.notify_type, | ||
914 | ¬ification); | ||
915 | CP_TO_USR(args->args_proc_register_notify.notification, ¬ification, | ||
916 | status, 1); | ||
917 | return status; | ||
918 | } | ||
919 | |||
920 | /* | ||
921 | * ======== procwrap_reserve_memory ======== | ||
922 | */ | ||
923 | u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt) | ||
924 | { | ||
925 | int status; | ||
926 | void *prsv_addr; | ||
927 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
928 | |||
929 | if ((args->args_proc_rsvmem.size <= 0) || | ||
930 | (args->args_proc_rsvmem.size & (PG_SIZE4K - 1)) != 0) | ||
931 | return -EINVAL; | ||
932 | |||
933 | status = proc_reserve_memory(hprocessor, | ||
934 | args->args_proc_rsvmem.size, &prsv_addr, | ||
935 | pr_ctxt); | ||
936 | if (!status) { | ||
937 | if (put_user(prsv_addr, args->args_proc_rsvmem.rsv_addr)) { | ||
938 | status = -EINVAL; | ||
939 | proc_un_reserve_memory(args->args_proc_rsvmem. | ||
940 | processor, prsv_addr, pr_ctxt); | ||
941 | } | ||
942 | } | ||
943 | return status; | ||
944 | } | ||
945 | |||
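The size check in procwrap_reserve_memory relies on PG_SIZE4K being a power of two: size & (PG_SIZE4K - 1) is non-zero exactly when size is not a multiple of the 4 KiB page size. A minimal, runnable illustration of that mask trick:

#include <stdio.h>

#define PG_SIZE4K 4096u		/* must be a power of two for the mask trick */

static int is_page_aligned(unsigned long size)
{
	/* size % 4096 == 0, computed with a mask instead of a division */
	return (size & (PG_SIZE4K - 1)) == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_page_aligned(4096), is_page_aligned(8192), is_page_aligned(6000));
	/* prints: 1 1 0 */
	return 0;
}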
946 | /* | ||
947 | * ======== procwrap_start ======== | ||
948 | */ | ||
949 | u32 procwrap_start(union trapped_args *args, void *pr_ctxt) | ||
950 | { | ||
951 | u32 ret; | ||
952 | |||
953 | ret = proc_start(((struct process_context *)pr_ctxt)->processor); | ||
954 | return ret; | ||
955 | } | ||
956 | |||
957 | /* | ||
958 | * ======== procwrap_un_map ======== | ||
959 | */ | ||
960 | u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt) | ||
961 | { | ||
962 | int status; | ||
963 | |||
964 | status = proc_un_map(((struct process_context *)pr_ctxt)->processor, | ||
965 | args->args_proc_unmapmem.map_addr, pr_ctxt); | ||
966 | return status; | ||
967 | } | ||
968 | |||
969 | /* | ||
970 | * ======== procwrap_un_reserve_memory ======== | ||
971 | */ | ||
972 | u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt) | ||
973 | { | ||
974 | int status; | ||
975 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
976 | |||
977 | status = proc_un_reserve_memory(hprocessor, | ||
978 | args->args_proc_unrsvmem.rsv_addr, | ||
979 | pr_ctxt); | ||
980 | return status; | ||
981 | } | ||
982 | |||
983 | /* | ||
984 | * ======== procwrap_stop ======== | ||
985 | */ | ||
986 | u32 procwrap_stop(union trapped_args *args, void *pr_ctxt) | ||
987 | { | ||
988 | u32 ret; | ||
989 | |||
990 | ret = proc_stop(((struct process_context *)pr_ctxt)->processor); | ||
991 | |||
992 | return ret; | ||
993 | } | ||
994 | |||
995 | /* | ||
996 | * ======== find_node_handle ======== | ||
997 | */ | ||
998 | inline void find_node_handle(struct node_res_object **noderes, | ||
999 | void *pr_ctxt, void *hnode) | ||
1000 | { | ||
1001 | rcu_read_lock(); | ||
1002 | *noderes = idr_find(((struct process_context *)pr_ctxt)->node_id, | ||
1003 | (int)hnode - 1); | ||
1004 | rcu_read_unlock(); | ||
1005 | return; | ||
1006 | } | ||
1007 | |||
1008 | |||
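find_node_handle (and find_strm_handle further below) undo the handle encoding used when objects are handed to user space: nodewrap_allocate and strmwrap_open return the IDR id plus one, so 0 can never be a valid handle, and the lookup therefore calls idr_find() with handle - 1. The sketch below shows the same off-by-one handle convention in plain user-space C, with a fixed array standing in for the per-process IDR (handle_alloc/handle_find are illustrative names, not driver functions):

#include <stdio.h>

#define MAX_OBJS 8

static void *obj_table[MAX_OBJS];	/* stands in for the per-process IDR */

/* Store an object and hand out a non-zero handle (index + 1). */
static int handle_alloc(void *obj)
{
	for (int id = 0; id < MAX_OBJS; id++) {
		if (!obj_table[id]) {
			obj_table[id] = obj;
			return id + 1;	/* 0 is reserved for "no handle" */
		}
	}
	return 0;
}

/* Translate a user handle back to the object; NULL if invalid. */
static void *handle_find(int handle)
{
	if (handle <= 0 || handle > MAX_OBJS)
		return NULL;
	return obj_table[handle - 1];
}

int main(void)
{
	int node = 42;
	int h = handle_alloc(&node);

	printf("handle=%d obj=%p\n", h, handle_find(h));
	printf("bogus handle 0 -> %p\n", handle_find(0));
	return 0;
}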
1009 | /* | ||
1010 | * ======== nodewrap_allocate ======== | ||
1011 | */ | ||
1012 | u32 nodewrap_allocate(union trapped_args *args, void *pr_ctxt) | ||
1013 | { | ||
1014 | int status = 0; | ||
1015 | struct dsp_uuid node_uuid; | ||
1016 | u32 cb_data_size = 0; | ||
1017 | u32 __user *psize = (u32 __user *) args->args_node_allocate.args; | ||
1018 | u8 *pargs = NULL; | ||
1019 | struct dsp_nodeattrin proc_attr_in, *attr_in = NULL; | ||
1020 | struct node_res_object *node_res; | ||
1021 | int nodeid; | ||
1022 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
1023 | |||
1024 | /* Optional argument */ | ||
1025 | if (psize) { | ||
1026 | if (get_user(cb_data_size, psize)) | ||
1027 | status = -EPERM; | ||
1028 | |||
1029 | cb_data_size += sizeof(u32); | ||
1030 | if (!status) { | ||
1031 | pargs = kmalloc(cb_data_size, GFP_KERNEL); | ||
1032 | if (pargs == NULL) | ||
1033 | status = -ENOMEM; | ||
1034 | |||
1035 | } | ||
1036 | CP_FM_USR(pargs, args->args_node_allocate.args, status, | ||
1037 | cb_data_size); | ||
1038 | } | ||
1039 | CP_FM_USR(&node_uuid, args->args_node_allocate.node_id_ptr, status, 1); | ||
1040 | if (status) | ||
1041 | goto func_cont; | ||
1042 | /* Optional argument */ | ||
1043 | if (args->args_node_allocate.attr_in) { | ||
1044 | CP_FM_USR(&proc_attr_in, args->args_node_allocate.attr_in, | ||
1045 | status, 1); | ||
1046 | if (!status) | ||
1047 | attr_in = &proc_attr_in; | ||
1048 | else | ||
1049 | status = -ENOMEM; | ||
1050 | |||
1051 | } | ||
1052 | if (!status) { | ||
1053 | status = node_allocate(hprocessor, | ||
1054 | &node_uuid, (struct dsp_cbdata *)pargs, | ||
1055 | attr_in, &node_res, pr_ctxt); | ||
1056 | } | ||
1057 | if (!status) { | ||
1058 | nodeid = node_res->id + 1; | ||
1059 | CP_TO_USR(args->args_node_allocate.node, &nodeid, | ||
1060 | status, 1); | ||
1061 | if (status) { | ||
1062 | status = -EFAULT; | ||
1063 | node_delete(node_res, pr_ctxt); | ||
1064 | } | ||
1065 | } | ||
1066 | func_cont: | ||
1067 | kfree(pargs); | ||
1068 | |||
1069 | return status; | ||
1070 | } | ||
1071 | |||
1072 | /* | ||
1073 | * ======== nodewrap_alloc_msg_buf ======== | ||
1074 | */ | ||
1075 | u32 nodewrap_alloc_msg_buf(union trapped_args *args, void *pr_ctxt) | ||
1076 | { | ||
1077 | int status = 0; | ||
1078 | struct dsp_bufferattr *pattr = NULL; | ||
1079 | struct dsp_bufferattr attr; | ||
1080 | u8 *pbuffer = NULL; | ||
1081 | struct node_res_object *node_res; | ||
1082 | |||
1083 | find_node_handle(&node_res, pr_ctxt, | ||
1084 | args->args_node_allocmsgbuf.node); | ||
1085 | |||
1086 | if (!node_res) | ||
1087 | return -EFAULT; | ||
1088 | |||
1089 | if (!args->args_node_allocmsgbuf.size) | ||
1090 | return -EINVAL; | ||
1091 | |||
1092 | if (args->args_node_allocmsgbuf.attr) { /* Optional argument */ | ||
1093 | CP_FM_USR(&attr, args->args_node_allocmsgbuf.attr, status, 1); | ||
1094 | if (!status) | ||
1095 | pattr = &attr; | ||
1096 | |||
1097 | } | ||
1098 | /* argument */ | ||
1099 | CP_FM_USR(&pbuffer, args->args_node_allocmsgbuf.buffer, status, 1); | ||
1100 | if (!status) { | ||
1101 | status = node_alloc_msg_buf(node_res->node, | ||
1102 | args->args_node_allocmsgbuf.size, | ||
1103 | pattr, &pbuffer); | ||
1104 | } | ||
1105 | CP_TO_USR(args->args_node_allocmsgbuf.buffer, &pbuffer, status, 1); | ||
1106 | return status; | ||
1107 | } | ||
1108 | |||
1109 | /* | ||
1110 | * ======== nodewrap_change_priority ======== | ||
1111 | */ | ||
1112 | u32 nodewrap_change_priority(union trapped_args *args, void *pr_ctxt) | ||
1113 | { | ||
1114 | u32 ret; | ||
1115 | struct node_res_object *node_res; | ||
1116 | |||
1117 | find_node_handle(&node_res, pr_ctxt, | ||
1118 | args->args_node_changepriority.node); | ||
1119 | |||
1120 | if (!node_res) | ||
1121 | return -EFAULT; | ||
1122 | |||
1123 | ret = node_change_priority(node_res->node, | ||
1124 | args->args_node_changepriority.prio); | ||
1125 | |||
1126 | return ret; | ||
1127 | } | ||
1128 | |||
1129 | /* | ||
1130 | * ======== nodewrap_connect ======== | ||
1131 | */ | ||
1132 | u32 nodewrap_connect(union trapped_args *args, void *pr_ctxt) | ||
1133 | { | ||
1134 | int status = 0; | ||
1135 | struct dsp_strmattr attrs; | ||
1136 | struct dsp_strmattr *pattrs = NULL; | ||
1137 | u32 cb_data_size; | ||
1138 | u32 __user *psize = (u32 __user *) args->args_node_connect.conn_param; | ||
1139 | u8 *pargs = NULL; | ||
1140 | struct node_res_object *node_res1, *node_res2; | ||
1141 | struct node_object *node1 = NULL, *node2 = NULL; | ||
1142 | |||
1143 | if ((int)args->args_node_connect.node != DSP_HGPPNODE) { | ||
1144 | find_node_handle(&node_res1, pr_ctxt, | ||
1145 | args->args_node_connect.node); | ||
1146 | if (node_res1) | ||
1147 | node1 = node_res1->node; | ||
1148 | } else { | ||
1149 | node1 = args->args_node_connect.node; | ||
1150 | } | ||
1151 | |||
1152 | if ((int)args->args_node_connect.other_node != DSP_HGPPNODE) { | ||
1153 | find_node_handle(&node_res2, pr_ctxt, | ||
1154 | args->args_node_connect.other_node); | ||
1155 | if (node_res2) | ||
1156 | node2 = node_res2->node; | ||
1157 | } else { | ||
1158 | node2 = args->args_node_connect.other_node; | ||
1159 | } | ||
1160 | |||
1161 | if (!node1 || !node2) | ||
1162 | return -EFAULT; | ||
1163 | |||
1164 | /* Optional argument */ | ||
1165 | if (psize) { | ||
1166 | if (get_user(cb_data_size, psize)) | ||
1167 | status = -EPERM; | ||
1168 | |||
1169 | cb_data_size += sizeof(u32); | ||
1170 | if (!status) { | ||
1171 | pargs = kmalloc(cb_data_size, GFP_KERNEL); | ||
1172 | if (pargs == NULL) { | ||
1173 | status = -ENOMEM; | ||
1174 | goto func_cont; | ||
1175 | } | ||
1176 | |||
1177 | } | ||
1178 | CP_FM_USR(pargs, args->args_node_connect.conn_param, status, | ||
1179 | cb_data_size); | ||
1180 | if (status) | ||
1181 | goto func_cont; | ||
1182 | } | ||
1183 | if (args->args_node_connect.attrs) { /* Optional argument */ | ||
1184 | CP_FM_USR(&attrs, args->args_node_connect.attrs, status, 1); | ||
1185 | if (!status) | ||
1186 | pattrs = &attrs; | ||
1187 | |||
1188 | } | ||
1189 | if (!status) { | ||
1190 | status = node_connect(node1, | ||
1191 | args->args_node_connect.stream_id, | ||
1192 | node2, | ||
1193 | args->args_node_connect.other_stream, | ||
1194 | pattrs, (struct dsp_cbdata *)pargs); | ||
1195 | } | ||
1196 | func_cont: | ||
1197 | kfree(pargs); | ||
1198 | |||
1199 | return status; | ||
1200 | } | ||
1201 | |||
1202 | /* | ||
1203 | * ======== nodewrap_create ======== | ||
1204 | */ | ||
1205 | u32 nodewrap_create(union trapped_args *args, void *pr_ctxt) | ||
1206 | { | ||
1207 | u32 ret; | ||
1208 | struct node_res_object *node_res; | ||
1209 | |||
1210 | find_node_handle(&node_res, pr_ctxt, args->args_node_create.node); | ||
1211 | |||
1212 | if (!node_res) | ||
1213 | return -EFAULT; | ||
1214 | |||
1215 | ret = node_create(node_res->node); | ||
1216 | |||
1217 | return ret; | ||
1218 | } | ||
1219 | |||
1220 | /* | ||
1221 | * ======== nodewrap_delete ======== | ||
1222 | */ | ||
1223 | u32 nodewrap_delete(union trapped_args *args, void *pr_ctxt) | ||
1224 | { | ||
1225 | u32 ret; | ||
1226 | struct node_res_object *node_res; | ||
1227 | |||
1228 | find_node_handle(&node_res, pr_ctxt, args->args_node_delete.node); | ||
1229 | |||
1230 | if (!node_res) | ||
1231 | return -EFAULT; | ||
1232 | |||
1233 | ret = node_delete(node_res, pr_ctxt); | ||
1234 | |||
1235 | return ret; | ||
1236 | } | ||
1237 | |||
1238 | /* | ||
1239 | * ======== nodewrap_free_msg_buf ======== | ||
1240 | */ | ||
1241 | u32 nodewrap_free_msg_buf(union trapped_args *args, void *pr_ctxt) | ||
1242 | { | ||
1243 | int status = 0; | ||
1244 | struct dsp_bufferattr *pattr = NULL; | ||
1245 | struct dsp_bufferattr attr; | ||
1246 | struct node_res_object *node_res; | ||
1247 | |||
1248 | find_node_handle(&node_res, pr_ctxt, args->args_node_freemsgbuf.node); | ||
1249 | |||
1250 | if (!node_res) | ||
1251 | return -EFAULT; | ||
1252 | |||
1253 | if (args->args_node_freemsgbuf.attr) { /* Optional argument */ | ||
1254 | CP_FM_USR(&attr, args->args_node_freemsgbuf.attr, status, 1); | ||
1255 | if (!status) | ||
1256 | pattr = &attr; | ||
1257 | |||
1258 | } | ||
1259 | |||
1260 | if (!args->args_node_freemsgbuf.buffer) | ||
1261 | return -EFAULT; | ||
1262 | |||
1263 | if (!status) { | ||
1264 | status = node_free_msg_buf(node_res->node, | ||
1265 | args->args_node_freemsgbuf.buffer, | ||
1266 | pattr); | ||
1267 | } | ||
1268 | |||
1269 | return status; | ||
1270 | } | ||
1271 | |||
1272 | /* | ||
1273 | * ======== nodewrap_get_attr ======== | ||
1274 | */ | ||
1275 | u32 nodewrap_get_attr(union trapped_args *args, void *pr_ctxt) | ||
1276 | { | ||
1277 | int status = 0; | ||
1278 | struct dsp_nodeattr attr; | ||
1279 | struct node_res_object *node_res; | ||
1280 | |||
1281 | find_node_handle(&node_res, pr_ctxt, args->args_node_getattr.node); | ||
1282 | |||
1283 | if (!node_res) | ||
1284 | return -EFAULT; | ||
1285 | |||
1286 | status = node_get_attr(node_res->node, &attr, | ||
1287 | args->args_node_getattr.attr_size); | ||
1288 | CP_TO_USR(args->args_node_getattr.attr, &attr, status, 1); | ||
1289 | |||
1290 | return status; | ||
1291 | } | ||
1292 | |||
1293 | /* | ||
1294 | * ======== nodewrap_get_message ======== | ||
1295 | */ | ||
1296 | u32 nodewrap_get_message(union trapped_args *args, void *pr_ctxt) | ||
1297 | { | ||
1298 | int status; | ||
1299 | struct dsp_msg msg; | ||
1300 | struct node_res_object *node_res; | ||
1301 | |||
1302 | find_node_handle(&node_res, pr_ctxt, args->args_node_getmessage.node); | ||
1303 | |||
1304 | if (!node_res) | ||
1305 | return -EFAULT; | ||
1306 | |||
1307 | status = node_get_message(node_res->node, &msg, | ||
1308 | args->args_node_getmessage.timeout); | ||
1309 | |||
1310 | CP_TO_USR(args->args_node_getmessage.message, &msg, status, 1); | ||
1311 | |||
1312 | return status; | ||
1313 | } | ||
1314 | |||
1315 | /* | ||
1316 | * ======== nodewrap_pause ======== | ||
1317 | */ | ||
1318 | u32 nodewrap_pause(union trapped_args *args, void *pr_ctxt) | ||
1319 | { | ||
1320 | u32 ret; | ||
1321 | struct node_res_object *node_res; | ||
1322 | |||
1323 | find_node_handle(&node_res, pr_ctxt, args->args_node_pause.node); | ||
1324 | |||
1325 | if (!node_res) | ||
1326 | return -EFAULT; | ||
1327 | |||
1328 | ret = node_pause(node_res->node); | ||
1329 | |||
1330 | return ret; | ||
1331 | } | ||
1332 | |||
1333 | /* | ||
1334 | * ======== nodewrap_put_message ======== | ||
1335 | */ | ||
1336 | u32 nodewrap_put_message(union trapped_args *args, void *pr_ctxt) | ||
1337 | { | ||
1338 | int status = 0; | ||
1339 | struct dsp_msg msg; | ||
1340 | struct node_res_object *node_res; | ||
1341 | |||
1342 | find_node_handle(&node_res, pr_ctxt, args->args_node_putmessage.node); | ||
1343 | |||
1344 | if (!node_res) | ||
1345 | return -EFAULT; | ||
1346 | |||
1347 | CP_FM_USR(&msg, args->args_node_putmessage.message, status, 1); | ||
1348 | |||
1349 | if (!status) { | ||
1350 | status = | ||
1351 | node_put_message(node_res->node, &msg, | ||
1352 | args->args_node_putmessage.timeout); | ||
1353 | } | ||
1354 | |||
1355 | return status; | ||
1356 | } | ||
1357 | |||
1358 | /* | ||
1359 | * ======== nodewrap_register_notify ======== | ||
1360 | */ | ||
1361 | u32 nodewrap_register_notify(union trapped_args *args, void *pr_ctxt) | ||
1362 | { | ||
1363 | int status = 0; | ||
1364 | struct dsp_notification notification; | ||
1365 | struct node_res_object *node_res; | ||
1366 | |||
1367 | find_node_handle(&node_res, pr_ctxt, | ||
1368 | args->args_node_registernotify.node); | ||
1369 | |||
1370 | if (!node_res) | ||
1371 | return -EFAULT; | ||
1372 | |||
1373 | /* Initialize the notification data structure */ | ||
1374 | notification.name = NULL; | ||
1375 | notification.handle = NULL; | ||
1376 | |||
1377 | if (!args->args_proc_register_notify.event_mask) | ||
1378 | CP_FM_USR(¬ification, | ||
1379 | args->args_proc_register_notify.notification, | ||
1380 | status, 1); | ||
1381 | |||
1382 | status = node_register_notify(node_res->node, | ||
1383 | args->args_node_registernotify.event_mask, | ||
1384 | args->args_node_registernotify. | ||
1385 | notify_type, ¬ification); | ||
1386 | CP_TO_USR(args->args_node_registernotify.notification, ¬ification, | ||
1387 | status, 1); | ||
1388 | return status; | ||
1389 | } | ||
1390 | |||
1391 | /* | ||
1392 | * ======== nodewrap_run ======== | ||
1393 | */ | ||
1394 | u32 nodewrap_run(union trapped_args *args, void *pr_ctxt) | ||
1395 | { | ||
1396 | u32 ret; | ||
1397 | struct node_res_object *node_res; | ||
1398 | |||
1399 | find_node_handle(&node_res, pr_ctxt, args->args_node_run.node); | ||
1400 | |||
1401 | if (!node_res) | ||
1402 | return -EFAULT; | ||
1403 | |||
1404 | ret = node_run(node_res->node); | ||
1405 | |||
1406 | return ret; | ||
1407 | } | ||
1408 | |||
1409 | /* | ||
1410 | * ======== nodewrap_terminate ======== | ||
1411 | */ | ||
1412 | u32 nodewrap_terminate(union trapped_args *args, void *pr_ctxt) | ||
1413 | { | ||
1414 | int status; | ||
1415 | int tempstatus; | ||
1416 | struct node_res_object *node_res; | ||
1417 | |||
1418 | find_node_handle(&node_res, pr_ctxt, args->args_node_terminate.node); | ||
1419 | |||
1420 | if (!node_res) | ||
1421 | return -EFAULT; | ||
1422 | |||
1423 | status = node_terminate(node_res->node, &tempstatus); | ||
1424 | |||
1425 | CP_TO_USR(args->args_node_terminate.status, &tempstatus, status, 1); | ||
1426 | |||
1427 | return status; | ||
1428 | } | ||
1429 | |||
1430 | /* | ||
1431 | * ======== nodewrap_get_uuid_props ======== | ||
1432 | */ | ||
1433 | u32 nodewrap_get_uuid_props(union trapped_args *args, void *pr_ctxt) | ||
1434 | { | ||
1435 | int status = 0; | ||
1436 | struct dsp_uuid node_uuid; | ||
1437 | struct dsp_ndbprops *pnode_props = NULL; | ||
1438 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
1439 | |||
1440 | CP_FM_USR(&node_uuid, args->args_node_getuuidprops.node_id_ptr, status, | ||
1441 | 1); | ||
1442 | if (status) | ||
1443 | goto func_cont; | ||
1444 | pnode_props = kmalloc(sizeof(struct dsp_ndbprops), GFP_KERNEL); | ||
1445 | if (pnode_props != NULL) { | ||
1446 | status = | ||
1447 | node_get_uuid_props(hprocessor, &node_uuid, pnode_props); | ||
1448 | CP_TO_USR(args->args_node_getuuidprops.node_props, pnode_props, | ||
1449 | status, 1); | ||
1450 | } else | ||
1451 | status = -ENOMEM; | ||
1452 | func_cont: | ||
1453 | kfree(pnode_props); | ||
1454 | return status; | ||
1455 | } | ||
1456 | |||
1457 | /* | ||
1458 | * ======== find_strm_handle ========= | ||
1459 | */ | ||
1460 | inline void find_strm_handle(struct strm_res_object **strmres, | ||
1461 | void *pr_ctxt, void *hstream) | ||
1462 | { | ||
1463 | rcu_read_lock(); | ||
1464 | *strmres = idr_find(((struct process_context *)pr_ctxt)->stream_id, | ||
1465 | (int)hstream - 1); | ||
1466 | rcu_read_unlock(); | ||
1467 | return; | ||
1468 | } | ||
1469 | |||
1470 | /* | ||
1471 | * ======== strmwrap_allocate_buffer ======== | ||
1472 | */ | ||
1473 | u32 strmwrap_allocate_buffer(union trapped_args *args, void *pr_ctxt) | ||
1474 | { | ||
1475 | int status; | ||
1476 | u8 **ap_buffer = NULL; | ||
1477 | u32 num_bufs = args->args_strm_allocatebuffer.num_bufs; | ||
1478 | struct strm_res_object *strm_res; | ||
1479 | |||
1480 | find_strm_handle(&strm_res, pr_ctxt, | ||
1481 | args->args_strm_allocatebuffer.stream); | ||
1482 | |||
1483 | if (!strm_res) | ||
1484 | return -EFAULT; | ||
1485 | |||
1486 | if (num_bufs > MAX_BUFS) | ||
1487 | return -EINVAL; | ||
1488 | |||
1489 | ap_buffer = kmalloc((num_bufs * sizeof(u8 *)), GFP_KERNEL); | ||
1490 | if (ap_buffer == NULL) | ||
1491 | return -ENOMEM; | ||
1492 | |||
1493 | status = strm_allocate_buffer(strm_res, | ||
1494 | args->args_strm_allocatebuffer.size, | ||
1495 | ap_buffer, num_bufs, pr_ctxt); | ||
1496 | if (!status) { | ||
1497 | CP_TO_USR(args->args_strm_allocatebuffer.ap_buffer, ap_buffer, | ||
1498 | status, num_bufs); | ||
1499 | if (status) { | ||
1500 | status = -EFAULT; | ||
1501 | strm_free_buffer(strm_res, | ||
1502 | ap_buffer, num_bufs, pr_ctxt); | ||
1503 | } | ||
1504 | } | ||
1505 | kfree(ap_buffer); | ||
1506 | |||
1507 | return status; | ||
1508 | } | ||
1509 | |||
1510 | /* | ||
1511 | * ======== strmwrap_close ======== | ||
1512 | */ | ||
1513 | u32 strmwrap_close(union trapped_args *args, void *pr_ctxt) | ||
1514 | { | ||
1515 | struct strm_res_object *strm_res; | ||
1516 | |||
1517 | find_strm_handle(&strm_res, pr_ctxt, args->args_strm_close.stream); | ||
1518 | |||
1519 | if (!strm_res) | ||
1520 | return -EFAULT; | ||
1521 | |||
1522 | return strm_close(strm_res, pr_ctxt); | ||
1523 | } | ||
1524 | |||
1525 | /* | ||
1526 | * ======== strmwrap_free_buffer ======== | ||
1527 | */ | ||
1528 | u32 strmwrap_free_buffer(union trapped_args *args, void *pr_ctxt) | ||
1529 | { | ||
1530 | int status = 0; | ||
1531 | u8 **ap_buffer = NULL; | ||
1532 | u32 num_bufs = args->args_strm_freebuffer.num_bufs; | ||
1533 | struct strm_res_object *strm_res; | ||
1534 | |||
1535 | find_strm_handle(&strm_res, pr_ctxt, | ||
1536 | args->args_strm_freebuffer.stream); | ||
1537 | |||
1538 | if (!strm_res) | ||
1539 | return -EFAULT; | ||
1540 | |||
1541 | if (num_bufs > MAX_BUFS) | ||
1542 | return -EINVAL; | ||
1543 | |||
1544 | ap_buffer = kmalloc_array(num_bufs, sizeof(u8 *), GFP_KERNEL); | ||
1545 | if (ap_buffer == NULL) | ||
1546 | return -ENOMEM; | ||
1547 | |||
1548 | CP_FM_USR(ap_buffer, args->args_strm_freebuffer.ap_buffer, status, | ||
1549 | num_bufs); | ||
1550 | |||
1551 | if (!status) | ||
1552 | status = strm_free_buffer(strm_res, | ||
1553 | ap_buffer, num_bufs, pr_ctxt); | ||
1554 | |||
1555 | CP_TO_USR(args->args_strm_freebuffer.ap_buffer, ap_buffer, status, | ||
1556 | num_bufs); | ||
1557 | kfree(ap_buffer); | ||
1558 | |||
1559 | return status; | ||
1560 | } | ||
1561 | |||
1562 | /* | ||
1563 | * ======== strmwrap_get_event_handle ======== | ||
1564 | */ | ||
1565 | u32 __deprecated strmwrap_get_event_handle(union trapped_args *args, | ||
1566 | void *pr_ctxt) | ||
1567 | { | ||
1568 | pr_err("%s: deprecated dspbridge ioctl\n", __func__); | ||
1569 | return -ENOSYS; | ||
1570 | } | ||
1571 | |||
1572 | /* | ||
1573 | * ======== strmwrap_get_info ======== | ||
1574 | */ | ||
1575 | u32 strmwrap_get_info(union trapped_args *args, void *pr_ctxt) | ||
1576 | { | ||
1577 | int status = 0; | ||
1578 | struct stream_info strm_info; | ||
1579 | struct dsp_streaminfo user; | ||
1580 | struct dsp_streaminfo *temp; | ||
1581 | struct strm_res_object *strm_res; | ||
1582 | |||
1583 | find_strm_handle(&strm_res, pr_ctxt, | ||
1584 | args->args_strm_getinfo.stream); | ||
1585 | |||
1586 | if (!strm_res) | ||
1587 | return -EFAULT; | ||
1588 | |||
1589 | CP_FM_USR(&strm_info, args->args_strm_getinfo.stream_info, status, 1); | ||
1590 | temp = strm_info.user_strm; | ||
1591 | |||
1592 | strm_info.user_strm = &user; | ||
1593 | |||
1594 | if (!status) { | ||
1595 | status = strm_get_info(strm_res->stream, | ||
1596 | &strm_info, | ||
1597 | args->args_strm_getinfo. | ||
1598 | stream_info_size); | ||
1599 | } | ||
1600 | CP_TO_USR(temp, strm_info.user_strm, status, 1); | ||
1601 | strm_info.user_strm = temp; | ||
1602 | CP_TO_USR(args->args_strm_getinfo.stream_info, &strm_info, status, 1); | ||
1603 | return status; | ||
1604 | } | ||
1605 | |||
1606 | /* | ||
1607 | * ======== strmwrap_idle ======== | ||
1608 | */ | ||
1609 | u32 strmwrap_idle(union trapped_args *args, void *pr_ctxt) | ||
1610 | { | ||
1611 | u32 ret; | ||
1612 | struct strm_res_object *strm_res; | ||
1613 | |||
1614 | find_strm_handle(&strm_res, pr_ctxt, args->args_strm_idle.stream); | ||
1615 | |||
1616 | if (!strm_res) | ||
1617 | return -EFAULT; | ||
1618 | |||
1619 | ret = strm_idle(strm_res->stream, args->args_strm_idle.flush_flag); | ||
1620 | |||
1621 | return ret; | ||
1622 | } | ||
1623 | |||
1624 | /* | ||
1625 | * ======== strmwrap_issue ======== | ||
1626 | */ | ||
1627 | u32 strmwrap_issue(union trapped_args *args, void *pr_ctxt) | ||
1628 | { | ||
1629 | int status = 0; | ||
1630 | struct strm_res_object *strm_res; | ||
1631 | |||
1632 | find_strm_handle(&strm_res, pr_ctxt, args->args_strm_issue.stream); | ||
1633 | |||
1634 | if (!strm_res) | ||
1635 | return -EFAULT; | ||
1636 | |||
1637 | if (!args->args_strm_issue.buffer) | ||
1638 | return -EFAULT; | ||
1639 | |||
1640 | /* There is no need to do CP_FM_USR for the user buffer (pbuffer), | ||
1641 | since that copy is done in the Bridge internal function | ||
1642 | bridge_chnl_add_io_req in chnl_sm.c. */ | ||
1643 | status = strm_issue(strm_res->stream, | ||
1644 | args->args_strm_issue.buffer, | ||
1645 | args->args_strm_issue.bytes, | ||
1646 | args->args_strm_issue.buf_size, | ||
1647 | args->args_strm_issue.arg); | ||
1648 | |||
1649 | return status; | ||
1650 | } | ||
1651 | |||
1652 | /* | ||
1653 | * ======== strmwrap_open ======== | ||
1654 | */ | ||
1655 | u32 strmwrap_open(union trapped_args *args, void *pr_ctxt) | ||
1656 | { | ||
1657 | int status = 0; | ||
1658 | struct strm_attr attr; | ||
1659 | struct strm_res_object *strm_res_obj; | ||
1660 | struct dsp_streamattrin strm_attr_in; | ||
1661 | struct node_res_object *node_res; | ||
1662 | int strmid; | ||
1663 | |||
1664 | find_node_handle(&node_res, pr_ctxt, args->args_strm_open.node); | ||
1665 | |||
1666 | if (!node_res) | ||
1667 | return -EFAULT; | ||
1668 | |||
1669 | CP_FM_USR(&attr, args->args_strm_open.attr_in, status, 1); | ||
1670 | |||
1671 | if (attr.stream_attr_in != NULL) { /* Optional argument */ | ||
1672 | CP_FM_USR(&strm_attr_in, attr.stream_attr_in, status, 1); | ||
1673 | if (!status) { | ||
1674 | attr.stream_attr_in = &strm_attr_in; | ||
1675 | if (attr.stream_attr_in->strm_mode == STRMMODE_LDMA) | ||
1676 | return -ENOSYS; | ||
1677 | } | ||
1678 | |||
1679 | } | ||
1680 | status = strm_open(node_res->node, | ||
1681 | args->args_strm_open.direction, | ||
1682 | args->args_strm_open.index, &attr, &strm_res_obj, | ||
1683 | pr_ctxt); | ||
1684 | if (!status) { | ||
1685 | strmid = strm_res_obj->id + 1; | ||
1686 | CP_TO_USR(args->args_strm_open.stream, &strmid, status, 1); | ||
1687 | } | ||
1688 | return status; | ||
1689 | } | ||
1690 | |||
1691 | /* | ||
1692 | * ======== strmwrap_reclaim ======== | ||
1693 | */ | ||
1694 | u32 strmwrap_reclaim(union trapped_args *args, void *pr_ctxt) | ||
1695 | { | ||
1696 | int status = 0; | ||
1697 | u8 *buf_ptr; | ||
1698 | u32 ul_bytes; | ||
1699 | u32 dw_arg; | ||
1700 | u32 ul_buf_size; | ||
1701 | struct strm_res_object *strm_res; | ||
1702 | |||
1703 | find_strm_handle(&strm_res, pr_ctxt, args->args_strm_reclaim.stream); | ||
1704 | |||
1705 | if (!strm_res) | ||
1706 | return -EFAULT; | ||
1707 | |||
1708 | status = strm_reclaim(strm_res->stream, &buf_ptr, | ||
1709 | &ul_bytes, &ul_buf_size, &dw_arg); | ||
1710 | CP_TO_USR(args->args_strm_reclaim.buf_ptr, &buf_ptr, status, 1); | ||
1711 | CP_TO_USR(args->args_strm_reclaim.bytes, &ul_bytes, status, 1); | ||
1712 | CP_TO_USR(args->args_strm_reclaim.arg, &dw_arg, status, 1); | ||
1713 | |||
1714 | if (args->args_strm_reclaim.buf_size_ptr != NULL) { | ||
1715 | CP_TO_USR(args->args_strm_reclaim.buf_size_ptr, &ul_buf_size, | ||
1716 | status, 1); | ||
1717 | } | ||
1718 | |||
1719 | return status; | ||
1720 | } | ||
1721 | |||
1722 | /* | ||
1723 | * ======== strmwrap_register_notify ======== | ||
1724 | */ | ||
1725 | u32 strmwrap_register_notify(union trapped_args *args, void *pr_ctxt) | ||
1726 | { | ||
1727 | int status = 0; | ||
1728 | struct dsp_notification notification; | ||
1729 | struct strm_res_object *strm_res; | ||
1730 | |||
1731 | find_strm_handle(&strm_res, pr_ctxt, | ||
1732 | args->args_strm_registernotify.stream); | ||
1733 | |||
1734 | if (!strm_res) | ||
1735 | return -EFAULT; | ||
1736 | |||
1737 | /* Initialize the notification data structure */ | ||
1738 | notification.name = NULL; | ||
1739 | notification.handle = NULL; | ||
1740 | |||
1741 | status = strm_register_notify(strm_res->stream, | ||
1742 | args->args_strm_registernotify.event_mask, | ||
1743 | args->args_strm_registernotify. | ||
1744 | notify_type, ¬ification); | ||
1745 | CP_TO_USR(args->args_strm_registernotify.notification, ¬ification, | ||
1746 | status, 1); | ||
1747 | |||
1748 | return status; | ||
1749 | } | ||
1750 | |||
1751 | /* | ||
1752 | * ======== strmwrap_select ======== | ||
1753 | */ | ||
1754 | u32 strmwrap_select(union trapped_args *args, void *pr_ctxt) | ||
1755 | { | ||
1756 | u32 mask = 0; | ||
1757 | struct strm_object *strm_tab[MAX_STREAMS]; | ||
1758 | int status = 0; | ||
1759 | struct strm_res_object *strm_res; | ||
1760 | int *ids[MAX_STREAMS]; | ||
1761 | int i; | ||
1762 | |||
1763 | if (args->args_strm_select.strm_num > MAX_STREAMS) | ||
1764 | return -EINVAL; | ||
1765 | |||
1766 | CP_FM_USR(ids, args->args_strm_select.stream_tab, status, | ||
1767 | args->args_strm_select.strm_num); | ||
1768 | |||
1769 | if (status) | ||
1770 | return status; | ||
1771 | |||
1772 | for (i = 0; i < args->args_strm_select.strm_num; i++) { | ||
1773 | find_strm_handle(&strm_res, pr_ctxt, ids[i]); | ||
1774 | |||
1775 | if (!strm_res) | ||
1776 | return -EFAULT; | ||
1777 | |||
1778 | strm_tab[i] = strm_res->stream; | ||
1779 | } | ||
1780 | |||
1781 | if (!status) { | ||
1782 | status = strm_select(strm_tab, args->args_strm_select.strm_num, | ||
1783 | &mask, args->args_strm_select.timeout); | ||
1784 | } | ||
1785 | CP_TO_USR(args->args_strm_select.mask, &mask, status, 1); | ||
1786 | return status; | ||
1787 | } | ||
1788 | |||
1789 | /* CMM */ | ||
1790 | |||
1791 | /* | ||
1792 | * ======== cmmwrap_calloc_buf ======== | ||
1793 | */ | ||
1794 | u32 __deprecated cmmwrap_calloc_buf(union trapped_args *args, void *pr_ctxt) | ||
1795 | { | ||
1796 | /* This operation is done in kernel */ | ||
1797 | pr_err("%s: deprecated dspbridge ioctl\n", __func__); | ||
1798 | return -ENOSYS; | ||
1799 | } | ||
1800 | |||
1801 | /* | ||
1802 | * ======== cmmwrap_free_buf ======== | ||
1803 | */ | ||
1804 | u32 __deprecated cmmwrap_free_buf(union trapped_args *args, void *pr_ctxt) | ||
1805 | { | ||
1806 | /* This operation is done in kernel */ | ||
1807 | pr_err("%s: deprecated dspbridge ioctl\n", __func__); | ||
1808 | return -ENOSYS; | ||
1809 | } | ||
1810 | |||
1811 | /* | ||
1812 | * ======== cmmwrap_get_handle ======== | ||
1813 | */ | ||
1814 | u32 cmmwrap_get_handle(union trapped_args *args, void *pr_ctxt) | ||
1815 | { | ||
1816 | int status = 0; | ||
1817 | struct cmm_object *hcmm_mgr; | ||
1818 | void *hprocessor = ((struct process_context *)pr_ctxt)->processor; | ||
1819 | |||
1820 | status = cmm_get_handle(hprocessor, &hcmm_mgr); | ||
1821 | |||
1822 | CP_TO_USR(args->args_cmm_gethandle.cmm_mgr, &hcmm_mgr, status, 1); | ||
1823 | |||
1824 | return status; | ||
1825 | } | ||
1826 | |||
1827 | /* | ||
1828 | * ======== cmmwrap_get_info ======== | ||
1829 | */ | ||
1830 | u32 cmmwrap_get_info(union trapped_args *args, void *pr_ctxt) | ||
1831 | { | ||
1832 | int status = 0; | ||
1833 | struct cmm_info cmm_info_obj; | ||
1834 | |||
1835 | status = cmm_get_info(args->args_cmm_getinfo.cmm_mgr, &cmm_info_obj); | ||
1836 | |||
1837 | CP_TO_USR(args->args_cmm_getinfo.cmm_info_obj, &cmm_info_obj, status, | ||
1838 | 1); | ||
1839 | |||
1840 | return status; | ||
1841 | } | ||
diff --git a/drivers/staging/tidspbridge/pmgr/io.c b/drivers/staging/tidspbridge/pmgr/io.c deleted file mode 100644 index 4073c9c672fd..000000000000 --- a/drivers/staging/tidspbridge/pmgr/io.c +++ /dev/null | |||
@@ -1,93 +0,0 @@ | |||
1 | /* | ||
2 | * io.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * IO manager interface: Manages IO between CHNL and msg_ctrl. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | #include <linux/types.h> | ||
19 | |||
20 | /* ----------------------------------- Host OS */ | ||
21 | #include <dspbridge/host_os.h> | ||
22 | |||
23 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
24 | #include <dspbridge/dbdefs.h> | ||
25 | |||
26 | /* ----------------------------------- Platform Manager */ | ||
27 | #include <dspbridge/dev.h> | ||
28 | |||
29 | /* ----------------------------------- This */ | ||
30 | #include <ioobj.h> | ||
31 | #include <dspbridge/io.h> | ||
32 | |||
33 | /* | ||
34 | * ======== io_create ======== | ||
35 | * Purpose: | ||
36 | * Create an IO manager object, responsible for managing IO between | ||
37 | * CHNL and msg_ctrl | ||
38 | */ | ||
39 | int io_create(struct io_mgr **io_man, struct dev_object *hdev_obj, | ||
40 | const struct io_attrs *mgr_attrts) | ||
41 | { | ||
42 | struct bridge_drv_interface *intf_fxns; | ||
43 | struct io_mgr *hio_mgr = NULL; | ||
44 | struct io_mgr_ *pio_mgr = NULL; | ||
45 | int status = 0; | ||
46 | |||
47 | *io_man = NULL; | ||
48 | |||
49 | /* A memory base of 0 implies no memory base: */ | ||
50 | if ((mgr_attrts->shm_base != 0) && (mgr_attrts->sm_length == 0)) | ||
51 | status = -EINVAL; | ||
52 | |||
53 | if (mgr_attrts->word_size == 0) | ||
54 | status = -EINVAL; | ||
55 | |||
56 | if (!status) { | ||
57 | dev_get_intf_fxns(hdev_obj, &intf_fxns); | ||
58 | |||
59 | /* Let Bridge channel module finish the create: */ | ||
60 | status = (*intf_fxns->io_create) (&hio_mgr, hdev_obj, | ||
61 | mgr_attrts); | ||
62 | |||
63 | if (!status) { | ||
64 | pio_mgr = (struct io_mgr_ *)hio_mgr; | ||
65 | pio_mgr->intf_fxns = intf_fxns; | ||
66 | pio_mgr->dev_obj = hdev_obj; | ||
67 | |||
68 | /* Return the new channel manager handle: */ | ||
69 | *io_man = hio_mgr; | ||
70 | } | ||
71 | } | ||
72 | |||
73 | return status; | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * ======== io_destroy ======== | ||
78 | * Purpose: | ||
79 | * Delete IO manager. | ||
80 | */ | ||
81 | int io_destroy(struct io_mgr *hio_mgr) | ||
82 | { | ||
83 | struct bridge_drv_interface *intf_fxns; | ||
84 | struct io_mgr_ *pio_mgr = (struct io_mgr_ *)hio_mgr; | ||
85 | int status; | ||
86 | |||
87 | intf_fxns = pio_mgr->intf_fxns; | ||
88 | |||
89 | /* Let Bridge channel module destroy the io_mgr: */ | ||
90 | status = (*intf_fxns->io_destroy) (hio_mgr); | ||
91 | |||
92 | return status; | ||
93 | } | ||
diff --git a/drivers/staging/tidspbridge/pmgr/ioobj.h b/drivers/staging/tidspbridge/pmgr/ioobj.h deleted file mode 100644 index 7defd9481458..000000000000 --- a/drivers/staging/tidspbridge/pmgr/ioobj.h +++ /dev/null | |||
@@ -1,38 +0,0 @@ | |||
1 | /* | ||
2 | * ioobj.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Structure subcomponents of channel class library IO objects which | ||
7 | * are exposed to DSP API from Bridge driver. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef IOOBJ_ | ||
21 | #define IOOBJ_ | ||
22 | |||
23 | #include <dspbridge/devdefs.h> | ||
24 | #include <dspbridge/dspdefs.h> | ||
25 | |||
26 | /* | ||
27 | * This struct is the first field in a io_mgr struct. Other, implementation | ||
28 | * specific fields follow this structure in memory. | ||
29 | */ | ||
30 | struct io_mgr_ { | ||
31 | /* These must be the first fields in a io_mgr struct: */ | ||
32 | struct bridge_dev_context *bridge_context; /* Bridge context. */ | ||
33 | /* Function interface to Bridge driver. */ | ||
34 | struct bridge_drv_interface *intf_fxns; | ||
35 | struct dev_object *dev_obj; /* Device this board represents. */ | ||
36 | }; | ||
37 | |||
38 | #endif /* IOOBJ_ */ | ||
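The comment in ioobj.h (and the matching one in msgobj.h below) describes a common C layering trick: the fields shared with the Bridge driver are placed at the very start of the full io_mgr object, so a pointer to the full object can be cast to the smaller shared layout and back. A stand-alone sketch of that first-member embedding pattern, using the made-up names public_hdr and private_obj purely for illustration:

#include <stdio.h>

/* Fields every layer is allowed to see. */
struct public_hdr {
	const char *name;
};

/* Full object: the shared header MUST be the first member, so a
 * (struct public_hdr *) cast of the full object is still valid. */
struct private_obj {
	struct public_hdr hdr;	/* first field, same address as the object */
	int private_state;
};

int main(void)
{
	struct private_obj obj = { { "io_mgr" }, 123 };
	struct public_hdr *pub = (struct public_hdr *)&obj;	/* "public" view */
	struct private_obj *priv = (struct private_obj *)pub;	/* back to full view */

	printf("%s %d\n", pub->name, priv->private_state);
	return 0;
}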
diff --git a/drivers/staging/tidspbridge/pmgr/msg.c b/drivers/staging/tidspbridge/pmgr/msg.c deleted file mode 100644 index f093cfb51c00..000000000000 --- a/drivers/staging/tidspbridge/pmgr/msg.c +++ /dev/null | |||
@@ -1,91 +0,0 @@ | |||
1 | /* | ||
2 | * msg.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge msg_ctrl Module. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | #include <linux/types.h> | ||
19 | |||
20 | /* ----------------------------------- Host OS */ | ||
21 | #include <dspbridge/host_os.h> | ||
22 | |||
23 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
24 | #include <dspbridge/dbdefs.h> | ||
25 | |||
26 | /* ----------------------------------- Bridge Driver */ | ||
27 | #include <dspbridge/dspdefs.h> | ||
28 | |||
29 | /* ----------------------------------- Platform Manager */ | ||
30 | #include <dspbridge/dev.h> | ||
31 | |||
32 | /* ----------------------------------- This */ | ||
33 | #include <msgobj.h> | ||
34 | #include <dspbridge/msg.h> | ||
35 | |||
36 | /* | ||
37 | * ======== msg_create ======== | ||
38 | * Purpose: | ||
39 | * Create an object to manage message queues. Only one of these objects | ||
40 | * can exist per device object. | ||
41 | */ | ||
42 | int msg_create(struct msg_mgr **msg_man, | ||
43 | struct dev_object *hdev_obj, msg_onexit msg_callback) | ||
44 | { | ||
45 | struct bridge_drv_interface *intf_fxns; | ||
46 | struct msg_mgr_ *msg_mgr_obj; | ||
47 | struct msg_mgr *hmsg_mgr; | ||
48 | int status = 0; | ||
49 | |||
50 | *msg_man = NULL; | ||
51 | |||
52 | dev_get_intf_fxns(hdev_obj, &intf_fxns); | ||
53 | |||
54 | /* Let Bridge message module finish the create: */ | ||
55 | status = | ||
56 | (*intf_fxns->msg_create) (&hmsg_mgr, hdev_obj, msg_callback); | ||
57 | |||
58 | if (!status) { | ||
59 | /* Fill in DSP API message module's fields of the msg_mgr | ||
60 | * structure */ | ||
61 | msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr; | ||
62 | msg_mgr_obj->intf_fxns = intf_fxns; | ||
63 | |||
64 | /* Finally, return the new message manager handle: */ | ||
65 | *msg_man = hmsg_mgr; | ||
66 | } else { | ||
67 | status = -EPERM; | ||
68 | } | ||
69 | return status; | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * ======== msg_delete ======== | ||
74 | * Purpose: | ||
75 | * Delete a msg_ctrl manager allocated in msg_create(). | ||
76 | */ | ||
77 | void msg_delete(struct msg_mgr *hmsg_mgr) | ||
78 | { | ||
79 | struct msg_mgr_ *msg_mgr_obj = (struct msg_mgr_ *)hmsg_mgr; | ||
80 | struct bridge_drv_interface *intf_fxns; | ||
81 | |||
82 | if (msg_mgr_obj) { | ||
83 | intf_fxns = msg_mgr_obj->intf_fxns; | ||
84 | |||
85 | /* Let Bridge message module destroy the msg_mgr: */ | ||
86 | (*intf_fxns->msg_delete) (hmsg_mgr); | ||
87 | } else { | ||
88 | dev_dbg(bridge, "%s: Error hmsg_mgr handle: %p\n", | ||
89 | __func__, hmsg_mgr); | ||
90 | } | ||
91 | } | ||
diff --git a/drivers/staging/tidspbridge/pmgr/msgobj.h b/drivers/staging/tidspbridge/pmgr/msgobj.h deleted file mode 100644 index 14ca633c56cb..000000000000 --- a/drivers/staging/tidspbridge/pmgr/msgobj.h +++ /dev/null | |||
@@ -1,38 +0,0 @@ | |||
1 | /* | ||
2 | * msgobj.h | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Structure subcomponents of channel class library msg_ctrl objects which | ||
7 | * are exposed to DSP API from Bridge driver. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | |||
20 | #ifndef MSGOBJ_ | ||
21 | #define MSGOBJ_ | ||
22 | |||
23 | #include <dspbridge/dspdefs.h> | ||
24 | |||
25 | #include <dspbridge/msgdefs.h> | ||
26 | |||
27 | /* | ||
28 | * This struct is the first field in a msg_mgr struct. Other, implementation | ||
29 | * specific fields follow this structure in memory. | ||
30 | */ | ||
31 | struct msg_mgr_ { | ||
32 | /* The first field must match that in _msg_sm.h */ | ||
33 | |||
34 | /* Function interface to Bridge driver. */ | ||
35 | struct bridge_drv_interface *intf_fxns; | ||
36 | }; | ||
37 | |||
38 | #endif /* MSGOBJ_ */ | ||
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c deleted file mode 100644 index c91d1d7d0884..000000000000 --- a/drivers/staging/tidspbridge/rmgr/dbdcd.c +++ /dev/null | |||
@@ -1,1484 +0,0 @@ | |||
1 | /* | ||
2 | * dbdcd.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * This file contains the implementation of the DSP/BIOS Bridge | ||
7 | * Configuration Database (DCD). | ||
8 | * | ||
9 | * Notes: | ||
10 | * The fxn dcd_get_objects can apply a callback fxn to each DCD object | ||
11 | * that is located in a specified COFF file. At the moment, | ||
12 | * dcd_auto_register, dcd_auto_unregister, and the NLDR module all use | ||
13 | * dcd_get_objects. | ||
14 | * | ||
15 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
16 | * | ||
17 | * This package is free software; you can redistribute it and/or modify | ||
18 | * it under the terms of the GNU General Public License version 2 as | ||
19 | * published by the Free Software Foundation. | ||
20 | * | ||
21 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
22 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
23 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
24 | */ | ||
25 | #include <linux/types.h> | ||
26 | |||
27 | /* ----------------------------------- Host OS */ | ||
28 | #include <dspbridge/host_os.h> | ||
29 | |||
30 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
31 | #include <dspbridge/dbdefs.h> | ||
32 | |||
33 | /* ----------------------------------- Platform Manager */ | ||
34 | #include <dspbridge/cod.h> | ||
35 | |||
36 | /* ----------------------------------- Others */ | ||
37 | #include <dspbridge/uuidutil.h> | ||
38 | |||
39 | /* ----------------------------------- This */ | ||
40 | #include <dspbridge/dbdcd.h> | ||
41 | |||
42 | /* ----------------------------------- Global defines. */ | ||
43 | #define MAX_INT2CHAR_LENGTH 16 /* Max int2char len of 32 bit int */ | ||
44 | |||
45 | /* Name of section containing dependent libraries */ | ||
46 | #define DEPLIBSECT ".dspbridge_deplibs" | ||
47 | |||
48 | /* DCD specific structures. */ | ||
49 | struct dcd_manager { | ||
50 | struct cod_manager *cod_mgr; /* Handle to COD manager object. */ | ||
51 | }; | ||
52 | |||
53 | /* Pointer to the registry support key */ | ||
54 | static struct list_head reg_key_list; | ||
55 | static DEFINE_SPINLOCK(dbdcd_lock); | ||
56 | |||
57 | /* Global reference variables. */ | ||
58 | static u32 refs; | ||
59 | static u32 enum_refs; | ||
60 | |||
61 | /* Helper function prototypes. */ | ||
62 | static s32 atoi(char *psz_buf); | ||
63 | static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size, | ||
64 | enum dsp_dcdobjtype obj_type, | ||
65 | struct dcd_genericobj *gen_obj); | ||
66 | static void compress_buf(char *psz_buf, u32 ul_buf_size, s32 char_size); | ||
67 | static char dsp_char2_gpp_char(char *word, s32 dsp_char_size); | ||
68 | static int get_dep_lib_info(struct dcd_manager *hdcd_mgr, | ||
69 | struct dsp_uuid *uuid_obj, | ||
70 | u16 *num_libs, | ||
71 | u16 *num_pers_libs, | ||
72 | struct dsp_uuid *dep_lib_uuids, | ||
73 | bool *prstnt_dep_libs, | ||
74 | enum nldr_phase phase); | ||
75 | |||
76 | /* | ||
77 | * ======== dcd_uuid_from_string ======== | ||
78 | * Purpose: | ||
79 | * Converts an ANSI string to a dsp_uuid. | ||
80 | * Parameters: | ||
81 | * sz_uuid: Pointer to a string that represents a dsp_uuid object. | ||
82 | * uuid_obj: Pointer to a dsp_uuid object. | ||
83 | * Returns: | ||
84 | * 0: Success. | ||
85 | * -EINVAL: Conversion failed | ||
86 | * Requires: | ||
87 | * uuid_obj & sz_uuid are non-NULL values. | ||
88 | * Ensures: | ||
89 | * Details: | ||
90 | * We assume the string representation of a UUID has the following format: | ||
91 | * "12345678_1234_1234_1234_123456789abc". | ||
92 | */ | ||
93 | static int dcd_uuid_from_string(char *sz_uuid, struct dsp_uuid *uuid_obj) | ||
94 | { | ||
95 | char c; | ||
96 | u64 t; | ||
97 | struct dsp_uuid uuid_tmp; | ||
98 | |||
99 | /* | ||
100 | * The sscanf implementation cannot deal with the hh format modifier | ||
101 | * when the converted value doesn't fit in a u32, so convert the | ||
102 | * last six bytes to a u64 and memcpy what is needed. | ||
103 | */ | ||
104 | if (sscanf(sz_uuid, "%8x%c%4hx%c%4hx%c%2hhx%2hhx%c%llx", | ||
105 | &uuid_tmp.data1, &c, &uuid_tmp.data2, &c, | ||
106 | &uuid_tmp.data3, &c, &uuid_tmp.data4, | ||
107 | &uuid_tmp.data5, &c, &t) != 10) | ||
108 | return -EINVAL; | ||
109 | |||
110 | t = cpu_to_be64(t); | ||
111 | memcpy(&uuid_tmp.data6[0], ((char *)&t) + 2, 6); | ||
112 | *uuid_obj = uuid_tmp; | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
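The sscanf format in dcd_uuid_from_string reads the five underscore-separated groups of "12345678_1234_1234_1234_123456789abc"; the last twelve hex digits land in a single u64 because, as the comment notes, the kernel sscanf cannot apply the hh modifier there, and the cpu_to_be64() plus memcpy() then lay those six bytes out in string order. The user-space sketch below performs the same parse, extracting the trailing six bytes with shifts instead of cpu_to_be64():

#include <stdio.h>

int main(void)
{
	const char *sz_uuid = "12345678_1234_1234_1234_123456789abc";
	unsigned int data1;
	unsigned short data2, data3;
	unsigned char data4, data5, data6[6];
	unsigned long long t;
	char c;
	int i;

	if (sscanf(sz_uuid, "%8x%c%4hx%c%4hx%c%2hhx%2hhx%c%llx",
		   &data1, &c, &data2, &c, &data3, &c,
		   &data4, &data5, &c, &t) != 10)
		return 1;

	/* Lay out the last 48 bits in string (big-endian) order, which is
	 * what the cpu_to_be64() + memcpy(..., +2, 6) above achieves. */
	for (i = 0; i < 6; i++)
		data6[i] = (t >> (8 * (5 - i))) & 0xff;

	printf("%08x %04hx %04hx %02hhx%02hhx ", data1, data2, data3, data4, data5);
	for (i = 0; i < 6; i++)
		printf("%02x", data6[i]);
	printf("\n");	/* 12345678 1234 1234 1234 123456789abc */
	return 0;
}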
117 | /* | ||
118 | * ======== dcd_auto_register ======== | ||
119 | * Purpose: | ||
120 | * Parses the supplied image and registers it with DCD. | ||
121 | */ | ||
122 | int dcd_auto_register(struct dcd_manager *hdcd_mgr, | ||
123 | char *sz_coff_path) | ||
124 | { | ||
125 | int status = 0; | ||
126 | |||
127 | if (hdcd_mgr) | ||
128 | status = dcd_get_objects(hdcd_mgr, sz_coff_path, | ||
129 | (dcd_registerfxn) dcd_register_object, | ||
130 | (void *)sz_coff_path); | ||
131 | else | ||
132 | status = -EFAULT; | ||
133 | |||
134 | return status; | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * ======== dcd_auto_unregister ======== | ||
139 | * Purpose: | ||
140 | * Parses the supplied DSP image and unregisters it from DCD. | ||
141 | */ | ||
142 | int dcd_auto_unregister(struct dcd_manager *hdcd_mgr, | ||
143 | char *sz_coff_path) | ||
144 | { | ||
145 | int status = 0; | ||
146 | |||
147 | if (hdcd_mgr) | ||
148 | status = dcd_get_objects(hdcd_mgr, sz_coff_path, | ||
149 | (dcd_registerfxn) dcd_register_object, | ||
150 | NULL); | ||
151 | else | ||
152 | status = -EFAULT; | ||
153 | |||
154 | return status; | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * ======== dcd_create_manager ======== | ||
159 | * Purpose: | ||
160 | * Creates DCD manager. | ||
161 | */ | ||
162 | int dcd_create_manager(char *sz_zl_dll_name, | ||
163 | struct dcd_manager **dcd_mgr) | ||
164 | { | ||
165 | struct cod_manager *cod_mgr; /* COD manager handle */ | ||
166 | struct dcd_manager *dcd_mgr_obj = NULL; /* DCD Manager pointer */ | ||
167 | int status = 0; | ||
168 | |||
169 | status = cod_create(&cod_mgr, sz_zl_dll_name); | ||
170 | if (status) | ||
171 | goto func_end; | ||
172 | |||
173 | /* Create a DCD object. */ | ||
174 | dcd_mgr_obj = kzalloc(sizeof(struct dcd_manager), GFP_KERNEL); | ||
175 | if (dcd_mgr_obj != NULL) { | ||
176 | /* Fill out the object. */ | ||
177 | dcd_mgr_obj->cod_mgr = cod_mgr; | ||
178 | |||
179 | /* Return handle to this DCD interface. */ | ||
180 | *dcd_mgr = dcd_mgr_obj; | ||
181 | } else { | ||
182 | status = -ENOMEM; | ||
183 | |||
184 | /* | ||
185 | * If allocation of DcdManager object failed, delete the | ||
186 | * COD manager. | ||
187 | */ | ||
188 | cod_delete(cod_mgr); | ||
189 | } | ||
190 | |||
191 | func_end: | ||
192 | return status; | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * ======== dcd_destroy_manager ======== | ||
197 | * Purpose: | ||
198 | * Frees DCD Manager object. | ||
199 | */ | ||
200 | int dcd_destroy_manager(struct dcd_manager *hdcd_mgr) | ||
201 | { | ||
202 | struct dcd_manager *dcd_mgr_obj = hdcd_mgr; | ||
203 | int status = -EFAULT; | ||
204 | |||
205 | if (hdcd_mgr) { | ||
206 | /* Delete the COD manager. */ | ||
207 | cod_delete(dcd_mgr_obj->cod_mgr); | ||
208 | |||
209 | /* Deallocate a DCD manager object. */ | ||
210 | kfree(dcd_mgr_obj); | ||
211 | |||
212 | status = 0; | ||
213 | } | ||
214 | |||
215 | return status; | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * ======== dcd_enumerate_object ======== | ||
220 | * Purpose: | ||
221 | * Enumerates objects in the DCD. | ||
222 | */ | ||
223 | int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type, | ||
224 | struct dsp_uuid *uuid_obj) | ||
225 | { | ||
226 | int status = 0; | ||
227 | char sz_reg_key[DCD_MAXPATHLENGTH]; | ||
228 | char sz_value[DCD_MAXPATHLENGTH]; | ||
229 | struct dsp_uuid dsp_uuid_obj; | ||
230 | char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */ | ||
231 | u32 dw_key_len = 0; | ||
232 | struct dcd_key_elem *dcd_key; | ||
233 | int len; | ||
234 | |||
235 | if ((index != 0) && (enum_refs == 0)) { | ||
236 | /* | ||
237 | * If an enumeration is being performed on an index greater | ||
238 | * than zero, then the current enum_refs must have been | ||
239 | * incremented to greater than zero. | ||
240 | */ | ||
241 | status = -EIDRM; | ||
242 | } else { | ||
243 | /* | ||
244 | * Pre-determine final key length. It's length of DCD_REGKEY + | ||
245 | * "_\0" + length of sz_obj_type string + terminating NULL. | ||
246 | */ | ||
247 | dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1; | ||
248 | |||
249 | /* Create proper REG key; concatenate DCD_REGKEY with | ||
250 | * obj_type. */ | ||
251 | strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1); | ||
252 | if ((strlen(sz_reg_key) + strlen("_\0")) < | ||
253 | DCD_MAXPATHLENGTH) { | ||
254 | strncat(sz_reg_key, "_\0", 2); | ||
255 | } else { | ||
256 | status = -EPERM; | ||
257 | } | ||
258 | |||
259 | /* This snprintf is guaranteed not to exceed max size of an | ||
260 | * integer. */ | ||
261 | status = snprintf(sz_obj_type, MAX_INT2CHAR_LENGTH, "%d", | ||
262 | obj_type); | ||
263 | |||
264 | if (status == -1) { | ||
265 | status = -EPERM; | ||
266 | } else { | ||
267 | status = 0; | ||
268 | if ((strlen(sz_reg_key) + strlen(sz_obj_type)) < | ||
269 | DCD_MAXPATHLENGTH) { | ||
270 | strncat(sz_reg_key, sz_obj_type, | ||
271 | strlen(sz_obj_type) + 1); | ||
272 | } else { | ||
273 | status = -EPERM; | ||
274 | } | ||
275 | } | ||
276 | |||
277 | if (!status) { | ||
278 | len = strlen(sz_reg_key); | ||
279 | spin_lock(&dbdcd_lock); | ||
280 | list_for_each_entry(dcd_key, ®_key_list, link) { | ||
281 | if (!strncmp(dcd_key->name, sz_reg_key, len) | ||
282 | && !index--) { | ||
283 | strncpy(sz_value, &dcd_key->name[len], | ||
284 | strlen(&dcd_key->name[len]) + 1); | ||
285 | break; | ||
286 | } | ||
287 | } | ||
288 | spin_unlock(&dbdcd_lock); | ||
289 | |||
290 | if (&dcd_key->link == ®_key_list) | ||
291 | status = -ENODATA; | ||
292 | } | ||
293 | |||
294 | if (!status) { | ||
295 | /* Create UUID value using string retrieved from | ||
296 | * registry. */ | ||
297 | status = dcd_uuid_from_string(sz_value, &dsp_uuid_obj); | ||
298 | |||
299 | if (!status) { | ||
300 | *uuid_obj = dsp_uuid_obj; | ||
301 | |||
302 | /* Increment enum_refs to update reference | ||
303 | * count. */ | ||
304 | enum_refs++; | ||
305 | } | ||
306 | } else if (status == -ENODATA) { | ||
307 | /* At the end of enumeration. Reset enum_refs. */ | ||
308 | enum_refs = 0; | ||
309 | |||
310 | /* | ||
311 | * TODO: Revisit, this is not an error case but code | ||
312 | * expects non-zero value. | ||
313 | */ | ||
314 | status = ENODATA; | ||
315 | } else { | ||
316 | status = -EPERM; | ||
317 | } | ||
318 | } | ||
319 | |||
320 | return status; | ||
321 | } | ||
322 | |||
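Editor's note: the registry-key dance above (strncpy, strncat, bounds checks) recurs throughout this file; every key is DCD_REGKEY, an underscore, the object type as a decimal string, and the UUID rendered by the kernel's "%pUL" format. A minimal userspace sketch of the resulting key shape follows; the prefix, object type 2 and UUID below are illustrative placeholders, not the driver's actual DCD_REGKEY or data.

#include <stdio.h>

/* Placeholder prefix; the real value comes from DCD_REGKEY in the driver headers. */
#define FAKE_DCD_REGKEY "Software/TexasInstruments/DspBridge/DCD"

int main(void)
{
	char key[256];

	/* Same shape the driver assembles piecewise with strncat():
	 * "<DCD_REGKEY>_<obj_type><uuid-string>"                    */
	snprintf(key, sizeof(key), "%s_%d%s", FAKE_DCD_REGKEY, 2,
		 "24BC8D90-BB45-11D4-B756-006008BDB66F");
	printf("%s\n", key);
	return 0;
}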
323 | /* | ||
324 | * ======== dcd_exit ======== | ||
325 | * Purpose: | ||
326 | * Discontinue usage of the DCD module. | ||
327 | */ | ||
328 | void dcd_exit(void) | ||
329 | { | ||
330 | struct dcd_key_elem *rv, *rv_tmp; | ||
331 | |||
332 | refs--; | ||
333 | if (refs == 0) { | ||
334 | list_for_each_entry_safe(rv, rv_tmp, ®_key_list, link) { | ||
335 | list_del(&rv->link); | ||
336 | kfree(rv->path); | ||
337 | kfree(rv); | ||
338 | } | ||
339 | } | ||
340 | |||
341 | } | ||
342 | |||
343 | /* | ||
344 | * ======== dcd_get_dep_libs ======== | ||
345 | */ | ||
346 | int dcd_get_dep_libs(struct dcd_manager *hdcd_mgr, | ||
347 | struct dsp_uuid *uuid_obj, | ||
348 | u16 num_libs, struct dsp_uuid *dep_lib_uuids, | ||
349 | bool *prstnt_dep_libs, | ||
350 | enum nldr_phase phase) | ||
351 | { | ||
352 | int status = 0; | ||
353 | |||
354 | status = | ||
355 | get_dep_lib_info(hdcd_mgr, uuid_obj, &num_libs, NULL, dep_lib_uuids, | ||
356 | prstnt_dep_libs, phase); | ||
357 | |||
358 | return status; | ||
359 | } | ||
360 | |||
361 | /* | ||
362 | * ======== dcd_get_num_dep_libs ======== | ||
363 | */ | ||
364 | int dcd_get_num_dep_libs(struct dcd_manager *hdcd_mgr, | ||
365 | struct dsp_uuid *uuid_obj, | ||
366 | u16 *num_libs, u16 *num_pers_libs, | ||
367 | enum nldr_phase phase) | ||
368 | { | ||
369 | int status = 0; | ||
370 | |||
371 | status = get_dep_lib_info(hdcd_mgr, uuid_obj, num_libs, num_pers_libs, | ||
372 | NULL, NULL, phase); | ||
373 | |||
374 | return status; | ||
375 | } | ||
376 | |||
377 | /* | ||
378 | * ======== dcd_get_object_def ======== | ||
379 | * Purpose: | ||
380 | * Retrieves the properties of a node or processor based on the UUID and | ||
381 | * object type. | ||
382 | */ | ||
383 | int dcd_get_object_def(struct dcd_manager *hdcd_mgr, | ||
384 | struct dsp_uuid *obj_uuid, | ||
385 | enum dsp_dcdobjtype obj_type, | ||
386 | struct dcd_genericobj *obj_def) | ||
387 | { | ||
388 | struct dcd_manager *dcd_mgr_obj = hdcd_mgr; /* ptr to DCD mgr */ | ||
389 | struct cod_libraryobj *lib = NULL; | ||
390 | int status = 0; | ||
391 | int len; | ||
392 | u32 ul_addr = 0; /* Used by cod_get_section */ | ||
393 | u32 ul_len = 0; /* Used by cod_get_section */ | ||
394 | u32 dw_buf_size; /* Used by REG functions */ | ||
395 | char sz_reg_key[DCD_MAXPATHLENGTH]; | ||
396 | char *sz_uuid; /*[MAXUUIDLEN]; */ | ||
397 | char *tmp; | ||
398 | struct dcd_key_elem *dcd_key = NULL; | ||
399 | char sz_sect_name[MAXUUIDLEN + 2]; /* ".[UUID]\0" */ | ||
400 | char *psz_coff_buf; | ||
401 | u32 dw_key_len; /* Len of REG key. */ | ||
402 | char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */ | ||
403 | |||
404 | sz_uuid = kzalloc(MAXUUIDLEN, GFP_KERNEL); | ||
405 | if (!sz_uuid) { | ||
406 | status = -ENOMEM; | ||
407 | goto func_end; | ||
408 | } | ||
409 | |||
410 | if (!hdcd_mgr) { | ||
411 | status = -EFAULT; | ||
412 | goto func_end; | ||
413 | } | ||
414 | |||
415 | /* Pre-determine the final key length: the length of DCD_REGKEY + | ||
416 | * "_\0" + the length of the sz_obj_type string + a terminating NULL */ | ||
417 | dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1; | ||
418 | |||
419 | /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */ | ||
420 | strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1); | ||
421 | |||
422 | if ((strlen(sz_reg_key) + strlen("_\0")) < DCD_MAXPATHLENGTH) | ||
423 | strncat(sz_reg_key, "_\0", 2); | ||
424 | else | ||
425 | status = -EPERM; | ||
426 | |||
427 | status = snprintf(sz_obj_type, MAX_INT2CHAR_LENGTH, "%d", obj_type); | ||
428 | if (status == -1) { | ||
429 | status = -EPERM; | ||
430 | } else { | ||
431 | status = 0; | ||
432 | |||
433 | if ((strlen(sz_reg_key) + strlen(sz_obj_type)) < | ||
434 | DCD_MAXPATHLENGTH) { | ||
435 | strncat(sz_reg_key, sz_obj_type, | ||
436 | strlen(sz_obj_type) + 1); | ||
437 | } else { | ||
438 | status = -EPERM; | ||
439 | } | ||
440 | |||
441 | /* Create UUID value to set in registry. */ | ||
442 | snprintf(sz_uuid, MAXUUIDLEN, "%pUL", obj_uuid); | ||
443 | |||
444 | if ((strlen(sz_reg_key) + MAXUUIDLEN) < DCD_MAXPATHLENGTH) | ||
445 | strncat(sz_reg_key, sz_uuid, MAXUUIDLEN); | ||
446 | else | ||
447 | status = -EPERM; | ||
448 | |||
449 | /* Retrieve paths from the registry based on struct dsp_uuid */ | ||
450 | dw_buf_size = DCD_MAXPATHLENGTH; | ||
451 | } | ||
452 | if (!status) { | ||
453 | spin_lock(&dbdcd_lock); | ||
454 | list_for_each_entry(dcd_key, ®_key_list, link) { | ||
455 | if (!strncmp(dcd_key->name, sz_reg_key, | ||
456 | strlen(sz_reg_key) + 1)) | ||
457 | break; | ||
458 | } | ||
459 | spin_unlock(&dbdcd_lock); | ||
460 | if (&dcd_key->link == ®_key_list) { | ||
461 | status = -ENOKEY; | ||
462 | goto func_end; | ||
463 | } | ||
464 | } | ||
465 | |||
466 | |||
467 | /* Open COFF file. */ | ||
468 | status = cod_open(dcd_mgr_obj->cod_mgr, dcd_key->path, | ||
469 | COD_NOLOAD, &lib); | ||
470 | if (status) { | ||
471 | status = -EACCES; | ||
472 | goto func_end; | ||
473 | } | ||
474 | |||
475 | /* Ensure sz_uuid + 1 is not greater than sizeof sz_sect_name. */ | ||
476 | len = strlen(sz_uuid); | ||
477 | if (len + 1 > sizeof(sz_sect_name)) { | ||
478 | status = -EPERM; | ||
479 | goto func_end; | ||
480 | } | ||
481 | |||
482 | /* Create the section name based on the node UUID. A period is | ||
483 | * prepended to the UUID string to form the section name. | ||
484 | * I.e. ".24BC8D90_BB45_11d4_B756_006008BDB66F" */ | ||
485 | |||
486 | len -= 4; /* uuid has 4 delimiters '-' */ | ||
487 | tmp = sz_uuid; | ||
488 | |||
489 | strncpy(sz_sect_name, ".", 2); | ||
490 | do { | ||
491 | char *uuid = strsep(&tmp, "-"); | ||
492 | |||
493 | if (!uuid) | ||
494 | break; | ||
495 | len -= strlen(uuid); | ||
496 | strncat(sz_sect_name, uuid, strlen(uuid) + 1); | ||
497 | } while (len && strncat(sz_sect_name, "_", 2)); | ||
498 | |||
499 | /* Get section information. */ | ||
500 | status = cod_get_section(lib, sz_sect_name, &ul_addr, &ul_len); | ||
501 | if (status) { | ||
502 | status = -EACCES; | ||
503 | goto func_end; | ||
504 | } | ||
505 | |||
506 | /* Allocate zeroed buffer. */ | ||
507 | psz_coff_buf = kzalloc(ul_len + 4, GFP_KERNEL); | ||
508 | if (psz_coff_buf == NULL) { | ||
509 | status = -ENOMEM; | ||
510 | goto func_end; | ||
511 | } | ||
512 | #ifdef _DB_TIOMAP | ||
513 | if (strstr(dcd_key->path, "iva") == NULL) { | ||
514 | /* Locate section by objectID and read its content. */ | ||
515 | status = | ||
516 | cod_read_section(lib, sz_sect_name, psz_coff_buf, ul_len); | ||
517 | } else { | ||
518 | status = | ||
519 | cod_read_section(lib, sz_sect_name, psz_coff_buf, ul_len); | ||
520 | dev_dbg(bridge, "%s: Skipped Byte swap for IVA!!\n", __func__); | ||
521 | } | ||
522 | #else | ||
523 | status = cod_read_section(lib, sz_sect_name, psz_coff_buf, ul_len); | ||
524 | #endif | ||
525 | if (!status) { | ||
526 | /* Compress DSP buffer to conform to PC format. */ | ||
527 | if (strstr(dcd_key->path, "iva") == NULL) { | ||
528 | compress_buf(psz_coff_buf, ul_len, DSPWORDSIZE); | ||
529 | } else { | ||
530 | compress_buf(psz_coff_buf, ul_len, 1); | ||
531 | dev_dbg(bridge, "%s: Compressing IVA COFF buffer by 1 " | ||
532 | "for IVA!!\n", __func__); | ||
533 | } | ||
534 | |||
535 | /* Parse the content of the COFF buffer. */ | ||
536 | status = | ||
537 | get_attrs_from_buf(psz_coff_buf, ul_len, obj_type, obj_def); | ||
538 | if (status) | ||
539 | status = -EACCES; | ||
540 | } else { | ||
541 | status = -EACCES; | ||
542 | } | ||
543 | |||
544 | /* Free the previously allocated dynamic buffer. */ | ||
545 | kfree(psz_coff_buf); | ||
546 | func_end: | ||
547 | if (lib) | ||
548 | cod_close(lib); | ||
549 | |||
550 | kfree(sz_uuid); | ||
551 | |||
552 | return status; | ||
553 | } | ||
554 | |||
555 | /* | ||
556 | * ======== dcd_get_objects ======== | ||
557 | */ | ||
558 | int dcd_get_objects(struct dcd_manager *hdcd_mgr, | ||
559 | char *sz_coff_path, dcd_registerfxn register_fxn, | ||
560 | void *handle) | ||
561 | { | ||
562 | struct dcd_manager *dcd_mgr_obj = hdcd_mgr; | ||
563 | int status = 0; | ||
564 | char *psz_coff_buf; | ||
565 | char *psz_cur; | ||
566 | struct cod_libraryobj *lib = NULL; | ||
567 | u32 ul_addr = 0; /* Used by cod_get_section */ | ||
568 | u32 ul_len = 0; /* Used by cod_get_section */ | ||
569 | char seps[] = ":, "; | ||
570 | char *token = NULL; | ||
571 | struct dsp_uuid dsp_uuid_obj; | ||
572 | s32 object_type; | ||
573 | |||
574 | if (!hdcd_mgr) { | ||
575 | status = -EFAULT; | ||
576 | goto func_end; | ||
577 | } | ||
578 | |||
579 | /* Open DSP coff file, don't load symbols. */ | ||
580 | status = cod_open(dcd_mgr_obj->cod_mgr, sz_coff_path, COD_NOLOAD, &lib); | ||
581 | if (status) { | ||
582 | status = -EACCES; | ||
583 | goto func_cont; | ||
584 | } | ||
585 | |||
586 | /* Get DCD_REGISTER_SECTION section information. */ | ||
587 | status = cod_get_section(lib, DCD_REGISTER_SECTION, &ul_addr, &ul_len); | ||
588 | if (status || !(ul_len > 0)) { | ||
589 | status = -EACCES; | ||
590 | goto func_cont; | ||
591 | } | ||
592 | |||
593 | /* Allocate zeroed buffer. */ | ||
594 | psz_coff_buf = kzalloc(ul_len + 4, GFP_KERNEL); | ||
595 | if (psz_coff_buf == NULL) { | ||
596 | status = -ENOMEM; | ||
597 | goto func_cont; | ||
598 | } | ||
599 | #ifdef _DB_TIOMAP | ||
600 | if (strstr(sz_coff_path, "iva") == NULL) { | ||
601 | /* Locate section by objectID and read its content. */ | ||
602 | status = cod_read_section(lib, DCD_REGISTER_SECTION, | ||
603 | psz_coff_buf, ul_len); | ||
604 | } else { | ||
605 | dev_dbg(bridge, "%s: Skipped Byte swap for IVA!!\n", __func__); | ||
606 | status = cod_read_section(lib, DCD_REGISTER_SECTION, | ||
607 | psz_coff_buf, ul_len); | ||
608 | } | ||
609 | #else | ||
610 | status = | ||
611 | cod_read_section(lib, DCD_REGISTER_SECTION, psz_coff_buf, ul_len); | ||
612 | #endif | ||
613 | if (!status) { | ||
614 | /* Compress DSP buffer to conform to PC format. */ | ||
615 | if (strstr(sz_coff_path, "iva") == NULL) { | ||
616 | compress_buf(psz_coff_buf, ul_len, DSPWORDSIZE); | ||
617 | } else { | ||
618 | compress_buf(psz_coff_buf, ul_len, 1); | ||
619 | dev_dbg(bridge, "%s: Compress COFF buffer with 1 word " | ||
620 | "for IVA!!\n", __func__); | ||
621 | } | ||
622 | |||
623 | /* Read from buffer and register object in buffer. */ | ||
624 | psz_cur = psz_coff_buf; | ||
625 | while ((token = strsep(&psz_cur, seps)) && *token != '\0') { | ||
626 | /* Retrieve UUID string. */ | ||
627 | status = dcd_uuid_from_string(token, &dsp_uuid_obj); | ||
628 | |||
629 | if (!status) { | ||
630 | /* Retrieve object type */ | ||
631 | token = strsep(&psz_cur, seps); | ||
632 | |||
633 | /* Retrieve object type */ | ||
634 | object_type = atoi(token); | ||
635 | |||
636 | /* | ||
637 | * Apply register_fxn to the found DCD object. | ||
638 | * Possible actions include: | ||
639 | * | ||
640 | * 1) Register found DCD object. | ||
641 | * 2) Unregister found DCD object | ||
642 | * (when handle == NULL) | ||
643 | * 3) Add overlay node. | ||
644 | */ | ||
645 | status = | ||
646 | register_fxn(&dsp_uuid_obj, object_type, | ||
647 | handle); | ||
648 | } | ||
649 | if (status) { | ||
650 | /* if error occurs, break from while loop. */ | ||
651 | break; | ||
652 | } | ||
653 | } | ||
654 | } else { | ||
655 | status = -EACCES; | ||
656 | } | ||
657 | |||
658 | /* Free the previously allocated dynamic buffer. */ | ||
659 | kfree(psz_coff_buf); | ||
660 | func_cont: | ||
661 | if (lib) | ||
662 | cod_close(lib); | ||
663 | |||
664 | func_end: | ||
665 | return status; | ||
666 | } | ||
667 | |||
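Editor's note: dcd_get_objects() above walks the compressed register section as alternating UUID / object-type tokens split on ":, ". The following userspace sketch isolates just that tokenising loop; the section string and the stub callback standing in for register_fxn are made up for illustration.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stub standing in for the driver's dcd_registerfxn callback. */
static int stub_register(const char *uuid, int obj_type)
{
	printf("register %s as type %d\n", uuid, obj_type);
	return 0;
}

int main(void)
{
	/* Hypothetical section contents: UUID, type, UUID, type, ... */
	char buf[] = "24BC8D90-BB45-11D4-B756-006008BDB66F:0,"
		     "3A9D5C21-0001-11D4-B756-006008BDB66F:1";
	char seps[] = ":, ";
	char *cur = buf, *token;

	while ((token = strsep(&cur, seps)) && *token != '\0') {
		const char *uuid = token;         /* UUID string */

		token = strsep(&cur, seps);       /* object type */
		if (!token)
			break;
		if (stub_register(uuid, atoi(token)))
			break;                    /* stop on first error */
	}
	return 0;
}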
668 | /* | ||
669 | * ======== dcd_get_library_name ======== | ||
670 | * Purpose: | ||
671 | * Retrieves the library name for the given UUID. | ||
672 | * | ||
673 | */ | ||
674 | int dcd_get_library_name(struct dcd_manager *hdcd_mgr, | ||
675 | struct dsp_uuid *uuid_obj, | ||
676 | char *str_lib_name, | ||
677 | u32 *buff_size, | ||
678 | enum nldr_phase phase, bool *phase_split) | ||
679 | { | ||
680 | char sz_reg_key[DCD_MAXPATHLENGTH]; | ||
681 | char sz_uuid[MAXUUIDLEN]; | ||
682 | u32 dw_key_len; /* Len of REG key. */ | ||
683 | char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */ | ||
684 | int status = 0; | ||
685 | struct dcd_key_elem *dcd_key = NULL; | ||
686 | |||
687 | dev_dbg(bridge, "%s: hdcd_mgr %p, uuid_obj %p, str_lib_name %p," | ||
688 | " buff_size %p\n", __func__, hdcd_mgr, uuid_obj, str_lib_name, | ||
689 | buff_size); | ||
690 | |||
691 | /* | ||
692 | * Pre-determine the final key length: the length of DCD_REGKEY + | ||
693 | * "_\0" + the length of the sz_obj_type string + a terminating NULL. | ||
694 | */ | ||
695 | dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1; | ||
696 | |||
697 | /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */ | ||
698 | strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1); | ||
699 | if ((strlen(sz_reg_key) + strlen("_\0")) < DCD_MAXPATHLENGTH) | ||
700 | strncat(sz_reg_key, "_\0", 2); | ||
701 | else | ||
702 | status = -EPERM; | ||
703 | |||
704 | switch (phase) { | ||
705 | case NLDR_CREATE: | ||
706 | /* create phase type */ | ||
707 | sprintf(sz_obj_type, "%d", DSP_DCDCREATELIBTYPE); | ||
708 | break; | ||
709 | case NLDR_EXECUTE: | ||
710 | /* execute phase type */ | ||
711 | sprintf(sz_obj_type, "%d", DSP_DCDEXECUTELIBTYPE); | ||
712 | break; | ||
713 | case NLDR_DELETE: | ||
714 | /* delete phase type */ | ||
715 | sprintf(sz_obj_type, "%d", DSP_DCDDELETELIBTYPE); | ||
716 | break; | ||
717 | case NLDR_NOPHASE: | ||
718 | /* known to be a dependent library */ | ||
719 | sprintf(sz_obj_type, "%d", DSP_DCDLIBRARYTYPE); | ||
720 | break; | ||
721 | default: | ||
722 | status = -EINVAL; | ||
723 | } | ||
724 | if (!status) { | ||
725 | if ((strlen(sz_reg_key) + strlen(sz_obj_type)) < | ||
726 | DCD_MAXPATHLENGTH) { | ||
727 | strncat(sz_reg_key, sz_obj_type, | ||
728 | strlen(sz_obj_type) + 1); | ||
729 | } else { | ||
730 | status = -EPERM; | ||
731 | } | ||
732 | /* Create UUID value to find match in registry. */ | ||
733 | snprintf(sz_uuid, MAXUUIDLEN, "%pUL", uuid_obj); | ||
734 | if ((strlen(sz_reg_key) + MAXUUIDLEN) < DCD_MAXPATHLENGTH) | ||
735 | strncat(sz_reg_key, sz_uuid, MAXUUIDLEN); | ||
736 | else | ||
737 | status = -EPERM; | ||
738 | } | ||
739 | if (!status) { | ||
740 | spin_lock(&dbdcd_lock); | ||
741 | list_for_each_entry(dcd_key, ®_key_list, link) { | ||
742 | /* See if the name matches. */ | ||
743 | if (!strncmp(dcd_key->name, sz_reg_key, | ||
744 | strlen(sz_reg_key) + 1)) | ||
745 | break; | ||
746 | } | ||
747 | spin_unlock(&dbdcd_lock); | ||
748 | } | ||
749 | |||
750 | if (&dcd_key->link == ®_key_list) | ||
751 | status = -ENOKEY; | ||
752 | |||
753 | /* If not found, the phases might be registered as generic LIBRARYTYPE */ | ||
754 | if (status && phase != NLDR_NOPHASE) { | ||
755 | if (phase_split) | ||
756 | *phase_split = false; | ||
757 | |||
758 | strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1); | ||
759 | if ((strlen(sz_reg_key) + strlen("_\0")) < | ||
760 | DCD_MAXPATHLENGTH) { | ||
761 | strncat(sz_reg_key, "_\0", 2); | ||
762 | } else { | ||
763 | status = -EPERM; | ||
764 | } | ||
765 | sprintf(sz_obj_type, "%d", DSP_DCDLIBRARYTYPE); | ||
766 | if ((strlen(sz_reg_key) + strlen(sz_obj_type)) | ||
767 | < DCD_MAXPATHLENGTH) { | ||
768 | strncat(sz_reg_key, sz_obj_type, | ||
769 | strlen(sz_obj_type) + 1); | ||
770 | } else { | ||
771 | status = -EPERM; | ||
772 | } | ||
773 | snprintf(sz_uuid, MAXUUIDLEN, "%pUL", uuid_obj); | ||
774 | if ((strlen(sz_reg_key) + MAXUUIDLEN) < DCD_MAXPATHLENGTH) | ||
775 | strncat(sz_reg_key, sz_uuid, MAXUUIDLEN); | ||
776 | else | ||
777 | status = -EPERM; | ||
778 | |||
779 | spin_lock(&dbdcd_lock); | ||
780 | list_for_each_entry(dcd_key, ®_key_list, link) { | ||
781 | /* See if the name matches. */ | ||
782 | if (!strncmp(dcd_key->name, sz_reg_key, | ||
783 | strlen(sz_reg_key) + 1)) | ||
784 | break; | ||
785 | } | ||
786 | spin_unlock(&dbdcd_lock); | ||
787 | |||
788 | status = (&dcd_key->link != ®_key_list) ? | ||
789 | 0 : -ENOKEY; | ||
790 | } | ||
791 | |||
792 | if (!status) | ||
793 | memcpy(str_lib_name, dcd_key->path, strlen(dcd_key->path) + 1); | ||
794 | return status; | ||
795 | } | ||
796 | |||
797 | /* | ||
798 | * ======== dcd_init ======== | ||
799 | * Purpose: | ||
800 | * Initialize the DCD module. | ||
801 | */ | ||
802 | bool dcd_init(void) | ||
803 | { | ||
804 | bool ret = true; | ||
805 | |||
806 | if (refs == 0) | ||
807 | INIT_LIST_HEAD(®_key_list); | ||
808 | |||
809 | if (ret) | ||
810 | refs++; | ||
811 | |||
812 | return ret; | ||
813 | } | ||
814 | |||
815 | /* | ||
816 | * ======== dcd_register_object ======== | ||
817 | * Purpose: | ||
818 | * Registers a node or a processor with the DCD. | ||
819 | * If psz_path_name == NULL, unregister the specified DCD object. | ||
820 | */ | ||
821 | int dcd_register_object(struct dsp_uuid *uuid_obj, | ||
822 | enum dsp_dcdobjtype obj_type, | ||
823 | char *psz_path_name) | ||
824 | { | ||
825 | int status = 0; | ||
826 | char sz_reg_key[DCD_MAXPATHLENGTH]; | ||
827 | char sz_uuid[MAXUUIDLEN + 1]; | ||
828 | u32 dw_path_size = 0; | ||
829 | u32 dw_key_len; /* Len of REG key. */ | ||
830 | char sz_obj_type[MAX_INT2CHAR_LENGTH]; /* str. rep. of obj_type. */ | ||
831 | struct dcd_key_elem *dcd_key = NULL; | ||
832 | |||
833 | dev_dbg(bridge, "%s: object UUID %p, obj_type %d, szPathName %s\n", | ||
834 | __func__, uuid_obj, obj_type, psz_path_name); | ||
835 | |||
836 | /* | ||
837 | * Pre-determine the final key length: the length of DCD_REGKEY + | ||
838 | * "_\0" + the length of the sz_obj_type string + a terminating NULL. | ||
839 | */ | ||
840 | dw_key_len = strlen(DCD_REGKEY) + 1 + sizeof(sz_obj_type) + 1; | ||
841 | |||
842 | /* Create proper REG key; concatenate DCD_REGKEY with obj_type. */ | ||
843 | strncpy(sz_reg_key, DCD_REGKEY, strlen(DCD_REGKEY) + 1); | ||
844 | if ((strlen(sz_reg_key) + strlen("_\0")) < DCD_MAXPATHLENGTH) | ||
845 | strncat(sz_reg_key, "_\0", 2); | ||
846 | else { | ||
847 | status = -EPERM; | ||
848 | goto func_end; | ||
849 | } | ||
850 | |||
851 | status = snprintf(sz_obj_type, MAX_INT2CHAR_LENGTH, "%d", obj_type); | ||
852 | if (status == -1) { | ||
853 | status = -EPERM; | ||
854 | } else { | ||
855 | status = 0; | ||
856 | if ((strlen(sz_reg_key) + strlen(sz_obj_type)) < | ||
857 | DCD_MAXPATHLENGTH) { | ||
858 | strncat(sz_reg_key, sz_obj_type, | ||
859 | strlen(sz_obj_type) + 1); | ||
860 | } else | ||
861 | status = -EPERM; | ||
862 | |||
863 | /* Create UUID value to set in registry. */ | ||
864 | snprintf(sz_uuid, MAXUUIDLEN, "%pUL", uuid_obj); | ||
865 | if ((strlen(sz_reg_key) + MAXUUIDLEN) < DCD_MAXPATHLENGTH) | ||
866 | strncat(sz_reg_key, sz_uuid, MAXUUIDLEN); | ||
867 | else | ||
868 | status = -EPERM; | ||
869 | } | ||
870 | |||
871 | if (status) | ||
872 | goto func_end; | ||
873 | |||
874 | /* | ||
875 | * If psz_path_name != NULL, perform registration, otherwise, | ||
876 | * perform unregistration. | ||
877 | */ | ||
878 | |||
879 | if (psz_path_name) { | ||
880 | dw_path_size = strlen(psz_path_name) + 1; | ||
881 | spin_lock(&dbdcd_lock); | ||
882 | list_for_each_entry(dcd_key, ®_key_list, link) { | ||
883 | /* See if the name matches. */ | ||
884 | if (!strncmp(dcd_key->name, sz_reg_key, | ||
885 | strlen(sz_reg_key) + 1)) | ||
886 | break; | ||
887 | } | ||
888 | spin_unlock(&dbdcd_lock); | ||
889 | if (&dcd_key->link == ®_key_list) { | ||
890 | /* | ||
891 | * Add new reg value (UUID+obj_type) | ||
892 | * with COFF path info | ||
893 | */ | ||
894 | |||
895 | dcd_key = kmalloc(sizeof(struct dcd_key_elem), | ||
896 | GFP_KERNEL); | ||
897 | if (!dcd_key) { | ||
898 | status = -ENOMEM; | ||
899 | goto func_end; | ||
900 | } | ||
901 | |||
902 | dcd_key->path = kmalloc(dw_path_size, GFP_KERNEL); | ||
903 | |||
904 | if (!dcd_key->path) { | ||
905 | kfree(dcd_key); | ||
906 | status = -ENOMEM; | ||
907 | goto func_end; | ||
908 | } | ||
909 | |||
910 | strncpy(dcd_key->name, sz_reg_key, | ||
911 | strlen(sz_reg_key) + 1); | ||
912 | strncpy(dcd_key->path, psz_path_name , | ||
913 | dw_path_size); | ||
914 | spin_lock(&dbdcd_lock); | ||
915 | list_add_tail(&dcd_key->link, ®_key_list); | ||
916 | spin_unlock(&dbdcd_lock); | ||
917 | } else { | ||
918 | /* Make sure the new data is the same. */ | ||
919 | if (strncmp(dcd_key->path, psz_path_name, | ||
920 | dw_path_size)) { | ||
921 | /* The caller needs a different data size! */ | ||
922 | kfree(dcd_key->path); | ||
923 | dcd_key->path = kmalloc(dw_path_size, | ||
924 | GFP_KERNEL); | ||
925 | if (dcd_key->path == NULL) { | ||
926 | status = -ENOMEM; | ||
927 | goto func_end; | ||
928 | } | ||
929 | } | ||
930 | |||
931 | /* We have a match! Copy out the data. */ | ||
932 | memcpy(dcd_key->path, psz_path_name, dw_path_size); | ||
933 | } | ||
934 | dev_dbg(bridge, "%s: psz_path_name=%s, dw_path_size=%d\n", | ||
935 | __func__, psz_path_name, dw_path_size); | ||
936 | } else { | ||
937 | /* Deregister an existing object */ | ||
938 | spin_lock(&dbdcd_lock); | ||
939 | list_for_each_entry(dcd_key, ®_key_list, link) { | ||
940 | if (!strncmp(dcd_key->name, sz_reg_key, | ||
941 | strlen(sz_reg_key) + 1)) { | ||
942 | list_del(&dcd_key->link); | ||
943 | kfree(dcd_key->path); | ||
944 | kfree(dcd_key); | ||
945 | break; | ||
946 | } | ||
947 | } | ||
948 | spin_unlock(&dbdcd_lock); | ||
949 | if (&dcd_key->link == ®_key_list) | ||
950 | status = -EPERM; | ||
951 | } | ||
952 | |||
953 | if (!status) { | ||
954 | /* | ||
955 | * Because the node database has been updated through a | ||
956 | * successful object registration/de-registration operation, | ||
957 | * we need to reset the object enumeration counter to allow | ||
958 | * current enumerations to reflect this update in the node | ||
959 | * database. | ||
960 | */ | ||
961 | enum_refs = 0; | ||
962 | } | ||
963 | func_end: | ||
964 | return status; | ||
965 | } | ||
966 | |||
967 | /* | ||
968 | * ======== dcd_unregister_object ======== | ||
969 | * Call DCD_Register object with psz_path_name set to NULL to | ||
970 | * perform actual object de-registration. | ||
971 | */ | ||
972 | int dcd_unregister_object(struct dsp_uuid *uuid_obj, | ||
973 | enum dsp_dcdobjtype obj_type) | ||
974 | { | ||
975 | int status = 0; | ||
976 | |||
977 | /* | ||
978 | * When dcd_register_object is called with NULL as pathname, | ||
979 | * it indicates an unregister object operation. | ||
980 | */ | ||
981 | status = dcd_register_object(uuid_obj, obj_type, NULL); | ||
982 | |||
983 | return status; | ||
984 | } | ||
985 | |||
986 | /* | ||
987 | ********************************************************************** | ||
988 | * DCD Helper Functions | ||
989 | ********************************************************************** | ||
990 | */ | ||
991 | |||
992 | /* | ||
993 | * ======== atoi ======== | ||
994 | * Purpose: | ||
995 | * This function converts strings in decimal or hex format to integers. | ||
996 | */ | ||
997 | static s32 atoi(char *psz_buf) | ||
998 | { | ||
999 | char *pch = psz_buf; | ||
1000 | s32 base = 0; | ||
1001 | |||
1002 | while (isspace(*pch)) | ||
1003 | pch++; | ||
1004 | |||
1005 | if (*pch == '-' || *pch == '+') { | ||
1006 | base = 10; | ||
1007 | pch++; | ||
1008 | } else if (*pch && tolower(pch[strlen(pch) - 1]) == 'h') { | ||
1009 | base = 16; | ||
1010 | } | ||
1011 | |||
1012 | return simple_strtoul(pch, NULL, base); | ||
1013 | } | ||
1014 | |||
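Editor's note: the local atoi() above accepts plain decimal, "0x"-prefixed hex (via base-0 auto-detection), and TI-assembler-style "1234h" hex. A standalone userspace sketch of the same base selection, with strtoul standing in for the kernel's simple_strtoul:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace re-creation of the base selection in the driver's atoi(). */
static long dcd_atoi(const char *s)
{
	int base = 0;                     /* 0 lets strtoul auto-detect "0x..." */

	while (isspace((unsigned char)*s))
		s++;
	if (*s == '-' || *s == '+') {
		base = 10;                /* explicit sign => decimal */
		s++;
	} else if (*s && tolower((unsigned char)s[strlen(s) - 1]) == 'h') {
		base = 16;                /* trailing 'h' => hex, e.g. "20h" */
	}
	return strtoul(s, NULL, base);
}

int main(void)
{
	/* Prints "42 32 32". */
	printf("%ld %ld %ld\n", dcd_atoi("42"), dcd_atoi("0x20"), dcd_atoi("20h"));
	return 0;
}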
1015 | /* | ||
1016 | * ======== get_attrs_from_buf ======== | ||
1017 | * Purpose: | ||
1018 | * Parse the content of a buffer filled with DSP-side data and | ||
1019 | * retrieve an object's attributes from it. IMPORTANT: Assume the | ||
1020 | * buffer has been converted from DSP format to GPP format. | ||
1021 | */ | ||
1022 | static int get_attrs_from_buf(char *psz_buf, u32 ul_buf_size, | ||
1023 | enum dsp_dcdobjtype obj_type, | ||
1024 | struct dcd_genericobj *gen_obj) | ||
1025 | { | ||
1026 | int status = 0; | ||
1027 | char seps[] = ", "; | ||
1028 | char *psz_cur; | ||
1029 | char *token; | ||
1030 | s32 token_len = 0; | ||
1031 | u32 i = 0; | ||
1032 | #ifdef _DB_TIOMAP | ||
1033 | s32 entry_id; | ||
1034 | #endif | ||
1035 | |||
1036 | switch (obj_type) { | ||
1037 | case DSP_DCDNODETYPE: | ||
1038 | /* | ||
1039 | * Parse COFF sect buffer to retrieve individual tokens used | ||
1040 | * to fill in object attrs. | ||
1041 | */ | ||
1042 | psz_cur = psz_buf; | ||
1043 | token = strsep(&psz_cur, seps); | ||
1044 | |||
1045 | /* u32 cb_struct */ | ||
1046 | gen_obj->obj_data.node_obj.ndb_props.cb_struct = | ||
1047 | (u32) atoi(token); | ||
1048 | token = strsep(&psz_cur, seps); | ||
1049 | |||
1050 | /* dsp_uuid ui_node_id */ | ||
1051 | status = dcd_uuid_from_string(token, | ||
1052 | &gen_obj->obj_data.node_obj. | ||
1053 | ndb_props.ui_node_id); | ||
1054 | if (status) | ||
1055 | break; | ||
1056 | |||
1057 | token = strsep(&psz_cur, seps); | ||
1058 | |||
1059 | /* ac_name */ | ||
1060 | token_len = strlen(token); | ||
1061 | if (token_len > DSP_MAXNAMELEN - 1) | ||
1062 | token_len = DSP_MAXNAMELEN - 1; | ||
1063 | |||
1064 | strncpy(gen_obj->obj_data.node_obj.ndb_props.ac_name, | ||
1065 | token, token_len); | ||
1066 | gen_obj->obj_data.node_obj.ndb_props.ac_name[token_len] = '\0'; | ||
1067 | token = strsep(&psz_cur, seps); | ||
1068 | /* u32 ntype */ | ||
1069 | gen_obj->obj_data.node_obj.ndb_props.ntype = atoi(token); | ||
1070 | token = strsep(&psz_cur, seps); | ||
1071 | /* u32 cache_on_gpp */ | ||
1072 | gen_obj->obj_data.node_obj.ndb_props.cache_on_gpp = atoi(token); | ||
1073 | token = strsep(&psz_cur, seps); | ||
1074 | /* dsp_resourcereqmts dsp_resource_reqmts */ | ||
1075 | gen_obj->obj_data.node_obj.ndb_props.dsp_resource_reqmts. | ||
1076 | cb_struct = (u32) atoi(token); | ||
1077 | token = strsep(&psz_cur, seps); | ||
1078 | |||
1079 | gen_obj->obj_data.node_obj.ndb_props. | ||
1080 | dsp_resource_reqmts.static_data_size = atoi(token); | ||
1081 | token = strsep(&psz_cur, seps); | ||
1082 | gen_obj->obj_data.node_obj.ndb_props. | ||
1083 | dsp_resource_reqmts.global_data_size = atoi(token); | ||
1084 | token = strsep(&psz_cur, seps); | ||
1085 | gen_obj->obj_data.node_obj.ndb_props. | ||
1086 | dsp_resource_reqmts.program_mem_size = atoi(token); | ||
1087 | token = strsep(&psz_cur, seps); | ||
1088 | gen_obj->obj_data.node_obj.ndb_props. | ||
1089 | dsp_resource_reqmts.wc_execution_time = atoi(token); | ||
1090 | token = strsep(&psz_cur, seps); | ||
1091 | gen_obj->obj_data.node_obj.ndb_props. | ||
1092 | dsp_resource_reqmts.wc_period = atoi(token); | ||
1093 | token = strsep(&psz_cur, seps); | ||
1094 | |||
1095 | gen_obj->obj_data.node_obj.ndb_props. | ||
1096 | dsp_resource_reqmts.wc_deadline = atoi(token); | ||
1097 | token = strsep(&psz_cur, seps); | ||
1098 | |||
1099 | gen_obj->obj_data.node_obj.ndb_props. | ||
1100 | dsp_resource_reqmts.avg_exection_time = atoi(token); | ||
1101 | token = strsep(&psz_cur, seps); | ||
1102 | |||
1103 | gen_obj->obj_data.node_obj.ndb_props. | ||
1104 | dsp_resource_reqmts.minimum_period = atoi(token); | ||
1105 | token = strsep(&psz_cur, seps); | ||
1106 | |||
1107 | /* s32 prio */ | ||
1108 | gen_obj->obj_data.node_obj.ndb_props.prio = atoi(token); | ||
1109 | token = strsep(&psz_cur, seps); | ||
1110 | |||
1111 | /* u32 stack_size */ | ||
1112 | gen_obj->obj_data.node_obj.ndb_props.stack_size = atoi(token); | ||
1113 | token = strsep(&psz_cur, seps); | ||
1114 | |||
1115 | /* u32 sys_stack_size */ | ||
1116 | gen_obj->obj_data.node_obj.ndb_props.sys_stack_size = | ||
1117 | atoi(token); | ||
1118 | token = strsep(&psz_cur, seps); | ||
1119 | |||
1120 | /* u32 stack_seg */ | ||
1121 | gen_obj->obj_data.node_obj.ndb_props.stack_seg = atoi(token); | ||
1122 | token = strsep(&psz_cur, seps); | ||
1123 | |||
1124 | /* u32 message_depth */ | ||
1125 | gen_obj->obj_data.node_obj.ndb_props.message_depth = | ||
1126 | atoi(token); | ||
1127 | token = strsep(&psz_cur, seps); | ||
1128 | |||
1129 | /* u32 num_input_streams */ | ||
1130 | gen_obj->obj_data.node_obj.ndb_props.num_input_streams = | ||
1131 | atoi(token); | ||
1132 | token = strsep(&psz_cur, seps); | ||
1133 | |||
1134 | /* u32 num_output_streams */ | ||
1135 | gen_obj->obj_data.node_obj.ndb_props.num_output_streams = | ||
1136 | atoi(token); | ||
1137 | token = strsep(&psz_cur, seps); | ||
1138 | |||
1139 | /* u32 timeout */ | ||
1140 | gen_obj->obj_data.node_obj.ndb_props.timeout = atoi(token); | ||
1141 | token = strsep(&psz_cur, seps); | ||
1142 | |||
1143 | /* char *str_create_phase_fxn */ | ||
1144 | token_len = strlen(token); | ||
1145 | gen_obj->obj_data.node_obj.str_create_phase_fxn = | ||
1146 | kzalloc(token_len + 1, GFP_KERNEL); | ||
1147 | strncpy(gen_obj->obj_data.node_obj.str_create_phase_fxn, | ||
1148 | token, token_len); | ||
1149 | gen_obj->obj_data.node_obj.str_create_phase_fxn[token_len] = | ||
1150 | '\0'; | ||
1151 | token = strsep(&psz_cur, seps); | ||
1152 | |||
1153 | /* char *str_execute_phase_fxn */ | ||
1154 | token_len = strlen(token); | ||
1155 | gen_obj->obj_data.node_obj.str_execute_phase_fxn = | ||
1156 | kzalloc(token_len + 1, GFP_KERNEL); | ||
1157 | strncpy(gen_obj->obj_data.node_obj.str_execute_phase_fxn, | ||
1158 | token, token_len); | ||
1159 | gen_obj->obj_data.node_obj.str_execute_phase_fxn[token_len] = | ||
1160 | '\0'; | ||
1161 | token = strsep(&psz_cur, seps); | ||
1162 | |||
1163 | /* char *str_delete_phase_fxn */ | ||
1164 | token_len = strlen(token); | ||
1165 | gen_obj->obj_data.node_obj.str_delete_phase_fxn = | ||
1166 | kzalloc(token_len + 1, GFP_KERNEL); | ||
1167 | strncpy(gen_obj->obj_data.node_obj.str_delete_phase_fxn, | ||
1168 | token, token_len); | ||
1169 | gen_obj->obj_data.node_obj.str_delete_phase_fxn[token_len] = | ||
1170 | '\0'; | ||
1171 | token = strsep(&psz_cur, seps); | ||
1172 | |||
1173 | /* Segment id for message buffers */ | ||
1174 | gen_obj->obj_data.node_obj.msg_segid = atoi(token); | ||
1175 | token = strsep(&psz_cur, seps); | ||
1176 | |||
1177 | /* Message notification type */ | ||
1178 | gen_obj->obj_data.node_obj.msg_notify_type = atoi(token); | ||
1179 | token = strsep(&psz_cur, seps); | ||
1180 | |||
1181 | /* char *str_i_alg_name */ | ||
1182 | if (token) { | ||
1183 | token_len = strlen(token); | ||
1184 | gen_obj->obj_data.node_obj.str_i_alg_name = | ||
1185 | kzalloc(token_len + 1, GFP_KERNEL); | ||
1186 | strncpy(gen_obj->obj_data.node_obj.str_i_alg_name, | ||
1187 | token, token_len); | ||
1188 | gen_obj->obj_data.node_obj.str_i_alg_name[token_len] = | ||
1189 | '\0'; | ||
1190 | token = strsep(&psz_cur, seps); | ||
1191 | } | ||
1192 | |||
1193 | /* Load type (static, dynamic, or overlay) */ | ||
1194 | if (token) { | ||
1195 | gen_obj->obj_data.node_obj.load_type = atoi(token); | ||
1196 | token = strsep(&psz_cur, seps); | ||
1197 | } | ||
1198 | |||
1199 | /* Dynamic load data requirements */ | ||
1200 | if (token) { | ||
1201 | gen_obj->obj_data.node_obj.data_mem_seg_mask = | ||
1202 | atoi(token); | ||
1203 | token = strsep(&psz_cur, seps); | ||
1204 | } | ||
1205 | |||
1206 | /* Dynamic load code requirements */ | ||
1207 | if (token) { | ||
1208 | gen_obj->obj_data.node_obj.code_mem_seg_mask = | ||
1209 | atoi(token); | ||
1210 | token = strsep(&psz_cur, seps); | ||
1211 | } | ||
1212 | |||
1213 | /* Extract node profiles into node properties */ | ||
1214 | if (token) { | ||
1215 | |||
1216 | gen_obj->obj_data.node_obj.ndb_props.count_profiles = | ||
1217 | atoi(token); | ||
1218 | for (i = 0; | ||
1219 | i < | ||
1220 | gen_obj->obj_data.node_obj. | ||
1221 | ndb_props.count_profiles; i++) { | ||
1222 | token = strsep(&psz_cur, seps); | ||
1223 | if (token) { | ||
1224 | /* Heap Size for the node */ | ||
1225 | gen_obj->obj_data.node_obj. | ||
1226 | ndb_props.node_profiles[i]. | ||
1227 | heap_size = atoi(token); | ||
1228 | } | ||
1229 | } | ||
1230 | } | ||
1231 | token = strsep(&psz_cur, seps); | ||
1232 | if (token) { | ||
1233 | gen_obj->obj_data.node_obj.ndb_props.stack_seg_name = | ||
1234 | (u32) (token); | ||
1235 | } | ||
1236 | |||
1237 | break; | ||
1238 | |||
1239 | case DSP_DCDPROCESSORTYPE: | ||
1240 | /* | ||
1241 | * Parse COFF sect buffer to retrieve individual tokens used | ||
1242 | * to fill in object attrs. | ||
1243 | */ | ||
1244 | psz_cur = psz_buf; | ||
1245 | token = strsep(&psz_cur, seps); | ||
1246 | |||
1247 | gen_obj->obj_data.proc_info.cb_struct = atoi(token); | ||
1248 | token = strsep(&psz_cur, seps); | ||
1249 | |||
1250 | gen_obj->obj_data.proc_info.processor_family = atoi(token); | ||
1251 | token = strsep(&psz_cur, seps); | ||
1252 | |||
1253 | gen_obj->obj_data.proc_info.processor_type = atoi(token); | ||
1254 | token = strsep(&psz_cur, seps); | ||
1255 | |||
1256 | gen_obj->obj_data.proc_info.clock_rate = atoi(token); | ||
1257 | token = strsep(&psz_cur, seps); | ||
1258 | |||
1259 | gen_obj->obj_data.proc_info.internal_mem_size = atoi(token); | ||
1260 | token = strsep(&psz_cur, seps); | ||
1261 | |||
1262 | gen_obj->obj_data.proc_info.external_mem_size = atoi(token); | ||
1263 | token = strsep(&psz_cur, seps); | ||
1264 | |||
1265 | gen_obj->obj_data.proc_info.processor_id = atoi(token); | ||
1266 | token = strsep(&psz_cur, seps); | ||
1267 | |||
1268 | gen_obj->obj_data.proc_info.ty_running_rtos = atoi(token); | ||
1269 | token = strsep(&psz_cur, seps); | ||
1270 | |||
1271 | gen_obj->obj_data.proc_info.node_min_priority = atoi(token); | ||
1272 | token = strsep(&psz_cur, seps); | ||
1273 | |||
1274 | gen_obj->obj_data.proc_info.node_max_priority = atoi(token); | ||
1275 | |||
1276 | #ifdef _DB_TIOMAP | ||
1277 | /* The proc object may contain additional (extended) attributes. */ | ||
1278 | /* These attrs must match proc.hxx */ | ||
1279 | for (entry_id = 0; entry_id < 7; entry_id++) { | ||
1280 | token = strsep(&psz_cur, seps); | ||
1281 | gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id]. | ||
1282 | gpp_phys = atoi(token); | ||
1283 | |||
1284 | token = strsep(&psz_cur, seps); | ||
1285 | gen_obj->obj_data.ext_proc_obj.ty_tlb[entry_id]. | ||
1286 | dsp_virt = atoi(token); | ||
1287 | } | ||
1288 | #endif | ||
1289 | |||
1290 | break; | ||
1291 | |||
1292 | default: | ||
1293 | status = -EPERM; | ||
1294 | break; | ||
1295 | } | ||
1296 | |||
1297 | return status; | ||
1298 | } | ||
1299 | |||
1300 | /* | ||
1301 | * ======== compress_buf ======== | ||
1302 | * Purpose: | ||
1303 | * Compress the DSP buffer, if necessary, to conform to PC format. | ||
1304 | */ | ||
1305 | static void compress_buf(char *psz_buf, u32 ul_buf_size, s32 char_size) | ||
1306 | { | ||
1307 | char *p; | ||
1308 | char ch; | ||
1309 | char *q; | ||
1310 | |||
1311 | p = psz_buf; | ||
1312 | if (p == NULL) | ||
1313 | return; | ||
1314 | |||
1315 | for (q = psz_buf; q < (psz_buf + ul_buf_size);) { | ||
1316 | ch = dsp_char2_gpp_char(q, char_size); | ||
1317 | if (ch == '\\') { | ||
1318 | q += char_size; | ||
1319 | ch = dsp_char2_gpp_char(q, char_size); | ||
1320 | switch (ch) { | ||
1321 | case 't': | ||
1322 | *p = '\t'; | ||
1323 | break; | ||
1324 | |||
1325 | case 'n': | ||
1326 | *p = '\n'; | ||
1327 | break; | ||
1328 | |||
1329 | case 'r': | ||
1330 | *p = '\r'; | ||
1331 | break; | ||
1332 | |||
1333 | case '0': | ||
1334 | *p = '\0'; | ||
1335 | break; | ||
1336 | |||
1337 | default: | ||
1338 | *p = ch; | ||
1339 | break; | ||
1340 | } | ||
1341 | } else { | ||
1342 | *p = ch; | ||
1343 | } | ||
1344 | p++; | ||
1345 | q += char_size; | ||
1346 | } | ||
1347 | |||
1348 | /* NULL out remainder of buffer. */ | ||
1349 | while (p < q) | ||
1350 | *p++ = '\0'; | ||
1351 | } | ||
1352 | |||
1353 | /* | ||
1354 | * ======== dsp_char2_gpp_char ======== | ||
1355 | * Purpose: | ||
1356 | * Convert DSP char to host GPP char in a portable manner | ||
1357 | */ | ||
1358 | static char dsp_char2_gpp_char(char *word, s32 dsp_char_size) | ||
1359 | { | ||
1360 | char ch = '\0'; | ||
1361 | char *ch_src; | ||
1362 | s32 i; | ||
1363 | |||
1364 | for (ch_src = word, i = dsp_char_size; i > 0; i--) | ||
1365 | ch |= *ch_src++; | ||
1366 | |||
1367 | return ch; | ||
1368 | } | ||
1369 | |||
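Editor's note: taken together, compress_buf() and dsp_char2_gpp_char() turn a section read from the DSP, where each character occupies char_size bytes, into an ordinary byte string, expanding the \t, \n, \r and \0 escapes along the way. A compact userspace sketch of the same in-place compression, handling only the \0 escape and using made-up 2-byte-per-character data:

#include <stdio.h>

/* OR the bytes of one DSP "character" into a single host char,
 * the way dsp_char2_gpp_char() does.                            */
static char dsp_char(const char *word, int char_size)
{
	char ch = 0;
	int i;

	for (i = 0; i < char_size; i++)
		ch |= word[i];
	return ch;
}

int main(void)
{
	/* Hypothetical section data, 2 bytes per DSP char: 'a', then an
	 * escaped NUL written as '\' followed by '0', then 'b'.          */
	char buf[] = { 'a', 0, '\\', 0, '0', 0, 'b', 0 };
	int char_size = 2;
	char *p = buf, *q = buf;

	while (q < buf + sizeof(buf)) {
		char ch = dsp_char(q, char_size);

		if (ch == '\\') {               /* expand escape sequences */
			q += char_size;
			ch = dsp_char(q, char_size);
			if (ch == '0')
				ch = '\0';
			/* the driver also handles 't', 'n' and 'r' here */
		}
		*p++ = ch;
		q += char_size;
	}
	while (p < q)
		*p++ = '\0';                    /* NUL out the leftover tail */

	printf("compressed to \"%s\" and \"%s\"\n", buf, buf + 2);
	return 0;
}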
1370 | /* | ||
1371 | * ======== get_dep_lib_info ======== | ||
1372 | */ | ||
1373 | static int get_dep_lib_info(struct dcd_manager *hdcd_mgr, | ||
1374 | struct dsp_uuid *uuid_obj, | ||
1375 | u16 *num_libs, | ||
1376 | u16 *num_pers_libs, | ||
1377 | struct dsp_uuid *dep_lib_uuids, | ||
1378 | bool *prstnt_dep_libs, | ||
1379 | enum nldr_phase phase) | ||
1380 | { | ||
1381 | struct dcd_manager *dcd_mgr_obj = hdcd_mgr; | ||
1382 | char *psz_coff_buf = NULL; | ||
1383 | char *psz_cur; | ||
1384 | char *psz_file_name = NULL; | ||
1385 | struct cod_libraryobj *lib = NULL; | ||
1386 | u32 ul_addr = 0; /* Used by cod_get_section */ | ||
1387 | u32 ul_len = 0; /* Used by cod_get_section */ | ||
1388 | u32 dw_data_size = COD_MAXPATHLENGTH; | ||
1389 | char seps[] = ", "; | ||
1390 | char *token = NULL; | ||
1391 | bool get_uuids = (dep_lib_uuids != NULL); | ||
1392 | u16 dep_libs = 0; | ||
1393 | int status = 0; | ||
1394 | |||
1395 | /* Initialize the counts to 0 if we are only counting the number of | ||
1396 | * dependent libraries */ | ||
1397 | if (!get_uuids) { | ||
1398 | *num_libs = 0; | ||
1399 | *num_pers_libs = 0; | ||
1400 | } | ||
1401 | |||
1402 | /* Allocate a buffer for file name */ | ||
1403 | psz_file_name = kzalloc(dw_data_size, GFP_KERNEL); | ||
1404 | if (psz_file_name == NULL) { | ||
1405 | status = -ENOMEM; | ||
1406 | } else { | ||
1407 | /* Get the name of the library */ | ||
1408 | status = dcd_get_library_name(hdcd_mgr, uuid_obj, psz_file_name, | ||
1409 | &dw_data_size, phase, NULL); | ||
1410 | } | ||
1411 | |||
1412 | /* Open the library */ | ||
1413 | if (!status) { | ||
1414 | status = cod_open(dcd_mgr_obj->cod_mgr, psz_file_name, | ||
1415 | COD_NOLOAD, &lib); | ||
1416 | } | ||
1417 | if (!status) { | ||
1418 | /* Get dependent library section information. */ | ||
1419 | status = cod_get_section(lib, DEPLIBSECT, &ul_addr, &ul_len); | ||
1420 | |||
1421 | if (status) { | ||
1422 | /* Ok, no dependent libraries */ | ||
1423 | ul_len = 0; | ||
1424 | status = 0; | ||
1425 | } | ||
1426 | } | ||
1427 | |||
1428 | if (status || !(ul_len > 0)) | ||
1429 | goto func_cont; | ||
1430 | |||
1431 | /* Allocate zeroed buffer. */ | ||
1432 | psz_coff_buf = kzalloc(ul_len + 4, GFP_KERNEL); | ||
1433 | if (psz_coff_buf == NULL) | ||
1434 | status = -ENOMEM; | ||
1435 | |||
1436 | /* Read section contents. */ | ||
1437 | status = cod_read_section(lib, DEPLIBSECT, psz_coff_buf, ul_len); | ||
1438 | if (status) | ||
1439 | goto func_cont; | ||
1440 | |||
1441 | /* Compress and format DSP buffer to conform to PC format. */ | ||
1442 | compress_buf(psz_coff_buf, ul_len, DSPWORDSIZE); | ||
1443 | |||
1444 | /* Read from buffer */ | ||
1445 | psz_cur = psz_coff_buf; | ||
1446 | while ((token = strsep(&psz_cur, seps)) && *token != '\0') { | ||
1447 | if (get_uuids) { | ||
1448 | if (dep_libs >= *num_libs) { | ||
1449 | /* Gone beyond the limit */ | ||
1450 | break; | ||
1451 | } else { | ||
1452 | /* Retrieve UUID string. */ | ||
1453 | status = dcd_uuid_from_string(token, | ||
1454 | &(dep_lib_uuids | ||
1455 | [dep_libs])); | ||
1456 | if (status) | ||
1457 | break; | ||
1458 | |||
1459 | /* Is this library persistent? */ | ||
1460 | token = strsep(&psz_cur, seps); | ||
1461 | prstnt_dep_libs[dep_libs] = atoi(token); | ||
1462 | dep_libs++; | ||
1463 | } | ||
1464 | } else { | ||
1465 | /* Advance to the next token */ | ||
1466 | token = strsep(&psz_cur, seps); | ||
1467 | if (atoi(token)) | ||
1468 | (*num_pers_libs)++; | ||
1469 | |||
1470 | /* Just counting number of dependent libraries */ | ||
1471 | (*num_libs)++; | ||
1472 | } | ||
1473 | } | ||
1474 | func_cont: | ||
1475 | if (lib) | ||
1476 | cod_close(lib); | ||
1477 | |||
1478 | /* Free previously allocated dynamic buffers. */ | ||
1479 | kfree(psz_file_name); | ||
1480 | |||
1481 | kfree(psz_coff_buf); | ||
1482 | |||
1483 | return status; | ||
1484 | } | ||
diff --git a/drivers/staging/tidspbridge/rmgr/disp.c b/drivers/staging/tidspbridge/rmgr/disp.c deleted file mode 100644 index 4af51b75aeab..000000000000 --- a/drivers/staging/tidspbridge/rmgr/disp.c +++ /dev/null | |||
@@ -1,655 +0,0 @@ | |||
1 | /* | ||
2 | * disp.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Node Dispatcher interface. Communicates with Resource Manager Server | ||
7 | * (RMS) on DSP. Access to RMS is synchronized in NODE. | ||
8 | * | ||
9 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
10 | * | ||
11 | * This package is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
16 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
17 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | */ | ||
19 | #include <linux/types.h> | ||
20 | |||
21 | /* ----------------------------------- Host OS */ | ||
22 | #include <dspbridge/host_os.h> | ||
23 | |||
24 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
25 | #include <dspbridge/dbdefs.h> | ||
26 | |||
27 | /* ----------------------------------- OS Adaptation Layer */ | ||
28 | #include <dspbridge/sync.h> | ||
29 | |||
30 | /* ----------------------------------- Link Driver */ | ||
31 | #include <dspbridge/dspdefs.h> | ||
32 | |||
33 | /* ----------------------------------- Platform Manager */ | ||
34 | #include <dspbridge/dev.h> | ||
35 | #include <dspbridge/chnldefs.h> | ||
36 | |||
37 | /* ----------------------------------- Resource Manager */ | ||
38 | #include <dspbridge/nodedefs.h> | ||
39 | #include <dspbridge/nodepriv.h> | ||
40 | #include <dspbridge/rms_sh.h> | ||
41 | |||
42 | /* ----------------------------------- This */ | ||
43 | #include <dspbridge/disp.h> | ||
44 | |||
45 | /* Size of a reply from RMS */ | ||
46 | #define REPLYSIZE (3 * sizeof(rms_word)) | ||
47 | |||
48 | /* Reserved channel offsets for communication with RMS */ | ||
49 | #define CHNLTORMSOFFSET 0 | ||
50 | #define CHNLFROMRMSOFFSET 1 | ||
51 | |||
52 | #define CHNLIOREQS 1 | ||
53 | |||
54 | /* | ||
55 | * ======== disp_object ======== | ||
56 | */ | ||
57 | struct disp_object { | ||
58 | struct dev_object *dev_obj; /* Device for this processor */ | ||
59 | /* Function interface to Bridge driver */ | ||
60 | struct bridge_drv_interface *intf_fxns; | ||
61 | struct chnl_mgr *chnl_mgr; /* Channel manager */ | ||
62 | struct chnl_object *chnl_to_dsp; /* Chnl for commands to RMS */ | ||
63 | struct chnl_object *chnl_from_dsp; /* Chnl for replies from RMS */ | ||
64 | u8 *buf; /* Buffer for commands, replies */ | ||
65 | u32 bufsize; /* buf size in bytes */ | ||
66 | u32 bufsize_rms; /* buf size in RMS words */ | ||
67 | u32 char_size; /* Size of DSP character */ | ||
68 | u32 word_size; /* Size of DSP word */ | ||
69 | u32 data_mau_size; /* Size of DSP Data MAU */ | ||
70 | }; | ||
71 | |||
72 | static void delete_disp(struct disp_object *disp_obj); | ||
73 | static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset, | ||
74 | struct node_strmdef strm_def, u32 max, | ||
75 | u32 chars_in_rms_word); | ||
76 | static int send_message(struct disp_object *disp_obj, u32 timeout, | ||
77 | u32 ul_bytes, u32 *pdw_arg); | ||
78 | |||
79 | /* | ||
80 | * ======== disp_create ======== | ||
81 | * Create a NODE Dispatcher object. | ||
82 | */ | ||
83 | int disp_create(struct disp_object **dispatch_obj, | ||
84 | struct dev_object *hdev_obj, | ||
85 | const struct disp_attr *disp_attrs) | ||
86 | { | ||
87 | struct disp_object *disp_obj; | ||
88 | struct bridge_drv_interface *intf_fxns; | ||
89 | u32 ul_chnl_id; | ||
90 | struct chnl_attr chnl_attr_obj; | ||
91 | int status = 0; | ||
92 | u8 dev_type; | ||
93 | |||
94 | *dispatch_obj = NULL; | ||
95 | |||
96 | /* Allocate Node Dispatcher object */ | ||
97 | disp_obj = kzalloc(sizeof(struct disp_object), GFP_KERNEL); | ||
98 | if (disp_obj == NULL) | ||
99 | status = -ENOMEM; | ||
100 | else | ||
101 | disp_obj->dev_obj = hdev_obj; | ||
102 | |||
103 | /* Get Channel manager and Bridge function interface */ | ||
104 | if (!status) { | ||
105 | status = dev_get_chnl_mgr(hdev_obj, &(disp_obj->chnl_mgr)); | ||
106 | if (!status) { | ||
107 | (void)dev_get_intf_fxns(hdev_obj, &intf_fxns); | ||
108 | disp_obj->intf_fxns = intf_fxns; | ||
109 | } | ||
110 | } | ||
111 | |||
112 | /* Check the device type and decide if streams or messaging is used for | ||
113 | * RMS/EDS */ | ||
114 | if (status) | ||
115 | goto func_cont; | ||
116 | |||
117 | status = dev_get_dev_type(hdev_obj, &dev_type); | ||
118 | |||
119 | if (status) | ||
120 | goto func_cont; | ||
121 | |||
122 | if (dev_type != DSP_UNIT) { | ||
123 | status = -EPERM; | ||
124 | goto func_cont; | ||
125 | } | ||
126 | |||
127 | disp_obj->char_size = DSPWORDSIZE; | ||
128 | disp_obj->word_size = DSPWORDSIZE; | ||
129 | disp_obj->data_mau_size = DSPWORDSIZE; | ||
130 | /* Open channels for communicating with the RMS */ | ||
131 | chnl_attr_obj.uio_reqs = CHNLIOREQS; | ||
132 | chnl_attr_obj.event_obj = NULL; | ||
133 | ul_chnl_id = disp_attrs->chnl_offset + CHNLTORMSOFFSET; | ||
134 | status = (*intf_fxns->chnl_open) (&(disp_obj->chnl_to_dsp), | ||
135 | disp_obj->chnl_mgr, | ||
136 | CHNL_MODETODSP, ul_chnl_id, | ||
137 | &chnl_attr_obj); | ||
138 | |||
139 | if (!status) { | ||
140 | ul_chnl_id = disp_attrs->chnl_offset + CHNLFROMRMSOFFSET; | ||
141 | status = | ||
142 | (*intf_fxns->chnl_open) (&(disp_obj->chnl_from_dsp), | ||
143 | disp_obj->chnl_mgr, | ||
144 | CHNL_MODEFROMDSP, ul_chnl_id, | ||
145 | &chnl_attr_obj); | ||
146 | } | ||
147 | if (!status) { | ||
148 | /* Allocate buffer for commands, replies */ | ||
149 | disp_obj->bufsize = disp_attrs->chnl_buf_size; | ||
150 | disp_obj->bufsize_rms = RMS_COMMANDBUFSIZE; | ||
151 | disp_obj->buf = kzalloc(disp_obj->bufsize, GFP_KERNEL); | ||
152 | if (disp_obj->buf == NULL) | ||
153 | status = -ENOMEM; | ||
154 | } | ||
155 | func_cont: | ||
156 | if (!status) | ||
157 | *dispatch_obj = disp_obj; | ||
158 | else | ||
159 | delete_disp(disp_obj); | ||
160 | |||
161 | return status; | ||
162 | } | ||
163 | |||
164 | /* | ||
165 | * ======== disp_delete ======== | ||
166 | * Delete the NODE Dispatcher. | ||
167 | */ | ||
168 | void disp_delete(struct disp_object *disp_obj) | ||
169 | { | ||
170 | delete_disp(disp_obj); | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * ======== disp_node_change_priority ======== | ||
175 | * Change the priority of a node currently running on the target. | ||
176 | */ | ||
177 | int disp_node_change_priority(struct disp_object *disp_obj, | ||
178 | struct node_object *hnode, | ||
179 | u32 rms_fxn, nodeenv node_env, s32 prio) | ||
180 | { | ||
181 | u32 dw_arg; | ||
182 | struct rms_command *rms_cmd; | ||
183 | int status = 0; | ||
184 | |||
185 | /* Send message to RMS to change priority */ | ||
186 | rms_cmd = (struct rms_command *)(disp_obj->buf); | ||
187 | rms_cmd->fxn = (rms_word) (rms_fxn); | ||
188 | rms_cmd->arg1 = (rms_word) node_env; | ||
189 | rms_cmd->arg2 = prio; | ||
190 | status = send_message(disp_obj, node_get_timeout(hnode), | ||
191 | sizeof(struct rms_command), &dw_arg); | ||
192 | |||
193 | return status; | ||
194 | } | ||
195 | |||
196 | /* | ||
197 | * ======== disp_node_create ======== | ||
198 | * Create a node on the DSP by remotely calling the node's create function. | ||
199 | */ | ||
200 | int disp_node_create(struct disp_object *disp_obj, | ||
201 | struct node_object *hnode, u32 rms_fxn, | ||
202 | u32 ul_create_fxn, | ||
203 | const struct node_createargs *pargs, | ||
204 | nodeenv *node_env) | ||
205 | { | ||
206 | struct node_msgargs node_msg_args; | ||
207 | struct node_taskargs task_arg_obj; | ||
208 | struct rms_command *rms_cmd; | ||
209 | struct rms_msg_args *pmsg_args; | ||
210 | struct rms_more_task_args *more_task_args; | ||
211 | enum node_type node_type; | ||
212 | u32 dw_length; | ||
213 | rms_word *pdw_buf = NULL; | ||
214 | u32 ul_bytes; | ||
215 | u32 i; | ||
216 | u32 total; | ||
217 | u32 chars_in_rms_word; | ||
218 | s32 task_args_offset; | ||
219 | s32 sio_in_def_offset; | ||
220 | s32 sio_out_def_offset; | ||
221 | s32 sio_defs_offset; | ||
222 | s32 args_offset = -1; | ||
223 | s32 offset; | ||
224 | struct node_strmdef strm_def; | ||
225 | u32 max; | ||
226 | int status = 0; | ||
227 | struct dsp_nodeinfo node_info; | ||
228 | u8 dev_type; | ||
229 | |||
230 | status = dev_get_dev_type(disp_obj->dev_obj, &dev_type); | ||
231 | |||
232 | if (status) | ||
233 | goto func_end; | ||
234 | |||
235 | if (dev_type != DSP_UNIT) { | ||
236 | dev_dbg(bridge, "%s: unknown device type = 0x%x\n", | ||
237 | __func__, dev_type); | ||
238 | goto func_end; | ||
239 | } | ||
240 | node_type = node_get_type(hnode); | ||
241 | node_msg_args = pargs->asa.node_msg_args; | ||
242 | max = disp_obj->bufsize_rms; /* Max # of RMS words that can be sent */ | ||
243 | chars_in_rms_word = sizeof(rms_word) / disp_obj->char_size; | ||
244 | /* Number of RMS words needed to hold arg data */ | ||
245 | dw_length = | ||
246 | (node_msg_args.arg_length + chars_in_rms_word - | ||
247 | 1) / chars_in_rms_word; | ||
248 | /* Make sure msg args and command fit in buffer */ | ||
249 | total = sizeof(struct rms_command) / sizeof(rms_word) + | ||
250 | sizeof(struct rms_msg_args) | ||
251 | / sizeof(rms_word) - 1 + dw_length; | ||
252 | if (total >= max) { | ||
253 | status = -EPERM; | ||
254 | dev_dbg(bridge, "%s: Message args too large for buffer! size " | ||
255 | "= %d, max = %d\n", __func__, total, max); | ||
256 | } | ||
257 | /* | ||
258 | * Fill in buffer to send to RMS. | ||
259 | * The buffer will have the following format: | ||
260 | * | ||
261 | * RMS command: | ||
262 | * Address of RMS_CreateNode() | ||
263 | * Address of node's create function | ||
264 | * dummy argument | ||
265 | * node type | ||
266 | * | ||
267 | * Message Args: | ||
268 | * max number of messages | ||
269 | * segid for message buffer allocation | ||
270 | * notification type to use when message is received | ||
271 | * length of message arg data | ||
272 | * message args data | ||
273 | * | ||
274 | * Task Args (if task or socket node): | ||
275 | * priority | ||
276 | * stack size | ||
277 | * system stack size | ||
278 | * stack segment | ||
279 | * misc | ||
280 | * number of input streams | ||
281 | * pSTRMInDef[] - offsets of STRM definitions for input streams | ||
282 | * number of output streams | ||
283 | * pSTRMOutDef[] - offsets of STRM definitions for output | ||
284 | * streams | ||
285 | * STRMInDef[] - array of STRM definitions for input streams | ||
286 | * STRMOutDef[] - array of STRM definitions for output streams | ||
287 | * | ||
288 | * Socket Args (if DAIS socket node): | ||
289 | * | ||
290 | */ | ||
291 | if (!status) { | ||
292 | total = 0; /* Total number of words in buffer so far */ | ||
293 | pdw_buf = (rms_word *) disp_obj->buf; | ||
294 | rms_cmd = (struct rms_command *)pdw_buf; | ||
295 | rms_cmd->fxn = (rms_word) (rms_fxn); | ||
296 | rms_cmd->arg1 = (rms_word) (ul_create_fxn); | ||
297 | if (node_get_load_type(hnode) == NLDR_DYNAMICLOAD) { | ||
298 | /* Flush ICACHE on Load */ | ||
299 | rms_cmd->arg2 = 1; /* dummy argument */ | ||
300 | } else { | ||
301 | /* Do not flush ICACHE */ | ||
302 | rms_cmd->arg2 = 0; /* dummy argument */ | ||
303 | } | ||
304 | rms_cmd->data = node_get_type(hnode); | ||
305 | /* | ||
306 | * args_offset is the offset of the data field in struct | ||
307 | * rms_command structure. We need this to calculate stream | ||
308 | * definition offsets. | ||
309 | */ | ||
310 | args_offset = 3; | ||
311 | total += sizeof(struct rms_command) / sizeof(rms_word); | ||
312 | /* Message args */ | ||
313 | pmsg_args = (struct rms_msg_args *)(pdw_buf + total); | ||
314 | pmsg_args->max_msgs = node_msg_args.max_msgs; | ||
315 | pmsg_args->segid = node_msg_args.seg_id; | ||
316 | pmsg_args->notify_type = node_msg_args.notify_type; | ||
317 | pmsg_args->arg_length = node_msg_args.arg_length; | ||
318 | total += sizeof(struct rms_msg_args) / sizeof(rms_word) - 1; | ||
319 | memcpy(pdw_buf + total, node_msg_args.pdata, | ||
320 | node_msg_args.arg_length); | ||
321 | total += dw_length; | ||
322 | } | ||
323 | if (status) | ||
324 | goto func_end; | ||
325 | |||
326 | /* If node is a task node, copy task create arguments into buffer */ | ||
327 | if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) { | ||
328 | task_arg_obj = pargs->asa.task_arg_obj; | ||
329 | task_args_offset = total; | ||
330 | total += sizeof(struct rms_more_task_args) / sizeof(rms_word) + | ||
331 | 1 + task_arg_obj.num_inputs + task_arg_obj.num_outputs; | ||
332 | /* Copy task arguments */ | ||
333 | if (total < max) { | ||
334 | total = task_args_offset; | ||
335 | more_task_args = (struct rms_more_task_args *)(pdw_buf + | ||
336 | total); | ||
337 | /* | ||
338 | * Get some important info about the node. Note that we | ||
339 | * don't just reach into the hnode struct because | ||
340 | * that would break the node object's abstraction. | ||
341 | */ | ||
342 | get_node_info(hnode, &node_info); | ||
343 | more_task_args->priority = node_info.execution_priority; | ||
344 | more_task_args->stack_size = task_arg_obj.stack_size; | ||
345 | more_task_args->sysstack_size = | ||
346 | task_arg_obj.sys_stack_size; | ||
347 | more_task_args->stack_seg = task_arg_obj.stack_seg; | ||
348 | more_task_args->heap_addr = task_arg_obj.dsp_heap_addr; | ||
349 | more_task_args->heap_size = task_arg_obj.heap_size; | ||
350 | more_task_args->misc = task_arg_obj.dais_arg; | ||
351 | more_task_args->num_input_streams = | ||
352 | task_arg_obj.num_inputs; | ||
353 | total += | ||
354 | sizeof(struct rms_more_task_args) / | ||
355 | sizeof(rms_word); | ||
356 | dev_dbg(bridge, "%s: dsp_heap_addr %x, heap_size %x\n", | ||
357 | __func__, task_arg_obj.dsp_heap_addr, | ||
358 | task_arg_obj.heap_size); | ||
359 | /* Keep track of pSIOInDef[] and pSIOOutDef[] | ||
360 | * positions in the buffer, since this needs to be | ||
361 | * filled in later. */ | ||
362 | sio_in_def_offset = total; | ||
363 | total += task_arg_obj.num_inputs; | ||
364 | pdw_buf[total++] = task_arg_obj.num_outputs; | ||
365 | sio_out_def_offset = total; | ||
366 | total += task_arg_obj.num_outputs; | ||
367 | sio_defs_offset = total; | ||
368 | /* Fill SIO defs and offsets */ | ||
369 | offset = sio_defs_offset; | ||
370 | for (i = 0; i < task_arg_obj.num_inputs; i++) { | ||
371 | if (status) | ||
372 | break; | ||
373 | |||
374 | pdw_buf[sio_in_def_offset + i] = | ||
375 | (offset - args_offset) | ||
376 | * (sizeof(rms_word) / DSPWORDSIZE); | ||
377 | strm_def = task_arg_obj.strm_in_def[i]; | ||
378 | status = | ||
379 | fill_stream_def(pdw_buf, &total, offset, | ||
380 | strm_def, max, | ||
381 | chars_in_rms_word); | ||
382 | offset = total; | ||
383 | } | ||
384 | for (i = 0; (i < task_arg_obj.num_outputs) && | ||
385 | (!status); i++) { | ||
386 | pdw_buf[sio_out_def_offset + i] = | ||
387 | (offset - args_offset) | ||
388 | * (sizeof(rms_word) / DSPWORDSIZE); | ||
389 | strm_def = task_arg_obj.strm_out_def[i]; | ||
390 | status = | ||
391 | fill_stream_def(pdw_buf, &total, offset, | ||
392 | strm_def, max, | ||
393 | chars_in_rms_word); | ||
394 | offset = total; | ||
395 | } | ||
396 | } else { | ||
397 | /* Args won't fit */ | ||
398 | status = -EPERM; | ||
399 | } | ||
400 | } | ||
401 | if (!status) { | ||
402 | ul_bytes = total * sizeof(rms_word); | ||
403 | status = send_message(disp_obj, node_get_timeout(hnode), | ||
404 | ul_bytes, node_env); | ||
405 | } | ||
406 | func_end: | ||
407 | return status; | ||
408 | } | ||
409 | |||
410 | /* | ||
411 | * ======== disp_node_delete ======== | ||
412 | * purpose: | ||
413 | * Delete a node on the DSP by remotely calling the node's delete function. | ||
414 | * | ||
415 | */ | ||
416 | int disp_node_delete(struct disp_object *disp_obj, | ||
417 | struct node_object *hnode, u32 rms_fxn, | ||
418 | u32 ul_delete_fxn, nodeenv node_env) | ||
419 | { | ||
420 | u32 dw_arg; | ||
421 | struct rms_command *rms_cmd; | ||
422 | int status = 0; | ||
423 | u8 dev_type; | ||
424 | |||
425 | status = dev_get_dev_type(disp_obj->dev_obj, &dev_type); | ||
426 | |||
427 | if (!status) { | ||
428 | |||
429 | if (dev_type == DSP_UNIT) { | ||
430 | |||
431 | /* | ||
432 | * Fill in buffer to send to RMS | ||
433 | */ | ||
434 | rms_cmd = (struct rms_command *)disp_obj->buf; | ||
435 | rms_cmd->fxn = (rms_word) (rms_fxn); | ||
436 | rms_cmd->arg1 = (rms_word) node_env; | ||
437 | rms_cmd->arg2 = (rms_word) (ul_delete_fxn); | ||
438 | rms_cmd->data = node_get_type(hnode); | ||
439 | |||
440 | status = send_message(disp_obj, node_get_timeout(hnode), | ||
441 | sizeof(struct rms_command), | ||
442 | &dw_arg); | ||
443 | } | ||
444 | } | ||
445 | return status; | ||
446 | } | ||
447 | |||
448 | /* | ||
449 | * ======== disp_node_run ======== | ||
450 | * purpose: | ||
451 | * Start execution of a node's execute phase, or resume execution of a node | ||
452 | * that has been suspended (via DISP_NodePause()) on the DSP. | ||
453 | */ | ||
454 | int disp_node_run(struct disp_object *disp_obj, | ||
455 | struct node_object *hnode, u32 rms_fxn, | ||
456 | u32 ul_execute_fxn, nodeenv node_env) | ||
457 | { | ||
458 | u32 dw_arg; | ||
459 | struct rms_command *rms_cmd; | ||
460 | int status = 0; | ||
461 | u8 dev_type; | ||
462 | |||
463 | status = dev_get_dev_type(disp_obj->dev_obj, &dev_type); | ||
464 | |||
465 | if (!status) { | ||
466 | |||
467 | if (dev_type == DSP_UNIT) { | ||
468 | |||
469 | /* | ||
470 | * Fill in buffer to send to RMS. | ||
471 | */ | ||
472 | rms_cmd = (struct rms_command *)disp_obj->buf; | ||
473 | rms_cmd->fxn = (rms_word) (rms_fxn); | ||
474 | rms_cmd->arg1 = (rms_word) node_env; | ||
475 | rms_cmd->arg2 = (rms_word) (ul_execute_fxn); | ||
476 | rms_cmd->data = node_get_type(hnode); | ||
477 | |||
478 | status = send_message(disp_obj, node_get_timeout(hnode), | ||
479 | sizeof(struct rms_command), | ||
480 | &dw_arg); | ||
481 | } | ||
482 | } | ||
483 | |||
484 | return status; | ||
485 | } | ||
486 | |||
487 | /* | ||
488 | * ======== delete_disp ======== | ||
489 | * purpose: | ||
490 | * Frees the resources allocated for the dispatcher. | ||
491 | */ | ||
492 | static void delete_disp(struct disp_object *disp_obj) | ||
493 | { | ||
494 | int status = 0; | ||
495 | struct bridge_drv_interface *intf_fxns; | ||
496 | |||
497 | if (disp_obj) { | ||
498 | intf_fxns = disp_obj->intf_fxns; | ||
499 | |||
500 | /* Free Node Dispatcher resources */ | ||
501 | if (disp_obj->chnl_from_dsp) { | ||
502 | /* Channel close can fail only if the channel handle | ||
503 | * is invalid. */ | ||
504 | status = (*intf_fxns->chnl_close) | ||
505 | (disp_obj->chnl_from_dsp); | ||
506 | if (status) { | ||
507 | dev_dbg(bridge, "%s: Failed to close channel " | ||
508 | "from RMS: 0x%x\n", __func__, status); | ||
509 | } | ||
510 | } | ||
511 | if (disp_obj->chnl_to_dsp) { | ||
512 | status = | ||
513 | (*intf_fxns->chnl_close) (disp_obj-> | ||
514 | chnl_to_dsp); | ||
515 | if (status) { | ||
516 | dev_dbg(bridge, "%s: Failed to close channel to" | ||
517 | " RMS: 0x%x\n", __func__, status); | ||
518 | } | ||
519 | } | ||
520 | kfree(disp_obj->buf); | ||
521 | |||
522 | kfree(disp_obj); | ||
523 | } | ||
524 | } | ||
525 | |||
526 | /* | ||
527 | * ======== fill_stream_def ======== | ||
528 | * purpose: | ||
529 | * Fills stream definitions. | ||
530 | */ | ||
531 | static int fill_stream_def(rms_word *pdw_buf, u32 *ptotal, u32 offset, | ||
532 | struct node_strmdef strm_def, u32 max, | ||
533 | u32 chars_in_rms_word) | ||
534 | { | ||
535 | struct rms_strm_def *strm_def_obj; | ||
536 | u32 total = *ptotal; | ||
537 | u32 name_len; | ||
538 | u32 dw_length; | ||
539 | int status = 0; | ||
540 | |||
541 | if (total + sizeof(struct rms_strm_def) / sizeof(rms_word) >= max) { | ||
542 | status = -EPERM; | ||
543 | } else { | ||
544 | strm_def_obj = (struct rms_strm_def *)(pdw_buf + total); | ||
545 | strm_def_obj->bufsize = strm_def.buf_size; | ||
546 | strm_def_obj->nbufs = strm_def.num_bufs; | ||
547 | strm_def_obj->segid = strm_def.seg_id; | ||
548 | strm_def_obj->align = strm_def.buf_alignment; | ||
549 | strm_def_obj->timeout = strm_def.timeout; | ||
550 | } | ||
551 | |||
552 | if (!status) { | ||
553 | /* | ||
554 | * Since we haven't added the device name yet, subtract | ||
555 | * 1 from total. | ||
556 | */ | ||
557 | total += sizeof(struct rms_strm_def) / sizeof(rms_word) - 1; | ||
558 | dw_length = strlen(strm_def.sz_device) + 1; | ||
559 | |||
560 | /* Number of RMS_WORDS needed to hold device name */ | ||
561 | name_len = | ||
562 | (dw_length + chars_in_rms_word - 1) / chars_in_rms_word; | ||
563 | |||
564 | if (total + name_len >= max) { | ||
565 | status = -EPERM; | ||
566 | } else { | ||
567 | /* | ||
568 | * Zero out last word, since the device name may not | ||
569 | * extend to completely fill this word. | ||
570 | */ | ||
571 | pdw_buf[total + name_len - 1] = 0; | ||
572 | /* TODO: use services */ | ||
573 | memcpy(pdw_buf + total, strm_def.sz_device, dw_length); | ||
574 | total += name_len; | ||
575 | *ptotal = total; | ||
576 | } | ||
577 | } | ||
578 | |||
579 | return status; | ||
580 | } | ||
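For reference, the name packing above works out as follows in a quick worked example; the stream name and the assumption of a 32-bit rms_word (so chars_in_rms_word == 4) are illustrative, not values taken from the driver:

/* Hypothetical values for illustration only. */
const char *sz_device = "/dbpipe0";	/* strlen() == 8 */
u32 dw_length = 8 + 1;			/* include the terminating NUL */
u32 chars_in_rms_word = 4;		/* assuming a 32-bit rms_word */
u32 name_len = (dw_length + chars_in_rms_word - 1) / chars_in_rms_word;
/* name_len == 3: nine bytes occupy three rms_words; the last word is
 * zeroed first so its unused trailing bytes are deterministic. */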
581 | |||
582 | /* | ||
583 | * ======== send_message ====== | ||
584 | * Send command message to RMS, get reply from RMS. | ||
585 | */ | ||
586 | static int send_message(struct disp_object *disp_obj, u32 timeout, | ||
587 | u32 ul_bytes, u32 *pdw_arg) | ||
588 | { | ||
589 | struct bridge_drv_interface *intf_fxns; | ||
590 | struct chnl_object *chnl_obj; | ||
591 | u32 dw_arg = 0; | ||
592 | u8 *pbuf; | ||
593 | struct chnl_ioc chnl_ioc_obj; | ||
594 | int status = 0; | ||
595 | |||
596 | *pdw_arg = (u32) NULL; | ||
597 | intf_fxns = disp_obj->intf_fxns; | ||
598 | chnl_obj = disp_obj->chnl_to_dsp; | ||
599 | pbuf = disp_obj->buf; | ||
600 | |||
601 | /* Send the command */ | ||
602 | status = (*intf_fxns->chnl_add_io_req) (chnl_obj, pbuf, ul_bytes, 0, | ||
603 | 0L, dw_arg); | ||
604 | if (status) | ||
605 | goto func_end; | ||
606 | |||
607 | status = | ||
608 | (*intf_fxns->chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj); | ||
609 | if (!status) { | ||
610 | if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) { | ||
611 | if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) | ||
612 | status = -ETIME; | ||
613 | else | ||
614 | status = -EPERM; | ||
615 | } | ||
616 | } | ||
617 | /* Get the reply */ | ||
618 | if (status) | ||
619 | goto func_end; | ||
620 | |||
621 | chnl_obj = disp_obj->chnl_from_dsp; | ||
622 | ul_bytes = REPLYSIZE; | ||
623 | status = (*intf_fxns->chnl_add_io_req) (chnl_obj, pbuf, ul_bytes, | ||
624 | 0, 0L, dw_arg); | ||
625 | if (status) | ||
626 | goto func_end; | ||
627 | |||
628 | status = | ||
629 | (*intf_fxns->chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj); | ||
630 | if (!status) { | ||
631 | if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) { | ||
632 | status = -ETIME; | ||
633 | } else if (chnl_ioc_obj.byte_size < ul_bytes) { | ||
634 | /* Did not get all of the reply from the RMS */ | ||
635 | status = -EPERM; | ||
636 | } else { | ||
637 | if (CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) { | ||
638 | if (*((int *)chnl_ioc_obj.buf) < 0) { | ||
639 | /* Translate DSP's to kernel error */ | ||
640 | status = -EREMOTEIO; | ||
641 | dev_dbg(bridge, "%s: DSP-side failed:" | ||
642 | " DSP errcode = 0x%x, Kernel " | ||
643 | "errcode = %d\n", __func__, | ||
644 | *(int *)pbuf, status); | ||
645 | } | ||
646 | *pdw_arg = | ||
647 | (((rms_word *) (chnl_ioc_obj.buf))[1]); | ||
648 | } else { | ||
649 | status = -EPERM; | ||
650 | } | ||
651 | } | ||
652 | } | ||
653 | func_end: | ||
654 | return status; | ||
655 | } | ||
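As a reading aid, send_message() above implies the following reply layout from the RMS; the struct name is hypothetical and only the ordering (a status word followed by an argument word) is established by the code:

/* Sketch of the reply decoded from chnl_ioc_obj.buf; illustrative only. */
struct rms_reply_sketch {
	int status;	/* word 0: a negative DSP status is mapped to -EREMOTEIO */
	rms_word arg;	/* word 1: handed back to the caller via *pdw_arg */
};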
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c deleted file mode 100644 index 757ae20b38ee..000000000000 --- a/drivers/staging/tidspbridge/rmgr/drv.c +++ /dev/null | |||
@@ -1,816 +0,0 @@ | |||
1 | /* | ||
2 | * drv.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge resource allocation module. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/list.h> | ||
20 | |||
21 | /* ----------------------------------- Host OS */ | ||
22 | #include <dspbridge/host_os.h> | ||
23 | |||
24 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
25 | #include <dspbridge/dbdefs.h> | ||
26 | |||
27 | /* ----------------------------------- This */ | ||
28 | #include <dspbridge/drv.h> | ||
29 | #include <dspbridge/dev.h> | ||
30 | |||
31 | #include <dspbridge/node.h> | ||
32 | #include <dspbridge/proc.h> | ||
33 | #include <dspbridge/strm.h> | ||
34 | #include <dspbridge/nodepriv.h> | ||
35 | #include <dspbridge/dspchnl.h> | ||
36 | #include <dspbridge/resourcecleanup.h> | ||
37 | |||
38 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | ||
39 | struct drv_object { | ||
40 | struct list_head dev_list; | ||
41 | struct list_head dev_node_string; | ||
42 | }; | ||
43 | |||
44 | /* | ||
45 | * This is the Device Extension. Named with the Prefix | ||
46 | * DRV_ since it lives in this module. | ||
47 | */ | ||
48 | struct drv_ext { | ||
49 | struct list_head link; | ||
50 | char sz_string[MAXREGPATHLENGTH]; | ||
51 | }; | ||
52 | |||
53 | /* ----------------------------------- Globals */ | ||
54 | static bool ext_phys_mem_pool_enabled; | ||
55 | struct ext_phys_mem_pool { | ||
56 | u32 phys_mem_base; | ||
57 | u32 phys_mem_size; | ||
58 | u32 virt_mem_base; | ||
59 | u32 next_phys_alloc_ptr; | ||
60 | }; | ||
61 | static struct ext_phys_mem_pool ext_mem_pool; | ||
62 | |||
63 | /* ----------------------------------- Function Prototypes */ | ||
64 | static int request_bridge_resources(struct cfg_hostres *res); | ||
65 | |||
66 | |||
67 | /* GPP PROCESS CLEANUP CODE */ | ||
68 | |||
69 | static int drv_proc_free_node_res(int id, void *p, void *data); | ||
70 | |||
71 | /* Allocate and add a node resource element | ||
72 | * This function is called from node_allocate(). */ | ||
73 | int drv_insert_node_res_element(void *hnode, void *node_resource, | ||
74 | void *process_ctxt) | ||
75 | { | ||
76 | struct node_res_object **node_res_obj = | ||
77 | (struct node_res_object **)node_resource; | ||
78 | struct process_context *ctxt = (struct process_context *)process_ctxt; | ||
79 | int retval; | ||
80 | |||
81 | *node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL); | ||
82 | if (!*node_res_obj) | ||
83 | return -ENOMEM; | ||
84 | |||
85 | (*node_res_obj)->node = hnode; | ||
86 | retval = idr_alloc(ctxt->node_id, *node_res_obj, 0, 0, GFP_KERNEL); | ||
87 | if (retval >= 0) { | ||
88 | (*node_res_obj)->id = retval; | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | kfree(*node_res_obj); | ||
93 | |||
94 | if (retval == -ENOSPC) { | ||
95 | pr_err("%s: FAILED, IDR is FULL\n", __func__); | ||
96 | return -EFAULT; | ||
97 | } else { | ||
98 | pr_err("%s: OUT OF MEMORY\n", __func__); | ||
99 | return -ENOMEM; | ||
100 | } | ||
101 | } | ||
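A minimal usage sketch for the helper above, paired with the status update further below; the variable names are hypothetical and not copied from node.c:

/* Hypothetical caller, assuming hnode and pr_ctxt already exist. */
struct node_res_object *node_res;
int err = drv_insert_node_res_element(hnode, &node_res, pr_ctxt);
if (!err)
	drv_proc_node_update_status(node_res, true);	/* mark node as allocated */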
102 | |||
103 | /* Release a node's resources and its context; | ||
104 | * this performs the actual node de-allocation. */ | ||
105 | static int drv_proc_free_node_res(int id, void *p, void *data) | ||
106 | { | ||
107 | struct process_context *ctxt = data; | ||
108 | int status; | ||
109 | struct node_res_object *node_res_obj = p; | ||
110 | u32 node_state; | ||
111 | |||
112 | if (node_res_obj->node_allocated) { | ||
113 | node_state = node_get_state(node_res_obj->node); | ||
114 | if (node_state <= NODE_DELETING) { | ||
115 | if ((node_state == NODE_RUNNING) || | ||
116 | (node_state == NODE_PAUSED) || | ||
117 | (node_state == NODE_TERMINATING)) | ||
118 | node_terminate | ||
119 | (node_res_obj->node, &status); | ||
120 | |||
121 | node_delete(node_res_obj, ctxt); | ||
122 | } | ||
123 | } | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | /* Release all Mapped and Reserved DMM resources */ | ||
129 | int drv_remove_all_dmm_res_elements(void *process_ctxt) | ||
130 | { | ||
131 | struct process_context *ctxt = (struct process_context *)process_ctxt; | ||
132 | int status = 0; | ||
133 | struct dmm_map_object *temp_map, *map_obj; | ||
134 | struct dmm_rsv_object *temp_rsv, *rsv_obj; | ||
135 | |||
136 | /* Free DMM mapped memory resources */ | ||
137 | list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { | ||
138 | status = proc_un_map(ctxt->processor, | ||
139 | (void *)map_obj->dsp_addr, ctxt); | ||
140 | if (status) | ||
141 | pr_err("%s: proc_un_map failed!" | ||
142 | " status = 0x%xn", __func__, status); | ||
143 | } | ||
144 | |||
145 | /* Free DMM reserved memory resources */ | ||
146 | list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) { | ||
147 | status = proc_un_reserve_memory(ctxt->processor, (void *) | ||
148 | rsv_obj->dsp_reserved_addr, | ||
149 | ctxt); | ||
150 | if (status) | ||
151 | pr_err("%s: proc_un_reserve_memory failed!" | ||
152 | " status = 0x%xn", __func__, status); | ||
153 | } | ||
154 | return status; | ||
155 | } | ||
156 | |||
157 | /* Update Node allocation status */ | ||
158 | void drv_proc_node_update_status(void *node_resource, s32 status) | ||
159 | { | ||
160 | struct node_res_object *node_res_obj = | ||
161 | (struct node_res_object *)node_resource; | ||
162 | node_res_obj->node_allocated = status; | ||
163 | } | ||
164 | |||
165 | /* Update Node Heap status */ | ||
166 | void drv_proc_node_update_heap_status(void *node_resource, s32 status) | ||
167 | { | ||
168 | struct node_res_object *node_res_obj = | ||
169 | (struct node_res_object *)node_resource; | ||
170 | node_res_obj->heap_allocated = status; | ||
171 | } | ||
172 | |||
173 | /* Release all node resources and their contexts. | ||
174 | * This is called from bridge_release(). | ||
175 | */ | ||
176 | int drv_remove_all_node_res_elements(void *process_ctxt) | ||
177 | { | ||
178 | struct process_context *ctxt = process_ctxt; | ||
179 | |||
180 | idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt); | ||
181 | idr_destroy(ctxt->node_id); | ||
182 | |||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | /* Allocate the STRM resource element | ||
187 | * This is called after the actual resource is allocated | ||
188 | */ | ||
189 | int drv_proc_insert_strm_res_element(void *stream_obj, | ||
190 | void *strm_res, void *process_ctxt) | ||
191 | { | ||
192 | struct strm_res_object **pstrm_res = | ||
193 | (struct strm_res_object **)strm_res; | ||
194 | struct process_context *ctxt = (struct process_context *)process_ctxt; | ||
195 | int retval; | ||
196 | |||
197 | *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL); | ||
198 | if (*pstrm_res == NULL) | ||
199 | return -ENOMEM; | ||
200 | |||
201 | (*pstrm_res)->stream = stream_obj; | ||
202 | retval = idr_alloc(ctxt->stream_id, *pstrm_res, 0, 0, GFP_KERNEL); | ||
203 | if (retval >= 0) { | ||
204 | (*pstrm_res)->id = retval; | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | if (retval == -ENOSPC) { | ||
209 | pr_err("%s: FAILED, IDR is FULL\n", __func__); | ||
210 | return -EPERM; | ||
211 | } else { | ||
212 | pr_err("%s: OUT OF MEMORY\n", __func__); | ||
213 | return -ENOMEM; | ||
214 | } | ||
215 | } | ||
216 | |||
217 | static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt) | ||
218 | { | ||
219 | struct process_context *ctxt = process_ctxt; | ||
220 | struct strm_res_object *strm_res = p; | ||
221 | struct stream_info strm_info; | ||
222 | struct dsp_streaminfo user; | ||
223 | u8 **ap_buffer = NULL; | ||
224 | u8 *buf_ptr; | ||
225 | u32 ul_bytes; | ||
226 | u32 dw_arg; | ||
227 | s32 ul_buf_size; | ||
228 | |||
229 | if (strm_res->num_bufs) { | ||
230 | ap_buffer = kmalloc((strm_res->num_bufs * | ||
231 | sizeof(u8 *)), GFP_KERNEL); | ||
232 | if (ap_buffer) { | ||
233 | strm_free_buffer(strm_res, | ||
234 | ap_buffer, | ||
235 | strm_res->num_bufs, | ||
236 | ctxt); | ||
237 | kfree(ap_buffer); | ||
238 | } | ||
239 | } | ||
240 | strm_info.user_strm = &user; | ||
241 | user.number_bufs_in_stream = 0; | ||
242 | strm_get_info(strm_res->stream, &strm_info, sizeof(strm_info)); | ||
243 | while (user.number_bufs_in_stream--) | ||
244 | strm_reclaim(strm_res->stream, &buf_ptr, &ul_bytes, | ||
245 | (u32 *) &ul_buf_size, &dw_arg); | ||
246 | strm_close(strm_res, ctxt); | ||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | /* Release all stream resources and their contexts. | ||
251 | * This is called from bridge_release(). | ||
252 | */ | ||
253 | int drv_remove_all_strm_res_elements(void *process_ctxt) | ||
254 | { | ||
255 | struct process_context *ctxt = process_ctxt; | ||
256 | |||
257 | idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt); | ||
258 | idr_destroy(ctxt->stream_id); | ||
259 | |||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | /* Updating the stream resource element */ | ||
264 | int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources) | ||
265 | { | ||
266 | int status = 0; | ||
267 | struct strm_res_object **strm_res = | ||
268 | (struct strm_res_object **)strm_resources; | ||
269 | |||
270 | (*strm_res)->num_bufs = num_bufs; | ||
271 | return status; | ||
272 | } | ||
273 | |||
274 | /* GPP PROCESS CLEANUP CODE END */ | ||
275 | |||
276 | /* | ||
277 | * ======== drv_create ======== | ||
278 | * Purpose: | ||
279 | * DRV Object gets created only once during Driver Loading. | ||
280 | */ | ||
281 | int drv_create(struct drv_object **drv_obj) | ||
282 | { | ||
283 | int status = 0; | ||
284 | struct drv_object *pdrv_object = NULL; | ||
285 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
286 | |||
287 | pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL); | ||
288 | if (pdrv_object) { | ||
289 | /* Create and Initialize List of device objects */ | ||
290 | INIT_LIST_HEAD(&pdrv_object->dev_list); | ||
291 | INIT_LIST_HEAD(&pdrv_object->dev_node_string); | ||
292 | } else { | ||
293 | status = -ENOMEM; | ||
294 | } | ||
295 | /* Store the DRV Object in the driver data */ | ||
296 | if (!status) { | ||
297 | if (drv_datap) { | ||
298 | drv_datap->drv_object = (void *)pdrv_object; | ||
299 | } else { | ||
300 | status = -EPERM; | ||
301 | pr_err("%s: Failed to store DRV object\n", __func__); | ||
302 | } | ||
303 | } | ||
304 | |||
305 | if (!status) { | ||
306 | *drv_obj = pdrv_object; | ||
307 | } else { | ||
308 | /* Free the DRV Object */ | ||
309 | kfree(pdrv_object); | ||
310 | } | ||
311 | |||
312 | return status; | ||
313 | } | ||
314 | |||
315 | /* | ||
316 | * ======== drv_destroy ======== | ||
317 | * purpose: | ||
318 | * Invoked during bridge de-initialization | ||
319 | */ | ||
320 | int drv_destroy(struct drv_object *driver_obj) | ||
321 | { | ||
322 | int status = 0; | ||
323 | struct drv_object *pdrv_object = (struct drv_object *)driver_obj; | ||
324 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
325 | |||
326 | kfree(pdrv_object); | ||
327 | /* Update the DRV Object in the driver data */ | ||
328 | if (drv_datap) { | ||
329 | drv_datap->drv_object = NULL; | ||
330 | } else { | ||
331 | status = -EPERM; | ||
332 | pr_err("%s: Failed to store DRV object\n", __func__); | ||
333 | } | ||
334 | |||
335 | return status; | ||
336 | } | ||
337 | |||
338 | /* | ||
339 | * ======== drv_get_dev_object ======== | ||
340 | * Purpose: | ||
341 | * Given an index, returns a handle to a DevObject from the list. | ||
342 | */ | ||
343 | int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj, | ||
344 | struct dev_object **device_obj) | ||
345 | { | ||
346 | int status = 0; | ||
347 | struct dev_object *dev_obj; | ||
348 | u32 i; | ||
349 | |||
350 | dev_obj = (struct dev_object *)drv_get_first_dev_object(); | ||
351 | for (i = 0; i < index; i++) { | ||
352 | dev_obj = | ||
353 | (struct dev_object *)drv_get_next_dev_object((u32) dev_obj); | ||
354 | } | ||
355 | if (dev_obj) { | ||
356 | *device_obj = (struct dev_object *)dev_obj; | ||
357 | } else { | ||
358 | *device_obj = NULL; | ||
359 | status = -EPERM; | ||
360 | } | ||
361 | |||
362 | return status; | ||
363 | } | ||
364 | |||
365 | /* | ||
366 | * ======== drv_get_first_dev_object ======== | ||
367 | * Purpose: | ||
368 | * Retrieve the first Device Object handle from an internal linked list of | ||
369 | * DEV_OBJECTs maintained by DRV. | ||
370 | */ | ||
371 | u32 drv_get_first_dev_object(void) | ||
372 | { | ||
373 | u32 dw_dev_object = 0; | ||
374 | struct drv_object *pdrv_obj; | ||
375 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
376 | |||
377 | if (drv_datap && drv_datap->drv_object) { | ||
378 | pdrv_obj = drv_datap->drv_object; | ||
379 | if (!list_empty(&pdrv_obj->dev_list)) | ||
380 | dw_dev_object = (u32) pdrv_obj->dev_list.next; | ||
381 | } else { | ||
382 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | ||
383 | } | ||
384 | |||
385 | return dw_dev_object; | ||
386 | } | ||
387 | |||
388 | /* | ||
389 | * ======== drv_get_first_dev_extension ======== | ||
390 | * Purpose: | ||
391 | * Retrieve the first Device Extension from an internal linked list of | ||
392 | * pointers to dev_node strings maintained by DRV. | ||
393 | */ | ||
394 | u32 drv_get_first_dev_extension(void) | ||
395 | { | ||
396 | u32 dw_dev_extension = 0; | ||
397 | struct drv_object *pdrv_obj; | ||
398 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
399 | |||
400 | if (drv_datap && drv_datap->drv_object) { | ||
401 | pdrv_obj = drv_datap->drv_object; | ||
402 | if (!list_empty(&pdrv_obj->dev_node_string)) { | ||
403 | dw_dev_extension = | ||
404 | (u32) pdrv_obj->dev_node_string.next; | ||
405 | } | ||
406 | } else { | ||
407 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | ||
408 | } | ||
409 | |||
410 | return dw_dev_extension; | ||
411 | } | ||
412 | |||
413 | /* | ||
414 | * ======== drv_get_next_dev_object ======== | ||
415 | * Purpose: | ||
416 | * Retrieve the next Device Object handle from an internal linked list of | ||
417 | * DEV_OBJECTs maintained by DRV, after having previously called | ||
418 | * drv_get_first_dev_object() and zero or more drv_get_next_dev_object() calls. | ||
419 | */ | ||
420 | u32 drv_get_next_dev_object(u32 hdev_obj) | ||
421 | { | ||
422 | u32 dw_next_dev_object = 0; | ||
423 | struct drv_object *pdrv_obj; | ||
424 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
425 | struct list_head *curr; | ||
426 | |||
427 | if (drv_datap && drv_datap->drv_object) { | ||
428 | pdrv_obj = drv_datap->drv_object; | ||
429 | if (!list_empty(&pdrv_obj->dev_list)) { | ||
430 | curr = (struct list_head *)hdev_obj; | ||
431 | if (list_is_last(curr, &pdrv_obj->dev_list)) | ||
432 | return 0; | ||
433 | dw_next_dev_object = (u32) curr->next; | ||
434 | } | ||
435 | } else { | ||
436 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | ||
437 | } | ||
438 | |||
439 | return dw_next_dev_object; | ||
440 | } | ||
441 | |||
442 | /* | ||
443 | * ======== drv_get_next_dev_extension ======== | ||
444 | * Purpose: | ||
445 | * Retrieve the next Device Extension from an internal linked list of | ||
446 | * pointers to dev_node strings maintained by DRV, after having previously | ||
447 | * called drv_get_first_dev_extension() and zero or more | ||
448 | * drv_get_next_dev_extension(). | ||
449 | */ | ||
450 | u32 drv_get_next_dev_extension(u32 dev_extension) | ||
451 | { | ||
452 | u32 dw_dev_extension = 0; | ||
453 | struct drv_object *pdrv_obj; | ||
454 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
455 | struct list_head *curr; | ||
456 | |||
457 | if (drv_datap && drv_datap->drv_object) { | ||
458 | pdrv_obj = drv_datap->drv_object; | ||
459 | if (!list_empty(&pdrv_obj->dev_node_string)) { | ||
460 | curr = (struct list_head *)dev_extension; | ||
461 | if (list_is_last(curr, &pdrv_obj->dev_node_string)) | ||
462 | return 0; | ||
463 | dw_dev_extension = (u32) curr->next; | ||
464 | } | ||
465 | } else { | ||
466 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | ||
467 | } | ||
468 | |||
469 | return dw_dev_extension; | ||
470 | } | ||
471 | |||
472 | /* | ||
473 | * ======== drv_insert_dev_object ======== | ||
474 | * Purpose: | ||
475 | * Insert a DevObject into the DRV object's device list. | ||
476 | */ | ||
477 | int drv_insert_dev_object(struct drv_object *driver_obj, | ||
478 | struct dev_object *hdev_obj) | ||
479 | { | ||
480 | struct drv_object *pdrv_object = (struct drv_object *)driver_obj; | ||
481 | |||
482 | list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list); | ||
483 | |||
484 | return 0; | ||
485 | } | ||
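Note that the cast in list_add_tail() above only works because struct dev_object is laid out with its list linkage as the first member; a sketch of the assumed layout follows (only the first-member constraint is implied here, the full definition lives in dev.c):

/* Assumed shape, not the full definition. */
struct dev_object {
	struct list_head link;	/* must be first: DRV casts dev_object <-> list_head */
	/* ... remaining device state ... */
};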
486 | |||
487 | /* | ||
488 | * ======== drv_remove_dev_object ======== | ||
489 | * Purpose: | ||
490 | * Search for and remove a DeviceObject from the given DRV object's | ||
491 | * device list. | ||
492 | */ | ||
493 | int drv_remove_dev_object(struct drv_object *driver_obj, | ||
494 | struct dev_object *hdev_obj) | ||
495 | { | ||
496 | int status = -EPERM; | ||
497 | struct drv_object *pdrv_object = (struct drv_object *)driver_obj; | ||
498 | struct list_head *cur_elem; | ||
499 | |||
500 | /* Search list for p_proc_object: */ | ||
501 | list_for_each(cur_elem, &pdrv_object->dev_list) { | ||
502 | /* If found, remove it. */ | ||
503 | if ((struct dev_object *)cur_elem == hdev_obj) { | ||
504 | list_del(cur_elem); | ||
505 | status = 0; | ||
506 | break; | ||
507 | } | ||
508 | } | ||
509 | |||
510 | return status; | ||
511 | } | ||
512 | |||
513 | /* | ||
514 | * ======== drv_request_resources ======== | ||
515 | * Purpose: | ||
516 | * Requests resources from the OS. | ||
517 | */ | ||
518 | int drv_request_resources(u32 dw_context, u32 *dev_node_strg) | ||
519 | { | ||
520 | int status = 0; | ||
521 | struct drv_object *pdrv_object; | ||
522 | struct drv_ext *pszdev_node; | ||
523 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
524 | |||
525 | /* | ||
526 | * Allocate memory to hold the device node string. It lives until it | ||
527 | * is freed in drv_release_resources(), and the string is added to | ||
528 | * the driver object's dev_node_string list. | ||
529 | */ | ||
530 | |||
531 | if (!drv_datap || !drv_datap->drv_object) | ||
532 | status = -ENODATA; | ||
533 | else | ||
534 | pdrv_object = drv_datap->drv_object; | ||
535 | |||
536 | if (!status) { | ||
537 | pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL); | ||
538 | if (pszdev_node) { | ||
539 | strncpy(pszdev_node->sz_string, | ||
540 | (char *)dw_context, MAXREGPATHLENGTH - 1); | ||
541 | pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0'; | ||
542 | /* Update the Driver Object List */ | ||
543 | *dev_node_strg = (u32) pszdev_node->sz_string; | ||
544 | list_add_tail(&pszdev_node->link, | ||
545 | &pdrv_object->dev_node_string); | ||
546 | } else { | ||
547 | status = -ENOMEM; | ||
548 | *dev_node_strg = 0; | ||
549 | } | ||
550 | } else { | ||
551 | dev_dbg(bridge, "%s: Failed to get Driver Object from Registry", | ||
552 | __func__); | ||
553 | *dev_node_strg = 0; | ||
554 | } | ||
555 | |||
556 | return status; | ||
557 | } | ||
558 | |||
559 | /* | ||
560 | * ======== drv_release_resources ======== | ||
561 | * Purpose: | ||
562 | * Releases resources from the OS. | ||
563 | */ | ||
564 | int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj) | ||
565 | { | ||
566 | int status = 0; | ||
567 | struct drv_ext *pszdev_node; | ||
568 | |||
569 | /* | ||
570 | * Clean up irrespective of the status: walk the device extension | ||
571 | * list and free the entry that matches dw_context. | ||
572 | */ | ||
573 | for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension(); | ||
574 | pszdev_node != NULL; pszdev_node = (struct drv_ext *) | ||
575 | drv_get_next_dev_extension((u32) pszdev_node)) { | ||
576 | if ((u32) pszdev_node == dw_context) { | ||
577 | /* Found it */ | ||
578 | /* Delete from the Driver object list */ | ||
579 | list_del(&pszdev_node->link); | ||
580 | kfree(pszdev_node); | ||
581 | break; | ||
582 | } | ||
583 | } | ||
584 | return status; | ||
585 | } | ||
586 | |||
587 | /* | ||
588 | * ======== request_bridge_resources ======== | ||
589 | * Purpose: | ||
590 | * Reserves shared memory for bridge. | ||
591 | */ | ||
592 | static int request_bridge_resources(struct cfg_hostres *res) | ||
593 | { | ||
594 | struct cfg_hostres *host_res = res; | ||
595 | |||
596 | /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */ | ||
597 | host_res->num_mem_windows = 2; | ||
598 | |||
599 | /* First window is for DSP internal memory */ | ||
600 | dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]); | ||
601 | dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]); | ||
602 | dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base); | ||
603 | |||
604 | /* For 24xx, the base port does not map the memory for DSP | ||
605 | * internal memory. TODO: do an ioremap here. */ | ||
606 | /* Second window is for DSP external memory shared with MPU */ | ||
607 | |||
608 | /* These are hard-coded values */ | ||
609 | host_res->birq_registers = 0; | ||
610 | host_res->birq_attrib = 0; | ||
611 | host_res->offset_for_monitor = 0; | ||
612 | host_res->chnl_offset = 0; | ||
613 | /* CHNL_MAXCHANNELS */ | ||
614 | host_res->num_chnls = CHNL_MAXCHANNELS; | ||
615 | host_res->chnl_buf_size = 0x400; | ||
616 | |||
617 | return 0; | ||
618 | } | ||
619 | |||
620 | /* | ||
621 | * ======== drv_request_bridge_res_dsp ======== | ||
622 | * Purpose: | ||
623 | * Reserves shared memory for bridge. | ||
624 | */ | ||
625 | int drv_request_bridge_res_dsp(void **phost_resources) | ||
626 | { | ||
627 | int status = 0; | ||
628 | struct cfg_hostres *host_res; | ||
629 | u32 dw_buff_size; | ||
630 | u32 dma_addr; | ||
631 | u32 shm_size; | ||
632 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
633 | |||
634 | dw_buff_size = sizeof(struct cfg_hostres); | ||
635 | |||
636 | host_res = kzalloc(dw_buff_size, GFP_KERNEL); | ||
637 | |||
638 | if (host_res != NULL) { | ||
639 | request_bridge_resources(host_res); | ||
640 | /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */ | ||
641 | host_res->num_mem_windows = 4; | ||
642 | |||
643 | host_res->mem_base[0] = 0; | ||
644 | host_res->mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE, | ||
645 | OMAP_DSP_MEM1_SIZE); | ||
646 | host_res->mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE, | ||
647 | OMAP_DSP_MEM2_SIZE); | ||
648 | host_res->mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE, | ||
649 | OMAP_DSP_MEM3_SIZE); | ||
650 | host_res->per_base = ioremap(OMAP_PER_CM_BASE, | ||
651 | OMAP_PER_CM_SIZE); | ||
652 | host_res->per_pm_base = ioremap(OMAP_PER_PRM_BASE, | ||
653 | OMAP_PER_PRM_SIZE); | ||
654 | host_res->core_pm_base = ioremap(OMAP_CORE_PRM_BASE, | ||
655 | OMAP_CORE_PRM_SIZE); | ||
656 | host_res->dmmu_base = ioremap(OMAP_DMMU_BASE, | ||
657 | OMAP_DMMU_SIZE); | ||
658 | |||
659 | dev_dbg(bridge, "mem_base[0] 0x%x\n", | ||
660 | host_res->mem_base[0]); | ||
661 | dev_dbg(bridge, "mem_base[1] 0x%x\n", | ||
662 | host_res->mem_base[1]); | ||
663 | dev_dbg(bridge, "mem_base[2] 0x%x\n", | ||
664 | host_res->mem_base[2]); | ||
665 | dev_dbg(bridge, "mem_base[3] 0x%x\n", | ||
666 | host_res->mem_base[3]); | ||
667 | dev_dbg(bridge, "mem_base[4] 0x%x\n", | ||
668 | host_res->mem_base[4]); | ||
669 | dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base); | ||
670 | |||
671 | shm_size = drv_datap->shm_size; | ||
672 | if (shm_size >= 0x10000) { | ||
673 | /* Allocate Physically contiguous, | ||
674 | * non-cacheable memory */ | ||
675 | host_res->mem_base[1] = | ||
676 | (u32) mem_alloc_phys_mem(shm_size, 0x100000, | ||
677 | &dma_addr); | ||
678 | if (host_res->mem_base[1] == 0) { | ||
679 | status = -ENOMEM; | ||
680 | pr_err("shm reservation Failed\n"); | ||
681 | } else { | ||
682 | host_res->mem_length[1] = shm_size; | ||
683 | host_res->mem_phys[1] = dma_addr; | ||
684 | |||
685 | dev_dbg(bridge, "%s: Bridge shm address 0x%x " | ||
686 | "dma_addr %x size %x\n", __func__, | ||
687 | host_res->mem_base[1], | ||
688 | dma_addr, shm_size); | ||
689 | } | ||
690 | } | ||
691 | if (!status) { | ||
692 | /* These are hard-coded values */ | ||
693 | host_res->birq_registers = 0; | ||
694 | host_res->birq_attrib = 0; | ||
695 | host_res->offset_for_monitor = 0; | ||
696 | host_res->chnl_offset = 0; | ||
697 | /* CHNL_MAXCHANNELS */ | ||
698 | host_res->num_chnls = CHNL_MAXCHANNELS; | ||
699 | host_res->chnl_buf_size = 0x400; | ||
700 | dw_buff_size = sizeof(struct cfg_hostres); | ||
701 | } | ||
702 | *phost_resources = host_res; | ||
703 | } | ||
704 | /* End Mem alloc */ | ||
705 | return status; | ||
706 | } | ||
707 | |||
708 | void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size) | ||
709 | { | ||
710 | u32 pool_virt_base; | ||
711 | |||
712 | /* get the virtual address for the physical memory pool passed */ | ||
713 | pool_virt_base = (u32) ioremap(pool_phys_base, pool_size); | ||
714 | |||
715 | if (!pool_virt_base) { | ||
716 | pr_err("%s: external physical memory map failed\n", __func__); | ||
717 | ext_phys_mem_pool_enabled = false; | ||
718 | } else { | ||
719 | ext_mem_pool.phys_mem_base = pool_phys_base; | ||
720 | ext_mem_pool.phys_mem_size = pool_size; | ||
721 | ext_mem_pool.virt_mem_base = pool_virt_base; | ||
722 | ext_mem_pool.next_phys_alloc_ptr = pool_phys_base; | ||
723 | ext_phys_mem_pool_enabled = true; | ||
724 | } | ||
725 | } | ||
726 | |||
727 | void mem_ext_phys_pool_release(void) | ||
728 | { | ||
729 | if (ext_phys_mem_pool_enabled) { | ||
730 | iounmap((void *)(ext_mem_pool.virt_mem_base)); | ||
731 | ext_phys_mem_pool_enabled = false; | ||
732 | } | ||
733 | } | ||
734 | |||
735 | /* | ||
736 | * ======== mem_ext_phys_mem_alloc ======== | ||
737 | * Purpose: | ||
738 | * Allocate physically contiguous, uncached memory from external memory pool | ||
739 | */ | ||
740 | |||
741 | static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr) | ||
742 | { | ||
743 | u32 new_alloc_ptr; | ||
744 | u32 offset; | ||
745 | u32 virt_addr; | ||
746 | |||
747 | if (align == 0) | ||
748 | align = 1; | ||
749 | |||
750 | if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size) | ||
751 | - ext_mem_pool.next_phys_alloc_ptr)) { | ||
752 | *phys_addr = 0; | ||
753 | return NULL; | ||
754 | } else { | ||
755 | offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1)); | ||
756 | if (offset == 0) | ||
757 | new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr; | ||
758 | else | ||
759 | new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) + | ||
760 | (align - offset); | ||
761 | if ((new_alloc_ptr + bytes) <= | ||
762 | (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) { | ||
763 | /* we can allocate */ | ||
764 | *phys_addr = new_alloc_ptr; | ||
765 | ext_mem_pool.next_phys_alloc_ptr = | ||
766 | new_alloc_ptr + bytes; | ||
767 | virt_addr = | ||
768 | ext_mem_pool.virt_mem_base + (new_alloc_ptr - | ||
769 | ext_mem_pool. | ||
770 | phys_mem_base); | ||
771 | return (void *)virt_addr; | ||
772 | } else { | ||
773 | *phys_addr = 0; | ||
774 | return NULL; | ||
775 | } | ||
776 | } | ||
777 | } | ||
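The alignment arithmetic above in one worked example, using made-up pool values:

/*
 * Assume next_phys_alloc_ptr == 0x87000010 and align == 0x100:
 *   offset        = 0x87000010 & (0x100 - 1)    = 0x10
 *   new_alloc_ptr = 0x87000010 + (0x100 - 0x10) = 0x87000100
 * The returned physical address is therefore 256-byte aligned and
 * next_phys_alloc_ptr advances to 0x87000100 + bytes.
 */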
778 | |||
779 | /* | ||
780 | * ======== mem_alloc_phys_mem ======== | ||
781 | * Purpose: | ||
782 | * Allocate physically contiguous, uncached memory | ||
783 | */ | ||
784 | void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask, | ||
785 | u32 *physical_address) | ||
786 | { | ||
787 | void *va_mem = NULL; | ||
788 | dma_addr_t pa_mem; | ||
789 | |||
790 | if (byte_size > 0) { | ||
791 | if (ext_phys_mem_pool_enabled) { | ||
792 | va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask, | ||
793 | (u32 *) &pa_mem); | ||
794 | } else | ||
795 | va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem, | ||
796 | GFP_KERNEL); | ||
797 | if (va_mem == NULL) | ||
798 | *physical_address = 0; | ||
799 | else | ||
800 | *physical_address = pa_mem; | ||
801 | } | ||
802 | return va_mem; | ||
803 | } | ||
804 | |||
805 | /* | ||
806 | * ======== mem_free_phys_mem ======== | ||
807 | * Purpose: | ||
808 | * Free the given block of physically contiguous memory. | ||
809 | */ | ||
810 | void mem_free_phys_mem(void *virtual_address, u32 physical_address, | ||
811 | u32 byte_size) | ||
812 | { | ||
813 | if (!ext_phys_mem_pool_enabled) | ||
814 | dma_free_coherent(NULL, byte_size, virtual_address, | ||
815 | physical_address); | ||
816 | } | ||
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c deleted file mode 100644 index e3918d2efa17..000000000000 --- a/drivers/staging/tidspbridge/rmgr/drv_interface.c +++ /dev/null | |||
@@ -1,650 +0,0 @@ | |||
1 | /* | ||
2 | * drv_interface.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge driver interface. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/platform_data/dsp-omap.h> | ||
20 | |||
21 | #include <linux/types.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/pm.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/device.h> | ||
26 | #include <linux/moduleparam.h> | ||
27 | #include <linux/cdev.h> | ||
28 | |||
29 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
30 | #include <dspbridge/dbdefs.h> | ||
31 | |||
32 | /* ----------------------------------- OS Adaptation Layer */ | ||
33 | #include <dspbridge/clk.h> | ||
34 | |||
35 | /* ----------------------------------- Platform Manager */ | ||
36 | #include <dspbridge/dspapi.h> | ||
37 | #include <dspbridge/dspdrv.h> | ||
38 | |||
39 | /* ----------------------------------- Resource Manager */ | ||
40 | #include <dspbridge/pwr.h> | ||
41 | |||
42 | #include <dspbridge/resourcecleanup.h> | ||
43 | #include <dspbridge/proc.h> | ||
44 | #include <dspbridge/dev.h> | ||
45 | |||
46 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
47 | #include <mach-omap2/omap3-opp.h> | ||
48 | #endif | ||
49 | |||
50 | /* ----------------------------------- Globals */ | ||
51 | #define DSPBRIDGE_VERSION "0.3" | ||
52 | s32 dsp_debug; | ||
53 | |||
54 | struct platform_device *omap_dspbridge_dev; | ||
55 | struct device *bridge; | ||
56 | |||
57 | /* This is a test variable used by Bridge to test different sleep states */ | ||
58 | s32 dsp_test_sleepstate; | ||
59 | |||
60 | static struct cdev bridge_cdev; | ||
61 | |||
62 | static struct class *bridge_class; | ||
63 | |||
64 | static u32 driver_context; | ||
65 | static s32 driver_major; | ||
66 | static char *base_img; | ||
67 | static s32 shm_size = 0x500000; /* 5 MB */ | ||
68 | static int tc_wordswapon; /* Default value is always false */ | ||
69 | #ifdef CONFIG_TIDSPBRIDGE_RECOVERY | ||
70 | #define REC_TIMEOUT 5000 /*recovery timeout in msecs */ | ||
71 | static atomic_t bridge_cref; /* number of bridge open handles */ | ||
72 | static struct workqueue_struct *bridge_rec_queue; | ||
73 | static struct work_struct bridge_recovery_work; | ||
74 | static DECLARE_COMPLETION(bridge_comp); | ||
75 | static DECLARE_COMPLETION(bridge_open_comp); | ||
76 | static bool recover; | ||
77 | #endif | ||
78 | |||
79 | #ifdef CONFIG_PM | ||
80 | struct omap34_xx_bridge_suspend_data { | ||
81 | int suspended; | ||
82 | wait_queue_head_t suspend_wq; | ||
83 | }; | ||
84 | |||
85 | static struct omap34_xx_bridge_suspend_data bridge_suspend_data; | ||
86 | |||
87 | static int omap34_xxbridge_suspend_lockout(struct omap34_xx_bridge_suspend_data | ||
88 | *s, struct file *f) | ||
89 | { | ||
90 | if ((s)->suspended) { | ||
91 | if ((f)->f_flags & O_NONBLOCK) | ||
92 | return -EPERM; | ||
93 | wait_event_interruptible((s)->suspend_wq, (s)->suspended == 0); | ||
94 | } | ||
95 | return 0; | ||
96 | } | ||
97 | #endif | ||
98 | |||
99 | module_param(dsp_debug, int, 0); | ||
100 | MODULE_PARM_DESC(dsp_debug, "Wait after loading DSP image. default = false"); | ||
101 | |||
102 | module_param(dsp_test_sleepstate, int, 0); | ||
103 | MODULE_PARM_DESC(dsp_test_sleepstate, "DSP Sleep state = 0"); | ||
104 | |||
105 | module_param(base_img, charp, 0); | ||
106 | MODULE_PARM_DESC(base_img, "DSP base image, default = NULL"); | ||
107 | |||
108 | module_param(shm_size, int, 0); | ||
109 | MODULE_PARM_DESC(shm_size, "shm size, default = 5 MB, minimum = 64 KB"); | ||
110 | |||
111 | module_param(tc_wordswapon, int, 0); | ||
112 | MODULE_PARM_DESC(tc_wordswapon, "TC Word Swap Option. default = 0"); | ||
113 | |||
114 | MODULE_AUTHOR("Texas Instruments"); | ||
115 | MODULE_LICENSE("GPL"); | ||
116 | MODULE_VERSION(DSPBRIDGE_VERSION); | ||
117 | |||
118 | /* | ||
119 | * This function is called when an application opens a handle to the | ||
120 | * bridge driver. | ||
121 | */ | ||
122 | static int bridge_open(struct inode *ip, struct file *filp) | ||
123 | { | ||
124 | int status = 0; | ||
125 | struct process_context *pr_ctxt = NULL; | ||
126 | |||
127 | /* | ||
128 | * Allocate a new process context and insert it into global | ||
129 | * process context list. | ||
130 | */ | ||
131 | |||
132 | #ifdef CONFIG_TIDSPBRIDGE_RECOVERY | ||
133 | if (recover) { | ||
134 | if (filp->f_flags & O_NONBLOCK || | ||
135 | wait_for_completion_interruptible(&bridge_open_comp)) | ||
136 | return -EBUSY; | ||
137 | } | ||
138 | #endif | ||
139 | pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL); | ||
140 | if (!pr_ctxt) | ||
141 | return -ENOMEM; | ||
142 | |||
143 | pr_ctxt->res_state = PROC_RES_ALLOCATED; | ||
144 | spin_lock_init(&pr_ctxt->dmm_map_lock); | ||
145 | INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); | ||
146 | spin_lock_init(&pr_ctxt->dmm_rsv_lock); | ||
147 | INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list); | ||
148 | |||
149 | pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); | ||
150 | if (!pr_ctxt->node_id) { | ||
151 | status = -ENOMEM; | ||
152 | goto err1; | ||
153 | } | ||
154 | |||
155 | idr_init(pr_ctxt->node_id); | ||
156 | |||
157 | pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL); | ||
158 | if (!pr_ctxt->stream_id) { | ||
159 | status = -ENOMEM; | ||
160 | goto err2; | ||
161 | } | ||
162 | |||
163 | idr_init(pr_ctxt->stream_id); | ||
164 | |||
165 | filp->private_data = pr_ctxt; | ||
166 | |||
167 | #ifdef CONFIG_TIDSPBRIDGE_RECOVERY | ||
168 | atomic_inc(&bridge_cref); | ||
169 | #endif | ||
170 | return 0; | ||
171 | |||
172 | err2: | ||
173 | kfree(pr_ctxt->node_id); | ||
174 | err1: | ||
175 | kfree(pr_ctxt); | ||
176 | return status; | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * This function is called when an application closes a handle to the bridge | ||
181 | * driver. | ||
182 | */ | ||
183 | static int bridge_release(struct inode *ip, struct file *filp) | ||
184 | { | ||
185 | int status = 0; | ||
186 | struct process_context *pr_ctxt; | ||
187 | |||
188 | if (!filp->private_data) { | ||
189 | status = -EIO; | ||
190 | goto err; | ||
191 | } | ||
192 | |||
193 | pr_ctxt = filp->private_data; | ||
194 | flush_signals(current); | ||
195 | drv_remove_all_resources(pr_ctxt); | ||
196 | proc_detach(pr_ctxt); | ||
197 | kfree(pr_ctxt->node_id); | ||
198 | kfree(pr_ctxt->stream_id); | ||
199 | kfree(pr_ctxt); | ||
200 | |||
201 | filp->private_data = NULL; | ||
202 | |||
203 | err: | ||
204 | #ifdef CONFIG_TIDSPBRIDGE_RECOVERY | ||
205 | if (!atomic_dec_return(&bridge_cref)) | ||
206 | complete(&bridge_comp); | ||
207 | #endif | ||
208 | return status; | ||
209 | } | ||
210 | |||
211 | /* This function provides an I/O interface to the bridge driver. */ | ||
212 | static long bridge_ioctl(struct file *filp, unsigned int code, | ||
213 | unsigned long args) | ||
214 | { | ||
215 | int status; | ||
216 | u32 retval = 0; | ||
217 | union trapped_args buf_in; | ||
218 | |||
219 | #ifdef CONFIG_TIDSPBRIDGE_RECOVERY | ||
220 | if (recover) { | ||
221 | status = -EIO; | ||
222 | goto err; | ||
223 | } | ||
224 | #endif | ||
225 | #ifdef CONFIG_PM | ||
226 | status = omap34_xxbridge_suspend_lockout(&bridge_suspend_data, filp); | ||
227 | if (status != 0) | ||
228 | return status; | ||
229 | #endif | ||
230 | |||
231 | if (!filp->private_data) { | ||
232 | status = -EIO; | ||
233 | goto err; | ||
234 | } | ||
235 | |||
236 | status = copy_from_user(&buf_in, (union trapped_args *)args, | ||
237 | sizeof(union trapped_args)); | ||
238 | |||
239 | if (!status) { | ||
240 | status = api_call_dev_ioctl(code, &buf_in, &retval, | ||
241 | filp->private_data); | ||
242 | |||
243 | if (!status) { | ||
244 | status = retval; | ||
245 | } else { | ||
246 | dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x " | ||
247 | "status 0x%x\n", __func__, code, status); | ||
248 | status = -1; | ||
249 | } | ||
250 | |||
251 | } | ||
252 | |||
253 | err: | ||
254 | return status; | ||
255 | } | ||
256 | |||
257 | /* This function maps kernel space memory to user space memory. */ | ||
258 | static int bridge_mmap(struct file *filp, struct vm_area_struct *vma) | ||
259 | { | ||
260 | unsigned long base_pgoff; | ||
261 | int status; | ||
262 | struct omap_dsp_platform_data *pdata = | ||
263 | omap_dspbridge_dev->dev.platform_data; | ||
264 | |||
265 | /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */ | ||
266 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
267 | |||
268 | dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %lx " | ||
269 | "flags %lx\n", __func__, filp, | ||
270 | vma->vm_start, vma->vm_end, (unsigned long)pgprot_val(vma->vm_page_prot), | ||
271 | vma->vm_flags); | ||
272 | |||
273 | /* | ||
274 | * vm_iomap_memory() expects vma->vm_pgoff to be expressed as an offset | ||
275 | * from the start of the physical memory pool, but we're called with | ||
276 | * a pfn (physical page number) stored there instead. | ||
277 | * | ||
278 | * To avoid duplicating lots of tricky overflow checking logic, | ||
279 | * temporarily convert vma->vm_pgoff to the offset vm_iomap_memory() | ||
280 | * expects, but restore the original value once the mapping has been | ||
281 | * created. | ||
282 | */ | ||
283 | base_pgoff = pdata->phys_mempool_base >> PAGE_SHIFT; | ||
284 | |||
285 | if (vma->vm_pgoff < base_pgoff) | ||
286 | return -EINVAL; | ||
287 | |||
288 | vma->vm_pgoff -= base_pgoff; | ||
289 | |||
290 | status = vm_iomap_memory(vma, | ||
291 | pdata->phys_mempool_base, | ||
292 | pdata->phys_mempool_size); | ||
293 | |||
294 | /* Restore the original value of vma->vm_pgoff */ | ||
295 | vma->vm_pgoff += base_pgoff; | ||
296 | |||
297 | return status; | ||
298 | } | ||
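A worked example of the vm_pgoff adjustment above, with made-up numbers and assuming 4 KiB pages (PAGE_SHIFT == 12):

/*
 * Say pdata->phys_mempool_base == 0x87000000, so base_pgoff == 0x87000.
 * A caller that mmap()s with an offset encoding pfn 0x87001 arrives here
 * with vma->vm_pgoff == 0x87001; after the subtraction vm_pgoff == 1,
 * i.e. one page into the pool, which is exactly what vm_iomap_memory()
 * expects before the original value is restored.
 */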
299 | |||
300 | static const struct file_operations bridge_fops = { | ||
301 | .open = bridge_open, | ||
302 | .release = bridge_release, | ||
303 | .unlocked_ioctl = bridge_ioctl, | ||
304 | .mmap = bridge_mmap, | ||
305 | .llseek = noop_llseek, | ||
306 | }; | ||
307 | |||
308 | #ifdef CONFIG_PM | ||
309 | static u32 time_out = 1000; | ||
310 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
311 | s32 dsp_max_opps = VDD1_OPP5; | ||
312 | #endif | ||
313 | |||
314 | /* Maximum Opps that can be requested by IVA */ | ||
315 | /*vdd1 rate table */ | ||
316 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
317 | const struct omap_opp vdd1_rate_table_bridge[] = { | ||
318 | {0, 0, 0}, | ||
319 | /*OPP1 */ | ||
320 | {S125M, VDD1_OPP1, 0}, | ||
321 | /*OPP2 */ | ||
322 | {S250M, VDD1_OPP2, 0}, | ||
323 | /*OPP3 */ | ||
324 | {S500M, VDD1_OPP3, 0}, | ||
325 | /*OPP4 */ | ||
326 | {S550M, VDD1_OPP4, 0}, | ||
327 | /*OPP5 */ | ||
328 | {S600M, VDD1_OPP5, 0}, | ||
329 | }; | ||
330 | #endif | ||
331 | #endif | ||
332 | |||
333 | struct omap_dsp_platform_data *omap_dspbridge_pdata; | ||
334 | |||
335 | u32 vdd1_dsp_freq[6][4] = { | ||
336 | {0, 0, 0, 0}, | ||
337 | /*OPP1 */ | ||
338 | {0, 90000, 0, 86000}, | ||
339 | /*OPP2 */ | ||
340 | {0, 180000, 80000, 170000}, | ||
341 | /*OPP3 */ | ||
342 | {0, 360000, 160000, 340000}, | ||
343 | /*OPP4 */ | ||
344 | {0, 396000, 325000, 376000}, | ||
345 | /*OPP5 */ | ||
346 | {0, 430000, 355000, 430000}, | ||
347 | }; | ||
348 | |||
349 | #ifdef CONFIG_TIDSPBRIDGE_RECOVERY | ||
350 | static void bridge_recover(struct work_struct *work) | ||
351 | { | ||
352 | struct dev_object *dev; | ||
353 | struct cfg_devnode *dev_node; | ||
354 | |||
355 | if (atomic_read(&bridge_cref)) { | ||
356 | reinit_completion(&bridge_comp); | ||
357 | while (!wait_for_completion_timeout(&bridge_comp, | ||
358 | msecs_to_jiffies(REC_TIMEOUT))) | ||
359 | pr_info("%s:%d handle(s) still opened\n", | ||
360 | __func__, atomic_read(&bridge_cref)); | ||
361 | } | ||
362 | dev = dev_get_first(); | ||
363 | dev_get_dev_node(dev, &dev_node); | ||
364 | if (!dev_node || proc_auto_start(dev_node, dev)) | ||
365 | pr_err("DSP could not be restarted\n"); | ||
366 | recover = false; | ||
367 | complete_all(&bridge_open_comp); | ||
368 | } | ||
369 | |||
370 | void bridge_recover_schedule(void) | ||
371 | { | ||
372 | reinit_completion(&bridge_open_comp); | ||
373 | recover = true; | ||
374 | queue_work(bridge_rec_queue, &bridge_recovery_work); | ||
375 | } | ||
376 | #endif | ||
377 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
378 | static int dspbridge_scale_notification(struct notifier_block *op, | ||
379 | unsigned long val, void *ptr) | ||
380 | { | ||
381 | struct omap_dsp_platform_data *pdata = | ||
382 | omap_dspbridge_dev->dev.platform_data; | ||
383 | |||
384 | if (CPUFREQ_POSTCHANGE == val && pdata->dsp_get_opp) | ||
385 | pwr_pm_post_scale(PRCM_VDD1, pdata->dsp_get_opp()); | ||
386 | |||
387 | return 0; | ||
388 | } | ||
389 | |||
390 | static struct notifier_block iva_clk_notifier = { | ||
391 | .notifier_call = dspbridge_scale_notification, | ||
392 | NULL, | ||
393 | }; | ||
394 | #endif | ||
395 | |||
396 | /** | ||
397 | * omap3_bridge_startup() - perform low-level initializations | ||
398 | * @pdev: pointer to platform device | ||
399 | * | ||
400 | * Initializes recovery, PM and DVFS required data, before calling | ||
401 | * clk and memory init routines. | ||
402 | */ | ||
403 | static int omap3_bridge_startup(struct platform_device *pdev) | ||
404 | { | ||
405 | struct omap_dsp_platform_data *pdata = pdev->dev.platform_data; | ||
406 | struct drv_data *drv_datap = NULL; | ||
407 | u32 phys_membase, phys_memsize; | ||
408 | int err; | ||
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	int i;	/* index into vdd1_rate_table_bridge below */
#endif
409 | |||
410 | #ifdef CONFIG_TIDSPBRIDGE_RECOVERY | ||
411 | bridge_rec_queue = create_workqueue("bridge_rec_queue"); | ||
412 | INIT_WORK(&bridge_recovery_work, bridge_recover); | ||
413 | reinit_completion(&bridge_comp); | ||
414 | #endif | ||
415 | |||
416 | #ifdef CONFIG_PM | ||
417 | /* Initialize the wait queue */ | ||
418 | bridge_suspend_data.suspended = 0; | ||
419 | init_waitqueue_head(&bridge_suspend_data.suspend_wq); | ||
420 | |||
421 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
422 | for (i = 0; i < 6; i++) | ||
423 | pdata->mpu_speed[i] = vdd1_rate_table_bridge[i].rate; | ||
424 | |||
425 | err = cpufreq_register_notifier(&iva_clk_notifier, | ||
426 | CPUFREQ_TRANSITION_NOTIFIER); | ||
427 | if (err) | ||
428 | pr_err("%s: clk_notifier_register failed for iva2_ck\n", | ||
429 | __func__); | ||
430 | #endif | ||
431 | #endif | ||
432 | |||
433 | dsp_clk_init(); | ||
434 | |||
435 | drv_datap = kzalloc(sizeof(struct drv_data), GFP_KERNEL); | ||
436 | if (!drv_datap) { | ||
437 | err = -ENOMEM; | ||
438 | goto err1; | ||
439 | } | ||
440 | |||
441 | drv_datap->shm_size = shm_size; | ||
442 | drv_datap->tc_wordswapon = tc_wordswapon; | ||
443 | |||
444 | if (base_img) { | ||
445 | drv_datap->base_img = kstrdup(base_img, GFP_KERNEL); | ||
446 | if (!drv_datap->base_img) { | ||
447 | err = -ENOMEM; | ||
448 | goto err2; | ||
449 | } | ||
450 | } | ||
451 | |||
452 | dev_set_drvdata(bridge, drv_datap); | ||
453 | |||
454 | if (shm_size < 0x10000) { /* 64 KB */ | ||
455 | err = -EINVAL; | ||
456 | pr_err("%s: shm size must be at least 64 KB\n", __func__); | ||
457 | goto err3; | ||
458 | } | ||
459 | dev_dbg(bridge, "%s: requested shm_size = 0x%x\n", __func__, shm_size); | ||
460 | |||
461 | phys_membase = pdata->phys_mempool_base; | ||
462 | phys_memsize = pdata->phys_mempool_size; | ||
463 | if (phys_membase > 0 && phys_memsize > 0) | ||
464 | mem_ext_phys_pool_init(phys_membase, phys_memsize); | ||
465 | |||
466 | if (tc_wordswapon) | ||
467 | dev_dbg(bridge, "%s: TC Word Swap is enabled\n", __func__); | ||
468 | |||
469 | driver_context = dsp_init(&err); | ||
470 | if (err) { | ||
471 | pr_err("DSP Bridge driver initialization failed\n"); | ||
472 | goto err4; | ||
473 | } | ||
474 | |||
475 | return 0; | ||
476 | |||
477 | err4: | ||
478 | mem_ext_phys_pool_release(); | ||
479 | err3: | ||
480 | kfree(drv_datap->base_img); | ||
481 | err2: | ||
482 | kfree(drv_datap); | ||
483 | err1: | ||
484 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
485 | cpufreq_unregister_notifier(&iva_clk_notifier, | ||
486 | CPUFREQ_TRANSITION_NOTIFIER); | ||
487 | #endif | ||
488 | dsp_clk_exit(); | ||
489 | |||
490 | return err; | ||
491 | } | ||
492 | |||
493 | static int omap34_xx_bridge_probe(struct platform_device *pdev) | ||
494 | { | ||
495 | int err; | ||
496 | dev_t dev = 0; | ||
497 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
498 | int i = 0; | ||
499 | #endif | ||
500 | |||
501 | omap_dspbridge_dev = pdev; | ||
502 | |||
503 | /* Global bridge device */ | ||
504 | bridge = &omap_dspbridge_dev->dev; | ||
505 | |||
506 | /* Bridge low level initializations */ | ||
507 | err = omap3_bridge_startup(pdev); | ||
508 | if (err) | ||
509 | goto err1; | ||
510 | |||
511 | /* use 2.6 device model */ | ||
512 | err = alloc_chrdev_region(&dev, 0, 1, "DspBridge"); | ||
513 | if (err) { | ||
514 | pr_err("%s: Can't get major %d\n", __func__, driver_major); | ||
515 | goto err1; | ||
516 | } | ||
517 | |||
518 | cdev_init(&bridge_cdev, &bridge_fops); | ||
519 | bridge_cdev.owner = THIS_MODULE; | ||
520 | |||
521 | err = cdev_add(&bridge_cdev, dev, 1); | ||
522 | if (err) { | ||
523 | pr_err("%s: Failed to add bridge device\n", __func__); | ||
524 | goto err2; | ||
525 | } | ||
526 | |||
527 | /* udev support */ | ||
528 | bridge_class = class_create(THIS_MODULE, "ti_bridge"); | ||
529 | if (IS_ERR(bridge_class)) { | ||
530 | pr_err("%s: Error creating bridge class\n", __func__); | ||
531 | err = PTR_ERR(bridge_class); | ||
532 | goto err3; | ||
533 | } | ||
534 | |||
535 | driver_major = MAJOR(dev); | ||
536 | device_create(bridge_class, NULL, MKDEV(driver_major, 0), | ||
537 | NULL, "DspBridge"); | ||
538 | pr_info("DSP Bridge driver loaded\n"); | ||
539 | |||
540 | return 0; | ||
541 | |||
542 | err3: | ||
543 | cdev_del(&bridge_cdev); | ||
544 | err2: | ||
545 | unregister_chrdev_region(dev, 1); | ||
546 | err1: | ||
547 | return err; | ||
548 | } | ||
549 | |||
550 | static int omap34_xx_bridge_remove(struct platform_device *pdev) | ||
551 | { | ||
552 | dev_t devno; | ||
553 | int status = 0; | ||
554 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
555 | |||
556 | /* Retrieve the Object handle from the driver data */ | ||
557 | if (!drv_datap || !drv_datap->drv_object) { | ||
558 | status = -ENODATA; | ||
559 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | ||
560 | goto func_cont; | ||
561 | } | ||
562 | |||
563 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | ||
564 | if (cpufreq_unregister_notifier(&iva_clk_notifier, | ||
565 | CPUFREQ_TRANSITION_NOTIFIER)) | ||
566 | pr_err("%s: cpufreq_unregister_notifier failed for iva2_ck\n", | ||
567 | __func__); | ||
568 | #endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */ | ||
569 | |||
570 | if (driver_context) { | ||
571 | /* Put the DSP in reset state */ | ||
572 | dsp_deinit(driver_context); | ||
573 | driver_context = 0; | ||
574 | } | ||
575 | |||
576 | kfree(drv_datap); | ||
577 | dev_set_drvdata(bridge, NULL); | ||
578 | |||
579 | func_cont: | ||
580 | mem_ext_phys_pool_release(); | ||
581 | |||
582 | dsp_clk_exit(); | ||
583 | |||
584 | devno = MKDEV(driver_major, 0); | ||
585 | cdev_del(&bridge_cdev); | ||
586 | unregister_chrdev_region(devno, 1); | ||
587 | if (bridge_class) { | ||
588 | /* remove the device from sysfs */ | ||
589 | device_destroy(bridge_class, MKDEV(driver_major, 0)); | ||
590 | class_destroy(bridge_class); | ||
591 | |||
592 | } | ||
593 | return status; | ||
594 | } | ||
595 | |||
596 | #ifdef CONFIG_PM | ||
597 | static int bridge_suspend(struct platform_device *pdev, pm_message_t state) | ||
598 | { | ||
599 | u32 status; | ||
600 | u32 command = PWR_EMERGENCYDEEPSLEEP; | ||
601 | |||
602 | status = pwr_sleep_dsp(command, time_out); | ||
603 | if (status) | ||
604 | return -1; | ||
605 | |||
606 | bridge_suspend_data.suspended = 1; | ||
607 | return 0; | ||
608 | } | ||
609 | |||
610 | static int bridge_resume(struct platform_device *pdev) | ||
611 | { | ||
612 | u32 status; | ||
613 | |||
614 | status = pwr_wake_dsp(time_out); | ||
615 | if (status) | ||
616 | return -1; | ||
617 | |||
618 | bridge_suspend_data.suspended = 0; | ||
619 | wake_up(&bridge_suspend_data.suspend_wq); | ||
620 | return 0; | ||
621 | } | ||
622 | #endif | ||
623 | |||
624 | static struct platform_driver bridge_driver = { | ||
625 | .driver = { | ||
626 | .name = "omap-dsp", | ||
627 | }, | ||
628 | .probe = omap34_xx_bridge_probe, | ||
629 | .remove = omap34_xx_bridge_remove, | ||
630 | #ifdef CONFIG_PM | ||
631 | .suspend = bridge_suspend, | ||
632 | .resume = bridge_resume, | ||
633 | #endif | ||
634 | }; | ||
635 | |||
636 | /* Remove all process resources before removing the process from the | ||
637 | * process context list. */ | ||
638 | int drv_remove_all_resources(void *process_ctxt) | ||
639 | { | ||
640 | int status = 0; | ||
641 | struct process_context *ctxt = (struct process_context *)process_ctxt; | ||
642 | |||
643 | drv_remove_all_strm_res_elements(ctxt); | ||
644 | drv_remove_all_node_res_elements(ctxt); | ||
645 | drv_remove_all_dmm_res_elements(ctxt); | ||
646 | ctxt->res_state = PROC_RES_FREED; | ||
647 | return status; | ||
648 | } | ||
649 | |||
650 | module_platform_driver(bridge_driver); | ||
diff --git a/drivers/staging/tidspbridge/rmgr/dspdrv.c b/drivers/staging/tidspbridge/rmgr/dspdrv.c deleted file mode 100644 index 012e4a38d2db..000000000000 --- a/drivers/staging/tidspbridge/rmgr/dspdrv.c +++ /dev/null | |||
@@ -1,139 +0,0 @@ | |||
1 | /* | ||
2 | * dspdrv.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Interface to allocate and free bridge resources. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | /* ----------------------------------- Host OS */ | ||
20 | #include <linux/types.h> | ||
21 | #include <dspbridge/host_os.h> | ||
22 | |||
23 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
24 | #include <dspbridge/dbdefs.h> | ||
25 | |||
26 | /* ----------------------------------- Platform Manager */ | ||
27 | #include <dspbridge/drv.h> | ||
28 | #include <dspbridge/dev.h> | ||
29 | #include <dspbridge/dspapi.h> | ||
30 | |||
31 | /* ----------------------------------- Resource Manager */ | ||
32 | #include <dspbridge/mgr.h> | ||
33 | |||
34 | /* ----------------------------------- This */ | ||
35 | #include <dspbridge/dspdrv.h> | ||
36 | |||
37 | /* | ||
38 | * ======== dsp_init ======== | ||
39 | * Allocates bridge resources. Loads a base image onto DSP, if specified. | ||
40 | */ | ||
41 | u32 dsp_init(u32 *init_status) | ||
42 | { | ||
43 | char dev_node[MAXREGPATHLENGTH] = "TIOMAP1510"; | ||
44 | int status = -EPERM; | ||
45 | struct drv_object *drv_obj = NULL; | ||
46 | u32 device_node; | ||
47 | u32 device_node_string; | ||
48 | |||
49 | if (!api_init()) | ||
50 | goto func_cont; | ||
51 | |||
52 | status = drv_create(&drv_obj); | ||
53 | if (status) { | ||
54 | api_exit(); | ||
55 | goto func_cont; | ||
56 | } | ||
57 | |||
58 | /* End drv_create */ | ||
59 | /* Request Resources */ | ||
60 | status = drv_request_resources((u32) &dev_node, &device_node_string); | ||
61 | if (!status) { | ||
62 | /* Attempt to Start the Device */ | ||
63 | status = dev_start_device((struct cfg_devnode *) | ||
64 | device_node_string); | ||
65 | if (status) | ||
66 | (void)drv_release_resources | ||
67 | ((u32) device_node_string, drv_obj); | ||
68 | } else { | ||
69 | dev_dbg(bridge, "%s: drv_request_resources Failed\n", __func__); | ||
70 | status = -EPERM; | ||
71 | } | ||
72 | |||
73 | /* Unwind whatever was loaded */ | ||
74 | if (status) { | ||
75 | /* Irrespective of the status of dev_remove_device, we continue | ||
76 | * unloading: get the driver object, iterate through it and remove each | ||
77 | * device. Reset the status to E_FAIL to avoid going through | ||
78 | * api_init_complete2. */ | ||
79 | for (device_node = drv_get_first_dev_extension(); | ||
80 | device_node != 0; | ||
81 | device_node = drv_get_next_dev_extension(device_node)) { | ||
82 | (void)dev_remove_device((struct cfg_devnode *) | ||
83 | device_node); | ||
84 | (void)drv_release_resources((u32) device_node, drv_obj); | ||
85 | } | ||
86 | /* Remove the Driver Object */ | ||
87 | (void)drv_destroy(drv_obj); | ||
88 | drv_obj = NULL; | ||
89 | api_exit(); | ||
90 | dev_dbg(bridge, "%s: Logical device failed init\n", __func__); | ||
91 | } /* Unwinding the loaded drivers */ | ||
92 | func_cont: | ||
93 | /* Attempt to Start the Board */ | ||
94 | if (!status) { | ||
95 | /* BRD_AutoStart could fail if the dsp executable is not the | ||
96 | * correct one. We should not propagate that error | ||
97 | * into the device loader. */ | ||
98 | (void)api_init_complete2(); | ||
99 | } else { | ||
100 | dev_dbg(bridge, "%s: Failed\n", __func__); | ||
101 | } /* End api_init_complete2 */ | ||
102 | *init_status = status; | ||
103 | /* Return the Driver Object */ | ||
104 | return (u32) drv_obj; | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * ======== dsp_deinit ======== | ||
109 | * Frees the resources allocated for bridge. | ||
110 | */ | ||
111 | bool dsp_deinit(u32 device_context) | ||
112 | { | ||
113 | bool ret = true; | ||
114 | u32 device_node; | ||
115 | struct mgr_object *mgr_obj = NULL; | ||
116 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
117 | |||
118 | while ((device_node = drv_get_first_dev_extension()) != 0) { | ||
119 | (void)dev_remove_device((struct cfg_devnode *)device_node); | ||
120 | |||
121 | (void)drv_release_resources((u32) device_node, | ||
122 | (struct drv_object *)device_context); | ||
123 | } | ||
124 | |||
125 | (void)drv_destroy((struct drv_object *)device_context); | ||
126 | |||
127 | /* Get the manager object from the driver data; | ||
128 | * mgr_destroy() will unload the DCD DLL */ | ||
129 | if (drv_datap && drv_datap->mgr_object) { | ||
130 | mgr_obj = drv_datap->mgr_object; | ||
131 | (void)mgr_destroy(mgr_obj); | ||
132 | } else { | ||
133 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | ||
134 | } | ||
135 | |||
136 | api_exit(); | ||
137 | |||
138 | return ret; | ||
139 | } | ||
diff --git a/drivers/staging/tidspbridge/rmgr/mgr.c b/drivers/staging/tidspbridge/rmgr/mgr.c deleted file mode 100644 index 93e6282f122b..000000000000 --- a/drivers/staging/tidspbridge/rmgr/mgr.c +++ /dev/null | |||
@@ -1,352 +0,0 @@ | |||
1 | /* | ||
2 | * mgr.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Implementation of the Manager interface to the device object at the | ||
7 | * driver level. It queries the NDB database and retrieves data | ||
8 | * about nodes and processors. | ||
9 | * | ||
10 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
11 | * | ||
12 | * This package is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | * | ||
16 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
17 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
18 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
19 | */ | ||
20 | |||
21 | #include <linux/types.h> | ||
22 | |||
23 | /* ----------------------------------- Host OS */ | ||
24 | #include <dspbridge/host_os.h> | ||
25 | |||
26 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
27 | #include <dspbridge/dbdefs.h> | ||
28 | |||
29 | /* ----------------------------------- OS Adaptation Layer */ | ||
30 | #include <dspbridge/sync.h> | ||
31 | |||
32 | /* ----------------------------------- Others */ | ||
33 | #include <dspbridge/dbdcd.h> | ||
34 | #include <dspbridge/drv.h> | ||
35 | #include <dspbridge/dev.h> | ||
36 | |||
37 | /* ----------------------------------- This */ | ||
38 | #include <dspbridge/mgr.h> | ||
39 | |||
40 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | ||
41 | #define ZLDLLNAME "" | ||
42 | |||
43 | struct mgr_object { | ||
44 | struct dcd_manager *dcd_mgr; /* Proc/Node data manager */ | ||
45 | }; | ||
46 | |||
47 | /* ----------------------------------- Globals */ | ||
48 | static u32 refs; | ||
49 | |||
50 | /* | ||
51 | * ========= mgr_create ========= | ||
52 | * Purpose: | ||
53 | * MGR Object gets created only once during driver Loading. | ||
54 | */ | ||
55 | int mgr_create(struct mgr_object **mgr_obj, | ||
56 | struct cfg_devnode *dev_node_obj) | ||
57 | { | ||
58 | int status = 0; | ||
59 | struct mgr_object *pmgr_obj = NULL; | ||
60 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
61 | |||
62 | pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL); | ||
63 | if (pmgr_obj) { | ||
64 | status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->dcd_mgr); | ||
65 | if (!status) { | ||
66 | /* If succeeded store the handle in the MGR Object */ | ||
67 | if (drv_datap) { | ||
68 | drv_datap->mgr_object = (void *)pmgr_obj; | ||
69 | } else { | ||
70 | status = -EPERM; | ||
71 | pr_err("%s: Failed to store MGR object\n", | ||
72 | __func__); | ||
73 | } | ||
74 | |||
75 | if (!status) { | ||
76 | *mgr_obj = pmgr_obj; | ||
77 | } else { | ||
78 | dcd_destroy_manager(pmgr_obj->dcd_mgr); | ||
79 | kfree(pmgr_obj); | ||
80 | } | ||
81 | } else { | ||
82 | /* failed to Create DCD Manager */ | ||
83 | kfree(pmgr_obj); | ||
84 | } | ||
85 | } else { | ||
86 | status = -ENOMEM; | ||
87 | } | ||
88 | |||
89 | return status; | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * ========= mgr_destroy ========= | ||
94 | * This function is invoked during bridge driver unloading. Frees the MGR object. | ||
95 | */ | ||
96 | int mgr_destroy(struct mgr_object *hmgr_obj) | ||
97 | { | ||
98 | int status = 0; | ||
99 | struct mgr_object *pmgr_obj = (struct mgr_object *)hmgr_obj; | ||
100 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
101 | |||
102 | /* Free resources */ | ||
103 | if (hmgr_obj->dcd_mgr) | ||
104 | dcd_destroy_manager(hmgr_obj->dcd_mgr); | ||
105 | |||
106 | kfree(pmgr_obj); | ||
107 | /* Update the driver data with NULL for MGR Object */ | ||
108 | if (drv_datap) { | ||
109 | drv_datap->mgr_object = NULL; | ||
110 | } else { | ||
111 | status = -EPERM; | ||
112 | pr_err("%s: Failed to store MGR object\n", __func__); | ||
113 | } | ||
114 | |||
115 | return status; | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * ======== mgr_enum_node_info ======== | ||
120 | * Enumerate and get configuration information about nodes configured | ||
121 | * in the node database. | ||
122 | */ | ||
123 | int mgr_enum_node_info(u32 node_id, struct dsp_ndbprops *pndb_props, | ||
124 | u32 undb_props_size, u32 *pu_num_nodes) | ||
125 | { | ||
126 | int status = 0; | ||
127 | struct dsp_uuid node_uuid; | ||
128 | u32 node_index = 0; | ||
129 | struct dcd_genericobj gen_obj; | ||
130 | struct mgr_object *pmgr_obj = NULL; | ||
131 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
132 | |||
133 | *pu_num_nodes = 0; | ||
134 | /* Get the Manager Object from the driver data */ | ||
135 | if (!drv_datap || !drv_datap->mgr_object) { | ||
136 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | ||
137 | return -ENODATA; | ||
138 | } | ||
139 | pmgr_obj = drv_datap->mgr_object; | ||
140 | |||
141 | /* Loop until the enumeration fails or runs out of items; any | ||
142 | * non-zero status exits the loop. */ | ||
143 | while (!status) { | ||
144 | status = dcd_enumerate_object(node_index++, DSP_DCDNODETYPE, | ||
145 | &node_uuid); | ||
146 | if (status) | ||
147 | break; | ||
148 | *pu_num_nodes = node_index; | ||
149 | if (node_id == (node_index - 1)) { | ||
150 | status = dcd_get_object_def(pmgr_obj->dcd_mgr, | ||
151 | &node_uuid, DSP_DCDNODETYPE, &gen_obj); | ||
152 | if (status) | ||
153 | break; | ||
154 | /* Get the Obj def */ | ||
155 | *pndb_props = gen_obj.obj_data.node_obj.ndb_props; | ||
156 | } | ||
157 | } | ||
158 | |||
159 | /* the final status is non-zero here, but that is not an error */ | ||
160 | if (status > 0) | ||
161 | status = 0; | ||
162 | |||
163 | return status; | ||
164 | } | ||
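A caller-side sketch (hypothetical, not taken from the removed sources) of how this enumeration interface is meant to be walked: each call fills pndb_props for the requested node_id, and pu_num_nodes reports how many nodes the enumeration found.

	static void list_all_nodes(void)
	{
		struct dsp_ndbprops props;
		u32 num_nodes = 0;
		u32 id = 0;

		do {
			if (mgr_enum_node_info(id, &props, sizeof(props), &num_nodes))
				break;
			/* props describes node 'id' when id < num_nodes */
		} while (++id < num_nodes);
	}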
165 | |||
166 | /* | ||
167 | * ======== mgr_enum_processor_info ======== | ||
168 | * Enumerate and get configuration information about available | ||
169 | * DSP processors. | ||
170 | */ | ||
171 | int mgr_enum_processor_info(u32 processor_id, | ||
172 | struct dsp_processorinfo * | ||
173 | processor_info, u32 processor_info_size, | ||
174 | u8 *pu_num_procs) | ||
175 | { | ||
176 | int status = 0; | ||
177 | int status1 = 0; | ||
178 | int status2 = 0; | ||
179 | struct dsp_uuid temp_uuid; | ||
180 | u32 temp_index = 0; | ||
181 | u32 proc_index = 0; | ||
182 | struct dcd_genericobj gen_obj; | ||
183 | struct mgr_object *pmgr_obj = NULL; | ||
184 | struct mgr_processorextinfo *ext_info; | ||
185 | struct dev_object *hdev_obj; | ||
186 | struct drv_object *hdrv_obj; | ||
187 | u8 dev_type; | ||
188 | struct cfg_devnode *dev_node; | ||
189 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
190 | bool proc_detect = false; | ||
191 | |||
192 | *pu_num_procs = 0; | ||
193 | |||
194 | /* Retrieve the Object handle from the driver data */ | ||
195 | if (!drv_datap || !drv_datap->drv_object) { | ||
196 | status = -ENODATA; | ||
197 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | ||
198 | } else { | ||
199 | hdrv_obj = drv_datap->drv_object; | ||
200 | } | ||
201 | |||
202 | if (!status) { | ||
203 | status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj); | ||
204 | if (!status) { | ||
205 | status = dev_get_dev_type(hdev_obj, (u8 *) &dev_type); | ||
206 | status = dev_get_dev_node(hdev_obj, &dev_node); | ||
207 | if (dev_type != DSP_UNIT) | ||
208 | status = -EPERM; | ||
209 | |||
210 | if (!status) | ||
211 | processor_info->processor_type = DSPTYPE64; | ||
212 | } | ||
213 | } | ||
214 | if (status) | ||
215 | goto func_end; | ||
216 | |||
217 | /* Get The Manager Object from the driver data */ | ||
218 | if (drv_datap && drv_datap->mgr_object) { | ||
219 | pmgr_obj = drv_datap->mgr_object; | ||
220 | } else { | ||
221 | dev_dbg(bridge, "%s: Failed to get MGR Object\n", __func__); | ||
222 | goto func_end; | ||
223 | } | ||
224 | /* Loop until the enumeration runs out of items; any non-zero | ||
225 | * status exits the loop. */ | ||
226 | while (status1 == 0) { | ||
227 | status1 = dcd_enumerate_object(temp_index++, | ||
228 | DSP_DCDPROCESSORTYPE, | ||
229 | &temp_uuid); | ||
230 | if (status1 != 0) | ||
231 | break; | ||
232 | |||
233 | proc_index++; | ||
234 | /* Get the Object properties to find the Device/Processor | ||
235 | * Type */ | ||
236 | if (proc_detect != false) | ||
237 | continue; | ||
238 | |||
239 | status2 = dcd_get_object_def(pmgr_obj->dcd_mgr, | ||
240 | (struct dsp_uuid *)&temp_uuid, | ||
241 | DSP_DCDPROCESSORTYPE, &gen_obj); | ||
242 | if (!status2) { | ||
243 | /* Get the Obj def */ | ||
244 | if (processor_info_size < | ||
245 | sizeof(struct mgr_processorextinfo)) { | ||
246 | *processor_info = gen_obj.obj_data.proc_info; | ||
247 | } else { | ||
248 | /* extended info */ | ||
249 | ext_info = (struct mgr_processorextinfo *) | ||
250 | processor_info; | ||
251 | *ext_info = gen_obj.obj_data.ext_proc_obj; | ||
252 | } | ||
253 | dev_dbg(bridge, "%s: Got proctype from DCD %x\n", | ||
254 | __func__, processor_info->processor_type); | ||
255 | /* See if we got the needed processor */ | ||
256 | if (dev_type == DSP_UNIT) { | ||
257 | if (processor_info->processor_type == | ||
258 | DSPPROCTYPE_C64) | ||
259 | proc_detect = true; | ||
260 | } else if (dev_type == IVA_UNIT) { | ||
261 | if (processor_info->processor_type == | ||
262 | IVAPROCTYPE_ARM7) | ||
263 | proc_detect = true; | ||
264 | } | ||
265 | /* User applications only check for chip type, so | ||
266 | * this is a clumsy overwrite */ | ||
267 | processor_info->processor_type = DSPTYPE64; | ||
268 | } else { | ||
269 | dev_dbg(bridge, "%s: Failed to get DCD processor info %x\n", | ||
270 | __func__, status2); | ||
271 | status = -EPERM; | ||
272 | } | ||
273 | } | ||
274 | *pu_num_procs = proc_index; | ||
275 | if (proc_detect == false) { | ||
276 | dev_dbg(bridge, "%s: Failed to get proc info from DCD, so use CFG registry\n", | ||
277 | __func__); | ||
278 | processor_info->processor_type = DSPTYPE64; | ||
279 | } | ||
280 | func_end: | ||
281 | return status; | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * ======== mgr_exit ======== | ||
286 | * Decrement reference count, and free resources when reference count is | ||
287 | * 0. | ||
288 | */ | ||
289 | void mgr_exit(void) | ||
290 | { | ||
291 | refs--; | ||
292 | if (refs == 0) | ||
293 | dcd_exit(); | ||
294 | } | ||
295 | |||
296 | /* | ||
297 | * ======== mgr_get_dcd_handle ======== | ||
298 | * Retrieves the DCD manager handle from the MGR object. Accessor function. | ||
299 | */ | ||
300 | int mgr_get_dcd_handle(struct mgr_object *mgr_handle, | ||
301 | u32 *dcd_handle) | ||
302 | { | ||
303 | int status = -EPERM; | ||
304 | struct mgr_object *pmgr_obj = (struct mgr_object *)mgr_handle; | ||
305 | |||
306 | *dcd_handle = (u32) NULL; | ||
307 | if (pmgr_obj) { | ||
308 | *dcd_handle = (u32) pmgr_obj->dcd_mgr; | ||
309 | status = 0; | ||
310 | } | ||
311 | |||
312 | return status; | ||
313 | } | ||
314 | |||
315 | /* | ||
316 | * ======== mgr_init ======== | ||
317 | * Initialize MGR's private state, keeping a reference count on each call. | ||
318 | */ | ||
319 | bool mgr_init(void) | ||
320 | { | ||
321 | bool ret = true; | ||
322 | |||
323 | if (refs == 0) | ||
324 | ret = dcd_init(); /* DCD Module */ | ||
325 | |||
326 | if (ret) | ||
327 | refs++; | ||
328 | |||
329 | return ret; | ||
330 | } | ||
331 | |||
332 | /* | ||
333 | * ======== mgr_wait_for_bridge_events ======== | ||
334 | * Block on any Bridge event(s) | ||
335 | */ | ||
336 | int mgr_wait_for_bridge_events(struct dsp_notification **anotifications, | ||
337 | u32 count, u32 *pu_index, | ||
338 | u32 utimeout) | ||
339 | { | ||
340 | int status; | ||
341 | struct sync_object *sync_events[MAX_EVENTS]; | ||
342 | u32 i; | ||
343 | |||
344 | for (i = 0; i < count; i++) | ||
345 | sync_events[i] = anotifications[i]->handle; | ||
346 | |||
347 | status = sync_wait_on_multiple_events(sync_events, count, utimeout, | ||
348 | pu_index); | ||
349 | |||
350 | return status; | ||
351 | |||
352 | } | ||
diff --git a/drivers/staging/tidspbridge/rmgr/nldr.c b/drivers/staging/tidspbridge/rmgr/nldr.c deleted file mode 100644 index 900585ab059a..000000000000 --- a/drivers/staging/tidspbridge/rmgr/nldr.c +++ /dev/null | |||
@@ -1,1861 +0,0 @@ | |||
1 | /* | ||
2 | * nldr.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge dynamic + overlay Node loader. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | |||
21 | #include <dspbridge/host_os.h> | ||
22 | |||
23 | #include <dspbridge/dbdefs.h> | ||
24 | |||
25 | /* Platform manager */ | ||
26 | #include <dspbridge/cod.h> | ||
27 | #include <dspbridge/dev.h> | ||
28 | |||
29 | /* Resource manager */ | ||
30 | #include <dspbridge/dbll.h> | ||
31 | #include <dspbridge/dbdcd.h> | ||
32 | #include <dspbridge/rmm.h> | ||
33 | #include <dspbridge/uuidutil.h> | ||
34 | |||
35 | #include <dspbridge/nldr.h> | ||
36 | #include <linux/lcm.h> | ||
37 | |||
38 | /* Name of section containing dynamic load mem */ | ||
39 | #define DYNMEMSECT ".dspbridge_mem" | ||
40 | |||
41 | /* Name of section containing dependent library information */ | ||
42 | #define DEPLIBSECT ".dspbridge_deplibs" | ||
43 | |||
44 | /* Max depth of recursion for loading node's dependent libraries */ | ||
45 | #define MAXDEPTH 5 | ||
46 | |||
47 | /* Max number of persistent libraries kept by a node */ | ||
48 | #define MAXLIBS 5 | ||
49 | |||
50 | /* | ||
51 | * Defines for extracting packed dynamic load memory requirements from two | ||
52 | * masks. | ||
53 | * These defines must match node.cdb and dynm.cdb | ||
54 | * Format of data/code mask is: | ||
55 | * uuuuuuuu|fueeeeee|fudddddd|fucccccc| | ||
56 | * where | ||
57 | * u = unused | ||
58 | * cccccc = preferred/required dynamic mem segid for create phase data/code | ||
59 | * dddddd = preferred/required dynamic mem segid for delete phase data/code | ||
60 | * eeeeee = preferred/req. dynamic mem segid for execute phase data/code | ||
61 | * f = flag indicating if memory is preferred or required: | ||
62 | * f = 1 if required, f = 0 if preferred. | ||
63 | * | ||
64 | * The 6 bits of the segid are interpreted as follows: | ||
65 | * | ||
66 | * If the 6th bit (bit 5) is not set, then this specifies a memory segment | ||
67 | * between 0 and 31 (a maximum of 32 dynamic loading memory segments). | ||
68 | * If the 6th bit (bit 5) is set, segid has the following interpretation: | ||
69 | * segid = 32 - Any internal memory segment can be used. | ||
70 | * segid = 33 - Any external memory segment can be used. | ||
71 | * segid = 63 - Any memory segment can be used (in this case the | ||
72 | * required/preferred flag is irrelevant). | ||
73 | * | ||
74 | */ | ||
75 | /* Maximum allowed dynamic loading memory segments */ | ||
76 | #define MAXMEMSEGS 32 | ||
77 | |||
78 | #define MAXSEGID 3 /* Largest possible (real) segid */ | ||
79 | #define MEMINTERNALID 32 /* Segid meaning use internal mem */ | ||
80 | #define MEMEXTERNALID 33 /* Segid meaning use external mem */ | ||
81 | #define NULLID 63 /* Segid meaning no memory req/pref */ | ||
82 | #define FLAGBIT 7 /* 7th bit is pref./req. flag */ | ||
83 | #define SEGMASK 0x3f /* Bits 0 - 5 */ | ||
84 | |||
85 | #define CREATEBIT 0 /* Create segid starts at bit 0 */ | ||
86 | #define DELETEBIT 8 /* Delete segid starts at bit 8 */ | ||
87 | #define EXECUTEBIT 16 /* Execute segid starts at bit 16 */ | ||
88 | |||
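As a minimal sketch of the packing described above (the mask value and function name are made up; the defines are the ones directly above), extracting the create-phase data segid and its required/preferred flag looks like this. It mirrors the per-phase extraction done in nldr_allocate() further down.

	/* Sketch only: decode one phase's data segid and required/preferred
	 * flag from a packed mask. */
	static void decode_create_data_seg(u32 data_mem_seg_mask)
	{
		/* bits 0-5 of the create-phase byte give the segid */
		u16 segid = (data_mem_seg_mask >> CREATEBIT) & SEGMASK;
		/* bit 7 of that byte says required (1) vs. preferred (0) */
		bool required = (data_mem_seg_mask >> (CREATEBIT + FLAGBIT)) & 1;

		/* segids 32/33/63 are the special "any internal", "any
		 * external" and "no requirement" ids */
		pr_debug("create data: segid %u, %s\n", segid,
			 required ? "required" : "preferred");
	}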
89 | /* | ||
90 | * Masks that define memory type. Must match defines in dynm.cdb. | ||
91 | */ | ||
92 | #define DYNM_CODE 0x2 | ||
93 | #define DYNM_DATA 0x4 | ||
94 | #define DYNM_CODEDATA (DYNM_CODE | DYNM_DATA) | ||
95 | #define DYNM_INTERNAL 0x8 | ||
96 | #define DYNM_EXTERNAL 0x10 | ||
97 | |||
98 | /* | ||
99 | * Defines for packing memory requirement/preference flags for code and | ||
100 | * data of each of the node's phases into one mask. | ||
101 | * The bit is set if the segid is required for loading code/data of the | ||
102 | * given phase. The bit is not set, if the segid is preferred only. | ||
103 | * | ||
104 | * These defines are also used as indices into a segid array for the node, | ||
105 | * e.g. a node's segid[CREATEDATAFLAGBIT] is the memory segment id that the | ||
106 | * create phase data is required or preferred to be loaded into. | ||
107 | */ | ||
108 | #define CREATEDATAFLAGBIT 0 | ||
109 | #define CREATECODEFLAGBIT 1 | ||
110 | #define EXECUTEDATAFLAGBIT 2 | ||
111 | #define EXECUTECODEFLAGBIT 3 | ||
112 | #define DELETEDATAFLAGBIT 4 | ||
113 | #define DELETECODEFLAGBIT 5 | ||
114 | #define MAXFLAGS 6 | ||
115 | |||
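A companion sketch (again hypothetical, not code from the driver) of how these flag-bit defines serve double duty, as a bit position in code_data_flag_mask and as an index into the node's seg_id[] array; nldr_allocate() below fills the fields in the same way.

	static void record_execute_data_seg(struct nldr_nodeobject *node,
					    u16 segid, bool required)
	{
		/* where execute-phase data should be loaded */
		node->seg_id[EXECUTEDATAFLAGBIT] = segid;
		/* remember whether that segment is required or only preferred */
		if (required)
			node->code_data_flag_mask |= 1 << EXECUTEDATAFLAGBIT;
	}

	/* later, at load time:
	 * if (node->code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) the loader
	 * must use seg_id[EXECUTEDATAFLAGBIT]; otherwise it is a preference. */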
116 | /* | ||
117 | * These names may be embedded in overlay section names to identify the | ||
118 | * node phase during which the section should be overlaid. | ||
119 | */ | ||
120 | #define PCREATE "create" | ||
121 | #define PDELETE "delete" | ||
122 | #define PEXECUTE "execute" | ||
123 | |||
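Concretely (the section name here is made up for illustration): an overlay section named something like ".mynode:create", whose load address differs from its run address, is matched by add_ovly_info() below against the overlay node "mynode", and the "create" suffix places it on that node's create-phase section list; sections without a recognized phase suffix land on the node's "other" list.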
124 | static inline bool is_equal_uuid(struct dsp_uuid *uuid1, | ||
125 | struct dsp_uuid *uuid2) | ||
126 | { | ||
127 | return !memcmp(uuid1, uuid2, sizeof(struct dsp_uuid)); | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * ======== mem_seg_info ======== | ||
132 | * Format of dynamic loading memory segment info in coff file. | ||
133 | * Must match dynm.h55. | ||
134 | */ | ||
135 | struct mem_seg_info { | ||
136 | u32 segid; /* Dynamic loading memory segment number */ | ||
137 | u32 base; | ||
138 | u32 len; | ||
139 | u32 type; /* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */ | ||
140 | }; | ||
141 | |||
142 | /* | ||
143 | * ======== lib_node ======== | ||
144 | * For maintaining a tree of library dependencies. | ||
145 | */ | ||
146 | struct lib_node { | ||
147 | struct dbll_library_obj *lib; /* The library */ | ||
148 | u16 dep_libs; /* Number of dependent libraries */ | ||
149 | struct lib_node *dep_libs_tree; /* Dependent libraries of lib */ | ||
150 | }; | ||
151 | |||
152 | /* | ||
153 | * ======== ovly_sect ======== | ||
154 | * Information needed to overlay a section. | ||
155 | */ | ||
156 | struct ovly_sect { | ||
157 | struct ovly_sect *next_sect; | ||
158 | u32 sect_load_addr; /* Load address of section */ | ||
159 | u32 sect_run_addr; /* Run address of section */ | ||
160 | u32 size; /* Size of section */ | ||
161 | u16 page; /* DBL_CODE, DBL_DATA */ | ||
162 | }; | ||
163 | |||
164 | /* | ||
165 | * ======== ovly_node ======== | ||
166 | * For maintaining a list of overlay nodes, with sections that need to be | ||
167 | * overlaid for each of the node's phases. | ||
168 | */ | ||
169 | struct ovly_node { | ||
170 | struct dsp_uuid uuid; | ||
171 | char *node_name; | ||
172 | struct ovly_sect *create_sects_list; | ||
173 | struct ovly_sect *delete_sects_list; | ||
174 | struct ovly_sect *execute_sects_list; | ||
175 | struct ovly_sect *other_sects_list; | ||
176 | u16 create_sects; | ||
177 | u16 delete_sects; | ||
178 | u16 execute_sects; | ||
179 | u16 other_sects; | ||
180 | u16 create_ref; | ||
181 | u16 delete_ref; | ||
182 | u16 execute_ref; | ||
183 | u16 other_ref; | ||
184 | }; | ||
185 | |||
186 | /* | ||
187 | * ======== nldr_object ======== | ||
188 | * Overlay loader object. | ||
189 | */ | ||
190 | struct nldr_object { | ||
191 | struct dev_object *dev_obj; /* Device object */ | ||
192 | struct dcd_manager *dcd_mgr; /* Proc/Node data manager */ | ||
193 | struct dbll_tar_obj *dbll; /* The DBL loader */ | ||
194 | struct dbll_library_obj *base_lib; /* Base image library */ | ||
195 | struct rmm_target_obj *rmm; /* Remote memory manager for DSP */ | ||
196 | struct dbll_fxns ldr_fxns; /* Loader function table */ | ||
197 | struct dbll_attrs ldr_attrs; /* attrs to pass to loader functions */ | ||
198 | nldr_ovlyfxn ovly_fxn; /* "write" for overlay nodes */ | ||
199 | nldr_writefxn write_fxn; /* "write" for dynamic nodes */ | ||
200 | struct ovly_node *ovly_table; /* Table of overlay nodes */ | ||
201 | u16 ovly_nodes; /* Number of overlay nodes in base */ | ||
202 | u16 ovly_nid; /* Index for tracking overlay nodes */ | ||
203 | u16 dload_segs; /* Number of dynamic load mem segs */ | ||
204 | u32 *seg_table; /* memtypes of dynamic memory segs | ||
205 | * indexed by segid | ||
206 | */ | ||
207 | u16 dsp_mau_size; /* Size of DSP MAU */ | ||
208 | u16 dsp_word_size; /* Size of DSP word */ | ||
209 | }; | ||
210 | |||
211 | /* | ||
212 | * ======== nldr_nodeobject ======== | ||
213 | * Dynamic node object. This object is created when a node is allocated. | ||
214 | */ | ||
215 | struct nldr_nodeobject { | ||
216 | struct nldr_object *nldr_obj; /* Dynamic loader handle */ | ||
217 | void *priv_ref; /* Handle to pass to dbl_write_fxn */ | ||
218 | struct dsp_uuid uuid; /* Node's UUID */ | ||
219 | bool dynamic; /* Dynamically loaded node? */ | ||
220 | bool overlay; /* Overlay node? */ | ||
221 | bool *phase_split; /* Multiple phase libraries? */ | ||
222 | struct lib_node root; /* Library containing node phase */ | ||
223 | struct lib_node create_lib; /* Library with create phase lib */ | ||
224 | struct lib_node execute_lib; /* Library with execute phase lib */ | ||
225 | struct lib_node delete_lib; /* Library with delete phase lib */ | ||
226 | /* libs remain loaded until Delete */ | ||
227 | struct lib_node pers_lib_table[MAXLIBS]; | ||
228 | s32 pers_libs; /* Number of persistent libraries */ | ||
229 | /* Path in lib dependency tree */ | ||
230 | struct dbll_library_obj *lib_path[MAXDEPTH + 1]; | ||
231 | enum nldr_phase phase; /* Node phase currently being loaded */ | ||
232 | |||
233 | /* | ||
234 | * Dynamic loading memory segments for data and code of each phase. | ||
235 | */ | ||
236 | u16 seg_id[MAXFLAGS]; | ||
237 | |||
238 | /* | ||
239 | * Mask indicating whether each mem segment specified in seg_id[] | ||
240 | * is preferred or required. | ||
241 | * For example | ||
242 | * if (code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) != 0, | ||
243 | * then it is required to load execute phase data into the memory | ||
244 | * specified by seg_id[EXECUTEDATAFLAGBIT]. | ||
245 | */ | ||
246 | u32 code_data_flag_mask; | ||
247 | }; | ||
248 | |||
249 | /* Dynamic loader function table */ | ||
250 | static struct dbll_fxns ldr_fxns = { | ||
251 | (dbll_close_fxn) dbll_close, | ||
252 | (dbll_create_fxn) dbll_create, | ||
253 | (dbll_delete_fxn) dbll_delete, | ||
254 | (dbll_exit_fxn) dbll_exit, | ||
255 | (dbll_get_attrs_fxn) dbll_get_attrs, | ||
256 | (dbll_get_addr_fxn) dbll_get_addr, | ||
257 | (dbll_get_c_addr_fxn) dbll_get_c_addr, | ||
258 | (dbll_get_sect_fxn) dbll_get_sect, | ||
259 | (dbll_init_fxn) dbll_init, | ||
260 | (dbll_load_fxn) dbll_load, | ||
261 | (dbll_open_fxn) dbll_open, | ||
262 | (dbll_read_sect_fxn) dbll_read_sect, | ||
263 | (dbll_unload_fxn) dbll_unload, | ||
264 | }; | ||
265 | |||
266 | static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info, | ||
267 | u32 addr, u32 bytes); | ||
268 | static int add_ovly_node(struct dsp_uuid *uuid_obj, | ||
269 | enum dsp_dcdobjtype obj_type, void *handle); | ||
270 | static int add_ovly_sect(struct nldr_object *nldr_obj, | ||
271 | struct ovly_sect **lst, | ||
272 | struct dbll_sect_info *sect_inf, | ||
273 | bool *exists, u32 addr, u32 bytes); | ||
274 | static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes, | ||
275 | s32 mtype); | ||
276 | static void free_sects(struct nldr_object *nldr_obj, | ||
277 | struct ovly_sect *phase_sects, u16 alloc_num); | ||
278 | static bool get_symbol_value(void *handle, void *parg, void *rmm_handle, | ||
279 | char *sym_name, struct dbll_sym_val **sym); | ||
280 | static int load_lib(struct nldr_nodeobject *nldr_node_obj, | ||
281 | struct lib_node *root, struct dsp_uuid uuid, | ||
282 | bool root_prstnt, | ||
283 | struct dbll_library_obj **lib_path, | ||
284 | enum nldr_phase phase, u16 depth); | ||
285 | static int load_ovly(struct nldr_nodeobject *nldr_node_obj, | ||
286 | enum nldr_phase phase); | ||
287 | static int remote_alloc(void **ref, u16 mem_sect, u32 size, | ||
288 | u32 align, u32 *dsp_address, | ||
289 | s32 segmnt_id, | ||
290 | s32 req, bool reserve); | ||
291 | static int remote_free(void **ref, u16 space, u32 dsp_address, u32 size, | ||
292 | bool reserve); | ||
293 | |||
294 | static void unload_lib(struct nldr_nodeobject *nldr_node_obj, | ||
295 | struct lib_node *root); | ||
296 | static void unload_ovly(struct nldr_nodeobject *nldr_node_obj, | ||
297 | enum nldr_phase phase); | ||
298 | static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj, | ||
299 | struct dbll_library_obj *lib); | ||
300 | |||
301 | /* | ||
302 | * ======== nldr_allocate ======== | ||
303 | */ | ||
304 | int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref, | ||
305 | const struct dcd_nodeprops *node_props, | ||
306 | struct nldr_nodeobject **nldr_nodeobj, | ||
307 | bool *pf_phase_split) | ||
308 | { | ||
309 | struct nldr_nodeobject *nldr_node_obj = NULL; | ||
310 | int status = 0; | ||
311 | |||
312 | /* Initialize handle in case of failure */ | ||
313 | *nldr_nodeobj = NULL; | ||
314 | /* Allocate node object */ | ||
315 | nldr_node_obj = kzalloc(sizeof(struct nldr_nodeobject), GFP_KERNEL); | ||
316 | |||
317 | if (nldr_node_obj == NULL) { | ||
318 | status = -ENOMEM; | ||
319 | } else { | ||
320 | nldr_node_obj->phase_split = pf_phase_split; | ||
321 | nldr_node_obj->pers_libs = 0; | ||
322 | nldr_node_obj->nldr_obj = nldr_obj; | ||
323 | nldr_node_obj->priv_ref = priv_ref; | ||
324 | /* Save node's UUID. */ | ||
325 | nldr_node_obj->uuid = node_props->ndb_props.ui_node_id; | ||
326 | /* | ||
327 | * Determine if node is a dynamically loaded node from | ||
328 | * ndb_props. | ||
329 | */ | ||
330 | if (node_props->load_type == NLDR_DYNAMICLOAD) { | ||
331 | /* Dynamic node */ | ||
332 | nldr_node_obj->dynamic = true; | ||
333 | /* | ||
334 | * Extract memory requirements from ndb_props masks | ||
335 | */ | ||
336 | /* Create phase */ | ||
337 | nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16) | ||
338 | (node_props->data_mem_seg_mask >> CREATEBIT) & | ||
339 | SEGMASK; | ||
340 | nldr_node_obj->code_data_flag_mask |= | ||
341 | ((node_props->data_mem_seg_mask >> | ||
342 | (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT; | ||
343 | nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16) | ||
344 | (node_props->code_mem_seg_mask >> | ||
345 | CREATEBIT) & SEGMASK; | ||
346 | nldr_node_obj->code_data_flag_mask |= | ||
347 | ((node_props->code_mem_seg_mask >> | ||
348 | (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT; | ||
349 | /* Execute phase */ | ||
350 | nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16) | ||
351 | (node_props->data_mem_seg_mask >> | ||
352 | EXECUTEBIT) & SEGMASK; | ||
353 | nldr_node_obj->code_data_flag_mask |= | ||
354 | ((node_props->data_mem_seg_mask >> | ||
355 | (EXECUTEBIT + FLAGBIT)) & 1) << | ||
356 | EXECUTEDATAFLAGBIT; | ||
357 | nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16) | ||
358 | (node_props->code_mem_seg_mask >> | ||
359 | EXECUTEBIT) & SEGMASK; | ||
360 | nldr_node_obj->code_data_flag_mask |= | ||
361 | ((node_props->code_mem_seg_mask >> | ||
362 | (EXECUTEBIT + FLAGBIT)) & 1) << | ||
363 | EXECUTECODEFLAGBIT; | ||
364 | /* Delete phase */ | ||
365 | nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16) | ||
366 | (node_props->data_mem_seg_mask >> DELETEBIT) & | ||
367 | SEGMASK; | ||
368 | nldr_node_obj->code_data_flag_mask |= | ||
369 | ((node_props->data_mem_seg_mask >> | ||
370 | (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT; | ||
371 | nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16) | ||
372 | (node_props->code_mem_seg_mask >> | ||
373 | DELETEBIT) & SEGMASK; | ||
374 | nldr_node_obj->code_data_flag_mask |= | ||
375 | ((node_props->code_mem_seg_mask >> | ||
376 | (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT; | ||
377 | } else { | ||
378 | /* Non-dynamically loaded nodes are part of the | ||
379 | * base image */ | ||
380 | nldr_node_obj->root.lib = nldr_obj->base_lib; | ||
381 | /* Check for overlay node */ | ||
382 | if (node_props->load_type == NLDR_OVLYLOAD) | ||
383 | nldr_node_obj->overlay = true; | ||
384 | |||
385 | } | ||
386 | *nldr_nodeobj = (struct nldr_nodeobject *)nldr_node_obj; | ||
387 | } | ||
388 | /* Cleanup on failure */ | ||
389 | if (status && nldr_node_obj) | ||
390 | kfree(nldr_node_obj); | ||
391 | |||
392 | return status; | ||
393 | } | ||
394 | |||
395 | /* | ||
396 | * ======== nldr_create ======== | ||
397 | */ | ||
398 | int nldr_create(struct nldr_object **nldr, | ||
399 | struct dev_object *hdev_obj, | ||
400 | const struct nldr_attrs *pattrs) | ||
401 | { | ||
402 | struct cod_manager *cod_mgr; /* COD manager */ | ||
403 | char *psz_coff_buf = NULL; | ||
404 | char sz_zl_file[COD_MAXPATHLENGTH]; | ||
405 | struct nldr_object *nldr_obj = NULL; | ||
406 | struct dbll_attrs save_attrs; | ||
407 | struct dbll_attrs new_attrs; | ||
408 | dbll_flags flags; | ||
409 | u32 ul_entry; | ||
410 | u16 dload_segs = 0; | ||
411 | struct mem_seg_info *mem_info_obj; | ||
412 | u32 ul_len = 0; | ||
413 | u32 ul_addr; | ||
414 | struct rmm_segment *rmm_segs = NULL; | ||
415 | u16 i; | ||
416 | int status = 0; | ||
417 | |||
418 | /* Allocate dynamic loader object */ | ||
419 | nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL); | ||
420 | if (nldr_obj) { | ||
421 | nldr_obj->dev_obj = hdev_obj; | ||
422 | /* warning, lazy status checking alert! */ | ||
423 | dev_get_cod_mgr(hdev_obj, &cod_mgr); | ||
424 | if (cod_mgr) { | ||
425 | status = cod_get_loader(cod_mgr, &nldr_obj->dbll); | ||
426 | status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib); | ||
427 | status = | ||
428 | cod_get_base_name(cod_mgr, sz_zl_file, | ||
429 | COD_MAXPATHLENGTH); | ||
430 | } | ||
431 | status = 0; | ||
432 | /* end lazy status checking */ | ||
433 | nldr_obj->dsp_mau_size = pattrs->dsp_mau_size; | ||
434 | nldr_obj->dsp_word_size = pattrs->dsp_word_size; | ||
435 | nldr_obj->ldr_fxns = ldr_fxns; | ||
436 | if (!(nldr_obj->ldr_fxns.init_fxn())) | ||
437 | status = -ENOMEM; | ||
438 | |||
439 | } else { | ||
440 | status = -ENOMEM; | ||
441 | } | ||
442 | /* Create the DCD Manager */ | ||
443 | if (!status) | ||
444 | status = dcd_create_manager(NULL, &nldr_obj->dcd_mgr); | ||
445 | |||
446 | /* Get dynamic loading memory sections from base lib */ | ||
447 | if (!status) { | ||
448 | status = | ||
449 | nldr_obj->ldr_fxns.get_sect_fxn(nldr_obj->base_lib, | ||
450 | DYNMEMSECT, &ul_addr, | ||
451 | &ul_len); | ||
452 | if (!status) { | ||
453 | psz_coff_buf = | ||
454 | kzalloc(ul_len * nldr_obj->dsp_mau_size, | ||
455 | GFP_KERNEL); | ||
456 | if (!psz_coff_buf) | ||
457 | status = -ENOMEM; | ||
458 | } else { | ||
459 | /* Ok to not have dynamic loading memory */ | ||
460 | status = 0; | ||
461 | ul_len = 0; | ||
462 | dev_dbg(bridge, "%s: failed - no dynamic loading mem " | ||
463 | "segments: 0x%x\n", __func__, status); | ||
464 | } | ||
465 | } | ||
466 | if (!status && ul_len > 0) { | ||
467 | /* Read section containing dynamic load mem segments */ | ||
468 | status = | ||
469 | nldr_obj->ldr_fxns.read_sect_fxn(nldr_obj->base_lib, | ||
470 | DYNMEMSECT, psz_coff_buf, | ||
471 | ul_len); | ||
472 | } | ||
473 | if (!status && ul_len > 0) { | ||
474 | /* Parse memory segment data */ | ||
475 | dload_segs = (u16) (*((u32 *) psz_coff_buf)); | ||
476 | if (dload_segs > MAXMEMSEGS) | ||
477 | status = -EBADF; | ||
478 | } | ||
479 | /* Parse dynamic load memory segments */ | ||
480 | if (!status && dload_segs > 0) { | ||
481 | rmm_segs = kzalloc(sizeof(struct rmm_segment) * dload_segs, | ||
482 | GFP_KERNEL); | ||
483 | nldr_obj->seg_table = | ||
484 | kzalloc(sizeof(u32) * dload_segs, GFP_KERNEL); | ||
485 | if (rmm_segs == NULL || nldr_obj->seg_table == NULL) { | ||
486 | status = -ENOMEM; | ||
487 | } else { | ||
488 | nldr_obj->dload_segs = dload_segs; | ||
489 | mem_info_obj = (struct mem_seg_info *)(psz_coff_buf + | ||
490 | sizeof(u32)); | ||
491 | for (i = 0; i < dload_segs; i++) { | ||
492 | rmm_segs[i].base = (mem_info_obj + i)->base; | ||
493 | rmm_segs[i].length = (mem_info_obj + i)->len; | ||
494 | rmm_segs[i].space = 0; | ||
495 | nldr_obj->seg_table[i] = | ||
496 | (mem_info_obj + i)->type; | ||
497 | dev_dbg(bridge, | ||
498 | "(proc) DLL MEMSEGMENT: %d, " | ||
499 | "Base: 0x%x, Length: 0x%x\n", i, | ||
500 | rmm_segs[i].base, rmm_segs[i].length); | ||
501 | } | ||
502 | } | ||
503 | } | ||
504 | /* Create Remote memory manager */ | ||
505 | if (!status) | ||
506 | status = rmm_create(&nldr_obj->rmm, rmm_segs, dload_segs); | ||
507 | |||
508 | if (!status) { | ||
509 | /* set the alloc, free, write functions for loader */ | ||
510 | nldr_obj->ldr_fxns.get_attrs_fxn(nldr_obj->dbll, &save_attrs); | ||
511 | new_attrs = save_attrs; | ||
512 | new_attrs.alloc = (dbll_alloc_fxn) remote_alloc; | ||
513 | new_attrs.free = (dbll_free_fxn) remote_free; | ||
514 | new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value; | ||
515 | new_attrs.sym_handle = nldr_obj; | ||
516 | new_attrs.write = (dbll_write_fxn) pattrs->write; | ||
517 | nldr_obj->ovly_fxn = pattrs->ovly; | ||
518 | nldr_obj->write_fxn = pattrs->write; | ||
519 | nldr_obj->ldr_attrs = new_attrs; | ||
520 | } | ||
521 | kfree(rmm_segs); | ||
522 | |||
523 | kfree(psz_coff_buf); | ||
524 | |||
525 | /* Get overlay nodes */ | ||
526 | if (!status) { | ||
527 | status = | ||
528 | cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH); | ||
529 | /* lazy check */ | ||
530 | /* First count number of overlay nodes */ | ||
531 | status = | ||
532 | dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file, | ||
533 | add_ovly_node, (void *)nldr_obj); | ||
534 | /* Now build table of overlay nodes */ | ||
535 | if (!status && nldr_obj->ovly_nodes > 0) { | ||
536 | /* Allocate table for overlay nodes */ | ||
537 | nldr_obj->ovly_table = | ||
538 | kzalloc(sizeof(struct ovly_node) * | ||
539 | nldr_obj->ovly_nodes, GFP_KERNEL); | ||
540 | /* Put overlay nodes in the table */ | ||
541 | nldr_obj->ovly_nid = 0; | ||
542 | status = dcd_get_objects(nldr_obj->dcd_mgr, sz_zl_file, | ||
543 | add_ovly_node, | ||
544 | (void *)nldr_obj); | ||
545 | } | ||
546 | } | ||
547 | /* Do a fake reload of the base image to get overlay section info */ | ||
548 | if (!status && nldr_obj->ovly_nodes > 0) { | ||
549 | save_attrs.write = fake_ovly_write; | ||
550 | save_attrs.log_write = add_ovly_info; | ||
551 | save_attrs.log_write_handle = nldr_obj; | ||
552 | flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB; | ||
553 | status = nldr_obj->ldr_fxns.load_fxn(nldr_obj->base_lib, flags, | ||
554 | &save_attrs, &ul_entry); | ||
555 | } | ||
556 | if (!status) { | ||
557 | *nldr = (struct nldr_object *)nldr_obj; | ||
558 | } else { | ||
559 | if (nldr_obj) | ||
560 | nldr_delete((struct nldr_object *)nldr_obj); | ||
561 | |||
562 | *nldr = NULL; | ||
563 | } | ||
564 | /* FIXME:Temp. Fix. Must be removed */ | ||
565 | return status; | ||
566 | } | ||
567 | |||
568 | /* | ||
569 | * ======== nldr_delete ======== | ||
570 | */ | ||
571 | void nldr_delete(struct nldr_object *nldr_obj) | ||
572 | { | ||
573 | struct ovly_sect *ovly_section; | ||
574 | struct ovly_sect *next; | ||
575 | u16 i; | ||
576 | |||
577 | nldr_obj->ldr_fxns.exit_fxn(); | ||
578 | if (nldr_obj->rmm) | ||
579 | rmm_delete(nldr_obj->rmm); | ||
580 | |||
581 | kfree(nldr_obj->seg_table); | ||
582 | |||
583 | if (nldr_obj->dcd_mgr) | ||
584 | dcd_destroy_manager(nldr_obj->dcd_mgr); | ||
585 | |||
586 | /* Free overlay node information */ | ||
587 | if (nldr_obj->ovly_table) { | ||
588 | for (i = 0; i < nldr_obj->ovly_nodes; i++) { | ||
589 | ovly_section = | ||
590 | nldr_obj->ovly_table[i].create_sects_list; | ||
591 | while (ovly_section) { | ||
592 | next = ovly_section->next_sect; | ||
593 | kfree(ovly_section); | ||
594 | ovly_section = next; | ||
595 | } | ||
596 | ovly_section = | ||
597 | nldr_obj->ovly_table[i].delete_sects_list; | ||
598 | while (ovly_section) { | ||
599 | next = ovly_section->next_sect; | ||
600 | kfree(ovly_section); | ||
601 | ovly_section = next; | ||
602 | } | ||
603 | ovly_section = | ||
604 | nldr_obj->ovly_table[i].execute_sects_list; | ||
605 | while (ovly_section) { | ||
606 | next = ovly_section->next_sect; | ||
607 | kfree(ovly_section); | ||
608 | ovly_section = next; | ||
609 | } | ||
610 | ovly_section = nldr_obj->ovly_table[i].other_sects_list; | ||
611 | while (ovly_section) { | ||
612 | next = ovly_section->next_sect; | ||
613 | kfree(ovly_section); | ||
614 | ovly_section = next; | ||
615 | } | ||
616 | } | ||
617 | kfree(nldr_obj->ovly_table); | ||
618 | } | ||
619 | kfree(nldr_obj); | ||
620 | } | ||
621 | |||
622 | /* | ||
623 | * ======== nldr_get_fxn_addr ======== | ||
624 | */ | ||
625 | int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj, | ||
626 | char *str_fxn, u32 *addr) | ||
627 | { | ||
628 | struct dbll_sym_val *dbll_sym; | ||
629 | struct nldr_object *nldr_obj; | ||
630 | int status = 0; | ||
631 | bool status1 = false; | ||
632 | s32 i = 0; | ||
633 | struct lib_node root = { NULL, 0, NULL }; | ||
634 | |||
635 | nldr_obj = nldr_node_obj->nldr_obj; | ||
636 | /* Called from node_create(), node_delete(), or node_run(). */ | ||
637 | if (nldr_node_obj->dynamic && *nldr_node_obj->phase_split) { | ||
638 | switch (nldr_node_obj->phase) { | ||
639 | case NLDR_CREATE: | ||
640 | root = nldr_node_obj->create_lib; | ||
641 | break; | ||
642 | case NLDR_EXECUTE: | ||
643 | root = nldr_node_obj->execute_lib; | ||
644 | break; | ||
645 | case NLDR_DELETE: | ||
646 | root = nldr_node_obj->delete_lib; | ||
647 | break; | ||
648 | default: | ||
649 | break; | ||
650 | } | ||
651 | } else { | ||
652 | /* for Overlay nodes or non-split Dynamic nodes */ | ||
653 | root = nldr_node_obj->root; | ||
654 | } | ||
655 | status1 = | ||
656 | nldr_obj->ldr_fxns.get_c_addr_fxn(root.lib, str_fxn, &dbll_sym); | ||
657 | if (!status1) | ||
658 | status1 = | ||
659 | nldr_obj->ldr_fxns.get_addr_fxn(root.lib, str_fxn, | ||
660 | &dbll_sym); | ||
661 | |||
662 | /* If symbol not found, check dependent libraries */ | ||
663 | if (!status1) { | ||
664 | for (i = 0; i < root.dep_libs; i++) { | ||
665 | status1 = | ||
666 | nldr_obj->ldr_fxns.get_addr_fxn(root.dep_libs_tree | ||
667 | [i].lib, str_fxn, | ||
668 | &dbll_sym); | ||
669 | if (!status1) { | ||
670 | status1 = | ||
671 | nldr_obj->ldr_fxns. | ||
672 | get_c_addr_fxn(root.dep_libs_tree[i].lib, | ||
673 | str_fxn, &dbll_sym); | ||
674 | } | ||
675 | if (status1) { | ||
676 | /* Symbol found */ | ||
677 | break; | ||
678 | } | ||
679 | } | ||
680 | } | ||
681 | /* Check persistent libraries */ | ||
682 | if (!status1) { | ||
683 | for (i = 0; i < nldr_node_obj->pers_libs; i++) { | ||
684 | status1 = | ||
685 | nldr_obj->ldr_fxns. | ||
686 | get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib, | ||
687 | str_fxn, &dbll_sym); | ||
688 | if (!status1) { | ||
689 | status1 = | ||
690 | nldr_obj->ldr_fxns. | ||
691 | get_c_addr_fxn(nldr_node_obj->pers_lib_table | ||
692 | [i].lib, str_fxn, &dbll_sym); | ||
693 | } | ||
694 | if (status1) { | ||
695 | /* Symbol found */ | ||
696 | break; | ||
697 | } | ||
698 | } | ||
699 | } | ||
700 | |||
701 | if (status1) | ||
702 | *addr = dbll_sym->value; | ||
703 | else | ||
704 | status = -ESPIPE; | ||
705 | |||
706 | return status; | ||
707 | } | ||
708 | |||
709 | /* | ||
710 | * ======== nldr_get_rmm_manager ======== | ||
711 | * Given a NLDR object, retrieve RMM Manager Handle | ||
712 | */ | ||
713 | int nldr_get_rmm_manager(struct nldr_object *nldr, | ||
714 | struct rmm_target_obj **rmm_mgr) | ||
715 | { | ||
716 | int status = 0; | ||
717 | struct nldr_object *nldr_obj = nldr; | ||
718 | |||
719 | if (nldr) { | ||
720 | *rmm_mgr = nldr_obj->rmm; | ||
721 | } else { | ||
722 | *rmm_mgr = NULL; | ||
723 | status = -EFAULT; | ||
724 | } | ||
725 | |||
726 | return status; | ||
727 | } | ||
728 | |||
729 | /* | ||
730 | * ======== nldr_load ======== | ||
731 | */ | ||
732 | int nldr_load(struct nldr_nodeobject *nldr_node_obj, | ||
733 | enum nldr_phase phase) | ||
734 | { | ||
735 | struct nldr_object *nldr_obj; | ||
736 | struct dsp_uuid lib_uuid; | ||
737 | int status = 0; | ||
738 | |||
739 | nldr_obj = nldr_node_obj->nldr_obj; | ||
740 | |||
741 | if (nldr_node_obj->dynamic) { | ||
742 | nldr_node_obj->phase = phase; | ||
743 | |||
744 | lib_uuid = nldr_node_obj->uuid; | ||
745 | |||
746 | /* At this point, we may not know if node is split into | ||
747 | * different libraries. So we'll go ahead and load the | ||
748 | * library, and then save the pointer to the appropriate | ||
749 | * location after we know. */ | ||
750 | |||
751 | status = | ||
752 | load_lib(nldr_node_obj, &nldr_node_obj->root, lib_uuid, | ||
753 | false, nldr_node_obj->lib_path, phase, 0); | ||
754 | |||
755 | if (!status) { | ||
756 | if (*nldr_node_obj->phase_split) { | ||
757 | switch (phase) { | ||
758 | case NLDR_CREATE: | ||
759 | nldr_node_obj->create_lib = | ||
760 | nldr_node_obj->root; | ||
761 | break; | ||
762 | |||
763 | case NLDR_EXECUTE: | ||
764 | nldr_node_obj->execute_lib = | ||
765 | nldr_node_obj->root; | ||
766 | break; | ||
767 | |||
768 | case NLDR_DELETE: | ||
769 | nldr_node_obj->delete_lib = | ||
770 | nldr_node_obj->root; | ||
771 | break; | ||
772 | |||
773 | default: | ||
774 | break; | ||
775 | } | ||
776 | } | ||
777 | } | ||
778 | } else { | ||
779 | if (nldr_node_obj->overlay) | ||
780 | status = load_ovly(nldr_node_obj, phase); | ||
781 | |||
782 | } | ||
783 | |||
784 | return status; | ||
785 | } | ||
786 | |||
787 | /* | ||
788 | * ======== nldr_unload ======== | ||
789 | */ | ||
790 | int nldr_unload(struct nldr_nodeobject *nldr_node_obj, | ||
791 | enum nldr_phase phase) | ||
792 | { | ||
793 | int status = 0; | ||
794 | struct lib_node *root_lib = NULL; | ||
795 | s32 i = 0; | ||
796 | |||
797 | if (nldr_node_obj != NULL) { | ||
798 | if (nldr_node_obj->dynamic) { | ||
799 | if (*nldr_node_obj->phase_split) { | ||
800 | switch (phase) { | ||
801 | case NLDR_CREATE: | ||
802 | root_lib = &nldr_node_obj->create_lib; | ||
803 | break; | ||
804 | case NLDR_EXECUTE: | ||
805 | root_lib = &nldr_node_obj->execute_lib; | ||
806 | break; | ||
807 | case NLDR_DELETE: | ||
808 | root_lib = &nldr_node_obj->delete_lib; | ||
809 | /* Unload persistent libraries */ | ||
810 | for (i = 0; | ||
811 | i < nldr_node_obj->pers_libs; | ||
812 | i++) { | ||
813 | unload_lib(nldr_node_obj, | ||
814 | &nldr_node_obj-> | ||
815 | pers_lib_table[i]); | ||
816 | } | ||
817 | nldr_node_obj->pers_libs = 0; | ||
818 | break; | ||
819 | default: | ||
820 | break; | ||
821 | } | ||
822 | } else { | ||
823 | /* Unload main library */ | ||
824 | root_lib = &nldr_node_obj->root; | ||
825 | } | ||
826 | if (root_lib) | ||
827 | unload_lib(nldr_node_obj, root_lib); | ||
828 | } else { | ||
829 | if (nldr_node_obj->overlay) | ||
830 | unload_ovly(nldr_node_obj, phase); | ||
831 | |||
832 | } | ||
833 | } | ||
834 | return status; | ||
835 | } | ||
836 | |||
837 | /* | ||
838 | * ======== add_ovly_info ======== | ||
839 | */ | ||
840 | static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info, | ||
841 | u32 addr, u32 bytes) | ||
842 | { | ||
843 | char *node_name; | ||
844 | char *sect_name = (char *)sect_info->name; | ||
845 | bool sect_exists = false; | ||
846 | char seps = ':'; | ||
847 | char *pch; | ||
848 | u16 i; | ||
849 | struct nldr_object *nldr_obj = (struct nldr_object *)handle; | ||
850 | int status = 0; | ||
851 | |||
852 | /* Is this an overlay section (load address != run address)? */ | ||
853 | if (sect_info->sect_load_addr == sect_info->sect_run_addr) | ||
854 | goto func_end; | ||
855 | |||
856 | /* Find the node it belongs to */ | ||
857 | for (i = 0; i < nldr_obj->ovly_nodes; i++) { | ||
858 | node_name = nldr_obj->ovly_table[i].node_name; | ||
859 | if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) { | ||
860 | /* Found the node */ | ||
861 | break; | ||
862 | } | ||
863 | } | ||
864 | if (!(i < nldr_obj->ovly_nodes)) | ||
865 | goto func_end; | ||
866 | |||
867 | /* Determine which phase this section belongs to */ | ||
868 | for (pch = sect_name + 1; *pch && *pch != seps; pch++) | ||
869 | ; | ||
870 | |||
871 | if (*pch) { | ||
872 | pch++; /* Skip over the ':' */ | ||
873 | if (strncmp(pch, PCREATE, strlen(PCREATE)) == 0) { | ||
874 | status = | ||
875 | add_ovly_sect(nldr_obj, | ||
876 | &nldr_obj-> | ||
877 | ovly_table[i].create_sects_list, | ||
878 | sect_info, §_exists, addr, bytes); | ||
879 | if (!status && !sect_exists) | ||
880 | nldr_obj->ovly_table[i].create_sects++; | ||
881 | |||
882 | } else if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) { | ||
883 | status = | ||
884 | add_ovly_sect(nldr_obj, | ||
885 | &nldr_obj-> | ||
886 | ovly_table[i].delete_sects_list, | ||
887 | sect_info, §_exists, addr, bytes); | ||
888 | if (!status && !sect_exists) | ||
889 | nldr_obj->ovly_table[i].delete_sects++; | ||
890 | |||
891 | } else if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) { | ||
892 | status = | ||
893 | add_ovly_sect(nldr_obj, | ||
894 | &nldr_obj-> | ||
895 | ovly_table[i].execute_sects_list, | ||
896 | sect_info, §_exists, addr, bytes); | ||
897 | if (!status && !sect_exists) | ||
898 | nldr_obj->ovly_table[i].execute_sects++; | ||
899 | |||
900 | } else { | ||
901 | /* Put in "other" sections */ | ||
902 | status = | ||
903 | add_ovly_sect(nldr_obj, | ||
904 | &nldr_obj-> | ||
905 | ovly_table[i].other_sects_list, | ||
906 | sect_info, §_exists, addr, bytes); | ||
907 | if (!status && !sect_exists) | ||
908 | nldr_obj->ovly_table[i].other_sects++; | ||
909 | |||
910 | } | ||
911 | } | ||
912 | func_end: | ||
913 | return status; | ||
914 | } | ||
915 | |||
916 | /* | ||
917 | * ======== add_ovly_node ========= | ||
918 | * Callback function passed to dcd_get_objects. | ||
919 | */ | ||
920 | static int add_ovly_node(struct dsp_uuid *uuid_obj, | ||
921 | enum dsp_dcdobjtype obj_type, void *handle) | ||
922 | { | ||
923 | struct nldr_object *nldr_obj = (struct nldr_object *)handle; | ||
924 | char *node_name = NULL; | ||
925 | char *pbuf = NULL; | ||
926 | u32 len; | ||
927 | struct dcd_genericobj obj_def; | ||
928 | int status = 0; | ||
929 | |||
930 | if (obj_type != DSP_DCDNODETYPE) | ||
931 | goto func_end; | ||
932 | |||
933 | status = | ||
934 | dcd_get_object_def(nldr_obj->dcd_mgr, uuid_obj, obj_type, | ||
935 | &obj_def); | ||
936 | if (status) | ||
937 | goto func_end; | ||
938 | |||
939 | /* If overlay node, add to the list */ | ||
940 | if (obj_def.obj_data.node_obj.load_type == NLDR_OVLYLOAD) { | ||
941 | if (nldr_obj->ovly_table == NULL) { | ||
942 | nldr_obj->ovly_nodes++; | ||
943 | } else { | ||
944 | /* Add node to table */ | ||
945 | nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid = | ||
946 | *uuid_obj; | ||
947 | len = | ||
948 | strlen(obj_def.obj_data.node_obj.ndb_props.ac_name); | ||
949 | node_name = obj_def.obj_data.node_obj.ndb_props.ac_name; | ||
950 | pbuf = kzalloc(len + 1, GFP_KERNEL); | ||
951 | if (pbuf == NULL) { | ||
952 | status = -ENOMEM; | ||
953 | } else { | ||
954 | strncpy(pbuf, node_name, len); | ||
955 | nldr_obj->ovly_table[nldr_obj->ovly_nid]. | ||
956 | node_name = pbuf; | ||
957 | nldr_obj->ovly_nid++; | ||
958 | } | ||
959 | } | ||
960 | } | ||
961 | /* These were allocated in dcd_get_object_def */ | ||
962 | kfree(obj_def.obj_data.node_obj.str_create_phase_fxn); | ||
963 | |||
964 | kfree(obj_def.obj_data.node_obj.str_execute_phase_fxn); | ||
965 | |||
966 | kfree(obj_def.obj_data.node_obj.str_delete_phase_fxn); | ||
967 | |||
968 | kfree(obj_def.obj_data.node_obj.str_i_alg_name); | ||
969 | |||
970 | func_end: | ||
971 | return status; | ||
972 | } | ||
973 | |||
974 | /* | ||
975 | * ======== add_ovly_sect ======== | ||
976 | */ | ||
977 | static int add_ovly_sect(struct nldr_object *nldr_obj, | ||
978 | struct ovly_sect **lst, | ||
979 | struct dbll_sect_info *sect_inf, | ||
980 | bool *exists, u32 addr, u32 bytes) | ||
981 | { | ||
982 | struct ovly_sect *new_sect = NULL; | ||
983 | struct ovly_sect *last_sect; | ||
984 | struct ovly_sect *ovly_section; | ||
985 | int status = 0; | ||
986 | |||
987 | ovly_section = last_sect = *lst; | ||
988 | *exists = false; | ||
989 | while (ovly_section) { | ||
990 | /* | ||
991 | * Make sure section has not already been added. Multiple | ||
992 | * 'write' calls may be made to load the section. | ||
993 | */ | ||
994 | if (ovly_section->sect_load_addr == addr) { | ||
995 | /* Already added */ | ||
996 | *exists = true; | ||
997 | break; | ||
998 | } | ||
999 | last_sect = ovly_section; | ||
1000 | ovly_section = ovly_section->next_sect; | ||
1001 | } | ||
1002 | |||
1003 | if (!ovly_section) { | ||
1004 | /* New section */ | ||
1005 | new_sect = kzalloc(sizeof(struct ovly_sect), GFP_KERNEL); | ||
1006 | if (new_sect == NULL) { | ||
1007 | status = -ENOMEM; | ||
1008 | } else { | ||
1009 | new_sect->sect_load_addr = addr; | ||
1010 | new_sect->sect_run_addr = sect_inf->sect_run_addr + | ||
1011 | (addr - sect_inf->sect_load_addr); | ||
1012 | new_sect->size = bytes; | ||
1013 | new_sect->page = sect_inf->type; | ||
1014 | } | ||
1015 | |||
1016 | /* Add to the list */ | ||
1017 | if (!status) { | ||
1018 | if (*lst == NULL) { | ||
1019 | /* First in the list */ | ||
1020 | *lst = new_sect; | ||
1021 | } else { | ||
1022 | last_sect->next_sect = new_sect; | ||
1023 | } | ||
1024 | } | ||
1025 | } | ||
1026 | |||
1027 | return status; | ||
1028 | } | ||
1029 | |||
1030 | /* | ||
1031 | * ======== fake_ovly_write ======== | ||
1032 | */ | ||
1033 | static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes, | ||
1034 | s32 mtype) | ||
1035 | { | ||
1036 | return (s32) bytes; | ||
1037 | } | ||
1038 | |||
1039 | /* | ||
1040 | * ======== free_sects ======== | ||
1041 | */ | ||
1042 | static void free_sects(struct nldr_object *nldr_obj, | ||
1043 | struct ovly_sect *phase_sects, u16 alloc_num) | ||
1044 | { | ||
1045 | struct ovly_sect *ovly_section = phase_sects; | ||
1046 | u16 i = 0; | ||
1047 | bool ret; | ||
1048 | |||
1049 | while (ovly_section && i < alloc_num) { | ||
1050 | /* 'Deallocate' */ | ||
1051 | /* segid - page not supported yet */ | ||
1052 | /* Reserved memory */ | ||
1053 | ret = | ||
1054 | rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr, | ||
1055 | ovly_section->size, true); | ||
1056 | ovly_section = ovly_section->next_sect; | ||
1057 | i++; | ||
1058 | } | ||
1059 | } | ||
1060 | |||
1061 | /* | ||
1062 | * ======== get_symbol_value ======== | ||
1063 | * Find symbol in library's base image. If not there, check dependent | ||
1064 | * libraries. | ||
1065 | */ | ||
1066 | static bool get_symbol_value(void *handle, void *parg, void *rmm_handle, | ||
1067 | char *sym_name, struct dbll_sym_val **sym) | ||
1068 | { | ||
1069 | struct nldr_object *nldr_obj = (struct nldr_object *)handle; | ||
1070 | struct nldr_nodeobject *nldr_node_obj = | ||
1071 | (struct nldr_nodeobject *)rmm_handle; | ||
1072 | struct lib_node *root = (struct lib_node *)parg; | ||
1073 | u16 i; | ||
1074 | bool status = false; | ||
1075 | |||
1076 | /* check the base image */ | ||
1077 | status = nldr_obj->ldr_fxns.get_addr_fxn(nldr_obj->base_lib, | ||
1078 | sym_name, sym); | ||
1079 | if (!status) | ||
1080 | status = | ||
1081 | nldr_obj->ldr_fxns.get_c_addr_fxn(nldr_obj->base_lib, | ||
1082 | sym_name, sym); | ||
1083 | |||
1084 | /* | ||
1085 | * Check in root lib itself. If the library consists of | ||
1086 | * multiple object files linked together, some symbols in the | ||
1087 | * library may need to be resolved. | ||
1088 | */ | ||
1089 | if (!status) { | ||
1090 | status = nldr_obj->ldr_fxns.get_addr_fxn(root->lib, sym_name, | ||
1091 | sym); | ||
1092 | if (!status) { | ||
1093 | status = | ||
1094 | nldr_obj->ldr_fxns.get_c_addr_fxn(root->lib, | ||
1095 | sym_name, sym); | ||
1096 | } | ||
1097 | } | ||
1098 | |||
1099 | /* | ||
1100 | * Check in root lib's dependent libraries, but not dependent | ||
1101 | * libraries' dependents. | ||
1102 | */ | ||
1103 | if (!status) { | ||
1104 | for (i = 0; i < root->dep_libs; i++) { | ||
1105 | status = | ||
1106 | nldr_obj->ldr_fxns.get_addr_fxn(root-> | ||
1107 | dep_libs_tree | ||
1108 | [i].lib, | ||
1109 | sym_name, sym); | ||
1110 | if (!status) { | ||
1111 | status = | ||
1112 | nldr_obj->ldr_fxns. | ||
1113 | get_c_addr_fxn(root->dep_libs_tree[i].lib, | ||
1114 | sym_name, sym); | ||
1115 | } | ||
1116 | if (status) { | ||
1117 | /* Symbol found */ | ||
1118 | break; | ||
1119 | } | ||
1120 | } | ||
1121 | } | ||
1122 | /* | ||
1123 | * Check in persistent libraries | ||
1124 | */ | ||
1125 | if (!status) { | ||
1126 | for (i = 0; i < nldr_node_obj->pers_libs; i++) { | ||
1127 | status = | ||
1128 | nldr_obj->ldr_fxns. | ||
1129 | get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib, | ||
1130 | sym_name, sym); | ||
1131 | if (!status) { | ||
1132 | status = nldr_obj->ldr_fxns.get_c_addr_fxn | ||
1133 | (nldr_node_obj->pers_lib_table[i].lib, | ||
1134 | sym_name, sym); | ||
1135 | } | ||
1136 | if (status) { | ||
1137 | /* Symbol found */ | ||
1138 | break; | ||
1139 | } | ||
1140 | } | ||
1141 | } | ||
1142 | |||
1143 | return status; | ||
1144 | } | ||
1145 | |||
1146 | /* | ||
1147 | * ======== load_lib ======== | ||
1148 | * Recursively load library and all its dependent libraries. The library | ||
1149 | * we're loading is specified by a uuid. | ||
1150 | */ | ||
1151 | static int load_lib(struct nldr_nodeobject *nldr_node_obj, | ||
1152 | struct lib_node *root, struct dsp_uuid uuid, | ||
1153 | bool root_prstnt, | ||
1154 | struct dbll_library_obj **lib_path, | ||
1155 | enum nldr_phase phase, u16 depth) | ||
1156 | { | ||
1157 | struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj; | ||
1158 | u16 nd_libs = 0; /* Number of dependent libraries */ | ||
1159 | u16 np_libs = 0; /* Number of persistent libraries */ | ||
1160 | u16 nd_libs_loaded = 0; /* Number of dep. libraries loaded */ | ||
1161 | u16 i; | ||
1162 | u32 entry; | ||
1163 | u32 dw_buf_size = NLDR_MAXPATHLENGTH; | ||
1164 | dbll_flags flags = DBLL_SYMB | DBLL_CODE | DBLL_DATA | DBLL_DYNAMIC; | ||
1165 | struct dbll_attrs new_attrs; | ||
1166 | char *psz_file_name = NULL; | ||
1167 | struct dsp_uuid *dep_lib_uui_ds = NULL; | ||
1168 | bool *persistent_dep_libs = NULL; | ||
1169 | int status = 0; | ||
1170 | bool lib_status = false; | ||
1171 | struct lib_node *dep_lib; | ||
1172 | |||
1173 | if (depth > MAXDEPTH) { | ||
1174 | /* Error */ | ||
1175 | } | ||
1176 | root->lib = NULL; | ||
1177 | /* Allocate a buffer for library file name of size DBLL_MAXPATHLENGTH */ | ||
1178 | psz_file_name = kzalloc(DBLL_MAXPATHLENGTH, GFP_KERNEL); | ||
1179 | if (psz_file_name == NULL) | ||
1180 | status = -ENOMEM; | ||
1181 | |||
1182 | if (!status) { | ||
1183 | /* Get the name of the library */ | ||
1184 | if (depth == 0) { | ||
1185 | status = | ||
1186 | dcd_get_library_name(nldr_node_obj->nldr_obj-> | ||
1187 | dcd_mgr, &uuid, psz_file_name, | ||
1188 | &dw_buf_size, phase, | ||
1189 | nldr_node_obj->phase_split); | ||
1190 | } else { | ||
1191 | /* Dependent libraries are registered with a phase */ | ||
1192 | status = | ||
1193 | dcd_get_library_name(nldr_node_obj->nldr_obj-> | ||
1194 | dcd_mgr, &uuid, psz_file_name, | ||
1195 | &dw_buf_size, NLDR_NOPHASE, | ||
1196 | NULL); | ||
1197 | } | ||
1198 | } | ||
1199 | if (!status) { | ||
1200 | /* Open the library, don't load symbols */ | ||
1201 | status = | ||
1202 | nldr_obj->ldr_fxns.open_fxn(nldr_obj->dbll, psz_file_name, | ||
1203 | DBLL_NOLOAD, &root->lib); | ||
1204 | } | ||
1205 | /* Done with file name */ | ||
1206 | kfree(psz_file_name); | ||
1207 | |||
1208 | /* Check whether the library has already been loaded */ | ||
1209 | if (!status && root_prstnt) { | ||
1210 | lib_status = | ||
1211 | find_in_persistent_lib_array(nldr_node_obj, root->lib); | ||
1212 | /* Close library */ | ||
1213 | if (lib_status) { | ||
1214 | nldr_obj->ldr_fxns.close_fxn(root->lib); | ||
1215 | return 0; | ||
1216 | } | ||
1217 | } | ||
1218 | if (!status) { | ||
1219 | /* Check for circular dependencies. */ | ||
1220 | for (i = 0; i < depth; i++) { | ||
1221 | if (root->lib == lib_path[i]) { | ||
1222 | /* This condition could be checked by a | ||
1223 | * tool at build time. */ | ||
1224 | status = -EILSEQ; | ||
1225 | } | ||
1226 | } | ||
1227 | } | ||
1228 | if (!status) { | ||
1229 | /* Add library to current path in dependency tree */ | ||
1230 | lib_path[depth] = root->lib; | ||
1231 | depth++; | ||
1232 | /* Get number of dependent libraries */ | ||
1233 | status = | ||
1234 | dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->dcd_mgr, | ||
1235 | &uuid, &nd_libs, &np_libs, phase); | ||
1236 | } | ||
1237 | if (!status) { | ||
1238 | if (!(*nldr_node_obj->phase_split)) | ||
1239 | np_libs = 0; | ||
1240 | |||
1241 | /* nd_libs = #of dependent libraries */ | ||
1242 | root->dep_libs = nd_libs - np_libs; | ||
1243 | if (nd_libs > 0) { | ||
1244 | dep_lib_uui_ds = kzalloc(sizeof(struct dsp_uuid) * | ||
1245 | nd_libs, GFP_KERNEL); | ||
1246 | persistent_dep_libs = | ||
1247 | kzalloc(sizeof(bool) * nd_libs, GFP_KERNEL); | ||
1248 | if (!dep_lib_uui_ds || !persistent_dep_libs) | ||
1249 | status = -ENOMEM; | ||
1250 | |||
1251 | if (root->dep_libs > 0) { | ||
1252 | /* Allocate arrays for dependent lib UUIDs, | ||
1253 | * lib nodes */ | ||
1254 | root->dep_libs_tree = kzalloc | ||
1255 | (sizeof(struct lib_node) * | ||
1256 | (root->dep_libs), GFP_KERNEL); | ||
1257 | if (!(root->dep_libs_tree)) | ||
1258 | status = -ENOMEM; | ||
1259 | |||
1260 | } | ||
1261 | |||
1262 | if (!status) { | ||
1263 | /* Get the dependent library UUIDs */ | ||
1264 | status = | ||
1265 | dcd_get_dep_libs(nldr_node_obj-> | ||
1266 | nldr_obj->dcd_mgr, &uuid, | ||
1267 | nd_libs, dep_lib_uui_ds, | ||
1268 | persistent_dep_libs, | ||
1269 | phase); | ||
1270 | } | ||
1271 | } | ||
1272 | } | ||
1273 | |||
1274 | /* | ||
1275 | * Recursively load dependent libraries. | ||
1276 | */ | ||
1277 | if (!status) { | ||
1278 | for (i = 0; i < nd_libs; i++) { | ||
1279 | /* If root library is NOT persistent, and dep library | ||
1280 | * is, then record it. If root library IS persistent, | ||
1281 | * the deplib is already included */ | ||
1282 | if (!root_prstnt && persistent_dep_libs[i] && | ||
1283 | *nldr_node_obj->phase_split) { | ||
1284 | if ((nldr_node_obj->pers_libs) >= MAXLIBS) { | ||
1285 | status = -EILSEQ; | ||
1286 | break; | ||
1287 | } | ||
1288 | |||
1289 | /* Allocate library outside of phase */ | ||
1290 | dep_lib = | ||
1291 | &nldr_node_obj->pers_lib_table | ||
1292 | [nldr_node_obj->pers_libs]; | ||
1293 | } else { | ||
1294 | if (root_prstnt) | ||
1295 | persistent_dep_libs[i] = true; | ||
1296 | |||
1297 | /* Allocate library within phase */ | ||
1298 | dep_lib = &root->dep_libs_tree[nd_libs_loaded]; | ||
1299 | } | ||
1300 | |||
1301 | status = load_lib(nldr_node_obj, dep_lib, | ||
1302 | dep_lib_uui_ds[i], | ||
1303 | persistent_dep_libs[i], lib_path, | ||
1304 | phase, depth); | ||
1305 | |||
1306 | if (!status) { | ||
1307 | if ((status != 0) && | ||
1308 | !root_prstnt && persistent_dep_libs[i] && | ||
1309 | *nldr_node_obj->phase_split) { | ||
1310 | (nldr_node_obj->pers_libs)++; | ||
1311 | } else { | ||
1312 | if (!persistent_dep_libs[i] || | ||
1313 | !(*nldr_node_obj->phase_split)) { | ||
1314 | nd_libs_loaded++; | ||
1315 | } | ||
1316 | } | ||
1317 | } else { | ||
1318 | break; | ||
1319 | } | ||
1320 | } | ||
1321 | } | ||
1322 | |||
1323 | /* Now we can load the root library */ | ||
1324 | if (!status) { | ||
1325 | new_attrs = nldr_obj->ldr_attrs; | ||
1326 | new_attrs.sym_arg = root; | ||
1327 | new_attrs.rmm_handle = nldr_node_obj; | ||
1328 | new_attrs.input_params = nldr_node_obj->priv_ref; | ||
1329 | new_attrs.base_image = false; | ||
1330 | |||
1331 | status = | ||
1332 | nldr_obj->ldr_fxns.load_fxn(root->lib, flags, &new_attrs, | ||
1333 | &entry); | ||
1334 | } | ||
1335 | |||
1336 | /* | ||
1337 | * In case of failure, unload any dependent libraries that | ||
1338 | * were loaded, and close the root library. | ||
1339 | * (Persistent libraries are unloaded from the very top) | ||
1340 | */ | ||
1341 | if (status) { | ||
1342 | if (phase != NLDR_EXECUTE) { | ||
1343 | for (i = 0; i < nldr_node_obj->pers_libs; i++) | ||
1344 | unload_lib(nldr_node_obj, | ||
1345 | &nldr_node_obj->pers_lib_table[i]); | ||
1346 | |||
1347 | nldr_node_obj->pers_libs = 0; | ||
1348 | } | ||
1349 | for (i = 0; i < nd_libs_loaded; i++) | ||
1350 | unload_lib(nldr_node_obj, &root->dep_libs_tree[i]); | ||
1351 | |||
1352 | if (root->lib) | ||
1353 | nldr_obj->ldr_fxns.close_fxn(root->lib); | ||
1354 | |||
1355 | } | ||
1356 | |||
1357 | /* Going up one node in the dependency tree */ | ||
1358 | depth--; | ||
1359 | |||
1360 | kfree(dep_lib_uui_ds); | ||
1361 | dep_lib_uui_ds = NULL; | ||
1362 | |||
1363 | kfree(persistent_dep_libs); | ||
1364 | persistent_dep_libs = NULL; | ||
1365 | |||
1366 | return status; | ||
1367 | } | ||
1368 | |||
1369 | /* | ||
1370 | * ======== load_ovly ======== | ||
1371 | */ | ||
1372 | static int load_ovly(struct nldr_nodeobject *nldr_node_obj, | ||
1373 | enum nldr_phase phase) | ||
1374 | { | ||
1375 | struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj; | ||
1376 | struct ovly_node *po_node = NULL; | ||
1377 | struct ovly_sect *phase_sects = NULL; | ||
1378 | struct ovly_sect *other_sects_list = NULL; | ||
1379 | u16 i; | ||
1380 | u16 alloc_num = 0; | ||
1381 | u16 other_alloc = 0; | ||
1382 | u16 *ref_count = NULL; | ||
1383 | u16 *other_ref = NULL; | ||
1384 | u32 bytes; | ||
1385 | struct ovly_sect *ovly_section; | ||
1386 | int status = 0; | ||
1387 | |||
1388 | /* Find the node in the table */ | ||
1389 | for (i = 0; i < nldr_obj->ovly_nodes; i++) { | ||
1390 | if (is_equal_uuid | ||
1391 | (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) { | ||
1392 | /* Found it */ | ||
1393 | po_node = &(nldr_obj->ovly_table[i]); | ||
1394 | break; | ||
1395 | } | ||
1396 | } | ||
1397 | |||
1398 | |||
1399 | if (!po_node) { | ||
1400 | status = -ENOENT; | ||
1401 | goto func_end; | ||
1402 | } | ||
1403 | |||
1404 | switch (phase) { | ||
1405 | case NLDR_CREATE: | ||
1406 | ref_count = &(po_node->create_ref); | ||
1407 | other_ref = &(po_node->other_ref); | ||
1408 | phase_sects = po_node->create_sects_list; | ||
1409 | other_sects_list = po_node->other_sects_list; | ||
1410 | break; | ||
1411 | |||
1412 | case NLDR_EXECUTE: | ||
1413 | ref_count = &(po_node->execute_ref); | ||
1414 | phase_sects = po_node->execute_sects_list; | ||
1415 | break; | ||
1416 | |||
1417 | case NLDR_DELETE: | ||
1418 | ref_count = &(po_node->delete_ref); | ||
1419 | phase_sects = po_node->delete_sects_list; | ||
1420 | break; | ||
1421 | |||
1422 | default: | ||
1423 | break; | ||
1424 | } | ||
1425 | |||
1426 | if (ref_count == NULL) | ||
1427 | goto func_end; | ||
1428 | |||
1429 | if (*ref_count != 0) | ||
1430 | goto func_end; | ||
1431 | |||
1432 | /* 'Allocate' memory for overlay sections of this phase */ | ||
1433 | ovly_section = phase_sects; | ||
1434 | while (ovly_section) { | ||
1435 | /* allocate *//* page not supported yet */ | ||
1436 | /* reserve *//* align */ | ||
1437 | status = rmm_alloc(nldr_obj->rmm, 0, ovly_section->size, 0, | ||
1438 | &(ovly_section->sect_run_addr), true); | ||
1439 | if (!status) { | ||
1440 | ovly_section = ovly_section->next_sect; | ||
1441 | alloc_num++; | ||
1442 | } else { | ||
1443 | break; | ||
1444 | } | ||
1445 | } | ||
1446 | if (other_ref && *other_ref == 0) { | ||
1447 | /* 'Allocate' memory for other overlay sections | ||
1448 | * (create phase) */ | ||
1449 | if (!status) { | ||
1450 | ovly_section = other_sects_list; | ||
1451 | while (ovly_section) { | ||
1452 | /* page not supported *//* align */ | ||
1453 | /* reserve */ | ||
1454 | status = | ||
1455 | rmm_alloc(nldr_obj->rmm, 0, | ||
1456 | ovly_section->size, 0, | ||
1457 | &(ovly_section->sect_run_addr), | ||
1458 | true); | ||
1459 | if (!status) { | ||
1460 | ovly_section = ovly_section->next_sect; | ||
1461 | other_alloc++; | ||
1462 | } else { | ||
1463 | break; | ||
1464 | } | ||
1465 | } | ||
1466 | } | ||
1467 | } | ||
1468 | if (*ref_count == 0) { | ||
1469 | if (!status) { | ||
1470 | /* Load sections for this phase */ | ||
1471 | ovly_section = phase_sects; | ||
1472 | while (ovly_section && !status) { | ||
1473 | bytes = | ||
1474 | (*nldr_obj->ovly_fxn) (nldr_node_obj-> | ||
1475 | priv_ref, | ||
1476 | ovly_section-> | ||
1477 | sect_run_addr, | ||
1478 | ovly_section-> | ||
1479 | sect_load_addr, | ||
1480 | ovly_section->size, | ||
1481 | ovly_section->page); | ||
1482 | if (bytes != ovly_section->size) | ||
1483 | status = -EPERM; | ||
1484 | |||
1485 | ovly_section = ovly_section->next_sect; | ||
1486 | } | ||
1487 | } | ||
1488 | } | ||
1489 | if (other_ref && *other_ref == 0) { | ||
1490 | if (!status) { | ||
1491 | /* Load other sections (create phase) */ | ||
1492 | ovly_section = other_sects_list; | ||
1493 | while (ovly_section && !status) { | ||
1494 | bytes = | ||
1495 | (*nldr_obj->ovly_fxn) (nldr_node_obj-> | ||
1496 | priv_ref, | ||
1497 | ovly_section-> | ||
1498 | sect_run_addr, | ||
1499 | ovly_section-> | ||
1500 | sect_load_addr, | ||
1501 | ovly_section->size, | ||
1502 | ovly_section->page); | ||
1503 | if (bytes != ovly_section->size) | ||
1504 | status = -EPERM; | ||
1505 | |||
1506 | ovly_section = ovly_section->next_sect; | ||
1507 | } | ||
1508 | } | ||
1509 | } | ||
1510 | if (status) { | ||
1511 | /* 'Deallocate' memory */ | ||
1512 | free_sects(nldr_obj, phase_sects, alloc_num); | ||
1513 | free_sects(nldr_obj, other_sects_list, other_alloc); | ||
1514 | } | ||
1515 | func_end: | ||
1516 | if (!status && (ref_count != NULL)) { | ||
1517 | *ref_count += 1; | ||
1518 | if (other_ref) | ||
1519 | *other_ref += 1; | ||
1520 | |||
1521 | } | ||
1522 | |||
1523 | return status; | ||
1524 | } | ||
1525 | |||
1526 | /* | ||
1527 | * ======== remote_alloc ======== | ||
1528 | */ | ||
1529 | static int remote_alloc(void **ref, u16 mem_sect, u32 size, | ||
1530 | u32 align, u32 *dsp_address, | ||
1531 | s32 segmnt_id, s32 req, | ||
1532 | bool reserve) | ||
1533 | { | ||
1534 | struct nldr_nodeobject *hnode = (struct nldr_nodeobject *)ref; | ||
1535 | struct nldr_object *nldr_obj; | ||
1536 | struct rmm_target_obj *rmm; | ||
1537 | u16 mem_phase_bit = MAXFLAGS; | ||
1538 | u16 segid = 0; | ||
1539 | u16 i; | ||
1540 | u16 mem_sect_type; | ||
1541 | u32 word_size; | ||
1542 | struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address; | ||
1543 | bool mem_load_req = false; | ||
1544 | int status = -ENOMEM; /* Set to fail */ | ||
1545 | |||
1546 | nldr_obj = hnode->nldr_obj; | ||
1547 | rmm = nldr_obj->rmm; | ||
1548 | /* Convert size to DSP words */ | ||
1549 | word_size = | ||
1550 | (size + nldr_obj->dsp_word_size - | ||
1551 | 1) / nldr_obj->dsp_word_size; | ||
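	/* For example, assuming a 2-byte DSP word, a 5-byte request
	 * rounds up to (5 + 2 - 1) / 2 = 3 DSP words. */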
1552 | /* Modify memory 'align' to account for DSP cache line size */ | ||
1553 | align = lcm(GEM_CACHE_LINE_SIZE, align); | ||
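	/* For example, if GEM_CACHE_LINE_SIZE were 128 bytes and the
	 * requested align 32, lcm() yields 128, which satisfies both
	 * constraints. */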
1554 | dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align); | ||
1555 | if (segmnt_id != -1) { | ||
1556 | rmm_addr_obj->segid = segmnt_id; | ||
1557 | segid = segmnt_id; | ||
1558 | mem_load_req = req; | ||
1559 | } else { | ||
1560 | switch (hnode->phase) { | ||
1561 | case NLDR_CREATE: | ||
1562 | mem_phase_bit = CREATEDATAFLAGBIT; | ||
1563 | break; | ||
1564 | case NLDR_DELETE: | ||
1565 | mem_phase_bit = DELETEDATAFLAGBIT; | ||
1566 | break; | ||
1567 | case NLDR_EXECUTE: | ||
1568 | mem_phase_bit = EXECUTEDATAFLAGBIT; | ||
1569 | break; | ||
1570 | default: | ||
1571 | break; | ||
1572 | } | ||
1573 | if (mem_sect == DBLL_CODE) | ||
1574 | mem_phase_bit++; | ||
1575 | |||
1576 | if (mem_phase_bit < MAXFLAGS) | ||
1577 | segid = hnode->seg_id[mem_phase_bit]; | ||
1578 | |||
1579 | /* Determine if there is a memory loading requirement */ | ||
1580 | if ((hnode->code_data_flag_mask >> mem_phase_bit) & 0x1) | ||
1581 | mem_load_req = true; | ||
1582 | |||
1583 | } | ||
1584 | mem_sect_type = (mem_sect == DBLL_CODE) ? DYNM_CODE : DYNM_DATA; | ||
1585 | |||
1586 | /* Find an appropriate segment based on mem_sect */ | ||
1587 | if (segid == NULLID) { | ||
1588 | /* No memory requirements or preferences */ | ||
1589 | goto func_cont; | ||
1590 | } | ||
1591 | if (segid <= MAXSEGID) { | ||
1592 | /* Attempt to allocate from segid first. */ | ||
1593 | rmm_addr_obj->segid = segid; | ||
1594 | status = | ||
1595 | rmm_alloc(rmm, segid, word_size, align, dsp_address, false); | ||
1596 | if (status) { | ||
1597 | dev_dbg(bridge, "%s: Unable to allocate from segment %d\n", | ||
1598 | __func__, segid); | ||
1599 | } | ||
1600 | } else { | ||
1601 | /* segid > MAXSEGID ==> Internal or external memory */ | ||
1602 | /* Check for any internal or external memory segment, | ||
1603 | * depending on segid. */ | ||
1604 | mem_sect_type |= segid == MEMINTERNALID ? | ||
1605 | DYNM_INTERNAL : DYNM_EXTERNAL; | ||
1606 | for (i = 0; i < nldr_obj->dload_segs; i++) { | ||
1607 | if ((nldr_obj->seg_table[i] & mem_sect_type) != | ||
1608 | mem_sect_type) | ||
1609 | continue; | ||
1610 | |||
1611 | status = rmm_alloc(rmm, i, word_size, align, | ||
1612 | dsp_address, false); | ||
1613 | if (!status) { | ||
1614 | /* Save segid for freeing later */ | ||
1615 | rmm_addr_obj->segid = i; | ||
1616 | break; | ||
1617 | } | ||
1618 | } | ||
1619 | } | ||
1620 | func_cont: | ||
1621 | /* Haven't found memory yet, attempt to find any segment that works */ | ||
1622 | if (status == -ENOMEM && !mem_load_req) { | ||
1623 | dev_dbg(bridge, "%s: Preferred segment unavailable, trying " | ||
1624 | "another\n", __func__); | ||
1625 | for (i = 0; i < nldr_obj->dload_segs; i++) { | ||
1626 | /* All bits of mem_sect_type must be set */ | ||
1627 | if ((nldr_obj->seg_table[i] & mem_sect_type) != | ||
1628 | mem_sect_type) | ||
1629 | continue; | ||
1630 | |||
1631 | status = rmm_alloc(rmm, i, word_size, align, | ||
1632 | dsp_address, false); | ||
1633 | if (!status) { | ||
1634 | /* Save segid */ | ||
1635 | rmm_addr_obj->segid = i; | ||
1636 | break; | ||
1637 | } | ||
1638 | } | ||
1639 | } | ||
1640 | |||
1641 | return status; | ||
1642 | } | ||
1643 | |||
1644 | static int remote_free(void **ref, u16 space, u32 dsp_address, | ||
1645 | u32 size, bool reserve) | ||
1646 | { | ||
1647 | struct nldr_object *nldr_obj = (struct nldr_object *)ref; | ||
1648 | struct rmm_target_obj *rmm; | ||
1649 | u32 word_size; | ||
1650 | int status = -ENOMEM; /* Set to fail */ | ||
1651 | |||
1652 | rmm = nldr_obj->rmm; | ||
1653 | |||
1654 | /* Convert size to DSP words */ | ||
1655 | word_size = | ||
1656 | (size + nldr_obj->dsp_word_size - | ||
1657 | 1) / nldr_obj->dsp_word_size; | ||
1658 | |||
1659 | if (rmm_free(rmm, space, dsp_address, word_size, reserve)) | ||
1660 | status = 0; | ||
1661 | |||
1662 | return status; | ||
1663 | } | ||
1664 | |||
1665 | /* | ||
1666 | * ======== unload_lib ======== | ||
1667 | */ | ||
1668 | static void unload_lib(struct nldr_nodeobject *nldr_node_obj, | ||
1669 | struct lib_node *root) | ||
1670 | { | ||
1671 | struct dbll_attrs new_attrs; | ||
1672 | struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj; | ||
1673 | u16 i; | ||
1674 | |||
1675 | |||
1676 | /* Unload dependent libraries */ | ||
1677 | for (i = 0; i < root->dep_libs; i++) | ||
1678 | unload_lib(nldr_node_obj, &root->dep_libs_tree[i]); | ||
1679 | |||
1680 | root->dep_libs = 0; | ||
1681 | |||
1682 | new_attrs = nldr_obj->ldr_attrs; | ||
1683 | new_attrs.rmm_handle = nldr_obj->rmm; | ||
1684 | new_attrs.input_params = nldr_node_obj->priv_ref; | ||
1685 | new_attrs.base_image = false; | ||
1686 | new_attrs.sym_arg = root; | ||
1687 | |||
1688 | if (root->lib) { | ||
1689 | /* Unload the root library */ | ||
1690 | nldr_obj->ldr_fxns.unload_fxn(root->lib, &new_attrs); | ||
1691 | nldr_obj->ldr_fxns.close_fxn(root->lib); | ||
1692 | } | ||
1693 | |||
1694 | /* Free dependent library list */ | ||
1695 | kfree(root->dep_libs_tree); | ||
1696 | root->dep_libs_tree = NULL; | ||
1697 | } | ||
1698 | |||
1699 | /* | ||
1700 | * ======== unload_ovly ======== | ||
1701 | */ | ||
1702 | static void unload_ovly(struct nldr_nodeobject *nldr_node_obj, | ||
1703 | enum nldr_phase phase) | ||
1704 | { | ||
1705 | struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj; | ||
1706 | struct ovly_node *po_node = NULL; | ||
1707 | struct ovly_sect *phase_sects = NULL; | ||
1708 | struct ovly_sect *other_sects_list = NULL; | ||
1709 | u16 i; | ||
1710 | u16 alloc_num = 0; | ||
1711 | u16 other_alloc = 0; | ||
1712 | u16 *ref_count = NULL; | ||
1713 | u16 *other_ref = NULL; | ||
1714 | |||
1715 | /* Find the node in the table */ | ||
1716 | for (i = 0; i < nldr_obj->ovly_nodes; i++) { | ||
1717 | if (is_equal_uuid | ||
1718 | (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) { | ||
1719 | /* Found it */ | ||
1720 | po_node = &(nldr_obj->ovly_table[i]); | ||
1721 | break; | ||
1722 | } | ||
1723 | } | ||
1724 | |||
1725 | |||
1726 | if (!po_node) | ||
1727 | /* TODO: Should we print warning here? */ | ||
1728 | return; | ||
1729 | |||
1730 | switch (phase) { | ||
1731 | case NLDR_CREATE: | ||
1732 | ref_count = &(po_node->create_ref); | ||
1733 | phase_sects = po_node->create_sects_list; | ||
1734 | alloc_num = po_node->create_sects; | ||
1735 | break; | ||
1736 | case NLDR_EXECUTE: | ||
1737 | ref_count = &(po_node->execute_ref); | ||
1738 | phase_sects = po_node->execute_sects_list; | ||
1739 | alloc_num = po_node->execute_sects; | ||
1740 | break; | ||
1741 | case NLDR_DELETE: | ||
1742 | ref_count = &(po_node->delete_ref); | ||
1743 | other_ref = &(po_node->other_ref); | ||
1744 | phase_sects = po_node->delete_sects_list; | ||
1745 | /* 'Other' overlay sections are unloaded in the delete phase */ | ||
1746 | other_sects_list = po_node->other_sects_list; | ||
1747 | alloc_num = po_node->delete_sects; | ||
1748 | other_alloc = po_node->other_sects; | ||
1749 | break; | ||
1750 | default: | ||
1751 | break; | ||
1752 | } | ||
1753 | if (ref_count && (*ref_count > 0)) { | ||
1754 | *ref_count -= 1; | ||
1755 | if (other_ref) | ||
1756 | *other_ref -= 1; | ||
1757 | } | ||
1758 | |||
1759 | if (ref_count && *ref_count == 0) { | ||
1760 | /* 'Deallocate' memory */ | ||
1761 | free_sects(nldr_obj, phase_sects, alloc_num); | ||
1762 | } | ||
1763 | if (other_ref && *other_ref == 0) | ||
1764 | free_sects(nldr_obj, other_sects_list, other_alloc); | ||
1765 | } | ||
1766 | |||
1767 | /* | ||
1768 | * ======== find_in_persistent_lib_array ======== | ||
1769 | */ | ||
1770 | static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj, | ||
1771 | struct dbll_library_obj *lib) | ||
1772 | { | ||
1773 | s32 i = 0; | ||
1774 | |||
1775 | for (i = 0; i < nldr_node_obj->pers_libs; i++) { | ||
1776 | if (lib == nldr_node_obj->pers_lib_table[i].lib) | ||
1777 | return true; | ||
1778 | |||
1779 | } | ||
1780 | |||
1781 | return false; | ||
1782 | } | ||
1783 | |||
1784 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
1785 | /** | ||
1786 | * nldr_find_addr() - Find the closest symbol to the given address based on | ||
1787 | * dynamic node object. | ||
1788 | * | ||
1789 | * @nldr_node: Dynamic node object | ||
1790 | * @sym_addr: Given address to find the dsp symbol | ||
1791 | * @offset_range: offset range to look for dsp symbol | ||
1792 | * @offset_output: Symbol Output address | ||
1793 | * @sym_name: String with the dsp symbol | ||
1794 | * | ||
1795 | * This function finds the node library for a given address and | ||
1796 | * retrieves the dsp symbol by calling dbll_find_dsp_symbol. | ||
1797 | */ | ||
1798 | int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr, | ||
1799 | u32 offset_range, void *offset_output, char *sym_name) | ||
1800 | { | ||
1801 | int status = 0; | ||
1802 | bool status1 = false; | ||
1803 | s32 i = 0; | ||
1804 | struct lib_node root = { NULL, 0, NULL }; | ||
1805 | |||
1806 | if (nldr_node->dynamic && *nldr_node->phase_split) { | ||
1807 | switch (nldr_node->phase) { | ||
1808 | case NLDR_CREATE: | ||
1809 | root = nldr_node->create_lib; | ||
1810 | break; | ||
1811 | case NLDR_EXECUTE: | ||
1812 | root = nldr_node->execute_lib; | ||
1813 | break; | ||
1814 | case NLDR_DELETE: | ||
1815 | root = nldr_node->delete_lib; | ||
1816 | break; | ||
1817 | default: | ||
1818 | break; | ||
1819 | } | ||
1820 | } else { | ||
1821 | /* for Overlay nodes or non-split Dynamic nodes */ | ||
1822 | root = nldr_node->root; | ||
1823 | } | ||
1824 | |||
1825 | status1 = dbll_find_dsp_symbol(root.lib, sym_addr, | ||
1826 | offset_range, offset_output, sym_name); | ||
1827 | |||
1828 | /* If symbol not found, check dependent libraries */ | ||
1829 | if (!status1) | ||
1830 | for (i = 0; i < root.dep_libs; i++) { | ||
1831 | status1 = dbll_find_dsp_symbol( | ||
1832 | root.dep_libs_tree[i].lib, sym_addr, | ||
1833 | offset_range, offset_output, sym_name); | ||
1834 | if (status1) | ||
1835 | /* Symbol found */ | ||
1836 | break; | ||
1837 | } | ||
1838 | /* Check persistent libraries */ | ||
1839 | if (!status1) | ||
1840 | for (i = 0; i < nldr_node->pers_libs; i++) { | ||
1841 | status1 = dbll_find_dsp_symbol( | ||
1842 | nldr_node->pers_lib_table[i].lib, sym_addr, | ||
1843 | offset_range, offset_output, sym_name); | ||
1844 | if (status1) | ||
1845 | /* Symbol found */ | ||
1846 | break; | ||
1847 | } | ||
1848 | |||
1849 | if (!status1) { | ||
1850 | pr_debug("%s: Address 0x%x not found in range %d.\n", | ||
1851 | __func__, sym_addr, offset_range); | ||
1852 | status = -ESPIPE; | ||
1853 | } else { | ||
1854 | pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", | ||
1855 | __func__, (u32) nldr_node, sym_addr, offset_range, | ||
1856 | (u32) offset_output, sym_name); | ||
1857 | } | ||
1858 | |||
1859 | return status; | ||
1860 | } | ||
1861 | #endif | ||
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c deleted file mode 100644 index 133f2dbc3762..000000000000 --- a/drivers/staging/tidspbridge/rmgr/node.c +++ /dev/null | |||
@@ -1,3031 +0,0 @@ | |||
1 | /* | ||
2 | * node.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge Node Manager. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | #include <linux/bitmap.h> | ||
21 | #include <linux/list.h> | ||
22 | |||
23 | /* ----------------------------------- Host OS */ | ||
24 | #include <dspbridge/host_os.h> | ||
25 | |||
26 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
27 | #include <dspbridge/dbdefs.h> | ||
28 | |||
29 | /* ----------------------------------- OS Adaptation Layer */ | ||
30 | #include <dspbridge/memdefs.h> | ||
31 | #include <dspbridge/proc.h> | ||
32 | #include <dspbridge/strm.h> | ||
33 | #include <dspbridge/sync.h> | ||
34 | #include <dspbridge/ntfy.h> | ||
35 | |||
36 | /* ----------------------------------- Platform Manager */ | ||
37 | #include <dspbridge/cmm.h> | ||
38 | #include <dspbridge/cod.h> | ||
39 | #include <dspbridge/dev.h> | ||
40 | #include <dspbridge/msg.h> | ||
41 | |||
42 | /* ----------------------------------- Resource Manager */ | ||
43 | #include <dspbridge/dbdcd.h> | ||
44 | #include <dspbridge/disp.h> | ||
45 | #include <dspbridge/rms_sh.h> | ||
46 | |||
47 | /* ----------------------------------- Link Driver */ | ||
48 | #include <dspbridge/dspdefs.h> | ||
49 | #include <dspbridge/dspioctl.h> | ||
50 | |||
51 | /* ----------------------------------- Others */ | ||
52 | #include <dspbridge/uuidutil.h> | ||
53 | |||
54 | /* ----------------------------------- This */ | ||
55 | #include <dspbridge/nodepriv.h> | ||
56 | #include <dspbridge/node.h> | ||
57 | #include <dspbridge/dmm.h> | ||
58 | |||
59 | /* Static/Dynamic Loader includes */ | ||
60 | #include <dspbridge/dbll.h> | ||
61 | #include <dspbridge/nldr.h> | ||
62 | |||
63 | #include <dspbridge/drv.h> | ||
64 | #include <dspbridge/resourcecleanup.h> | ||
65 | #include <_tiomap.h> | ||
66 | |||
67 | #include <dspbridge/dspdeh.h> | ||
68 | |||
69 | #define HOSTPREFIX "/host" | ||
70 | #define PIPEPREFIX "/dbpipe" | ||
71 | |||
72 | #define MAX_INPUTS(h) \ | ||
73 | ((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams) | ||
74 | #define MAX_OUTPUTS(h) \ | ||
75 | ((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams) | ||
76 | |||
77 | #define NODE_GET_PRIORITY(h) ((h)->prio) | ||
78 | #define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio) | ||
79 | #define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state) | ||
80 | |||
81 | #define MAXPIPES 100 /* Max # of /pipe connections (CSL limit) */ | ||
82 | #define MAXDEVSUFFIXLEN 2 /* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */ | ||
83 | |||
84 | #define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN) | ||
85 | #define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN) | ||
86 | |||
87 | #define MAXDEVNAMELEN 32 /* dsp_ndbprops.ac_name size */ | ||
88 | #define CREATEPHASE 1 | ||
89 | #define EXECUTEPHASE 2 | ||
90 | #define DELETEPHASE 3 | ||
91 | |||
92 | /* Define default STRM parameters */ | ||
93 | /* | ||
94 | * TBD: Put in header file, make global DSP_STRMATTRS with defaults, | ||
95 | * or make defaults configurable. | ||
96 | */ | ||
97 | #define DEFAULTBUFSIZE 32 | ||
98 | #define DEFAULTNBUFS 2 | ||
99 | #define DEFAULTSEGID 0 | ||
100 | #define DEFAULTALIGNMENT 0 | ||
101 | #define DEFAULTTIMEOUT 10000 | ||
102 | |||
103 | #define RMSQUERYSERVER 0 | ||
104 | #define RMSCONFIGURESERVER 1 | ||
105 | #define RMSCREATENODE 2 | ||
106 | #define RMSEXECUTENODE 3 | ||
107 | #define RMSDELETENODE 4 | ||
108 | #define RMSCHANGENODEPRIORITY 5 | ||
109 | #define RMSREADMEMORY 6 | ||
110 | #define RMSWRITEMEMORY 7 | ||
111 | #define RMSCOPY 8 | ||
112 | #define MAXTIMEOUT 2000 | ||
113 | |||
114 | #define NUMRMSFXNS 9 | ||
115 | |||
116 | #define PWR_TIMEOUT 500 /* default PWR timeout in msec */ | ||
117 | |||
118 | #define STACKSEGLABEL "L1DSRAM_HEAP" /* Label for DSP Stack Segment Addr */ | ||
119 | |||
120 | /* | ||
121 | * ======== node_mgr ======== | ||
122 | */ | ||
123 | struct node_mgr { | ||
124 | struct dev_object *dev_obj; /* Device object */ | ||
125 | /* Function interface to Bridge driver */ | ||
126 | struct bridge_drv_interface *intf_fxns; | ||
127 | struct dcd_manager *dcd_mgr; /* Proc/Node data manager */ | ||
128 | struct disp_object *disp_obj; /* Node dispatcher */ | ||
129 | struct list_head node_list; /* List of all allocated nodes */ | ||
130 | u32 num_nodes; /* Number of nodes in node_list */ | ||
131 | u32 num_created; /* Number of nodes *created* on DSP */ | ||
132 | DECLARE_BITMAP(pipe_map, MAXPIPES); /* Pipe connection bitmap */ | ||
133 | DECLARE_BITMAP(pipe_done_map, MAXPIPES); /* Pipes that are half free */ | ||
134 | /* Channel allocation bitmap */ | ||
135 | DECLARE_BITMAP(chnl_map, CHNL_MAXCHANNELS); | ||
136 | /* DMA Channel allocation bitmap */ | ||
137 | DECLARE_BITMAP(dma_chnl_map, CHNL_MAXCHANNELS); | ||
138 | /* Zero-Copy Channel alloc bitmap */ | ||
139 | DECLARE_BITMAP(zc_chnl_map, CHNL_MAXCHANNELS); | ||
140 | struct ntfy_object *ntfy_obj; /* Manages registered notifications */ | ||
141 | struct mutex node_mgr_lock; /* For critical sections */ | ||
142 | u32 fxn_addrs[NUMRMSFXNS]; /* RMS function addresses */ | ||
143 | struct msg_mgr *msg_mgr_obj; | ||
144 | |||
145 | /* Processor properties needed by Node Dispatcher */ | ||
146 | u32 num_chnls; /* Total number of channels */ | ||
147 | u32 chnl_offset; /* Offset of chnl ids rsvd for RMS */ | ||
148 | u32 chnl_buf_size; /* Buffer size for data to RMS */ | ||
149 | int proc_family; /* eg, 5000 */ | ||
150 | int proc_type; /* eg, 5510 */ | ||
151 | u32 dsp_word_size; /* Size of DSP word in host bytes */ | ||
152 | u32 dsp_data_mau_size; /* Size of DSP data MAU */ | ||
153 | u32 dsp_mau_size; /* Size of MAU */ | ||
154 | s32 min_pri; /* Minimum runtime priority for node */ | ||
155 | s32 max_pri; /* Maximum runtime priority for node */ | ||
156 | |||
157 | struct strm_mgr *strm_mgr_obj; /* STRM manager */ | ||
158 | |||
159 | /* Loader properties */ | ||
160 | struct nldr_object *nldr_obj; /* Handle to loader */ | ||
161 | struct node_ldr_fxns nldr_fxns; /* Handle to loader functions */ | ||
162 | }; | ||
163 | |||
164 | /* | ||
165 | * ======== connecttype ======== | ||
166 | */ | ||
167 | enum connecttype { | ||
168 | NOTCONNECTED = 0, | ||
169 | NODECONNECT, | ||
170 | HOSTCONNECT, | ||
171 | DEVICECONNECT, | ||
172 | }; | ||
173 | |||
174 | /* | ||
175 | * ======== stream_chnl ======== | ||
176 | */ | ||
177 | struct stream_chnl { | ||
178 | enum connecttype type; /* Type of stream connection */ | ||
179 | u32 dev_id; /* pipe or channel id */ | ||
180 | }; | ||
181 | |||
182 | /* | ||
183 | * ======== node_object ======== | ||
184 | */ | ||
185 | struct node_object { | ||
186 | struct list_head list_elem; | ||
187 | struct node_mgr *node_mgr; /* The manager of this node */ | ||
188 | struct proc_object *processor; /* Back pointer to processor */ | ||
189 | struct dsp_uuid node_uuid; /* Node's ID */ | ||
190 | s32 prio; /* Node's current priority */ | ||
191 | u32 timeout; /* Timeout for blocking NODE calls */ | ||
192 | u32 heap_size; /* Heap Size */ | ||
193 | u32 dsp_heap_virt_addr; /* Heap virtual address (DSP side) */ | ||
194 | u32 gpp_heap_virt_addr; /* Heap virtual address (GPP side) */ | ||
195 | enum node_type ntype; /* Type of node: message, task, etc */ | ||
196 | enum node_state node_state; /* NODE_ALLOCATED, NODE_CREATED, ... */ | ||
197 | u32 num_inputs; /* Current number of inputs */ | ||
198 | u32 num_outputs; /* Current number of outputs */ | ||
199 | u32 max_input_index; /* Current max input stream index */ | ||
200 | u32 max_output_index; /* Current max output stream index */ | ||
201 | struct stream_chnl *inputs; /* Node's input streams */ | ||
202 | struct stream_chnl *outputs; /* Node's output streams */ | ||
203 | struct node_createargs create_args; /* Args for node create func */ | ||
204 | nodeenv node_env; /* Environment returned by RMS */ | ||
205 | struct dcd_genericobj dcd_props; /* Node properties from DCD */ | ||
206 | struct dsp_cbdata *args; /* Optional args to pass to node */ | ||
207 | struct ntfy_object *ntfy_obj; /* Manages registered notifications */ | ||
208 | char *str_dev_name; /* device name, if device node */ | ||
209 | struct sync_object *sync_done; /* Synchronize node_terminate */ | ||
210 | s32 exit_status; /* execute function return status */ | ||
211 | |||
212 | /* Information needed for node_get_attr() */ | ||
213 | void *device_owner; /* If dev node, task that owns it */ | ||
214 | u32 num_gpp_inputs; /* Current # of streams from GPP */ | ||
215 | u32 num_gpp_outputs; /* Current # of streams to GPP */ | ||
216 | /* Current stream connections */ | ||
217 | struct dsp_streamconnect *stream_connect; | ||
218 | |||
219 | /* Message queue */ | ||
220 | struct msg_queue *msg_queue_obj; | ||
221 | |||
222 | /* These fields used for SM messaging */ | ||
223 | struct cmm_xlatorobject *xlator; /* Node's SM addr translator */ | ||
224 | |||
225 | /* Handle to pass to dynamic loader */ | ||
226 | struct nldr_nodeobject *nldr_node_obj; | ||
227 | bool loaded; /* Code is (dynamically) loaded */ | ||
228 | bool phase_split; /* Phases split in many libs or ovly */ | ||
229 | |||
230 | }; | ||
231 | |||
232 | /* Default buffer attributes */ | ||
233 | static struct dsp_bufferattr node_dfltbufattrs = { | ||
234 | .cb_struct = 0, | ||
235 | .segment_id = 1, | ||
236 | .buf_alignment = 0, | ||
237 | }; | ||
238 | |||
239 | static void delete_node(struct node_object *hnode, | ||
240 | struct process_context *pr_ctxt); | ||
241 | static void delete_node_mgr(struct node_mgr *hnode_mgr); | ||
242 | static void fill_stream_connect(struct node_object *node1, | ||
243 | struct node_object *node2, u32 stream1, | ||
244 | u32 stream2); | ||
245 | static void fill_stream_def(struct node_object *hnode, | ||
246 | struct node_strmdef *pstrm_def, | ||
247 | struct dsp_strmattr *pattrs); | ||
248 | static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream); | ||
249 | static int get_fxn_address(struct node_object *hnode, u32 *fxn_addr, | ||
250 | u32 phase); | ||
251 | static int get_node_props(struct dcd_manager *hdcd_mgr, | ||
252 | struct node_object *hnode, | ||
253 | const struct dsp_uuid *node_uuid, | ||
254 | struct dcd_genericobj *dcd_prop); | ||
255 | static int get_proc_props(struct node_mgr *hnode_mgr, | ||
256 | struct dev_object *hdev_obj); | ||
257 | static int get_rms_fxns(struct node_mgr *hnode_mgr); | ||
258 | static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr, | ||
259 | u32 ul_num_bytes, u32 mem_space); | ||
260 | static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf, | ||
261 | u32 ul_num_bytes, u32 mem_space); | ||
262 | |||
263 | /* Dynamic loader functions. */ | ||
264 | static struct node_ldr_fxns nldr_fxns = { | ||
265 | nldr_allocate, | ||
266 | nldr_create, | ||
267 | nldr_delete, | ||
268 | nldr_get_fxn_addr, | ||
269 | nldr_load, | ||
270 | nldr_unload, | ||
271 | }; | ||
272 | |||
273 | enum node_state node_get_state(void *hnode) | ||
274 | { | ||
275 | struct node_object *pnode = (struct node_object *)hnode; | ||
276 | |||
277 | if (!pnode) | ||
278 | return -1; | ||
279 | return pnode->node_state; | ||
280 | } | ||
281 | |||
282 | /* | ||
283 | * ======== node_allocate ======== | ||
284 | * Purpose: | ||
285 | * Allocate GPP resources to manage a node on the DSP. | ||
286 | */ | ||
287 | int node_allocate(struct proc_object *hprocessor, | ||
288 | const struct dsp_uuid *node_uuid, | ||
289 | const struct dsp_cbdata *pargs, | ||
290 | const struct dsp_nodeattrin *attr_in, | ||
291 | struct node_res_object **noderes, | ||
292 | struct process_context *pr_ctxt) | ||
293 | { | ||
294 | struct node_mgr *hnode_mgr; | ||
295 | struct dev_object *hdev_obj; | ||
296 | struct node_object *pnode = NULL; | ||
297 | enum node_type node_type = NODE_TASK; | ||
298 | struct node_msgargs *pmsg_args; | ||
299 | struct node_taskargs *ptask_args; | ||
300 | u32 num_streams; | ||
301 | struct bridge_drv_interface *intf_fxns; | ||
302 | int status = 0; | ||
303 | struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */ | ||
304 | u32 proc_id; | ||
305 | u32 pul_value; | ||
306 | u32 dynext_base; | ||
307 | u32 off_set = 0; | ||
308 | u32 ul_stack_seg_val; | ||
309 | struct cfg_hostres *host_res; | ||
310 | struct bridge_dev_context *pbridge_context; | ||
311 | u32 mapped_addr = 0; | ||
312 | u32 map_attrs = 0x0; | ||
313 | struct dsp_processorstate proc_state; | ||
314 | #ifdef DSP_DMM_DEBUG | ||
315 | struct dmm_object *dmm_mgr; | ||
316 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
317 | #endif | ||
318 | |||
319 | void *node_res; | ||
320 | |||
321 | *noderes = NULL; | ||
322 | |||
323 | status = proc_get_processor_id(hprocessor, &proc_id); | ||
324 | |||
325 | if (proc_id != DSP_UNIT) | ||
326 | goto func_end; | ||
327 | |||
328 | status = proc_get_dev_object(hprocessor, &hdev_obj); | ||
329 | if (!status) { | ||
330 | status = dev_get_node_manager(hdev_obj, &hnode_mgr); | ||
331 | if (hnode_mgr == NULL) | ||
332 | status = -EPERM; | ||
333 | |||
334 | } | ||
335 | |||
336 | if (status) | ||
337 | goto func_end; | ||
338 | |||
339 | status = dev_get_bridge_context(hdev_obj, &pbridge_context); | ||
340 | if (!pbridge_context) { | ||
341 | status = -EFAULT; | ||
342 | goto func_end; | ||
343 | } | ||
344 | |||
345 | status = proc_get_state(hprocessor, &proc_state, | ||
346 | sizeof(struct dsp_processorstate)); | ||
347 | if (status) | ||
348 | goto func_end; | ||
349 | /* If the processor is in the error state, don't attempt | ||
350 | the allocation */ | ||
351 | if (proc_state.proc_state == PROC_ERROR) { | ||
352 | status = -EPERM; | ||
353 | goto func_end; | ||
354 | } | ||
355 | |||
356 | /* Assuming that 0 is not a valid function address */ | ||
357 | if (hnode_mgr->fxn_addrs[0] == 0) { | ||
358 | /* No RMS on target - we currently can't handle this */ | ||
359 | pr_err("%s: Failed, no RMS in base image\n", __func__); | ||
360 | status = -EPERM; | ||
361 | } else { | ||
362 | /* Validate attr_in fields, if non-NULL */ | ||
363 | if (attr_in) { | ||
364 | /* Check if attr_in->prio is within range */ | ||
365 | if (attr_in->prio < hnode_mgr->min_pri || | ||
366 | attr_in->prio > hnode_mgr->max_pri) | ||
367 | status = -EDOM; | ||
368 | } | ||
369 | } | ||
370 | /* Allocate node object and fill in */ | ||
371 | if (status) | ||
372 | goto func_end; | ||
373 | |||
374 | pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL); | ||
375 | if (pnode == NULL) { | ||
376 | status = -ENOMEM; | ||
377 | goto func_end; | ||
378 | } | ||
379 | pnode->node_mgr = hnode_mgr; | ||
380 | /* This critical section protects get_node_props */ | ||
381 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
382 | |||
383 | /* Get dsp_ndbprops from node database */ | ||
384 | status = get_node_props(hnode_mgr->dcd_mgr, pnode, node_uuid, | ||
385 | &(pnode->dcd_props)); | ||
386 | if (status) | ||
387 | goto func_cont; | ||
388 | |||
389 | pnode->node_uuid = *node_uuid; | ||
390 | pnode->processor = hprocessor; | ||
391 | pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype; | ||
392 | pnode->timeout = pnode->dcd_props.obj_data.node_obj.ndb_props.timeout; | ||
393 | pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio; | ||
394 | |||
395 | /* Currently only C64 DSP builds support Node Dynamic heaps */ | ||
396 | /* Allocate memory for node heap */ | ||
397 | pnode->create_args.asa.task_arg_obj.heap_size = 0; | ||
398 | pnode->create_args.asa.task_arg_obj.dsp_heap_addr = 0; | ||
399 | pnode->create_args.asa.task_arg_obj.dsp_heap_res_addr = 0; | ||
400 | pnode->create_args.asa.task_arg_obj.gpp_heap_addr = 0; | ||
401 | if (!attr_in) | ||
402 | goto func_cont; | ||
403 | |||
404 | /* Check if we have a user allocated node heap */ | ||
405 | if (!(attr_in->pgpp_virt_addr)) | ||
406 | goto func_cont; | ||
407 | |||
408 | /* check for page aligned Heap size */ | ||
409 | if (((attr_in->heap_size) & (PG_SIZE4K - 1))) { | ||
410 | pr_err("%s: node heap size not aligned to 4K, size = 0x%x\n", | ||
411 | __func__, attr_in->heap_size); | ||
412 | status = -EINVAL; | ||
413 | } else { | ||
414 | pnode->create_args.asa.task_arg_obj.heap_size = | ||
415 | attr_in->heap_size; | ||
416 | pnode->create_args.asa.task_arg_obj.gpp_heap_addr = | ||
417 | (u32) attr_in->pgpp_virt_addr; | ||
418 | } | ||
419 | if (status) | ||
420 | goto func_cont; | ||
421 | |||
422 | status = proc_reserve_memory(hprocessor, | ||
423 | pnode->create_args.asa.task_arg_obj. | ||
424 | heap_size + PAGE_SIZE, | ||
425 | (void **)&(pnode->create_args.asa. | ||
426 | task_arg_obj.dsp_heap_res_addr), | ||
427 | pr_ctxt); | ||
428 | if (status) { | ||
429 | pr_err("%s: Failed to reserve memory for heap: 0x%x\n", | ||
430 | __func__, status); | ||
431 | goto func_cont; | ||
432 | } | ||
433 | #ifdef DSP_DMM_DEBUG | ||
434 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | ||
435 | if (!dmm_mgr) { | ||
436 | status = DSP_EHANDLE; | ||
437 | goto func_cont; | ||
438 | } | ||
439 | |||
440 | dmm_mem_map_dump(dmm_mgr); | ||
441 | #endif | ||
442 | |||
443 | map_attrs |= DSP_MAPLITTLEENDIAN; | ||
444 | map_attrs |= DSP_MAPELEMSIZE32; | ||
445 | map_attrs |= DSP_MAPVIRTUALADDR; | ||
446 | status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr, | ||
447 | pnode->create_args.asa.task_arg_obj.heap_size, | ||
448 | (void *)pnode->create_args.asa.task_arg_obj. | ||
449 | dsp_heap_res_addr, (void **)&mapped_addr, map_attrs, | ||
450 | pr_ctxt); | ||
451 | if (status) | ||
452 | pr_err("%s: Failed to map memory for Heap: 0x%x\n", | ||
453 | __func__, status); | ||
454 | else | ||
455 | pnode->create_args.asa.task_arg_obj.dsp_heap_addr = | ||
456 | (u32) mapped_addr; | ||
457 | |||
458 | func_cont: | ||
459 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
460 | if (attr_in != NULL) { | ||
461 | /* Overrides of NDB properties */ | ||
462 | pnode->timeout = attr_in->timeout; | ||
463 | pnode->prio = attr_in->prio; | ||
464 | } | ||
465 | /* Create object to manage notifications */ | ||
466 | if (!status) { | ||
467 | pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object), | ||
468 | GFP_KERNEL); | ||
469 | if (pnode->ntfy_obj) | ||
470 | ntfy_init(pnode->ntfy_obj); | ||
471 | else | ||
472 | status = -ENOMEM; | ||
473 | } | ||
474 | |||
475 | if (!status) { | ||
476 | node_type = node_get_type(pnode); | ||
477 | /* Allocate dsp_streamconnect array for device, task, and | ||
478 | * dais socket nodes. */ | ||
479 | if (node_type != NODE_MESSAGE) { | ||
480 | num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode); | ||
481 | pnode->stream_connect = kzalloc(num_streams * | ||
482 | sizeof(struct dsp_streamconnect), | ||
483 | GFP_KERNEL); | ||
484 | if (num_streams > 0 && pnode->stream_connect == NULL) | ||
485 | status = -ENOMEM; | ||
486 | |||
487 | } | ||
488 | if (!status && (node_type == NODE_TASK || | ||
489 | node_type == NODE_DAISSOCKET)) { | ||
490 | /* Allocate arrays for maintaining stream connections */ | ||
491 | pnode->inputs = kzalloc(MAX_INPUTS(pnode) * | ||
492 | sizeof(struct stream_chnl), GFP_KERNEL); | ||
493 | pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) * | ||
494 | sizeof(struct stream_chnl), GFP_KERNEL); | ||
495 | ptask_args = &(pnode->create_args.asa.task_arg_obj); | ||
496 | ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) * | ||
497 | sizeof(struct node_strmdef), | ||
498 | GFP_KERNEL); | ||
499 | ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) * | ||
500 | sizeof(struct node_strmdef), | ||
501 | GFP_KERNEL); | ||
502 | if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL || | ||
503 | ptask_args->strm_in_def | ||
504 | == NULL)) | ||
505 | || (MAX_OUTPUTS(pnode) > 0 | ||
506 | && (pnode->outputs == NULL | ||
507 | || ptask_args->strm_out_def == NULL))) | ||
508 | status = -ENOMEM; | ||
509 | } | ||
510 | } | ||
511 | if (!status && (node_type != NODE_DEVICE)) { | ||
512 | /* Create an event that will be posted when RMS_EXIT is | ||
513 | * received. */ | ||
514 | pnode->sync_done = kzalloc(sizeof(struct sync_object), | ||
515 | GFP_KERNEL); | ||
516 | if (pnode->sync_done) | ||
517 | sync_init_event(pnode->sync_done); | ||
518 | else | ||
519 | status = -ENOMEM; | ||
520 | |||
521 | if (!status) { | ||
522 | /* Get the shared mem mgr for this node's dev object */ | ||
523 | status = cmm_get_handle(hprocessor, &hcmm_mgr); | ||
524 | if (!status) { | ||
525 | /* Allocate a SM addr translator for this node | ||
526 | * w/ deflt attr */ | ||
527 | status = cmm_xlator_create(&pnode->xlator, | ||
528 | hcmm_mgr, NULL); | ||
529 | } | ||
530 | } | ||
531 | if (!status) { | ||
532 | /* Fill in message args */ | ||
533 | if ((pargs != NULL) && (pargs->cb_data > 0)) { | ||
534 | pmsg_args = | ||
535 | &(pnode->create_args.asa.node_msg_args); | ||
536 | pmsg_args->pdata = kzalloc(pargs->cb_data, | ||
537 | GFP_KERNEL); | ||
538 | if (pmsg_args->pdata == NULL) { | ||
539 | status = -ENOMEM; | ||
540 | } else { | ||
541 | pmsg_args->arg_length = pargs->cb_data; | ||
542 | memcpy(pmsg_args->pdata, | ||
543 | pargs->node_data, | ||
544 | pargs->cb_data); | ||
545 | } | ||
546 | } | ||
547 | } | ||
548 | } | ||
549 | |||
550 | if (!status && node_type != NODE_DEVICE) { | ||
551 | /* Create a message queue for this node */ | ||
552 | intf_fxns = hnode_mgr->intf_fxns; | ||
553 | status = | ||
554 | (*intf_fxns->msg_create_queue) (hnode_mgr->msg_mgr_obj, | ||
555 | &pnode->msg_queue_obj, | ||
556 | 0, | ||
557 | pnode->create_args.asa. | ||
558 | node_msg_args.max_msgs, | ||
559 | pnode); | ||
560 | } | ||
561 | |||
562 | if (!status) { | ||
563 | /* Create object for dynamic loading */ | ||
564 | |||
565 | status = hnode_mgr->nldr_fxns.allocate(hnode_mgr->nldr_obj, | ||
566 | (void *)pnode, | ||
567 | &pnode->dcd_props. | ||
568 | obj_data.node_obj, | ||
569 | &pnode-> | ||
570 | nldr_node_obj, | ||
571 | &pnode->phase_split); | ||
572 | } | ||
573 | |||
574 | /* If the stack segment name read from the node properties matches | ||
575 | * STACKSEGLABEL, look up the symbol's DSP address, compute the | ||
576 | * corresponding GPP address, read the value stored there and use it | ||
577 | * to override the stack_seg value in the task args */ | ||
578 | if (!status && | ||
579 | (char *)pnode->dcd_props.obj_data.node_obj.ndb_props. | ||
580 | stack_seg_name != NULL) { | ||
581 | if (strcmp((char *) | ||
582 | pnode->dcd_props.obj_data.node_obj.ndb_props. | ||
583 | stack_seg_name, STACKSEGLABEL) == 0) { | ||
584 | void __iomem *stack_seg; | ||
585 | u32 stack_seg_pa; | ||
586 | |||
587 | status = | ||
588 | hnode_mgr->nldr_fxns. | ||
589 | get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG", | ||
590 | &dynext_base); | ||
591 | if (status) | ||
592 | pr_err("%s: Failed to get addr for DYNEXT_BEG" | ||
593 | " status = 0x%x\n", __func__, status); | ||
594 | |||
595 | status = | ||
596 | hnode_mgr->nldr_fxns. | ||
597 | get_fxn_addr(pnode->nldr_node_obj, | ||
598 | "L1DSRAM_HEAP", &pul_value); | ||
599 | |||
600 | if (status) | ||
601 | pr_err("%s: Failed to get addr for L1DSRAM_HEAP" | ||
602 | " status = 0x%x\n", __func__, status); | ||
603 | |||
604 | host_res = pbridge_context->resources; | ||
605 | if (!host_res) | ||
606 | status = -EPERM; | ||
607 | |||
608 | if (status) { | ||
609 | pr_err("%s: Failed to get host resource, status" | ||
610 | " = 0x%x\n", __func__, status); | ||
611 | goto func_end; | ||
612 | } | ||
613 | |||
614 | off_set = pul_value - dynext_base; | ||
615 | stack_seg_pa = host_res->mem_phys[1] + off_set; | ||
616 | stack_seg = ioremap(stack_seg_pa, SZ_32); | ||
617 | if (!stack_seg) { | ||
618 | status = -ENOMEM; | ||
619 | goto func_end; | ||
620 | } | ||
621 | |||
622 | ul_stack_seg_val = readl(stack_seg); | ||
623 | |||
624 | iounmap(stack_seg); | ||
625 | |||
626 | dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr =" | ||
627 | " 0x%x\n", __func__, ul_stack_seg_val, | ||
628 | host_res->mem_base[1] + off_set); | ||
629 | |||
630 | pnode->create_args.asa.task_arg_obj.stack_seg = | ||
631 | ul_stack_seg_val; | ||
632 | |||
633 | } | ||
634 | } | ||
635 | |||
636 | if (!status) { | ||
637 | /* Add the node to the node manager's list of allocated | ||
638 | * nodes. */ | ||
639 | NODE_SET_STATE(pnode, NODE_ALLOCATED); | ||
640 | |||
641 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
642 | |||
643 | list_add_tail(&pnode->list_elem, &hnode_mgr->node_list); | ||
644 | ++(hnode_mgr->num_nodes); | ||
645 | |||
646 | /* Exit critical section */ | ||
647 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
648 | |||
649 | /* Preset this to assume phases are split | ||
650 | * (for overlay and dll) */ | ||
651 | pnode->phase_split = true; | ||
652 | |||
653 | /* Notify all clients registered for DSP_NODESTATECHANGE. */ | ||
654 | proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE); | ||
655 | } else { | ||
656 | /* Cleanup */ | ||
657 | if (pnode) | ||
658 | delete_node(pnode, pr_ctxt); | ||
659 | |||
660 | } | ||
661 | |||
662 | if (!status) { | ||
663 | status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt); | ||
664 | if (status) { | ||
665 | delete_node(pnode, pr_ctxt); | ||
666 | goto func_end; | ||
667 | } | ||
668 | |||
669 | *noderes = (struct node_res_object *)node_res; | ||
670 | drv_proc_node_update_heap_status(node_res, true); | ||
671 | drv_proc_node_update_status(node_res, true); | ||
672 | } | ||
673 | func_end: | ||
674 | dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p " | ||
675 | "node_res: %p status: 0x%x\n", __func__, hprocessor, | ||
676 | node_uuid, pargs, attr_in, noderes, status); | ||
677 | return status; | ||
678 | } | ||
679 | |||
680 | /* | ||
681 | * ======== node_alloc_msg_buf ======== | ||
682 | * Purpose: | ||
683 | * Allocates buffer for zero copy messaging. | ||
684 | */ | ||
685 | DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize, | ||
686 | struct dsp_bufferattr *pattr, | ||
687 | u8 **pbuffer) | ||
688 | { | ||
689 | struct node_object *pnode = (struct node_object *)hnode; | ||
690 | int status = 0; | ||
691 | bool va_flag = false; | ||
692 | bool set_info; | ||
693 | u32 proc_id; | ||
694 | |||
695 | if (!pnode) | ||
696 | status = -EFAULT; | ||
697 | else if (node_get_type(pnode) == NODE_DEVICE) | ||
698 | status = -EPERM; | ||
699 | |||
700 | if (status) | ||
701 | goto func_end; | ||
702 | |||
703 | if (pattr == NULL) | ||
704 | pattr = &node_dfltbufattrs; /* set defaults */ | ||
705 | |||
706 | status = proc_get_processor_id(pnode->processor, &proc_id); | ||
707 | if (proc_id != DSP_UNIT) | ||
708 | goto func_end; | ||
709 | |||
710 | /* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a | ||
711 | * virt address, so set this info in this node's translator | ||
712 | * object for future ref. If MEM_GETVIRTUALSEGID then retrieve | ||
713 | * virtual address from node's translator. */ | ||
714 | if ((pattr->segment_id & MEM_SETVIRTUALSEGID) || | ||
715 | (pattr->segment_id & MEM_GETVIRTUALSEGID)) { | ||
716 | va_flag = true; | ||
717 | set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ? | ||
718 | true : false; | ||
719 | /* Clear mask bits */ | ||
720 | pattr->segment_id &= ~MEM_MASKVIRTUALSEGID; | ||
721 | /* Set/get this node's translators virtual address base/size */ | ||
722 | status = cmm_xlator_info(pnode->xlator, pbuffer, usize, | ||
723 | pattr->segment_id, set_info); | ||
724 | } | ||
725 | if (!status && (!va_flag)) { | ||
726 | if (pattr->segment_id != 1) { | ||
727 | /* Node supports single SM segment only. */ | ||
728 | status = -EBADR; | ||
729 | } | ||
730 | /* Arbitrary SM buffer alignment not supported for host side | ||
731 | * allocs, but guaranteed for the following alignment | ||
732 | * values. */ | ||
733 | switch (pattr->buf_alignment) { | ||
734 | case 0: | ||
735 | case 1: | ||
736 | case 2: | ||
737 | case 4: | ||
738 | break; | ||
739 | default: | ||
740 |			/* alignment value not supported */ | ||
741 | status = -EPERM; | ||
742 | break; | ||
743 | } | ||
744 | if (!status) { | ||
745 | /* allocate physical buffer from seg_id in node's | ||
746 | * translator */ | ||
747 | (void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer, | ||
748 | usize); | ||
749 | if (*pbuffer == NULL) { | ||
750 | pr_err("%s: error - Out of shared memory\n", | ||
751 | __func__); | ||
752 | status = -ENOMEM; | ||
753 | } | ||
754 | } | ||
755 | } | ||
756 | func_end: | ||
757 | return status; | ||
758 | } | ||
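A minimal, hypothetical caller sketch for the two message-buffer helpers in this
file (node_alloc_msg_buf()/node_free_msg_buf()); the node handle, the 4096-byte
size and the attribute values are illustrative assumptions, not driver code:

	/* allocate a buffer from SM segment 1 with 4-byte alignment */
	struct dsp_bufferattr attr = { .segment_id = 1, .buf_alignment = 4 };
	u8 *buf = NULL;
	int err;

	err = node_alloc_msg_buf(hnode, 4096, &attr, &buf);
	if (!err) {
		/* ... hand the buffer to the DSP via node_put_message() ... */
		node_free_msg_buf(hnode, buf, &attr);
	}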
759 | |||
760 | /* | ||
761 | * ======== node_change_priority ======== | ||
762 | * Purpose: | ||
763 | * Change the priority of a node in the allocated state, or that is | ||
764 | * currently running or paused on the target. | ||
765 | */ | ||
766 | int node_change_priority(struct node_object *hnode, s32 prio) | ||
767 | { | ||
768 | struct node_object *pnode = (struct node_object *)hnode; | ||
769 | struct node_mgr *hnode_mgr = NULL; | ||
770 | enum node_type node_type; | ||
771 | enum node_state state; | ||
772 | int status = 0; | ||
773 | u32 proc_id; | ||
774 | |||
775 | if (!hnode || !hnode->node_mgr) { | ||
776 | status = -EFAULT; | ||
777 | } else { | ||
778 | hnode_mgr = hnode->node_mgr; | ||
779 | node_type = node_get_type(hnode); | ||
780 | if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) | ||
781 | status = -EPERM; | ||
782 | else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri) | ||
783 | status = -EDOM; | ||
784 | } | ||
785 | if (status) | ||
786 | goto func_end; | ||
787 | |||
788 | /* Enter critical section */ | ||
789 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
790 | |||
791 | state = node_get_state(hnode); | ||
792 | if (state == NODE_ALLOCATED || state == NODE_PAUSED) { | ||
793 | NODE_SET_PRIORITY(hnode, prio); | ||
794 | } else { | ||
795 | if (state != NODE_RUNNING) { | ||
796 | status = -EBADR; | ||
797 | goto func_cont; | ||
798 | } | ||
799 | status = proc_get_processor_id(pnode->processor, &proc_id); | ||
800 | if (proc_id == DSP_UNIT) { | ||
801 | status = | ||
802 | disp_node_change_priority(hnode_mgr->disp_obj, | ||
803 | hnode, | ||
804 | hnode_mgr->fxn_addrs | ||
805 | [RMSCHANGENODEPRIORITY], | ||
806 | hnode->node_env, prio); | ||
807 | } | ||
808 | if (status >= 0) | ||
809 | NODE_SET_PRIORITY(hnode, prio); | ||
810 | |||
811 | } | ||
812 | func_cont: | ||
813 | /* Leave critical section */ | ||
814 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
815 | func_end: | ||
816 | return status; | ||
817 | } | ||
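Illustrative only: raising a task node's priority and reporting the two error
codes node_change_priority() distinguishes above (the value 5 is a made-up
priority, and pr_info() is used purely for the sketch):

	int err = node_change_priority(hnode, 5);

	if (err == -EDOM)
		pr_info("priority outside the manager's min/max range\n");
	else if (err == -EBADR)
		pr_info("node is neither allocated, paused nor running\n");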
818 | |||
819 | /* | ||
820 | * ======== node_connect ======== | ||
821 | * Purpose: | ||
822 | * Connect two nodes on the DSP, or a node on the DSP to the GPP. | ||
823 | */ | ||
824 | int node_connect(struct node_object *node1, u32 stream1, | ||
825 | struct node_object *node2, | ||
826 | u32 stream2, struct dsp_strmattr *pattrs, | ||
827 | struct dsp_cbdata *conn_param) | ||
828 | { | ||
829 | struct node_mgr *hnode_mgr; | ||
830 | char *pstr_dev_name = NULL; | ||
831 | enum node_type node1_type = NODE_TASK; | ||
832 | enum node_type node2_type = NODE_TASK; | ||
833 | enum dsp_strmmode strm_mode; | ||
834 | struct node_strmdef *pstrm_def; | ||
835 | struct node_strmdef *input = NULL; | ||
836 | struct node_strmdef *output = NULL; | ||
837 | struct node_object *dev_node_obj; | ||
838 | struct node_object *hnode; | ||
839 | struct stream_chnl *pstream; | ||
840 | u32 pipe_id; | ||
841 | u32 chnl_id; | ||
842 | s8 chnl_mode; | ||
843 | u32 dw_length; | ||
844 | int status = 0; | ||
845 | |||
846 | if (!node1 || !node2) | ||
847 | return -EFAULT; | ||
848 | |||
849 | /* The two nodes must be on the same processor */ | ||
850 | if (node1 != (struct node_object *)DSP_HGPPNODE && | ||
851 | node2 != (struct node_object *)DSP_HGPPNODE && | ||
852 | node1->node_mgr != node2->node_mgr) | ||
853 | return -EPERM; | ||
854 | |||
855 | /* Cannot connect a node to itself */ | ||
856 | if (node1 == node2) | ||
857 | return -EPERM; | ||
858 | |||
859 | /* node_get_type() will return NODE_GPP if hnode = DSP_HGPPNODE. */ | ||
860 | node1_type = node_get_type(node1); | ||
861 | node2_type = node_get_type(node2); | ||
862 | /* Check stream indices ranges */ | ||
863 | if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE && | ||
864 | stream1 >= MAX_OUTPUTS(node1)) || | ||
865 | (node2_type != NODE_GPP && node2_type != NODE_DEVICE && | ||
866 | stream2 >= MAX_INPUTS(node2))) | ||
867 | return -EINVAL; | ||
868 | |||
869 | /* | ||
870 | * Only the following types of connections are allowed: | ||
871 | * task/dais socket < == > task/dais socket | ||
872 | * task/dais socket < == > device | ||
873 | * task/dais socket < == > GPP | ||
874 | * | ||
875 |	 * i.e., no message nodes, and at least one task or dais | ||
876 | * socket node. | ||
877 | */ | ||
878 | if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE || | ||
879 | (node1_type != NODE_TASK && | ||
880 | node1_type != NODE_DAISSOCKET && | ||
881 | node2_type != NODE_TASK && | ||
882 | node2_type != NODE_DAISSOCKET)) | ||
883 | return -EPERM; | ||
884 | /* | ||
885 | * Check stream mode. Default is STRMMODE_PROCCOPY. | ||
886 | */ | ||
887 | if (pattrs && pattrs->strm_mode != STRMMODE_PROCCOPY) | ||
888 | return -EPERM; /* illegal stream mode */ | ||
889 | |||
890 | if (node1_type != NODE_GPP) | ||
891 | hnode_mgr = node1->node_mgr; | ||
892 | else | ||
893 | hnode_mgr = node2->node_mgr; | ||
894 | |||
895 | /* Enter critical section */ | ||
896 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
897 | |||
898 | /* Nodes must be in the allocated state */ | ||
899 | if (node1_type != NODE_GPP && | ||
900 | node_get_state(node1) != NODE_ALLOCATED) { | ||
901 | status = -EBADR; | ||
902 | goto out_unlock; | ||
903 | } | ||
904 | |||
905 | if (node2_type != NODE_GPP && | ||
906 | node_get_state(node2) != NODE_ALLOCATED) { | ||
907 | status = -EBADR; | ||
908 | goto out_unlock; | ||
909 | } | ||
910 | |||
911 | /* | ||
912 | * Check that stream indices for task and dais socket nodes | ||
913 | * are not already be used. (Device nodes checked later) | ||
914 | */ | ||
915 | if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) { | ||
916 | output = &(node1->create_args.asa. | ||
917 | task_arg_obj.strm_out_def[stream1]); | ||
918 | if (output->sz_device) { | ||
919 | status = -EISCONN; | ||
920 | goto out_unlock; | ||
921 | } | ||
922 | |||
923 | } | ||
924 | if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) { | ||
925 | input = &(node2->create_args.asa. | ||
926 | task_arg_obj.strm_in_def[stream2]); | ||
927 | if (input->sz_device) { | ||
928 | status = -EISCONN; | ||
929 | goto out_unlock; | ||
930 | } | ||
931 | |||
932 | } | ||
933 | /* Connecting two task nodes? */ | ||
934 | if ((node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) && | ||
935 | (node2_type == NODE_TASK || | ||
936 | node2_type == NODE_DAISSOCKET)) { | ||
937 | /* Find available pipe */ | ||
938 | pipe_id = find_first_zero_bit(hnode_mgr->pipe_map, MAXPIPES); | ||
939 | if (pipe_id == MAXPIPES) { | ||
940 | status = -ECONNREFUSED; | ||
941 | goto out_unlock; | ||
942 | } | ||
943 | set_bit(pipe_id, hnode_mgr->pipe_map); | ||
944 | node1->outputs[stream1].type = NODECONNECT; | ||
945 | node2->inputs[stream2].type = NODECONNECT; | ||
946 | node1->outputs[stream1].dev_id = pipe_id; | ||
947 | node2->inputs[stream2].dev_id = pipe_id; | ||
948 | output->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL); | ||
949 | input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL); | ||
950 | if (!output->sz_device || !input->sz_device) { | ||
951 | /* Undo the connection */ | ||
952 | kfree(output->sz_device); | ||
953 | kfree(input->sz_device); | ||
954 | clear_bit(pipe_id, hnode_mgr->pipe_map); | ||
955 | status = -ENOMEM; | ||
956 | goto out_unlock; | ||
957 | } | ||
958 |		/* Copy the "/dbpipe<pipe_id>" name to both device names */ | ||
959 | sprintf(output->sz_device, "%s%d", PIPEPREFIX, pipe_id); | ||
960 | strcpy(input->sz_device, output->sz_device); | ||
961 | } | ||
962 | /* Connecting task node to host? */ | ||
963 | if (node1_type == NODE_GPP || node2_type == NODE_GPP) { | ||
964 | pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL); | ||
965 | if (!pstr_dev_name) { | ||
966 | status = -ENOMEM; | ||
967 | goto out_unlock; | ||
968 | } | ||
969 | |||
970 | chnl_mode = (node1_type == NODE_GPP) ? | ||
971 | CHNL_MODETODSP : CHNL_MODEFROMDSP; | ||
972 | |||
973 | /* | ||
974 | * Reserve a channel id. We need to put the name "/host<id>" | ||
975 | * in the node's create_args, but the host | ||
976 | * side channel will not be opened until DSPStream_Open is | ||
977 | * called for this node. | ||
978 | */ | ||
979 | strm_mode = pattrs ? pattrs->strm_mode : STRMMODE_PROCCOPY; | ||
980 | switch (strm_mode) { | ||
981 | case STRMMODE_RDMA: | ||
982 | chnl_id = find_first_zero_bit(hnode_mgr->dma_chnl_map, | ||
983 | CHNL_MAXCHANNELS); | ||
984 | if (chnl_id < CHNL_MAXCHANNELS) { | ||
985 | set_bit(chnl_id, hnode_mgr->dma_chnl_map); | ||
986 | /* dma chans are 2nd transport chnl set | ||
987 |				 * ids (e.g. 16-31) */ | ||
988 | chnl_id = chnl_id + hnode_mgr->num_chnls; | ||
989 | } | ||
990 | break; | ||
991 | case STRMMODE_ZEROCOPY: | ||
992 | chnl_id = find_first_zero_bit(hnode_mgr->zc_chnl_map, | ||
993 | CHNL_MAXCHANNELS); | ||
994 | if (chnl_id < CHNL_MAXCHANNELS) { | ||
995 | set_bit(chnl_id, hnode_mgr->zc_chnl_map); | ||
996 |				/* zero-copy chans are 3rd transport set | ||
997 | * (e.g. 32-47) */ | ||
998 | chnl_id = chnl_id + | ||
999 | (2 * hnode_mgr->num_chnls); | ||
1000 | } | ||
1001 | break; | ||
1002 | case STRMMODE_PROCCOPY: | ||
1003 | chnl_id = find_first_zero_bit(hnode_mgr->chnl_map, | ||
1004 | CHNL_MAXCHANNELS); | ||
1005 | if (chnl_id < CHNL_MAXCHANNELS) | ||
1006 | set_bit(chnl_id, hnode_mgr->chnl_map); | ||
1007 | break; | ||
1008 | default: | ||
1009 | status = -EINVAL; | ||
1010 | goto out_unlock; | ||
1011 | } | ||
1012 | if (chnl_id == CHNL_MAXCHANNELS) { | ||
1013 | status = -ECONNREFUSED; | ||
1014 | goto out_unlock; | ||
1015 | } | ||
1016 | |||
1017 | if (node1 == (struct node_object *)DSP_HGPPNODE) { | ||
1018 | node2->inputs[stream2].type = HOSTCONNECT; | ||
1019 | node2->inputs[stream2].dev_id = chnl_id; | ||
1020 | input->sz_device = pstr_dev_name; | ||
1021 | } else { | ||
1022 | node1->outputs[stream1].type = HOSTCONNECT; | ||
1023 | node1->outputs[stream1].dev_id = chnl_id; | ||
1024 | output->sz_device = pstr_dev_name; | ||
1025 | } | ||
1026 | sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id); | ||
1027 | } | ||
1028 | /* Connecting task node to device node? */ | ||
1029 | if ((node1_type == NODE_DEVICE) || (node2_type == NODE_DEVICE)) { | ||
1030 | if (node2_type == NODE_DEVICE) { | ||
1031 | /* node1 == > device */ | ||
1032 | dev_node_obj = node2; | ||
1033 | hnode = node1; | ||
1034 | pstream = &(node1->outputs[stream1]); | ||
1035 | pstrm_def = output; | ||
1036 | } else { | ||
1037 | /* device == > node2 */ | ||
1038 | dev_node_obj = node1; | ||
1039 | hnode = node2; | ||
1040 | pstream = &(node2->inputs[stream2]); | ||
1041 | pstrm_def = input; | ||
1042 | } | ||
1043 | /* Set up create args */ | ||
1044 | pstream->type = DEVICECONNECT; | ||
1045 | dw_length = strlen(dev_node_obj->str_dev_name); | ||
1046 | if (conn_param) | ||
1047 | pstrm_def->sz_device = kzalloc(dw_length + 1 + | ||
1048 | conn_param->cb_data, | ||
1049 | GFP_KERNEL); | ||
1050 | else | ||
1051 | pstrm_def->sz_device = kzalloc(dw_length + 1, | ||
1052 | GFP_KERNEL); | ||
1053 | if (!pstrm_def->sz_device) { | ||
1054 | status = -ENOMEM; | ||
1055 | goto out_unlock; | ||
1056 | } | ||
1057 | /* Copy device name */ | ||
1058 | strncpy(pstrm_def->sz_device, | ||
1059 | dev_node_obj->str_dev_name, dw_length); | ||
1060 | if (conn_param) | ||
1061 | strncat(pstrm_def->sz_device, | ||
1062 | (char *)conn_param->node_data, | ||
1063 | (u32) conn_param->cb_data); | ||
1064 | dev_node_obj->device_owner = hnode; | ||
1065 | } | ||
1066 | /* Fill in create args */ | ||
1067 | if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) { | ||
1068 | node1->create_args.asa.task_arg_obj.num_outputs++; | ||
1069 | fill_stream_def(node1, output, pattrs); | ||
1070 | } | ||
1071 | if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) { | ||
1072 | node2->create_args.asa.task_arg_obj.num_inputs++; | ||
1073 | fill_stream_def(node2, input, pattrs); | ||
1074 | } | ||
1075 | /* Update node1 and node2 stream_connect */ | ||
1076 | if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) { | ||
1077 | node1->num_outputs++; | ||
1078 | if (stream1 > node1->max_output_index) | ||
1079 | node1->max_output_index = stream1; | ||
1080 | |||
1081 | } | ||
1082 | if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) { | ||
1083 | node2->num_inputs++; | ||
1084 | if (stream2 > node2->max_input_index) | ||
1085 | node2->max_input_index = stream2; | ||
1086 | |||
1087 | } | ||
1088 | fill_stream_connect(node1, node2, stream1, stream2); | ||
1089 | /* end of sync_enter_cs */ | ||
1090 | /* Exit critical section */ | ||
1091 | out_unlock: | ||
1092 | if (status && pstr_dev_name) | ||
1093 | kfree(pstr_dev_name); | ||
1094 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
1095 |	dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d " | ||
1096 | "pattrs: %p status: 0x%x\n", __func__, node1, | ||
1097 | stream1, node2, stream2, pattrs, status); | ||
1098 | return status; | ||
1099 | } | ||
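A hedged connection sketch based on the rules node_connect() enforces above:
stream 0 of a producer task node feeds stream 0 of a consumer, and a second
producer output is wired to the GPP through the DSP_HGPPNODE pseudo-handle.
Both node handles are assumed to come from earlier node_allocate() calls:

	int err;

	/* task -> task: reserves a /dbpipe<n> pipe from pipe_map */
	err = node_connect(producer, 0, consumer, 0, NULL, NULL);
	if (err)
		return err;

	/* task -> GPP: reserves a /host<n> channel in STRMMODE_PROCCOPY */
	err = node_connect(producer, 1,
			   (struct node_object *)DSP_HGPPNODE, 0, NULL, NULL);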
1100 | |||
1101 | /* | ||
1102 | * ======== node_create ======== | ||
1103 | * Purpose: | ||
1104 | * Create a node on the DSP by remotely calling the node's create function. | ||
1105 | */ | ||
1106 | int node_create(struct node_object *hnode) | ||
1107 | { | ||
1108 | struct node_object *pnode = (struct node_object *)hnode; | ||
1109 | struct node_mgr *hnode_mgr; | ||
1110 | struct bridge_drv_interface *intf_fxns; | ||
1111 | u32 ul_create_fxn; | ||
1112 | enum node_type node_type; | ||
1113 | int status = 0; | ||
1114 | int status1 = 0; | ||
1115 | struct dsp_cbdata cb_data; | ||
1116 | u32 proc_id = 255; | ||
1117 | struct dsp_processorstate proc_state; | ||
1118 | struct proc_object *hprocessor; | ||
1119 | #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) | ||
1120 | struct dspbridge_platform_data *pdata = | ||
1121 | omap_dspbridge_dev->dev.platform_data; | ||
1122 | #endif | ||
1123 | |||
1124 | if (!pnode) { | ||
1125 | status = -EFAULT; | ||
1126 | goto func_end; | ||
1127 | } | ||
1128 | hprocessor = hnode->processor; | ||
1129 | status = proc_get_state(hprocessor, &proc_state, | ||
1130 | sizeof(struct dsp_processorstate)); | ||
1131 | if (status) | ||
1132 | goto func_end; | ||
1133 | /* If processor is in error state then don't attempt to create | ||
1134 | new node */ | ||
1135 | if (proc_state.proc_state == PROC_ERROR) { | ||
1136 | status = -EPERM; | ||
1137 | goto func_end; | ||
1138 | } | ||
1139 | /* create struct dsp_cbdata struct for PWR calls */ | ||
1140 | cb_data.cb_data = PWR_TIMEOUT; | ||
1141 | node_type = node_get_type(hnode); | ||
1142 | hnode_mgr = hnode->node_mgr; | ||
1143 | intf_fxns = hnode_mgr->intf_fxns; | ||
1144 | /* Get access to node dispatcher */ | ||
1145 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
1146 | |||
1147 | /* Check node state */ | ||
1148 | if (node_get_state(hnode) != NODE_ALLOCATED) | ||
1149 | status = -EBADR; | ||
1150 | |||
1151 | if (!status) | ||
1152 | status = proc_get_processor_id(pnode->processor, &proc_id); | ||
1153 | |||
1154 | if (status) | ||
1155 | goto func_cont2; | ||
1156 | |||
1157 | if (proc_id != DSP_UNIT) | ||
1158 | goto func_cont2; | ||
1159 | |||
1160 | /* Make sure streams are properly connected */ | ||
1161 | if ((hnode->num_inputs && hnode->max_input_index > | ||
1162 | hnode->num_inputs - 1) || | ||
1163 | (hnode->num_outputs && hnode->max_output_index > | ||
1164 | hnode->num_outputs - 1)) | ||
1165 | status = -ENOTCONN; | ||
1166 | |||
1167 | if (!status) { | ||
1168 | /* If node's create function is not loaded, load it */ | ||
1169 | /* Boost the OPP level to max level that DSP can be requested */ | ||
1170 | #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) | ||
1171 | if (pdata->cpu_set_freq) | ||
1172 | (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]); | ||
1173 | #endif | ||
1174 | status = hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj, | ||
1175 | NLDR_CREATE); | ||
1176 | /* Get address of node's create function */ | ||
1177 | if (!status) { | ||
1178 | hnode->loaded = true; | ||
1179 | if (node_type != NODE_DEVICE) { | ||
1180 | status = get_fxn_address(hnode, &ul_create_fxn, | ||
1181 | CREATEPHASE); | ||
1182 | } | ||
1183 | } else { | ||
1184 | pr_err("%s: failed to load create code: 0x%x\n", | ||
1185 | __func__, status); | ||
1186 | } | ||
1187 | /* Request the lowest OPP level */ | ||
1188 | #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) | ||
1189 | if (pdata->cpu_set_freq) | ||
1190 | (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]); | ||
1191 | #endif | ||
1192 | /* Get address of iAlg functions, if socket node */ | ||
1193 | if (!status) { | ||
1194 | if (node_type == NODE_DAISSOCKET) { | ||
1195 | status = hnode_mgr->nldr_fxns.get_fxn_addr | ||
1196 | (hnode->nldr_node_obj, | ||
1197 | hnode->dcd_props.obj_data.node_obj. | ||
1198 | str_i_alg_name, | ||
1199 | &hnode->create_args.asa. | ||
1200 | task_arg_obj.dais_arg); | ||
1201 | } | ||
1202 | } | ||
1203 | } | ||
1204 | if (!status) { | ||
1205 | if (node_type != NODE_DEVICE) { | ||
1206 | status = disp_node_create(hnode_mgr->disp_obj, hnode, | ||
1207 | hnode_mgr->fxn_addrs | ||
1208 | [RMSCREATENODE], | ||
1209 | ul_create_fxn, | ||
1210 | &(hnode->create_args), | ||
1211 | &(hnode->node_env)); | ||
1212 | if (status >= 0) { | ||
1213 | /* Set the message queue id to the node env | ||
1214 | * pointer */ | ||
1215 | intf_fxns = hnode_mgr->intf_fxns; | ||
1216 | (*intf_fxns->msg_set_queue_id) (hnode-> | ||
1217 | msg_queue_obj, | ||
1218 | hnode->node_env); | ||
1219 | } | ||
1220 | } | ||
1221 | } | ||
1222 | /* Phase II/Overlays: Create, execute, delete phases possibly in | ||
1223 | * different files/sections. */ | ||
1224 | if (hnode->loaded && hnode->phase_split) { | ||
1225 | /* If create code was dynamically loaded, we can now unload | ||
1226 | * it. */ | ||
1227 | status1 = hnode_mgr->nldr_fxns.unload(hnode->nldr_node_obj, | ||
1228 | NLDR_CREATE); | ||
1229 | hnode->loaded = false; | ||
1230 | } | ||
1231 | if (status1) | ||
1232 | pr_err("%s: Failed to unload create code: 0x%x\n", | ||
1233 | __func__, status1); | ||
1234 | func_cont2: | ||
1235 | /* Update node state and node manager state */ | ||
1236 | if (status >= 0) { | ||
1237 | NODE_SET_STATE(hnode, NODE_CREATED); | ||
1238 | hnode_mgr->num_created++; | ||
1239 | goto func_cont; | ||
1240 | } | ||
1241 | if (status != -EBADR) { | ||
1242 | /* Put back in NODE_ALLOCATED state if error occurred */ | ||
1243 | NODE_SET_STATE(hnode, NODE_ALLOCATED); | ||
1244 | } | ||
1245 | func_cont: | ||
1246 | /* Free access to node dispatcher */ | ||
1247 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
1248 | func_end: | ||
1249 | if (status >= 0) { | ||
1250 | proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE); | ||
1251 | ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE); | ||
1252 | } | ||
1253 | |||
1254 | dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__, | ||
1255 | hnode, status); | ||
1256 | return status; | ||
1257 | } | ||
1258 | |||
1259 | /* | ||
1260 | * ======== node_create_mgr ======== | ||
1261 | * Purpose: | ||
1262 | * Create a NODE Manager object. | ||
1263 | */ | ||
1264 | int node_create_mgr(struct node_mgr **node_man, | ||
1265 | struct dev_object *hdev_obj) | ||
1266 | { | ||
1267 | u32 i; | ||
1268 | struct node_mgr *node_mgr_obj = NULL; | ||
1269 | struct disp_attr disp_attr_obj; | ||
1270 | char *sz_zl_file = ""; | ||
1271 | struct nldr_attrs nldr_attrs_obj; | ||
1272 | int status = 0; | ||
1273 | u8 dev_type; | ||
1274 | |||
1275 | *node_man = NULL; | ||
1276 | /* Allocate Node manager object */ | ||
1277 | node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL); | ||
1278 | if (!node_mgr_obj) | ||
1279 | return -ENOMEM; | ||
1280 | |||
1281 | node_mgr_obj->dev_obj = hdev_obj; | ||
1282 | |||
1283 | node_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object), | ||
1284 | GFP_KERNEL); | ||
1285 | if (!node_mgr_obj->ntfy_obj) { | ||
1286 | status = -ENOMEM; | ||
1287 | goto out_err; | ||
1288 | } | ||
1289 | ntfy_init(node_mgr_obj->ntfy_obj); | ||
1290 | |||
1291 | INIT_LIST_HEAD(&node_mgr_obj->node_list); | ||
1292 | |||
1293 | dev_get_dev_type(hdev_obj, &dev_type); | ||
1294 | |||
1295 | status = dcd_create_manager(sz_zl_file, &node_mgr_obj->dcd_mgr); | ||
1296 | if (status) | ||
1297 | goto out_err; | ||
1298 | |||
1299 | status = get_proc_props(node_mgr_obj, hdev_obj); | ||
1300 | if (status) | ||
1301 | goto out_err; | ||
1302 | |||
1303 | /* Create NODE Dispatcher */ | ||
1304 | disp_attr_obj.chnl_offset = node_mgr_obj->chnl_offset; | ||
1305 | disp_attr_obj.chnl_buf_size = node_mgr_obj->chnl_buf_size; | ||
1306 | disp_attr_obj.proc_family = node_mgr_obj->proc_family; | ||
1307 | disp_attr_obj.proc_type = node_mgr_obj->proc_type; | ||
1308 | |||
1309 | status = disp_create(&node_mgr_obj->disp_obj, hdev_obj, &disp_attr_obj); | ||
1310 | if (status) | ||
1311 | goto out_err; | ||
1312 | |||
1313 | /* Create a STRM Manager */ | ||
1314 | status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj); | ||
1315 | if (status) | ||
1316 | goto out_err; | ||
1317 | |||
1318 | dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns); | ||
1319 | /* Get msg_ctrl queue manager */ | ||
1320 | dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj); | ||
1321 | mutex_init(&node_mgr_obj->node_mgr_lock); | ||
1322 | |||
1323 | /* Block out reserved channels */ | ||
1324 | for (i = 0; i < node_mgr_obj->chnl_offset; i++) | ||
1325 | set_bit(i, node_mgr_obj->chnl_map); | ||
1326 | |||
1327 | /* Block out channels reserved for RMS */ | ||
1328 | set_bit(node_mgr_obj->chnl_offset, node_mgr_obj->chnl_map); | ||
1329 | set_bit(node_mgr_obj->chnl_offset + 1, node_mgr_obj->chnl_map); | ||
1330 | |||
1331 | /* NO RM Server on the IVA */ | ||
1332 | if (dev_type != IVA_UNIT) { | ||
1333 | /* Get addresses of any RMS functions loaded */ | ||
1334 | status = get_rms_fxns(node_mgr_obj); | ||
1335 | if (status) | ||
1336 | goto out_err; | ||
1337 | } | ||
1338 | |||
1339 | /* Get loader functions and create loader */ | ||
1340 | node_mgr_obj->nldr_fxns = nldr_fxns; /* Dyn loader funcs */ | ||
1341 | |||
1342 | nldr_attrs_obj.ovly = ovly; | ||
1343 | nldr_attrs_obj.write = mem_write; | ||
1344 | nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size; | ||
1345 | nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size; | ||
1346 | status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj, | ||
1347 | hdev_obj, | ||
1348 | &nldr_attrs_obj); | ||
1349 | if (status) | ||
1350 | goto out_err; | ||
1351 | |||
1352 | *node_man = node_mgr_obj; | ||
1353 | |||
1354 | return status; | ||
1355 | out_err: | ||
1356 | delete_node_mgr(node_mgr_obj); | ||
1357 | return status; | ||
1358 | } | ||
1359 | |||
1360 | /* | ||
1361 | * ======== node_delete ======== | ||
1362 | * Purpose: | ||
1363 | * Delete a node on the DSP by remotely calling the node's delete function. | ||
1364 | * Loads the node's delete function if necessary. Free GPP side resources | ||
1365 | * after node's delete function returns. | ||
1366 | */ | ||
1367 | int node_delete(struct node_res_object *noderes, | ||
1368 | struct process_context *pr_ctxt) | ||
1369 | { | ||
1370 | struct node_object *pnode = noderes->node; | ||
1371 | struct node_mgr *hnode_mgr; | ||
1372 | struct proc_object *hprocessor; | ||
1373 | struct disp_object *disp_obj; | ||
1374 | u32 ul_delete_fxn; | ||
1375 | enum node_type node_type; | ||
1376 | enum node_state state; | ||
1377 | int status = 0; | ||
1378 | int status1 = 0; | ||
1379 | struct dsp_cbdata cb_data; | ||
1380 | u32 proc_id; | ||
1381 | struct bridge_drv_interface *intf_fxns; | ||
1382 | |||
1383 | void *node_res = noderes; | ||
1384 | |||
1385 | struct dsp_processorstate proc_state; | ||
1386 | |||
1387 | if (!pnode) { | ||
1388 | status = -EFAULT; | ||
1389 | goto func_end; | ||
1390 | } | ||
1391 | /* create struct dsp_cbdata struct for PWR call */ | ||
1392 | cb_data.cb_data = PWR_TIMEOUT; | ||
1393 | hnode_mgr = pnode->node_mgr; | ||
1394 | hprocessor = pnode->processor; | ||
1395 | disp_obj = hnode_mgr->disp_obj; | ||
1396 | node_type = node_get_type(pnode); | ||
1397 | intf_fxns = hnode_mgr->intf_fxns; | ||
1398 | /* Enter critical section */ | ||
1399 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
1400 | |||
1401 | state = node_get_state(pnode); | ||
1402 | /* Execute delete phase code for non-device node in all cases | ||
1403 | * except when the node was only allocated. Delete phase must be | ||
1404 |	 * executed even if the create phase ran but failed. | ||
1405 | * If the node environment pointer is non-NULL, the delete phase | ||
1406 | * code must be executed. */ | ||
1407 | if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) && | ||
1408 | node_type != NODE_DEVICE) { | ||
1409 | status = proc_get_processor_id(pnode->processor, &proc_id); | ||
1410 | if (status) | ||
1411 | goto func_cont1; | ||
1412 | |||
1413 | if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) { | ||
1414 | /* If node has terminated, execute phase code will | ||
1415 | * have already been unloaded in node_on_exit(). If the | ||
1416 | * node is PAUSED, the execute phase is loaded, and it | ||
1417 | * is now ok to unload it. If the node is running, we | ||
1418 | * will unload the execute phase only after deleting | ||
1419 | * the node. */ | ||
1420 | if (state == NODE_PAUSED && pnode->loaded && | ||
1421 | pnode->phase_split) { | ||
1422 | /* Ok to unload execute code as long as node | ||
1423 |				 * is not running */ | ||
1424 | status1 = | ||
1425 | hnode_mgr->nldr_fxns. | ||
1426 | unload(pnode->nldr_node_obj, | ||
1427 | NLDR_EXECUTE); | ||
1428 | pnode->loaded = false; | ||
1429 | NODE_SET_STATE(pnode, NODE_DONE); | ||
1430 | } | ||
1431 |			/* Load delete phase code if not loaded or if the | ||
1432 |			 * EXECUTE phase has not been unloaded */ | ||
1433 | if ((!(pnode->loaded) || (state == NODE_RUNNING)) && | ||
1434 | pnode->phase_split) { | ||
1435 | status = | ||
1436 | hnode_mgr->nldr_fxns. | ||
1437 | load(pnode->nldr_node_obj, NLDR_DELETE); | ||
1438 | if (!status) | ||
1439 | pnode->loaded = true; | ||
1440 | else | ||
1441 | pr_err("%s: fail - load delete code:" | ||
1442 | " 0x%x\n", __func__, status); | ||
1443 | } | ||
1444 | } | ||
1445 | func_cont1: | ||
1446 | if (!status) { | ||
1447 | /* Unblock a thread trying to terminate the node */ | ||
1448 | (void)sync_set_event(pnode->sync_done); | ||
1449 | if (proc_id == DSP_UNIT) { | ||
1450 | /* ul_delete_fxn = address of node's delete | ||
1451 | * function */ | ||
1452 | status = get_fxn_address(pnode, &ul_delete_fxn, | ||
1453 | DELETEPHASE); | ||
1454 | } else if (proc_id == IVA_UNIT) | ||
1455 | ul_delete_fxn = (u32) pnode->node_env; | ||
1456 | if (!status) { | ||
1457 | status = proc_get_state(hprocessor, | ||
1458 | &proc_state, | ||
1459 | sizeof(struct | ||
1460 | dsp_processorstate)); | ||
1461 | if (proc_state.proc_state != PROC_ERROR) { | ||
1462 | status = | ||
1463 | disp_node_delete(disp_obj, pnode, | ||
1464 | hnode_mgr-> | ||
1465 | fxn_addrs | ||
1466 | [RMSDELETENODE], | ||
1467 | ul_delete_fxn, | ||
1468 | pnode->node_env); | ||
1469 | } else | ||
1470 | NODE_SET_STATE(pnode, NODE_DONE); | ||
1471 | |||
1472 | /* Unload execute, if not unloaded, and delete | ||
1473 | * function */ | ||
1474 | if (state == NODE_RUNNING && | ||
1475 | pnode->phase_split) { | ||
1476 | status1 = | ||
1477 | hnode_mgr->nldr_fxns. | ||
1478 | unload(pnode->nldr_node_obj, | ||
1479 | NLDR_EXECUTE); | ||
1480 | } | ||
1481 | if (status1) | ||
1482 | pr_err("%s: fail - unload execute code:" | ||
1483 | " 0x%x\n", __func__, status1); | ||
1484 | |||
1485 | status1 = | ||
1486 | hnode_mgr->nldr_fxns.unload(pnode-> | ||
1487 | nldr_node_obj, | ||
1488 | NLDR_DELETE); | ||
1489 | pnode->loaded = false; | ||
1490 | if (status1) | ||
1491 | pr_err("%s: fail - unload delete code: " | ||
1492 | "0x%x\n", __func__, status1); | ||
1493 | } | ||
1494 | } | ||
1495 | } | ||
1496 | /* Free host side resources even if a failure occurred */ | ||
1497 | /* Remove node from hnode_mgr->node_list */ | ||
1498 | list_del(&pnode->list_elem); | ||
1499 | hnode_mgr->num_nodes--; | ||
1500 | /* Decrement count of nodes created on DSP */ | ||
1501 | if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) && | ||
1502 | (pnode->node_env != (u32) NULL))) | ||
1503 | hnode_mgr->num_created--; | ||
1504 | /* Free host-side resources allocated by node_create() | ||
1505 | * delete_node() fails if SM buffers not freed by client! */ | ||
1506 | drv_proc_node_update_status(node_res, false); | ||
1507 | delete_node(pnode, pr_ctxt); | ||
1508 | |||
1509 | /* | ||
1510 | * Release all Node resources and its context | ||
1511 | */ | ||
1512 | idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id); | ||
1513 | kfree(node_res); | ||
1514 | |||
1515 | /* Exit critical section */ | ||
1516 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
1517 | proc_notify_clients(hprocessor, DSP_NODESTATECHANGE); | ||
1518 | func_end: | ||
1519 | dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status); | ||
1520 | return status; | ||
1521 | } | ||
1522 | |||
1523 | /* | ||
1524 | * ======== node_delete_mgr ======== | ||
1525 | * Purpose: | ||
1526 | * Delete the NODE Manager. | ||
1527 | */ | ||
1528 | int node_delete_mgr(struct node_mgr *hnode_mgr) | ||
1529 | { | ||
1530 | if (!hnode_mgr) | ||
1531 | return -EFAULT; | ||
1532 | |||
1533 | delete_node_mgr(hnode_mgr); | ||
1534 | |||
1535 | return 0; | ||
1536 | } | ||
1537 | |||
1538 | /* | ||
1539 | * ======== node_enum_nodes ======== | ||
1540 | * Purpose: | ||
1541 | * Enumerate currently allocated nodes. | ||
1542 | */ | ||
1543 | int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab, | ||
1544 | u32 node_tab_size, u32 *pu_num_nodes, | ||
1545 | u32 *pu_allocated) | ||
1546 | { | ||
1547 | struct node_object *hnode; | ||
1548 | u32 i = 0; | ||
1549 | int status = 0; | ||
1550 | |||
1551 | if (!hnode_mgr) { | ||
1552 | status = -EFAULT; | ||
1553 | goto func_end; | ||
1554 | } | ||
1555 | /* Enter critical section */ | ||
1556 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
1557 | |||
1558 | if (hnode_mgr->num_nodes > node_tab_size) { | ||
1559 | *pu_allocated = hnode_mgr->num_nodes; | ||
1560 | *pu_num_nodes = 0; | ||
1561 | status = -EINVAL; | ||
1562 | } else { | ||
1563 | list_for_each_entry(hnode, &hnode_mgr->node_list, list_elem) | ||
1564 | node_tab[i++] = hnode; | ||
1565 | *pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes; | ||
1566 | } | ||
1567 | /* end of sync_enter_cs */ | ||
1568 | /* Exit critical section */ | ||
1569 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
1570 | func_end: | ||
1571 | return status; | ||
1572 | } | ||
1573 | |||
1574 | /* | ||
1575 | * ======== node_free_msg_buf ======== | ||
1576 | * Purpose: | ||
1577 | * Frees the message buffer. | ||
1578 | */ | ||
1579 | int node_free_msg_buf(struct node_object *hnode, u8 *pbuffer, | ||
1580 | struct dsp_bufferattr *pattr) | ||
1581 | { | ||
1582 | struct node_object *pnode = (struct node_object *)hnode; | ||
1583 | int status = 0; | ||
1584 | u32 proc_id; | ||
1585 | |||
1586 | if (!hnode) { | ||
1587 | status = -EFAULT; | ||
1588 | goto func_end; | ||
1589 | } | ||
1590 | status = proc_get_processor_id(pnode->processor, &proc_id); | ||
1591 | if (proc_id == DSP_UNIT) { | ||
1592 | if (!status) { | ||
1593 | if (pattr == NULL) { | ||
1594 | /* set defaults */ | ||
1595 | pattr = &node_dfltbufattrs; | ||
1596 | } | ||
1597 | /* Node supports single SM segment only */ | ||
1598 | if (pattr->segment_id != 1) | ||
1599 | status = -EBADR; | ||
1600 | |||
1601 |			/* pbuffer is the client's VA. */ | ||
1602 | status = cmm_xlator_free_buf(pnode->xlator, pbuffer); | ||
1603 | } | ||
1604 | } else { | ||
1605 | } | ||
1606 | func_end: | ||
1607 | return status; | ||
1608 | } | ||
1609 | |||
1610 | /* | ||
1611 | * ======== node_get_attr ======== | ||
1612 | * Purpose: | ||
1613 | * Copy the current attributes of the specified node into a dsp_nodeattr | ||
1614 | * structure. | ||
1615 | */ | ||
1616 | int node_get_attr(struct node_object *hnode, | ||
1617 | struct dsp_nodeattr *pattr, u32 attr_size) | ||
1618 | { | ||
1619 | struct node_mgr *hnode_mgr; | ||
1620 | |||
1621 | if (!hnode) | ||
1622 | return -EFAULT; | ||
1623 | |||
1624 | hnode_mgr = hnode->node_mgr; | ||
1625 | /* Enter hnode_mgr critical section since we're accessing | ||
1626 | * data that could be changed by node_change_priority() and | ||
1627 | * node_connect(). */ | ||
1628 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
1629 | pattr->cb_struct = sizeof(struct dsp_nodeattr); | ||
1630 | /* dsp_nodeattrin */ | ||
1631 | pattr->in_node_attr_in.cb_struct = | ||
1632 | sizeof(struct dsp_nodeattrin); | ||
1633 | pattr->in_node_attr_in.prio = hnode->prio; | ||
1634 | pattr->in_node_attr_in.timeout = hnode->timeout; | ||
1635 | pattr->in_node_attr_in.heap_size = | ||
1636 | hnode->create_args.asa.task_arg_obj.heap_size; | ||
1637 | pattr->in_node_attr_in.pgpp_virt_addr = (void *) | ||
1638 | hnode->create_args.asa.task_arg_obj.gpp_heap_addr; | ||
1639 | pattr->node_attr_inputs = hnode->num_gpp_inputs; | ||
1640 | pattr->node_attr_outputs = hnode->num_gpp_outputs; | ||
1641 | /* dsp_nodeinfo */ | ||
1642 | get_node_info(hnode, &(pattr->node_info)); | ||
1643 | /* end of sync_enter_cs */ | ||
1644 | /* Exit critical section */ | ||
1645 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
1646 | |||
1647 | return 0; | ||
1648 | } | ||
1649 | |||
1650 | /* | ||
1651 | * ======== node_get_channel_id ======== | ||
1652 | * Purpose: | ||
1653 | * Get the channel index reserved for a stream connection between the | ||
1654 | * host and a node. | ||
1655 | */ | ||
1656 | int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index, | ||
1657 | u32 *chan_id) | ||
1658 | { | ||
1659 | enum node_type node_type; | ||
1660 | int status = -EINVAL; | ||
1661 | |||
1662 | if (!hnode) { | ||
1663 | status = -EFAULT; | ||
1664 | return status; | ||
1665 | } | ||
1666 | node_type = node_get_type(hnode); | ||
1667 | if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) { | ||
1668 | status = -EPERM; | ||
1669 | return status; | ||
1670 | } | ||
1671 | if (dir == DSP_TONODE) { | ||
1672 | if (index < MAX_INPUTS(hnode)) { | ||
1673 | if (hnode->inputs[index].type == HOSTCONNECT) { | ||
1674 | *chan_id = hnode->inputs[index].dev_id; | ||
1675 | status = 0; | ||
1676 | } | ||
1677 | } | ||
1678 | } else { | ||
1679 | if (index < MAX_OUTPUTS(hnode)) { | ||
1680 | if (hnode->outputs[index].type == HOSTCONNECT) { | ||
1681 | *chan_id = hnode->outputs[index].dev_id; | ||
1682 | status = 0; | ||
1683 | } | ||
1684 | } | ||
1685 | } | ||
1686 | return status; | ||
1687 | } | ||
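Illustrative lookup of the channel index reserved by a host connection;
DSP_TONODE selects the node's input side, and stream index 0 is only an
example:

	u32 chan_id;

	if (!node_get_channel_id(hnode, DSP_TONODE, 0, &chan_id))
		pr_info("input stream 0 uses host channel %u\n", chan_id);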
1688 | |||
1689 | /* | ||
1690 | * ======== node_get_message ======== | ||
1691 | * Purpose: | ||
1692 | * Retrieve a message from a node on the DSP. | ||
1693 | */ | ||
1694 | int node_get_message(struct node_object *hnode, | ||
1695 | struct dsp_msg *message, u32 utimeout) | ||
1696 | { | ||
1697 | struct node_mgr *hnode_mgr; | ||
1698 | enum node_type node_type; | ||
1699 | struct bridge_drv_interface *intf_fxns; | ||
1700 | int status = 0; | ||
1701 | void *tmp_buf; | ||
1702 | struct dsp_processorstate proc_state; | ||
1703 | struct proc_object *hprocessor; | ||
1704 | |||
1705 | if (!hnode) { | ||
1706 | status = -EFAULT; | ||
1707 | goto func_end; | ||
1708 | } | ||
1709 | hprocessor = hnode->processor; | ||
1710 | status = proc_get_state(hprocessor, &proc_state, | ||
1711 | sizeof(struct dsp_processorstate)); | ||
1712 | if (status) | ||
1713 | goto func_end; | ||
1714 | /* If processor is in error state then don't attempt to get the | ||
1715 | message */ | ||
1716 | if (proc_state.proc_state == PROC_ERROR) { | ||
1717 | status = -EPERM; | ||
1718 | goto func_end; | ||
1719 | } | ||
1720 | hnode_mgr = hnode->node_mgr; | ||
1721 | node_type = node_get_type(hnode); | ||
1722 | if (node_type != NODE_MESSAGE && node_type != NODE_TASK && | ||
1723 | node_type != NODE_DAISSOCKET) { | ||
1724 | status = -EPERM; | ||
1725 | goto func_end; | ||
1726 | } | ||
1727 | /* This function will block unless a message is available. Since | ||
1728 | * DSPNode_RegisterNotify() allows notification when a message | ||
1729 | * is available, the system can be designed so that | ||
1730 | * DSPNode_GetMessage() is only called when a message is | ||
1731 | * available. */ | ||
1732 | intf_fxns = hnode_mgr->intf_fxns; | ||
1733 | status = | ||
1734 | (*intf_fxns->msg_get) (hnode->msg_queue_obj, message, utimeout); | ||
1735 | /* Check if message contains SM descriptor */ | ||
1736 | if (status || !(message->cmd & DSP_RMSBUFDESC)) | ||
1737 | goto func_end; | ||
1738 | |||
1739 | /* Translate DSP byte addr to GPP Va. */ | ||
1740 | tmp_buf = cmm_xlator_translate(hnode->xlator, | ||
1741 | (void *)(message->arg1 * | ||
1742 | hnode->node_mgr-> | ||
1743 | dsp_word_size), CMM_DSPPA2PA); | ||
1744 | if (tmp_buf != NULL) { | ||
1745 | /* now convert this GPP Pa to Va */ | ||
1746 | tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf, | ||
1747 | CMM_PA2VA); | ||
1748 | if (tmp_buf != NULL) { | ||
1749 | /* Adjust SM size in msg */ | ||
1750 | message->arg1 = (u32) tmp_buf; | ||
1751 | message->arg2 *= hnode->node_mgr->dsp_word_size; | ||
1752 | } else { | ||
1753 | status = -ESRCH; | ||
1754 | } | ||
1755 | } else { | ||
1756 | status = -ESRCH; | ||
1757 | } | ||
1758 | func_end: | ||
1759 | dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__, | ||
1760 | hnode, message, utimeout); | ||
1761 | return status; | ||
1762 | } | ||
1763 | |||
1764 | /* | ||
1765 | * ======== node_get_nldr_obj ======== | ||
1766 | */ | ||
1767 | int node_get_nldr_obj(struct node_mgr *hnode_mgr, | ||
1768 | struct nldr_object **nldr_ovlyobj) | ||
1769 | { | ||
1770 | int status = 0; | ||
1771 | struct node_mgr *node_mgr_obj = hnode_mgr; | ||
1772 | |||
1773 | if (!hnode_mgr) | ||
1774 | status = -EFAULT; | ||
1775 | else | ||
1776 | *nldr_ovlyobj = node_mgr_obj->nldr_obj; | ||
1777 | |||
1778 | return status; | ||
1779 | } | ||
1780 | |||
1781 | /* | ||
1782 | * ======== node_get_strm_mgr ======== | ||
1783 | * Purpose: | ||
1784 | * Returns the Stream manager. | ||
1785 | */ | ||
1786 | int node_get_strm_mgr(struct node_object *hnode, | ||
1787 | struct strm_mgr **strm_man) | ||
1788 | { | ||
1789 | int status = 0; | ||
1790 | |||
1791 | if (!hnode) | ||
1792 | status = -EFAULT; | ||
1793 | else | ||
1794 | *strm_man = hnode->node_mgr->strm_mgr_obj; | ||
1795 | |||
1796 | return status; | ||
1797 | } | ||
1798 | |||
1799 | /* | ||
1800 | * ======== node_get_load_type ======== | ||
1801 | */ | ||
1802 | enum nldr_loadtype node_get_load_type(struct node_object *hnode) | ||
1803 | { | ||
1804 | if (!hnode) { | ||
1805 | dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode); | ||
1806 | return -1; | ||
1807 | } else { | ||
1808 | return hnode->dcd_props.obj_data.node_obj.load_type; | ||
1809 | } | ||
1810 | } | ||
1811 | |||
1812 | /* | ||
1813 | * ======== node_get_timeout ======== | ||
1814 | * Purpose: | ||
1815 | * Returns the timeout value for this node. | ||
1816 | */ | ||
1817 | u32 node_get_timeout(struct node_object *hnode) | ||
1818 | { | ||
1819 | if (!hnode) { | ||
1820 | dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode); | ||
1821 | return 0; | ||
1822 | } else { | ||
1823 | return hnode->timeout; | ||
1824 | } | ||
1825 | } | ||
1826 | |||
1827 | /* | ||
1828 | * ======== node_get_type ======== | ||
1829 | * Purpose: | ||
1830 | * Returns the node type. | ||
1831 | */ | ||
1832 | enum node_type node_get_type(struct node_object *hnode) | ||
1833 | { | ||
1834 | enum node_type node_type; | ||
1835 | |||
1836 | if (hnode == (struct node_object *)DSP_HGPPNODE) | ||
1837 | node_type = NODE_GPP; | ||
1838 | else { | ||
1839 | if (!hnode) | ||
1840 | node_type = -1; | ||
1841 | else | ||
1842 | node_type = hnode->ntype; | ||
1843 | } | ||
1844 | return node_type; | ||
1845 | } | ||
1846 | |||
1847 | /* | ||
1848 | * ======== node_on_exit ======== | ||
1849 | * Purpose: | ||
1850 | * Gets called when RMS_EXIT is received for a node. | ||
1851 | */ | ||
1852 | void node_on_exit(struct node_object *hnode, s32 node_status) | ||
1853 | { | ||
1854 | if (!hnode) | ||
1855 | return; | ||
1856 | |||
1857 | /* Set node state to done */ | ||
1858 | NODE_SET_STATE(hnode, NODE_DONE); | ||
1859 | hnode->exit_status = node_status; | ||
1860 | if (hnode->loaded && hnode->phase_split) { | ||
1861 | (void)hnode->node_mgr->nldr_fxns.unload(hnode-> | ||
1862 | nldr_node_obj, | ||
1863 | NLDR_EXECUTE); | ||
1864 | hnode->loaded = false; | ||
1865 | } | ||
1866 | /* Unblock call to node_terminate */ | ||
1867 | (void)sync_set_event(hnode->sync_done); | ||
1868 | /* Notify clients */ | ||
1869 | proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE); | ||
1870 | ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE); | ||
1871 | } | ||
1872 | |||
1873 | /* | ||
1874 | * ======== node_pause ======== | ||
1875 | * Purpose: | ||
1876 | * Suspend execution of a node currently running on the DSP. | ||
1877 | */ | ||
1878 | int node_pause(struct node_object *hnode) | ||
1879 | { | ||
1880 | struct node_object *pnode = (struct node_object *)hnode; | ||
1881 | enum node_type node_type; | ||
1882 | enum node_state state; | ||
1883 | struct node_mgr *hnode_mgr; | ||
1884 | int status = 0; | ||
1885 | u32 proc_id; | ||
1886 | struct dsp_processorstate proc_state; | ||
1887 | struct proc_object *hprocessor; | ||
1888 | |||
1889 | if (!hnode) { | ||
1890 | status = -EFAULT; | ||
1891 | } else { | ||
1892 | node_type = node_get_type(hnode); | ||
1893 | if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) | ||
1894 | status = -EPERM; | ||
1895 | } | ||
1896 | if (status) | ||
1897 | goto func_end; | ||
1898 | |||
1899 | status = proc_get_processor_id(pnode->processor, &proc_id); | ||
1900 | |||
1901 | if (proc_id == IVA_UNIT) | ||
1902 | status = -ENOSYS; | ||
1903 | |||
1904 | if (!status) { | ||
1905 | hnode_mgr = hnode->node_mgr; | ||
1906 | |||
1907 | /* Enter critical section */ | ||
1908 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
1909 | state = node_get_state(hnode); | ||
1910 | /* Check node state */ | ||
1911 | if (state != NODE_RUNNING) | ||
1912 | status = -EBADR; | ||
1913 | |||
1914 | if (status) | ||
1915 | goto func_cont; | ||
1916 | hprocessor = hnode->processor; | ||
1917 | status = proc_get_state(hprocessor, &proc_state, | ||
1918 | sizeof(struct dsp_processorstate)); | ||
1919 | if (status) | ||
1920 | goto func_cont; | ||
1921 | /* If processor is in error state then don't attempt | ||
1922 | to send the message */ | ||
1923 | if (proc_state.proc_state == PROC_ERROR) { | ||
1924 | status = -EPERM; | ||
1925 | goto func_cont; | ||
1926 | } | ||
1927 | |||
1928 | status = disp_node_change_priority(hnode_mgr->disp_obj, hnode, | ||
1929 | hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY], | ||
1930 | hnode->node_env, NODE_SUSPENDEDPRI); | ||
1931 | |||
1932 | /* Update state */ | ||
1933 | if (status >= 0) | ||
1934 | NODE_SET_STATE(hnode, NODE_PAUSED); | ||
1935 | |||
1936 | func_cont: | ||
1937 | /* End of sync_enter_cs */ | ||
1938 | /* Leave critical section */ | ||
1939 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
1940 | if (status >= 0) { | ||
1941 | proc_notify_clients(hnode->processor, | ||
1942 | DSP_NODESTATECHANGE); | ||
1943 | ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE); | ||
1944 | } | ||
1945 | } | ||
1946 | func_end: | ||
1947 | dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status); | ||
1948 | return status; | ||
1949 | } | ||
1950 | |||
1951 | /* | ||
1952 | * ======== node_put_message ======== | ||
1953 | * Purpose: | ||
1954 | * Send a message to a message node, task node, or XDAIS socket node. This | ||
1955 | * function will block until the message stream can accommodate the | ||
1956 | * message, or a timeout occurs. | ||
1957 | */ | ||
1958 | int node_put_message(struct node_object *hnode, | ||
1959 | const struct dsp_msg *pmsg, u32 utimeout) | ||
1960 | { | ||
1961 | struct node_mgr *hnode_mgr = NULL; | ||
1962 | enum node_type node_type; | ||
1963 | struct bridge_drv_interface *intf_fxns; | ||
1964 | enum node_state state; | ||
1965 | int status = 0; | ||
1966 | void *tmp_buf; | ||
1967 | struct dsp_msg new_msg; | ||
1968 | struct dsp_processorstate proc_state; | ||
1969 | struct proc_object *hprocessor; | ||
1970 | |||
1971 | if (!hnode) { | ||
1972 | status = -EFAULT; | ||
1973 | goto func_end; | ||
1974 | } | ||
1975 | hprocessor = hnode->processor; | ||
1976 | status = proc_get_state(hprocessor, &proc_state, | ||
1977 | sizeof(struct dsp_processorstate)); | ||
1978 | if (status) | ||
1979 | goto func_end; | ||
1980 | /* If processor is in bad state then don't attempt sending the | ||
1981 | message */ | ||
1982 | if (proc_state.proc_state == PROC_ERROR) { | ||
1983 | status = -EPERM; | ||
1984 | goto func_end; | ||
1985 | } | ||
1986 | hnode_mgr = hnode->node_mgr; | ||
1987 | node_type = node_get_type(hnode); | ||
1988 | if (node_type != NODE_MESSAGE && node_type != NODE_TASK && | ||
1989 | node_type != NODE_DAISSOCKET) | ||
1990 | status = -EPERM; | ||
1991 | |||
1992 | if (!status) { | ||
1993 | /* Check node state. Can't send messages to a node after | ||
1994 | * we've sent the RMS_EXIT command. There is still the | ||
1995 | * possibility that node_terminate can be called after we've | ||
1996 | * checked the state. Could add another SYNC object to | ||
1997 | * prevent this (can't use node_mgr_lock, since we don't | ||
1998 | * want to block other NODE functions). However, the node may | ||
1999 | * still exit on its own, before this message is sent. */ | ||
2000 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
2001 | state = node_get_state(hnode); | ||
2002 | if (state == NODE_TERMINATING || state == NODE_DONE) | ||
2003 | status = -EBADR; | ||
2004 | |||
2005 | /* end of sync_enter_cs */ | ||
2006 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
2007 | } | ||
2008 | if (status) | ||
2009 | goto func_end; | ||
2010 | |||
2011 | /* assign pmsg values to new msg */ | ||
2012 | new_msg = *pmsg; | ||
2013 | /* Now, check if message contains a SM buffer descriptor */ | ||
2014 | if (pmsg->cmd & DSP_RMSBUFDESC) { | ||
2015 | /* Translate GPP Va to DSP physical buf Ptr. */ | ||
2016 | tmp_buf = cmm_xlator_translate(hnode->xlator, | ||
2017 | (void *)new_msg.arg1, | ||
2018 | CMM_VA2DSPPA); | ||
2019 | if (tmp_buf != NULL) { | ||
2020 | /* got translation, convert to MAUs in msg */ | ||
2021 | if (hnode->node_mgr->dsp_word_size != 0) { | ||
2022 | new_msg.arg1 = | ||
2023 | (u32) tmp_buf / | ||
2024 | hnode->node_mgr->dsp_word_size; | ||
2025 | /* MAUs */ | ||
2026 | new_msg.arg2 /= hnode->node_mgr-> | ||
2027 | dsp_word_size; | ||
2028 | } else { | ||
2029 | pr_err("%s: dsp_word_size is zero!\n", | ||
2030 | __func__); | ||
2031 | status = -EPERM; /* bad DSPWordSize */ | ||
2032 | } | ||
2033 | } else { /* failed to translate buffer address */ | ||
2034 | status = -ESRCH; | ||
2035 | } | ||
2036 | } | ||
2037 | if (!status) { | ||
2038 | intf_fxns = hnode_mgr->intf_fxns; | ||
2039 | status = (*intf_fxns->msg_put) (hnode->msg_queue_obj, | ||
2040 | &new_msg, utimeout); | ||
2041 | } | ||
2042 | func_end: | ||
2043 | dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, " | ||
2044 | "status 0x%x\n", __func__, hnode, pmsg, utimeout, status); | ||
2045 | return status; | ||
2046 | } | ||
2047 | |||
2048 | /* | ||
2049 | * ======== node_register_notify ======== | ||
2050 | * Purpose: | ||
2051 | * Register to be notified on specific events for this node. | ||
2052 | */ | ||
2053 | int node_register_notify(struct node_object *hnode, u32 event_mask, | ||
2054 | u32 notify_type, | ||
2055 | struct dsp_notification *hnotification) | ||
2056 | { | ||
2057 | struct bridge_drv_interface *intf_fxns; | ||
2058 | int status = 0; | ||
2059 | |||
2060 | if (!hnode) { | ||
2061 | status = -EFAULT; | ||
2062 | } else { | ||
2063 | /* Check if event mask is a valid node related event */ | ||
2064 | if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY)) | ||
2065 | status = -EINVAL; | ||
2066 | |||
2067 | /* Check if notify type is valid */ | ||
2068 | if (notify_type != DSP_SIGNALEVENT) | ||
2069 | status = -EINVAL; | ||
2070 | |||
2071 | /* Only one Notification can be registered at a | ||
2072 | * time - Limitation */ | ||
2073 | if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY)) | ||
2074 | status = -EINVAL; | ||
2075 | } | ||
2076 | if (!status) { | ||
2077 | if (event_mask == DSP_NODESTATECHANGE) { | ||
2078 | status = ntfy_register(hnode->ntfy_obj, hnotification, | ||
2079 | event_mask & DSP_NODESTATECHANGE, | ||
2080 | notify_type); | ||
2081 | } else { | ||
2082 | /* Send Message part of event mask to msg_ctrl */ | ||
2083 | intf_fxns = hnode->node_mgr->intf_fxns; | ||
2084 | status = (*intf_fxns->msg_register_notify) | ||
2085 | (hnode->msg_queue_obj, | ||
2086 | event_mask & DSP_NODEMESSAGEREADY, notify_type, | ||
2087 | hnotification); | ||
2088 | } | ||
2089 | |||
2090 | } | ||
2091 | dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x " | ||
2092 | "hnotification: %p status 0x%x\n", __func__, hnode, | ||
2093 | event_mask, notify_type, hnotification, status); | ||
2094 | return status; | ||
2095 | } | ||
2096 | |||
2097 | /* | ||
2098 | * ======== node_run ======== | ||
2099 | * Purpose: | ||
2100 | * Start execution of a node's execute phase, or resume execution of a node | ||
2101 | * that has been suspended (via NODE_NodePause()) on the DSP. Load the | ||
2102 | * node's execute function if necessary. | ||
2103 | */ | ||
2104 | int node_run(struct node_object *hnode) | ||
2105 | { | ||
2106 | struct node_object *pnode = (struct node_object *)hnode; | ||
2107 | struct node_mgr *hnode_mgr; | ||
2108 | enum node_type node_type; | ||
2109 | enum node_state state; | ||
2110 | u32 ul_execute_fxn; | ||
2111 | u32 ul_fxn_addr; | ||
2112 | int status = 0; | ||
2113 | u32 proc_id; | ||
2114 | struct bridge_drv_interface *intf_fxns; | ||
2115 | struct dsp_processorstate proc_state; | ||
2116 | struct proc_object *hprocessor; | ||
2117 | |||
2118 | if (!hnode) { | ||
2119 | status = -EFAULT; | ||
2120 | goto func_end; | ||
2121 | } | ||
2122 | hprocessor = hnode->processor; | ||
2123 | status = proc_get_state(hprocessor, &proc_state, | ||
2124 | sizeof(struct dsp_processorstate)); | ||
2125 | if (status) | ||
2126 | goto func_end; | ||
2127 | /* If processor is in error state then don't attempt to run the node */ | ||
2128 | if (proc_state.proc_state == PROC_ERROR) { | ||
2129 | status = -EPERM; | ||
2130 | goto func_end; | ||
2131 | } | ||
2132 | node_type = node_get_type(hnode); | ||
2133 | if (node_type == NODE_DEVICE) | ||
2134 | status = -EPERM; | ||
2135 | if (status) | ||
2136 | goto func_end; | ||
2137 | |||
2138 | hnode_mgr = hnode->node_mgr; | ||
2139 | if (!hnode_mgr) { | ||
2140 | status = -EFAULT; | ||
2141 | goto func_end; | ||
2142 | } | ||
2143 | intf_fxns = hnode_mgr->intf_fxns; | ||
2144 | /* Enter critical section */ | ||
2145 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
2146 | |||
2147 | state = node_get_state(hnode); | ||
2148 | if (state != NODE_CREATED && state != NODE_PAUSED) | ||
2149 | status = -EBADR; | ||
2150 | |||
2151 | if (!status) | ||
2152 | status = proc_get_processor_id(pnode->processor, &proc_id); | ||
2153 | |||
2154 | if (status) | ||
2155 | goto func_cont1; | ||
2156 | |||
2157 | if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT)) | ||
2158 | goto func_cont1; | ||
2159 | |||
2160 | if (state == NODE_CREATED) { | ||
2161 | /* If node's execute function is not loaded, load it */ | ||
2162 | if (!(hnode->loaded) && hnode->phase_split) { | ||
2163 | status = | ||
2164 | hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj, | ||
2165 | NLDR_EXECUTE); | ||
2166 | if (!status) { | ||
2167 | hnode->loaded = true; | ||
2168 | } else { | ||
2169 | pr_err("%s: fail - load execute code: 0x%x\n", | ||
2170 | __func__, status); | ||
2171 | } | ||
2172 | } | ||
2173 | if (!status) { | ||
2174 | /* Get address of node's execute function */ | ||
2175 | if (proc_id == IVA_UNIT) | ||
2176 | ul_execute_fxn = (u32) hnode->node_env; | ||
2177 | else { | ||
2178 | status = get_fxn_address(hnode, &ul_execute_fxn, | ||
2179 | EXECUTEPHASE); | ||
2180 | } | ||
2181 | } | ||
2182 | if (!status) { | ||
2183 | ul_fxn_addr = hnode_mgr->fxn_addrs[RMSEXECUTENODE]; | ||
2184 | status = | ||
2185 | disp_node_run(hnode_mgr->disp_obj, hnode, | ||
2186 | ul_fxn_addr, ul_execute_fxn, | ||
2187 | hnode->node_env); | ||
2188 | } | ||
2189 | } else if (state == NODE_PAUSED) { | ||
2190 | ul_fxn_addr = hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY]; | ||
2191 | status = disp_node_change_priority(hnode_mgr->disp_obj, hnode, | ||
2192 | ul_fxn_addr, hnode->node_env, | ||
2193 | NODE_GET_PRIORITY(hnode)); | ||
2194 | } else { | ||
2195 | /* We should never get here */ | ||
2196 | } | ||
2197 | func_cont1: | ||
2198 | /* Update node state. */ | ||
2199 | if (status >= 0) | ||
2200 | NODE_SET_STATE(hnode, NODE_RUNNING); | ||
2201 | else /* Set state back to previous value */ | ||
2202 | NODE_SET_STATE(hnode, state); | ||
2203 | /*End of sync_enter_cs */ | ||
2204 | /* Exit critical section */ | ||
2205 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
2206 | if (status >= 0) { | ||
2207 | proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE); | ||
2208 | ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE); | ||
2209 | } | ||
2210 | func_end: | ||
2211 | dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status); | ||
2212 | return status; | ||
2213 | } | ||
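A minimal sketch of the usual create/run sequence for an allocated task node,
using only the entry points defined in this file; error handling is reduced to
early returns:

	int err;

	err = node_create(hnode);	/* run the node's create phase on the DSP */
	if (err)
		return err;

	err = node_run(hnode);		/* start (or later resume) the execute phase */
	if (err)
		return err;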
2214 | |||
2215 | /* | ||
2216 | * ======== node_terminate ======== | ||
2217 | * Purpose: | ||
2218 | * Signal a node running on the DSP that it should exit its execute phase | ||
2219 | * function. | ||
2220 | */ | ||
2221 | int node_terminate(struct node_object *hnode, int *pstatus) | ||
2222 | { | ||
2223 | struct node_object *pnode = (struct node_object *)hnode; | ||
2224 | struct node_mgr *hnode_mgr = NULL; | ||
2225 | enum node_type node_type; | ||
2226 | struct bridge_drv_interface *intf_fxns; | ||
2227 | enum node_state state; | ||
2228 | struct dsp_msg msg, killmsg; | ||
2229 | int status = 0; | ||
2230 | u32 proc_id, kill_time_out; | ||
2231 | struct deh_mgr *hdeh_mgr; | ||
2232 | struct dsp_processorstate proc_state; | ||
2233 | |||
2234 | if (!hnode || !hnode->node_mgr) { | ||
2235 | status = -EFAULT; | ||
2236 | goto func_end; | ||
2237 | } | ||
2238 | if (pnode->processor == NULL) { | ||
2239 | status = -EFAULT; | ||
2240 | goto func_end; | ||
2241 | } | ||
2242 | status = proc_get_processor_id(pnode->processor, &proc_id); | ||
2243 | |||
2244 | if (!status) { | ||
2245 | hnode_mgr = hnode->node_mgr; | ||
2246 | node_type = node_get_type(hnode); | ||
2247 | if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) | ||
2248 | status = -EPERM; | ||
2249 | } | ||
2250 | if (!status) { | ||
2251 | /* Check node state */ | ||
2252 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
2253 | state = node_get_state(hnode); | ||
2254 | if (state != NODE_RUNNING) { | ||
2255 | status = -EBADR; | ||
2256 | /* Set the exit status if node terminated on | ||
2257 | * its own. */ | ||
2258 | if (state == NODE_DONE) | ||
2259 | *pstatus = hnode->exit_status; | ||
2260 | |||
2261 | } else { | ||
2262 | NODE_SET_STATE(hnode, NODE_TERMINATING); | ||
2263 | } | ||
2264 | /* end of sync_enter_cs */ | ||
2265 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
2266 | } | ||
2267 | if (!status) { | ||
2268 | /* | ||
2269 | * Send exit message. Do not change state to NODE_DONE | ||
2270 | * here. That will be done in callback. | ||
2271 | */ | ||
2272 | status = proc_get_state(pnode->processor, &proc_state, | ||
2273 | sizeof(struct dsp_processorstate)); | ||
2274 | if (status) | ||
2275 | goto func_cont; | ||
2276 | /* If the processor is in an error state then don't attempt to send | ||
2277 | * a kill task command */ | ||
2278 | if (proc_state.proc_state == PROC_ERROR) { | ||
2279 | status = -EPERM; | ||
2280 | goto func_cont; | ||
2281 | } | ||
2282 | |||
2283 | msg.cmd = RMS_EXIT; | ||
2284 | msg.arg1 = hnode->node_env; | ||
2285 | killmsg.cmd = RMS_KILLTASK; | ||
2286 | killmsg.arg1 = hnode->node_env; | ||
2287 | intf_fxns = hnode_mgr->intf_fxns; | ||
2288 | |||
2289 | if (hnode->timeout > MAXTIMEOUT) | ||
2290 | kill_time_out = MAXTIMEOUT; | ||
2291 | else | ||
2292 | kill_time_out = (hnode->timeout) * 2; | ||
2293 | |||
2294 | status = (*intf_fxns->msg_put) (hnode->msg_queue_obj, &msg, | ||
2295 | hnode->timeout); | ||
2296 | if (status) | ||
2297 | goto func_cont; | ||
2298 | |||
2299 | /* | ||
2300 | * Wait on synchronization object that will be | ||
2301 | * posted in the callback on receiving RMS_EXIT | ||
2302 | * message, or by node_delete. Check for valid hnode, | ||
2303 | * in case posted by node_delete(). | ||
2304 | */ | ||
2305 | status = sync_wait_on_event(hnode->sync_done, | ||
2306 | kill_time_out / 2); | ||
2307 | if (status != ETIME) | ||
2308 | goto func_cont; | ||
2309 | |||
2310 | status = (*intf_fxns->msg_put)(hnode->msg_queue_obj, | ||
2311 | &killmsg, hnode->timeout); | ||
2312 | if (status) | ||
2313 | goto func_cont; | ||
2314 | status = sync_wait_on_event(hnode->sync_done, | ||
2315 | kill_time_out / 2); | ||
2316 | if (status) { | ||
2317 | /* | ||
2318 | * The node did not acknowledge the kill request; | ||
2319 | * simulate a DSP exception so the error handler can recover. | ||
2320 | */ | ||
2321 | dev_get_deh_mgr(hnode_mgr->dev_obj, &hdeh_mgr); | ||
2322 | if (!hdeh_mgr) | ||
2323 | goto func_cont; | ||
2324 | |||
2325 | bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, | ||
2326 | DSP_EXCEPTIONABORT); | ||
2327 | } | ||
2328 | } | ||
2329 | func_cont: | ||
2330 | if (!status) { | ||
2331 | /* Enter CS before getting exit status, in case node was | ||
2332 | * deleted. */ | ||
2333 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
2334 | /* Make sure node wasn't deleted while we blocked */ | ||
2335 | if (!hnode) { | ||
2336 | status = -EPERM; | ||
2337 | } else { | ||
2338 | *pstatus = hnode->exit_status; | ||
2339 | dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n", | ||
2340 | __func__, hnode, hnode->node_env, status); | ||
2341 | } | ||
2342 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
2343 | } /* End of sync_enter_cs */ | ||
2344 | func_end: | ||
2345 | return status; | ||
2346 | } | ||
2347 | |||
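The terminate path above follows an escalation protocol: ask the node to exit with RMS_EXIT, wait up to half of the kill timeout, and only then send RMS_KILLTASK and wait the other half before reporting an abort. Below is a minimal standalone sketch of that escalation, not driver code; send_cmd() and wait_done() are hypothetical stand-ins for msg_put() and sync_wait_on_event().

    #include <stdbool.h>
    #include <stdio.h>

    enum cmd { CMD_EXIT, CMD_KILL };

    /* Hypothetical stand-ins: a real implementation would post to the node's
     * message queue and block on its completion event. */
    static bool send_cmd(enum cmd c)   { (void)c; return true; }
    static bool wait_done(unsigned ms) { (void)ms; return false; }

    static int terminate_node(unsigned kill_timeout_ms)
    {
        if (!send_cmd(CMD_EXIT))
            return -1;
        if (wait_done(kill_timeout_ms / 2))
            return 0;                    /* node exited on its own */
        if (!send_cmd(CMD_KILL))
            return -1;
        if (wait_done(kill_timeout_ms / 2))
            return 0;                    /* node honoured the kill command */
        fprintf(stderr, "node unresponsive, raising abort\n");
        return -1;                       /* caller would notify the error handler */
    }

    int main(void)
    {
        return terminate_node(1000) ? 1 : 0;
    }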
2348 | /* | ||
2349 | * ======== delete_node ======== | ||
2350 | * Purpose: | ||
2351 | * Free GPP resources allocated in node_allocate() or node_connect(). | ||
2352 | */ | ||
2353 | static void delete_node(struct node_object *hnode, | ||
2354 | struct process_context *pr_ctxt) | ||
2355 | { | ||
2356 | struct node_mgr *hnode_mgr; | ||
2357 | struct bridge_drv_interface *intf_fxns; | ||
2358 | u32 i; | ||
2359 | enum node_type node_type; | ||
2360 | struct stream_chnl stream; | ||
2361 | struct node_msgargs node_msg_args; | ||
2362 | struct node_taskargs task_arg_obj; | ||
2363 | #ifdef DSP_DMM_DEBUG | ||
2364 | struct dmm_object *dmm_mgr; | ||
2365 | struct proc_object *p_proc_object = | ||
2366 | (struct proc_object *)hnode->processor; | ||
2367 | #endif | ||
2368 | int status; | ||
2369 | |||
2370 | if (!hnode) | ||
2371 | goto func_end; | ||
2372 | hnode_mgr = hnode->node_mgr; | ||
2373 | if (!hnode_mgr) | ||
2374 | goto func_end; | ||
2375 | |||
2376 | node_type = node_get_type(hnode); | ||
2377 | if (node_type != NODE_DEVICE) { | ||
2378 | node_msg_args = hnode->create_args.asa.node_msg_args; | ||
2379 | kfree(node_msg_args.pdata); | ||
2380 | |||
2381 | /* Free msg_ctrl queue */ | ||
2382 | if (hnode->msg_queue_obj) { | ||
2383 | intf_fxns = hnode_mgr->intf_fxns; | ||
2384 | (*intf_fxns->msg_delete_queue) (hnode-> | ||
2385 | msg_queue_obj); | ||
2386 | hnode->msg_queue_obj = NULL; | ||
2387 | } | ||
2388 | |||
2389 | kfree(hnode->sync_done); | ||
2390 | |||
2391 | /* Free all stream info */ | ||
2392 | if (hnode->inputs) { | ||
2393 | for (i = 0; i < MAX_INPUTS(hnode); i++) { | ||
2394 | stream = hnode->inputs[i]; | ||
2395 | free_stream(hnode_mgr, stream); | ||
2396 | } | ||
2397 | kfree(hnode->inputs); | ||
2398 | hnode->inputs = NULL; | ||
2399 | } | ||
2400 | if (hnode->outputs) { | ||
2401 | for (i = 0; i < MAX_OUTPUTS(hnode); i++) { | ||
2402 | stream = hnode->outputs[i]; | ||
2403 | free_stream(hnode_mgr, stream); | ||
2404 | } | ||
2405 | kfree(hnode->outputs); | ||
2406 | hnode->outputs = NULL; | ||
2407 | } | ||
2408 | task_arg_obj = hnode->create_args.asa.task_arg_obj; | ||
2409 | if (task_arg_obj.strm_in_def) { | ||
2410 | for (i = 0; i < MAX_INPUTS(hnode); i++) { | ||
2411 | kfree(task_arg_obj.strm_in_def[i].sz_device); | ||
2412 | task_arg_obj.strm_in_def[i].sz_device = NULL; | ||
2413 | } | ||
2414 | kfree(task_arg_obj.strm_in_def); | ||
2415 | task_arg_obj.strm_in_def = NULL; | ||
2416 | } | ||
2417 | if (task_arg_obj.strm_out_def) { | ||
2418 | for (i = 0; i < MAX_OUTPUTS(hnode); i++) { | ||
2419 | kfree(task_arg_obj.strm_out_def[i].sz_device); | ||
2420 | task_arg_obj.strm_out_def[i].sz_device = NULL; | ||
2421 | } | ||
2422 | kfree(task_arg_obj.strm_out_def); | ||
2423 | task_arg_obj.strm_out_def = NULL; | ||
2424 | } | ||
2425 | if (task_arg_obj.dsp_heap_res_addr) { | ||
2426 | status = proc_un_map(hnode->processor, (void *) | ||
2427 | task_arg_obj.dsp_heap_addr, | ||
2428 | pr_ctxt); | ||
2429 | |||
2430 | status = proc_un_reserve_memory(hnode->processor, | ||
2431 | (void *) | ||
2432 | task_arg_obj. | ||
2433 | dsp_heap_res_addr, | ||
2434 | pr_ctxt); | ||
2435 | #ifdef DSP_DMM_DEBUG | ||
2436 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | ||
2437 | if (dmm_mgr) | ||
2438 | dmm_mem_map_dump(dmm_mgr); | ||
2439 | else | ||
2440 | status = DSP_EHANDLE; | ||
2441 | #endif | ||
2442 | } | ||
2443 | } | ||
2444 | if (node_type != NODE_MESSAGE) { | ||
2445 | kfree(hnode->stream_connect); | ||
2446 | hnode->stream_connect = NULL; | ||
2447 | } | ||
2448 | kfree(hnode->str_dev_name); | ||
2449 | hnode->str_dev_name = NULL; | ||
2450 | |||
2451 | if (hnode->ntfy_obj) { | ||
2452 | ntfy_delete(hnode->ntfy_obj); | ||
2453 | kfree(hnode->ntfy_obj); | ||
2454 | hnode->ntfy_obj = NULL; | ||
2455 | } | ||
2456 | |||
2457 | /* These were allocated in dcd_get_object_def (via node_allocate) */ | ||
2458 | kfree(hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn); | ||
2459 | hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn = NULL; | ||
2460 | |||
2461 | kfree(hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn); | ||
2462 | hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn = NULL; | ||
2463 | |||
2464 | kfree(hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn); | ||
2465 | hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn = NULL; | ||
2466 | |||
2467 | kfree(hnode->dcd_props.obj_data.node_obj.str_i_alg_name); | ||
2468 | hnode->dcd_props.obj_data.node_obj.str_i_alg_name = NULL; | ||
2469 | |||
2470 | /* Free all SM address translator resources */ | ||
2471 | kfree(hnode->xlator); | ||
2472 | kfree(hnode->nldr_node_obj); | ||
2473 | hnode->nldr_node_obj = NULL; | ||
2474 | hnode->node_mgr = NULL; | ||
2475 | kfree(hnode); | ||
2476 | hnode = NULL; | ||
2477 | func_end: | ||
2478 | return; | ||
2479 | } | ||
2480 | |||
2481 | /* | ||
2482 | * ======== delete_node_mgr ======== | ||
2483 | * Purpose: | ||
2484 | * Frees the node manager. | ||
2485 | */ | ||
2486 | static void delete_node_mgr(struct node_mgr *hnode_mgr) | ||
2487 | { | ||
2488 | struct node_object *hnode, *tmp; | ||
2489 | |||
2490 | if (hnode_mgr) { | ||
2491 | /* Free resources */ | ||
2492 | if (hnode_mgr->dcd_mgr) | ||
2493 | dcd_destroy_manager(hnode_mgr->dcd_mgr); | ||
2494 | |||
2495 | /* Remove any elements remaining in lists */ | ||
2496 | list_for_each_entry_safe(hnode, tmp, &hnode_mgr->node_list, | ||
2497 | list_elem) { | ||
2498 | list_del(&hnode->list_elem); | ||
2499 | delete_node(hnode, NULL); | ||
2500 | } | ||
2501 | mutex_destroy(&hnode_mgr->node_mgr_lock); | ||
2502 | if (hnode_mgr->ntfy_obj) { | ||
2503 | ntfy_delete(hnode_mgr->ntfy_obj); | ||
2504 | kfree(hnode_mgr->ntfy_obj); | ||
2505 | } | ||
2506 | |||
2507 | if (hnode_mgr->disp_obj) | ||
2508 | disp_delete(hnode_mgr->disp_obj); | ||
2509 | |||
2510 | if (hnode_mgr->strm_mgr_obj) | ||
2511 | strm_delete(hnode_mgr->strm_mgr_obj); | ||
2512 | |||
2513 | /* Delete the loader */ | ||
2514 | if (hnode_mgr->nldr_obj) | ||
2515 | hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj); | ||
2516 | |||
2517 | kfree(hnode_mgr); | ||
2518 | } | ||
2519 | } | ||
2520 | |||
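delete_node_mgr() walks the node list with list_for_each_entry_safe() because each iteration frees the current element, so the successor must be captured before the delete. A minimal standalone sketch of that safe-teardown pattern on a plain singly linked list (not driver code, no kernel list API assumed):

    #include <stdlib.h>
    #include <stdio.h>

    struct item {
        int id;
        struct item *next;
    };

    static void delete_all(struct item **head)
    {
        struct item *cur = *head, *tmp;

        while (cur) {
            tmp = cur->next;        /* grab the successor first */
            printf("deleting %d\n", cur->id);
            free(cur);              /* now the current node may go away */
            cur = tmp;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct item *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct item *it = malloc(sizeof(*it));
            it->id = i;
            it->next = head;
            head = it;
        }
        delete_all(&head);
        return 0;
    }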
2521 | /* | ||
2522 | * ======== fill_stream_connect ======== | ||
2523 | * Purpose: | ||
2524 | * Fills stream information. | ||
2525 | */ | ||
2526 | static void fill_stream_connect(struct node_object *node1, | ||
2527 | struct node_object *node2, | ||
2528 | u32 stream1, u32 stream2) | ||
2529 | { | ||
2530 | u32 strm_index; | ||
2531 | struct dsp_streamconnect *strm1 = NULL; | ||
2532 | struct dsp_streamconnect *strm2 = NULL; | ||
2533 | enum node_type node1_type = NODE_TASK; | ||
2534 | enum node_type node2_type = NODE_TASK; | ||
2535 | |||
2536 | node1_type = node_get_type(node1); | ||
2537 | node2_type = node_get_type(node2); | ||
2538 | if (node1 != (struct node_object *)DSP_HGPPNODE) { | ||
2539 | |||
2540 | if (node1_type != NODE_DEVICE) { | ||
2541 | strm_index = node1->num_inputs + | ||
2542 | node1->num_outputs - 1; | ||
2543 | strm1 = &(node1->stream_connect[strm_index]); | ||
2544 | strm1->cb_struct = sizeof(struct dsp_streamconnect); | ||
2545 | strm1->this_node_stream_index = stream1; | ||
2546 | } | ||
2547 | |||
2548 | if (node2 != (struct node_object *)DSP_HGPPNODE) { | ||
2549 | /* NODE == > NODE */ | ||
2550 | if (node1_type != NODE_DEVICE) { | ||
2551 | strm1->connected_node = node2; | ||
2552 | strm1->ui_connected_node_id = node2->node_uuid; | ||
2553 | strm1->connected_node_stream_index = stream2; | ||
2554 | strm1->connect_type = CONNECTTYPE_NODEOUTPUT; | ||
2555 | } | ||
2556 | if (node2_type != NODE_DEVICE) { | ||
2557 | strm_index = node2->num_inputs + | ||
2558 | node2->num_outputs - 1; | ||
2559 | strm2 = &(node2->stream_connect[strm_index]); | ||
2560 | strm2->cb_struct = | ||
2561 | sizeof(struct dsp_streamconnect); | ||
2562 | strm2->this_node_stream_index = stream2; | ||
2563 | strm2->connected_node = node1; | ||
2564 | strm2->ui_connected_node_id = node1->node_uuid; | ||
2565 | strm2->connected_node_stream_index = stream1; | ||
2566 | strm2->connect_type = CONNECTTYPE_NODEINPUT; | ||
2567 | } | ||
2568 | } else if (node1_type != NODE_DEVICE) | ||
2569 | strm1->connect_type = CONNECTTYPE_GPPOUTPUT; | ||
2570 | } else { | ||
2571 | /* GPP == > NODE */ | ||
2572 | strm_index = node2->num_inputs + node2->num_outputs - 1; | ||
2573 | strm2 = &(node2->stream_connect[strm_index]); | ||
2574 | strm2->cb_struct = sizeof(struct dsp_streamconnect); | ||
2575 | strm2->this_node_stream_index = stream2; | ||
2576 | strm2->connect_type = CONNECTTYPE_GPPINPUT; | ||
2577 | } | ||
2578 | } | ||
2579 | |||
2580 | /* | ||
2581 | * ======== fill_stream_def ======== | ||
2582 | * Purpose: | ||
2583 | * Fills Stream attributes. | ||
2584 | */ | ||
2585 | static void fill_stream_def(struct node_object *hnode, | ||
2586 | struct node_strmdef *pstrm_def, | ||
2587 | struct dsp_strmattr *pattrs) | ||
2588 | { | ||
2589 | struct node_mgr *hnode_mgr = hnode->node_mgr; | ||
2590 | |||
2591 | if (pattrs != NULL) { | ||
2592 | pstrm_def->num_bufs = pattrs->num_bufs; | ||
2593 | pstrm_def->buf_size = | ||
2594 | pattrs->buf_size / hnode_mgr->dsp_data_mau_size; | ||
2595 | pstrm_def->seg_id = pattrs->seg_id; | ||
2596 | pstrm_def->buf_alignment = pattrs->buf_alignment; | ||
2597 | pstrm_def->timeout = pattrs->timeout; | ||
2598 | } else { | ||
2599 | pstrm_def->num_bufs = DEFAULTNBUFS; | ||
2600 | pstrm_def->buf_size = | ||
2601 | DEFAULTBUFSIZE / hnode_mgr->dsp_data_mau_size; | ||
2602 | pstrm_def->seg_id = DEFAULTSEGID; | ||
2603 | pstrm_def->buf_alignment = DEFAULTALIGNMENT; | ||
2604 | pstrm_def->timeout = DEFAULTTIMEOUT; | ||
2605 | } | ||
2606 | } | ||
2607 | |||
2608 | /* | ||
2609 | * ======== free_stream ======== | ||
2610 | * Purpose: | ||
2611 | * Updates the channel mask and frees the pipe id. | ||
2612 | */ | ||
2613 | static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream) | ||
2614 | { | ||
2615 | /* Free the pipe id only if the other node has already been deleted. */ | ||
2616 | if (stream.type == NODECONNECT) { | ||
2617 | if (test_bit(stream.dev_id, hnode_mgr->pipe_done_map)) { | ||
2618 | /* The other node has already been deleted */ | ||
2619 | clear_bit(stream.dev_id, hnode_mgr->pipe_done_map); | ||
2620 | clear_bit(stream.dev_id, hnode_mgr->pipe_map); | ||
2621 | } else { | ||
2622 | /* The other node has not been deleted yet */ | ||
2623 | set_bit(stream.dev_id, hnode_mgr->pipe_done_map); | ||
2624 | } | ||
2625 | } else if (stream.type == HOSTCONNECT) { | ||
2626 | if (stream.dev_id < hnode_mgr->num_chnls) { | ||
2627 | clear_bit(stream.dev_id, hnode_mgr->chnl_map); | ||
2628 | } else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) { | ||
2629 | /* dsp-dma */ | ||
2630 | clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls), | ||
2631 | hnode_mgr->dma_chnl_map); | ||
2632 | } else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) { | ||
2633 | /* zero-copy */ | ||
2634 | clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls), | ||
2635 | hnode_mgr->zc_chnl_map); | ||
2636 | } | ||
2637 | } | ||
2638 | } | ||
2639 | |||
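free_stream() releases a pipe only once both endpoints are gone: the first node to be deleted sets the pipe's bit in pipe_done_map, and the second deletion clears both maps so the pipe id can be reused. A minimal standalone sketch of that two-bitmap handshake, using plain bit operations in place of the kernel's set_bit()/clear_bit()/test_bit() (not driver code):

    #include <stdio.h>

    static unsigned long pipe_map;      /* bit set = pipe id allocated */
    static unsigned long pipe_done_map; /* bit set = one endpoint already freed */

    static void release_pipe(unsigned id)
    {
        if (pipe_done_map & (1UL << id)) {
            /* Other endpoint already gone: return the id to the free pool. */
            pipe_done_map &= ~(1UL << id);
            pipe_map      &= ~(1UL << id);
        } else {
            /* First endpoint to go: just remember that it happened. */
            pipe_done_map |= 1UL << id;
        }
    }

    int main(void)
    {
        pipe_map = 1UL << 3;          /* pipe 3 connects node A and node B */
        release_pipe(3);              /* node A deleted: pipe still allocated */
        release_pipe(3);              /* node B deleted: pipe 3 free again */
        printf("pipe 3 %s\n", (pipe_map & (1UL << 3)) ? "busy" : "free");
        return 0;
    }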
2640 | /* | ||
2641 | * ======== get_fxn_address ======== | ||
2642 | * Purpose: | ||
2643 | * Retrieves the address for create, execute or delete phase for a node. | ||
2644 | */ | ||
2645 | static int get_fxn_address(struct node_object *hnode, u32 *fxn_addr, | ||
2646 | u32 phase) | ||
2647 | { | ||
2648 | char *pstr_fxn_name = NULL; | ||
2649 | struct node_mgr *hnode_mgr = hnode->node_mgr; | ||
2650 | int status = 0; | ||
2651 | |||
2652 | switch (phase) { | ||
2653 | case CREATEPHASE: | ||
2654 | pstr_fxn_name = | ||
2655 | hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn; | ||
2656 | break; | ||
2657 | case EXECUTEPHASE: | ||
2658 | pstr_fxn_name = | ||
2659 | hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn; | ||
2660 | break; | ||
2661 | case DELETEPHASE: | ||
2662 | pstr_fxn_name = | ||
2663 | hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn; | ||
2664 | break; | ||
2665 | default: | ||
2666 | /* Should never get here */ | ||
2667 | break; | ||
2668 | } | ||
2669 | |||
2670 | status = | ||
2671 | hnode_mgr->nldr_fxns.get_fxn_addr(hnode->nldr_node_obj, | ||
2672 | pstr_fxn_name, fxn_addr); | ||
2673 | |||
2674 | return status; | ||
2675 | } | ||
2676 | |||
2677 | /* | ||
2678 | * ======== get_node_info ======== | ||
2679 | * Purpose: | ||
2680 | * Retrieves the node information. | ||
2681 | */ | ||
2682 | void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info) | ||
2683 | { | ||
2684 | u32 i; | ||
2685 | |||
2686 | node_info->cb_struct = sizeof(struct dsp_nodeinfo); | ||
2687 | node_info->nb_node_database_props = | ||
2688 | hnode->dcd_props.obj_data.node_obj.ndb_props; | ||
2689 | node_info->execution_priority = hnode->prio; | ||
2690 | node_info->device_owner = hnode->device_owner; | ||
2691 | node_info->number_streams = hnode->num_inputs + hnode->num_outputs; | ||
2692 | node_info->node_env = hnode->node_env; | ||
2693 | |||
2694 | node_info->ns_execution_state = node_get_state(hnode); | ||
2695 | |||
2696 | /* Copy stream connect data */ | ||
2697 | for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++) | ||
2698 | node_info->sc_stream_connection[i] = hnode->stream_connect[i]; | ||
2699 | |||
2700 | } | ||
2701 | |||
2702 | /* | ||
2703 | * ======== get_node_props ======== | ||
2704 | * Purpose: | ||
2705 | * Retrieve node properties. | ||
2706 | */ | ||
2707 | static int get_node_props(struct dcd_manager *hdcd_mgr, | ||
2708 | struct node_object *hnode, | ||
2709 | const struct dsp_uuid *node_uuid, | ||
2710 | struct dcd_genericobj *dcd_prop) | ||
2711 | { | ||
2712 | u32 len; | ||
2713 | struct node_msgargs *pmsg_args; | ||
2714 | struct node_taskargs *task_arg_obj; | ||
2715 | enum node_type node_type = NODE_TASK; | ||
2716 | struct dsp_ndbprops *pndb_props = | ||
2717 | &(dcd_prop->obj_data.node_obj.ndb_props); | ||
2718 | int status = 0; | ||
2719 | char sz_uuid[MAXUUIDLEN]; | ||
2720 | |||
2721 | status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid, | ||
2722 | DSP_DCDNODETYPE, dcd_prop); | ||
2723 | |||
2724 | if (!status) { | ||
2725 | hnode->ntype = node_type = pndb_props->ntype; | ||
2726 | |||
2727 | /* Create UUID value to set in registry. */ | ||
2728 | snprintf(sz_uuid, MAXUUIDLEN, "%pUL", node_uuid); | ||
2729 | dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid); | ||
2730 | |||
2731 | /* Fill in message args that come from NDB */ | ||
2732 | if (node_type != NODE_DEVICE) { | ||
2733 | pmsg_args = &(hnode->create_args.asa.node_msg_args); | ||
2734 | pmsg_args->seg_id = | ||
2735 | dcd_prop->obj_data.node_obj.msg_segid; | ||
2736 | pmsg_args->notify_type = | ||
2737 | dcd_prop->obj_data.node_obj.msg_notify_type; | ||
2738 | pmsg_args->max_msgs = pndb_props->message_depth; | ||
2739 | dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n", | ||
2740 | pmsg_args->max_msgs); | ||
2741 | } else { | ||
2742 | /* Copy device name */ | ||
2743 | len = strlen(pndb_props->ac_name); | ||
2744 | hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL); | ||
2745 | if (hnode->str_dev_name == NULL) { | ||
2746 | status = -ENOMEM; | ||
2747 | } else { | ||
2748 | strncpy(hnode->str_dev_name, | ||
2749 | pndb_props->ac_name, len); | ||
2750 | } | ||
2751 | } | ||
2752 | } | ||
2753 | if (!status) { | ||
2754 | /* Fill in create args that come from NDB */ | ||
2755 | if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) { | ||
2756 | task_arg_obj = &(hnode->create_args.asa.task_arg_obj); | ||
2757 | task_arg_obj->prio = pndb_props->prio; | ||
2758 | task_arg_obj->stack_size = pndb_props->stack_size; | ||
2759 | task_arg_obj->sys_stack_size = | ||
2760 | pndb_props->sys_stack_size; | ||
2761 | task_arg_obj->stack_seg = pndb_props->stack_seg; | ||
2762 | dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: " | ||
2763 | "0x%x words System Stack Size: 0x%x words " | ||
2764 | "Stack Segment: 0x%x profile count : 0x%x\n", | ||
2765 | task_arg_obj->prio, task_arg_obj->stack_size, | ||
2766 | task_arg_obj->sys_stack_size, | ||
2767 | task_arg_obj->stack_seg, | ||
2768 | pndb_props->count_profiles); | ||
2769 | } | ||
2770 | } | ||
2771 | |||
2772 | return status; | ||
2773 | } | ||
2774 | |||
2775 | /* | ||
2776 | * ======== get_proc_props ======== | ||
2777 | * Purpose: | ||
2778 | * Retrieve the processor properties. | ||
2779 | */ | ||
2780 | static int get_proc_props(struct node_mgr *hnode_mgr, | ||
2781 | struct dev_object *hdev_obj) | ||
2782 | { | ||
2783 | struct cfg_hostres *host_res; | ||
2784 | struct bridge_dev_context *pbridge_context; | ||
2785 | int status = 0; | ||
2786 | |||
2787 | status = dev_get_bridge_context(hdev_obj, &pbridge_context); | ||
2788 | if (!pbridge_context) | ||
2789 | status = -EFAULT; | ||
2790 | |||
2791 | if (!status) { | ||
2792 | host_res = pbridge_context->resources; | ||
2793 | if (!host_res) | ||
2794 | return -EPERM; | ||
2795 | hnode_mgr->chnl_offset = host_res->chnl_offset; | ||
2796 | hnode_mgr->chnl_buf_size = host_res->chnl_buf_size; | ||
2797 | hnode_mgr->num_chnls = host_res->num_chnls; | ||
2798 | |||
2799 | /* | ||
2800 | * PROC will add an API to get dsp_processorinfo. | ||
2801 | * Fill in default values for now. | ||
2802 | */ | ||
2803 | /* TODO -- Instead of hard coding, take from registry */ | ||
2804 | hnode_mgr->proc_family = 6000; | ||
2805 | hnode_mgr->proc_type = 6410; | ||
2806 | hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY; | ||
2807 | hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY; | ||
2808 | hnode_mgr->dsp_word_size = DSPWORDSIZE; | ||
2809 | hnode_mgr->dsp_data_mau_size = DSPWORDSIZE; | ||
2810 | hnode_mgr->dsp_mau_size = 1; | ||
2811 | |||
2812 | } | ||
2813 | return status; | ||
2814 | } | ||
2815 | |||
2816 | /* | ||
2817 | * ======== node_get_uuid_props ======== | ||
2818 | * Purpose: | ||
2819 | * Fetch Node UUID properties from DCD/DOF file. | ||
2820 | */ | ||
2821 | int node_get_uuid_props(void *hprocessor, | ||
2822 | const struct dsp_uuid *node_uuid, | ||
2823 | struct dsp_ndbprops *node_props) | ||
2824 | { | ||
2825 | struct node_mgr *hnode_mgr = NULL; | ||
2826 | struct dev_object *hdev_obj; | ||
2827 | int status = 0; | ||
2828 | struct dcd_nodeprops dcd_node_props; | ||
2829 | struct dsp_processorstate proc_state; | ||
2830 | |||
2831 | if (hprocessor == NULL || node_uuid == NULL) { | ||
2832 | status = -EFAULT; | ||
2833 | goto func_end; | ||
2834 | } | ||
2835 | status = proc_get_state(hprocessor, &proc_state, | ||
2836 | sizeof(struct dsp_processorstate)); | ||
2837 | if (status) | ||
2838 | goto func_end; | ||
2839 | /* If the processor is in an error state then don't attempt | ||
2840 | to send the message */ | ||
2841 | if (proc_state.proc_state == PROC_ERROR) { | ||
2842 | status = -EPERM; | ||
2843 | goto func_end; | ||
2844 | } | ||
2845 | |||
2846 | status = proc_get_dev_object(hprocessor, &hdev_obj); | ||
2847 | if (hdev_obj) { | ||
2848 | status = dev_get_node_manager(hdev_obj, &hnode_mgr); | ||
2849 | if (hnode_mgr == NULL) { | ||
2850 | status = -EFAULT; | ||
2851 | goto func_end; | ||
2852 | } | ||
2853 | } | ||
2854 | |||
2855 | /* | ||
2856 | * Enter the critical section. This is needed because | ||
2857 | * dcd_get_object_def will ultimately end up calling dbll_open/close, | ||
2858 | * which needs to be protected in order to not corrupt the zlib manager | ||
2859 | * (COD). | ||
2860 | */ | ||
2861 | mutex_lock(&hnode_mgr->node_mgr_lock); | ||
2862 | |||
2863 | dcd_node_props.str_create_phase_fxn = NULL; | ||
2864 | dcd_node_props.str_execute_phase_fxn = NULL; | ||
2865 | dcd_node_props.str_delete_phase_fxn = NULL; | ||
2866 | dcd_node_props.str_i_alg_name = NULL; | ||
2867 | |||
2868 | status = dcd_get_object_def(hnode_mgr->dcd_mgr, | ||
2869 | (struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE, | ||
2870 | (struct dcd_genericobj *)&dcd_node_props); | ||
2871 | |||
2872 | if (!status) { | ||
2873 | *node_props = dcd_node_props.ndb_props; | ||
2874 | kfree(dcd_node_props.str_create_phase_fxn); | ||
2875 | |||
2876 | kfree(dcd_node_props.str_execute_phase_fxn); | ||
2877 | |||
2878 | kfree(dcd_node_props.str_delete_phase_fxn); | ||
2879 | |||
2880 | kfree(dcd_node_props.str_i_alg_name); | ||
2881 | } | ||
2882 | /* Leave the critical section, we're done. */ | ||
2883 | mutex_unlock(&hnode_mgr->node_mgr_lock); | ||
2884 | func_end: | ||
2885 | return status; | ||
2886 | } | ||
2887 | |||
2888 | /* | ||
2889 | * ======== get_rms_fxns ======== | ||
2890 | * Purpose: | ||
2891 | * Retrieve the RMS functions. | ||
2892 | */ | ||
2893 | static int get_rms_fxns(struct node_mgr *hnode_mgr) | ||
2894 | { | ||
2895 | s32 i; | ||
2896 | struct dev_object *dev_obj = hnode_mgr->dev_obj; | ||
2897 | int status = 0; | ||
2898 | |||
2899 | static char *psz_fxns[NUMRMSFXNS] = { | ||
2900 | "RMS_queryServer", /* RMSQUERYSERVER */ | ||
2901 | "RMS_configureServer", /* RMSCONFIGURESERVER */ | ||
2902 | "RMS_createNode", /* RMSCREATENODE */ | ||
2903 | "RMS_executeNode", /* RMSEXECUTENODE */ | ||
2904 | "RMS_deleteNode", /* RMSDELETENODE */ | ||
2905 | "RMS_changeNodePriority", /* RMSCHANGENODEPRIORITY */ | ||
2906 | "RMS_readMemory", /* RMSREADMEMORY */ | ||
2907 | "RMS_writeMemory", /* RMSWRITEMEMORY */ | ||
2908 | "RMS_copy", /* RMSCOPY */ | ||
2909 | }; | ||
2910 | |||
2911 | for (i = 0; i < NUMRMSFXNS; i++) { | ||
2912 | status = dev_get_symbol(dev_obj, psz_fxns[i], | ||
2913 | &(hnode_mgr->fxn_addrs[i])); | ||
2914 | if (status) { | ||
2915 | if (status == -ESPIPE) { | ||
2916 | /* | ||
2917 | * May be loaded dynamically (in the future), | ||
2918 | * but return an error for now. | ||
2919 | */ | ||
2920 | dev_dbg(bridge, "%s: RMS function: %s currently" | ||
2921 | " not loaded\n", __func__, psz_fxns[i]); | ||
2922 | } else { | ||
2923 | dev_dbg(bridge, "%s: Symbol not found: %s " | ||
2924 | "status = 0x%x\n", __func__, | ||
2925 | psz_fxns[i], status); | ||
2926 | break; | ||
2927 | } | ||
2928 | } | ||
2929 | } | ||
2930 | |||
2931 | return status; | ||
2932 | } | ||
2933 | |||
2934 | /* | ||
2935 | * ======== ovly ======== | ||
2936 | * Purpose: | ||
2937 | * Called during overlay. Sends a command to the RMS to copy a block of data. | ||
2938 | */ | ||
2939 | static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr, | ||
2940 | u32 ul_num_bytes, u32 mem_space) | ||
2941 | { | ||
2942 | struct node_object *hnode = (struct node_object *)priv_ref; | ||
2943 | struct node_mgr *hnode_mgr; | ||
2944 | u32 ul_bytes = 0; | ||
2945 | u32 ul_size; | ||
2946 | u32 ul_timeout; | ||
2947 | int status = 0; | ||
2948 | struct bridge_dev_context *hbridge_context; | ||
2949 | /* Function interface to Bridge driver*/ | ||
2950 | struct bridge_drv_interface *intf_fxns; | ||
2951 | |||
2952 | hnode_mgr = hnode->node_mgr; | ||
2953 | |||
2954 | ul_size = ul_num_bytes / hnode_mgr->dsp_word_size; | ||
2955 | ul_timeout = hnode->timeout; | ||
2956 | |||
2957 | /* Call new MemCopy function */ | ||
2958 | intf_fxns = hnode_mgr->intf_fxns; | ||
2959 | status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context); | ||
2960 | if (!status) { | ||
2961 | status = | ||
2962 | (*intf_fxns->brd_mem_copy) (hbridge_context, | ||
2963 | dsp_run_addr, dsp_load_addr, | ||
2964 | ul_num_bytes, (u32) mem_space); | ||
2965 | if (!status) | ||
2966 | ul_bytes = ul_num_bytes; | ||
2967 | else | ||
2968 | pr_debug("%s: failed to copy brd memory, status 0x%x\n", | ||
2969 | __func__, status); | ||
2970 | } else { | ||
2971 | pr_debug("%s: failed to get Bridge context, status 0x%x\n", | ||
2972 | __func__, status); | ||
2973 | } | ||
2974 | |||
2975 | return ul_bytes; | ||
2976 | } | ||
2977 | |||
2978 | /* | ||
2979 | * ======== mem_write ======== | ||
2980 | */ | ||
2981 | static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf, | ||
2982 | u32 ul_num_bytes, u32 mem_space) | ||
2983 | { | ||
2984 | struct node_object *hnode = (struct node_object *)priv_ref; | ||
2985 | struct node_mgr *hnode_mgr; | ||
2986 | u16 mem_sect_type; | ||
2987 | u32 ul_timeout; | ||
2988 | int status = 0; | ||
2989 | struct bridge_dev_context *hbridge_context; | ||
2990 | /* Function interface to Bridge driver */ | ||
2991 | struct bridge_drv_interface *intf_fxns; | ||
2992 | |||
2993 | hnode_mgr = hnode->node_mgr; | ||
2994 | |||
2995 | ul_timeout = hnode->timeout; | ||
2996 | mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA; | ||
2997 | |||
2998 | /* Call new MemWrite function */ | ||
2999 | intf_fxns = hnode_mgr->intf_fxns; | ||
3000 | status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context); | ||
3001 | status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf, | ||
3002 | dsp_add, ul_num_bytes, mem_sect_type); | ||
3003 | |||
3004 | return ul_num_bytes; | ||
3005 | } | ||
3006 | |||
3007 | #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE | ||
3008 | /* | ||
3009 | * ======== node_find_addr ======== | ||
3010 | */ | ||
3011 | int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr, | ||
3012 | u32 offset_range, void *sym_addr_output, char *sym_name) | ||
3013 | { | ||
3014 | struct node_object *node_obj; | ||
3015 | int status = -ENOENT; | ||
3016 | |||
3017 | list_for_each_entry(node_obj, &node_mgr->node_list, list_elem) { | ||
3018 | status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr, | ||
3019 | offset_range, sym_addr_output, sym_name); | ||
3020 | if (!status) { | ||
3021 | pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__, | ||
3022 | (unsigned int) node_mgr, | ||
3023 | sym_addr, offset_range, | ||
3024 | (unsigned int) sym_addr_output, sym_name); | ||
3025 | break; | ||
3026 | } | ||
3027 | } | ||
3028 | |||
3029 | return status; | ||
3030 | } | ||
3031 | #endif | ||
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c deleted file mode 100644 index 23e5146989b1..000000000000 --- a/drivers/staging/tidspbridge/rmgr/proc.c +++ /dev/null | |||
@@ -1,1836 +0,0 @@ | |||
1 | /* | ||
2 | * proc.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Processor interface at the driver level. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | /* ------------------------------------ Host OS */ | ||
21 | #include <linux/dma-mapping.h> | ||
22 | #include <linux/scatterlist.h> | ||
23 | #include <dspbridge/host_os.h> | ||
24 | |||
25 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
26 | #include <dspbridge/dbdefs.h> | ||
27 | |||
28 | /* ----------------------------------- OS Adaptation Layer */ | ||
29 | #include <dspbridge/ntfy.h> | ||
30 | #include <dspbridge/sync.h> | ||
31 | /* ----------------------------------- Bridge Driver */ | ||
32 | #include <dspbridge/dspdefs.h> | ||
33 | #include <dspbridge/dspdeh.h> | ||
34 | /* ----------------------------------- Platform Manager */ | ||
35 | #include <dspbridge/cod.h> | ||
36 | #include <dspbridge/dev.h> | ||
37 | #include <dspbridge/procpriv.h> | ||
38 | #include <dspbridge/dmm.h> | ||
39 | |||
40 | /* ----------------------------------- Resource Manager */ | ||
41 | #include <dspbridge/mgr.h> | ||
42 | #include <dspbridge/node.h> | ||
43 | #include <dspbridge/nldr.h> | ||
44 | #include <dspbridge/rmm.h> | ||
45 | |||
46 | /* ----------------------------------- Others */ | ||
47 | #include <dspbridge/dbdcd.h> | ||
48 | #include <dspbridge/msg.h> | ||
49 | #include <dspbridge/dspioctl.h> | ||
50 | #include <dspbridge/drv.h> | ||
51 | |||
52 | /* ----------------------------------- This */ | ||
53 | #include <dspbridge/proc.h> | ||
54 | #include <dspbridge/pwr.h> | ||
55 | |||
56 | #include <dspbridge/resourcecleanup.h> | ||
57 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | ||
58 | #define MAXCMDLINELEN 255 | ||
59 | #define PROC_ENVPROCID "PROC_ID=%d" | ||
60 | #define MAXPROCIDLEN (8 + 5) | ||
61 | #define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */ | ||
62 | #define PWR_TIMEOUT 500 /* Sleep/wake timeout in msec */ | ||
63 | #define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */ | ||
64 | |||
65 | #define DSP_CACHE_LINE 128 | ||
66 | |||
67 | #define BUFMODE_MASK (3 << 14) | ||
68 | |||
69 | /* Buffer modes from DSP perspective */ | ||
70 | #define RBUF 0x4000 /* Input buffer */ | ||
71 | #define WBUF 0x8000 /* Output Buffer */ | ||
72 | |||
73 | extern struct device *bridge; | ||
74 | |||
75 | /* ----------------------------------- Globals */ | ||
76 | |||
77 | /* The proc_object structure. */ | ||
78 | struct proc_object { | ||
79 | struct list_head link; /* Link to next proc_object */ | ||
80 | struct dev_object *dev_obj; /* Device this PROC represents */ | ||
81 | u32 process; /* Process owning this Processor */ | ||
82 | struct mgr_object *mgr_obj; /* Manager Object Handle */ | ||
83 | u32 attach_count; /* Processor attach count */ | ||
84 | u32 processor_id; /* Processor number */ | ||
85 | u32 timeout; /* Time out count */ | ||
86 | enum dsp_procstate proc_state; /* Processor state */ | ||
87 | u32 unit; /* DDSP unit number */ | ||
88 | bool is_already_attached; /* | ||
89 | * True if the Device below has | ||
90 | * a GPP client attached | ||
91 | */ | ||
92 | struct ntfy_object *ntfy_obj; /* Manages notifications */ | ||
93 | /* Bridge Context Handle */ | ||
94 | struct bridge_dev_context *bridge_context; | ||
95 | /* Function interface to Bridge driver */ | ||
96 | struct bridge_drv_interface *intf_fxns; | ||
97 | char *last_coff; | ||
98 | struct list_head proc_list; | ||
99 | }; | ||
100 | |||
101 | DEFINE_MUTEX(proc_lock); /* For critical sections */ | ||
102 | |||
103 | /* ----------------------------------- Function Prototypes */ | ||
104 | static int proc_monitor(struct proc_object *proc_obj); | ||
105 | static s32 get_envp_count(char **envp); | ||
106 | static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems, | ||
107 | s32 cnew_envp, char *sz_var); | ||
108 | |||
109 | /* remember mapping information */ | ||
110 | static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt, | ||
111 | u32 mpu_addr, u32 dsp_addr, u32 size) | ||
112 | { | ||
113 | struct dmm_map_object *map_obj; | ||
114 | |||
115 | u32 num_usr_pgs = size / PG_SIZE4K; | ||
116 | |||
117 | pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n", | ||
118 | __func__, mpu_addr, | ||
119 | dsp_addr, size); | ||
120 | |||
121 | map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL); | ||
122 | if (!map_obj) | ||
123 | return NULL; | ||
124 | |||
125 | INIT_LIST_HEAD(&map_obj->link); | ||
126 | |||
127 | map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *), | ||
128 | GFP_KERNEL); | ||
129 | if (!map_obj->pages) { | ||
130 | kfree(map_obj); | ||
131 | return NULL; | ||
132 | } | ||
133 | |||
134 | map_obj->mpu_addr = mpu_addr; | ||
135 | map_obj->dsp_addr = dsp_addr; | ||
136 | map_obj->size = size; | ||
137 | map_obj->num_usr_pgs = num_usr_pgs; | ||
138 | |||
139 | spin_lock(&pr_ctxt->dmm_map_lock); | ||
140 | list_add(&map_obj->link, &pr_ctxt->dmm_map_list); | ||
141 | spin_unlock(&pr_ctxt->dmm_map_lock); | ||
142 | |||
143 | return map_obj; | ||
144 | } | ||
145 | |||
146 | static int match_exact_map_obj(struct dmm_map_object *map_obj, | ||
147 | u32 dsp_addr, u32 size) | ||
148 | { | ||
149 | if (map_obj->dsp_addr == dsp_addr && map_obj->size != size) | ||
150 | pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n", | ||
151 | __func__, dsp_addr, map_obj->size, size); | ||
152 | |||
153 | return map_obj->dsp_addr == dsp_addr && | ||
154 | map_obj->size == size; | ||
155 | } | ||
156 | |||
157 | static void remove_mapping_information(struct process_context *pr_ctxt, | ||
158 | u32 dsp_addr, u32 size) | ||
159 | { | ||
160 | struct dmm_map_object *map_obj; | ||
161 | |||
162 | pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__, | ||
163 | dsp_addr, size); | ||
164 | |||
165 | spin_lock(&pr_ctxt->dmm_map_lock); | ||
166 | list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { | ||
167 | pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n", | ||
168 | __func__, | ||
169 | map_obj->mpu_addr, | ||
170 | map_obj->dsp_addr, | ||
171 | map_obj->size); | ||
172 | |||
173 | if (match_exact_map_obj(map_obj, dsp_addr, size)) { | ||
174 | pr_debug("%s: match, deleting map info\n", __func__); | ||
175 | list_del(&map_obj->link); | ||
176 | kfree(map_obj->dma_info.sg); | ||
177 | kfree(map_obj->pages); | ||
178 | kfree(map_obj); | ||
179 | goto out; | ||
180 | } | ||
181 | pr_debug("%s: candidate didn't match\n", __func__); | ||
182 | } | ||
183 | |||
184 | pr_err("%s: failed to find given map info\n", __func__); | ||
185 | out: | ||
186 | spin_unlock(&pr_ctxt->dmm_map_lock); | ||
187 | } | ||
188 | |||
189 | static int match_containing_map_obj(struct dmm_map_object *map_obj, | ||
190 | u32 mpu_addr, u32 size) | ||
191 | { | ||
192 | u32 map_obj_end = map_obj->mpu_addr + map_obj->size; | ||
193 | |||
194 | return mpu_addr >= map_obj->mpu_addr && | ||
195 | mpu_addr + size <= map_obj_end; | ||
196 | } | ||
197 | |||
198 | static struct dmm_map_object *find_containing_mapping( | ||
199 | struct process_context *pr_ctxt, | ||
200 | u32 mpu_addr, u32 size) | ||
201 | { | ||
202 | struct dmm_map_object *map_obj; | ||
203 | |||
204 | pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__, | ||
205 | mpu_addr, size); | ||
206 | |||
207 | spin_lock(&pr_ctxt->dmm_map_lock); | ||
208 | list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { | ||
209 | pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n", | ||
210 | __func__, | ||
211 | map_obj->mpu_addr, | ||
212 | map_obj->dsp_addr, | ||
213 | map_obj->size); | ||
214 | if (match_containing_map_obj(map_obj, mpu_addr, size)) { | ||
215 | pr_debug("%s: match!\n", __func__); | ||
216 | goto out; | ||
217 | } | ||
218 | |||
219 | pr_debug("%s: no match!\n", __func__); | ||
220 | } | ||
221 | |||
222 | map_obj = NULL; | ||
223 | out: | ||
224 | spin_unlock(&pr_ctxt->dmm_map_lock); | ||
225 | return map_obj; | ||
226 | } | ||
227 | |||
228 | static int find_first_page_in_cache(struct dmm_map_object *map_obj, | ||
229 | unsigned long mpu_addr) | ||
230 | { | ||
231 | u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT; | ||
232 | u32 requested_base_page = mpu_addr >> PAGE_SHIFT; | ||
233 | int pg_index = requested_base_page - mapped_base_page; | ||
234 | |||
235 | if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) { | ||
236 | pr_err("%s: failed (got %d)\n", __func__, pg_index); | ||
237 | return -1; | ||
238 | } | ||
239 | |||
240 | pr_debug("%s: first page is %d\n", __func__, pg_index); | ||
241 | return pg_index; | ||
242 | } | ||
243 | |||
244 | static inline struct page *get_mapping_page(struct dmm_map_object *map_obj, | ||
245 | int pg_i) | ||
246 | { | ||
247 | pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__, | ||
248 | pg_i, map_obj->num_usr_pgs); | ||
249 | |||
250 | if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) { | ||
251 | pr_err("%s: requested pg_i %d is out of mapped range\n", | ||
252 | __func__, pg_i); | ||
253 | return NULL; | ||
254 | } | ||
255 | |||
256 | return map_obj->pages[pg_i]; | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * ======== proc_attach ======== | ||
261 | * Purpose: | ||
262 | * Prepare for communication with a particular DSP processor, and return | ||
263 | * a handle to the processor object. | ||
264 | */ | ||
265 | int | ||
266 | proc_attach(u32 processor_id, | ||
267 | const struct dsp_processorattrin *attr_in, | ||
268 | void **ph_processor, struct process_context *pr_ctxt) | ||
269 | { | ||
270 | int status = 0; | ||
271 | struct dev_object *hdev_obj; | ||
272 | struct proc_object *p_proc_object = NULL; | ||
273 | struct mgr_object *hmgr_obj = NULL; | ||
274 | struct drv_object *hdrv_obj = NULL; | ||
275 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
276 | u8 dev_type; | ||
277 | |||
278 | if (pr_ctxt->processor) { | ||
279 | *ph_processor = pr_ctxt->processor; | ||
280 | return status; | ||
281 | } | ||
282 | |||
283 | /* Get the Driver and Manager Object Handles */ | ||
284 | if (!drv_datap || !drv_datap->drv_object || !drv_datap->mgr_object) { | ||
285 | status = -ENODATA; | ||
286 | pr_err("%s: Failed to get object handles\n", __func__); | ||
287 | } else { | ||
288 | hdrv_obj = drv_datap->drv_object; | ||
289 | hmgr_obj = drv_datap->mgr_object; | ||
290 | } | ||
291 | |||
292 | if (!status) { | ||
293 | /* Get the Device Object */ | ||
294 | status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj); | ||
295 | } | ||
296 | if (!status) | ||
297 | status = dev_get_dev_type(hdev_obj, &dev_type); | ||
298 | |||
299 | if (status) | ||
300 | goto func_end; | ||
301 | |||
302 | /* If we made it this far, create the Processor object: */ | ||
303 | p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL); | ||
304 | /* Fill out the Processor Object: */ | ||
305 | if (p_proc_object == NULL) { | ||
306 | status = -ENOMEM; | ||
307 | goto func_end; | ||
308 | } | ||
309 | p_proc_object->dev_obj = hdev_obj; | ||
310 | p_proc_object->mgr_obj = hmgr_obj; | ||
311 | p_proc_object->processor_id = dev_type; | ||
312 | /* Store TGID instead of process handle */ | ||
313 | p_proc_object->process = current->tgid; | ||
314 | |||
315 | INIT_LIST_HEAD(&p_proc_object->proc_list); | ||
316 | |||
317 | if (attr_in) | ||
318 | p_proc_object->timeout = attr_in->timeout; | ||
319 | else | ||
320 | p_proc_object->timeout = PROC_DFLT_TIMEOUT; | ||
321 | |||
322 | status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns); | ||
323 | if (!status) { | ||
324 | status = dev_get_bridge_context(hdev_obj, | ||
325 | &p_proc_object->bridge_context); | ||
326 | if (status) | ||
327 | kfree(p_proc_object); | ||
328 | } else | ||
329 | kfree(p_proc_object); | ||
330 | |||
331 | if (status) | ||
332 | goto func_end; | ||
333 | |||
334 | /* Create the Notification Object */ | ||
335 | /* This is created with no event mask, no notify mask | ||
336 | * and no valid handle to the notification. They all get | ||
337 | * filled in when proc_register_notify is called */ | ||
338 | p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object), | ||
339 | GFP_KERNEL); | ||
340 | if (p_proc_object->ntfy_obj) | ||
341 | ntfy_init(p_proc_object->ntfy_obj); | ||
342 | else | ||
343 | status = -ENOMEM; | ||
344 | |||
345 | if (!status) { | ||
346 | /* Insert the Processor Object into the DEV List. | ||
347 | * Return handle to this Processor Object: | ||
348 | * Find out if the Device is already attached to a | ||
349 | * Processor. If so, return AlreadyAttached status */ | ||
350 | status = dev_insert_proc_object(p_proc_object->dev_obj, | ||
351 | (u32) p_proc_object, | ||
352 | &p_proc_object-> | ||
353 | is_already_attached); | ||
354 | if (!status) { | ||
355 | if (p_proc_object->is_already_attached) | ||
356 | status = 0; | ||
357 | } else { | ||
358 | if (p_proc_object->ntfy_obj) { | ||
359 | ntfy_delete(p_proc_object->ntfy_obj); | ||
360 | kfree(p_proc_object->ntfy_obj); | ||
361 | } | ||
362 | |||
363 | kfree(p_proc_object); | ||
364 | } | ||
365 | if (!status) { | ||
366 | *ph_processor = (void *)p_proc_object; | ||
367 | pr_ctxt->processor = *ph_processor; | ||
368 | (void)proc_notify_clients(p_proc_object, | ||
369 | DSP_PROCESSORATTACH); | ||
370 | } | ||
371 | } else { | ||
372 | /* Don't leak memory on failure */ | ||
373 | kfree(p_proc_object); | ||
374 | } | ||
375 | func_end: | ||
376 | return status; | ||
377 | } | ||
378 | |||
379 | static int get_exec_file(struct cfg_devnode *dev_node_obj, | ||
380 | struct dev_object *hdev_obj, | ||
381 | u32 size, char *exec_file) | ||
382 | { | ||
383 | u8 dev_type; | ||
384 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
385 | |||
386 | dev_get_dev_type(hdev_obj, (u8 *) &dev_type); | ||
387 | |||
388 | if (!exec_file) | ||
389 | return -EFAULT; | ||
390 | |||
391 | if (dev_type == DSP_UNIT) { | ||
392 | if (!drv_datap || !drv_datap->base_img) | ||
393 | return -EFAULT; | ||
394 | |||
395 | if (strlen(drv_datap->base_img) >= size) | ||
396 | return -EINVAL; | ||
397 | |||
398 | strcpy(exec_file, drv_datap->base_img); | ||
399 | } else { | ||
400 | return -ENOENT; | ||
401 | } | ||
402 | |||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * ======== proc_auto_start ======== | ||
408 | * Purpose: | ||
409 | * A Particular device gets loaded with the default image | ||
410 | * if the AutoStart flag is set. | ||
411 | * Parameters: | ||
412 | * hdev_obj: Handle to the Device | ||
413 | * Returns: | ||
414 | * 0: On Successful Loading | ||
415 | * -EPERM General Failure | ||
416 | * Requires: | ||
417 | * hdev_obj != NULL | ||
418 | * Ensures: | ||
419 | */ | ||
420 | int proc_auto_start(struct cfg_devnode *dev_node_obj, | ||
421 | struct dev_object *hdev_obj) | ||
422 | { | ||
423 | int status = -EPERM; | ||
424 | struct proc_object *p_proc_object; | ||
425 | char sz_exec_file[MAXCMDLINELEN]; | ||
426 | char *argv[2]; | ||
427 | struct mgr_object *hmgr_obj = NULL; | ||
428 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
429 | u8 dev_type; | ||
430 | |||
431 | /* Create a Dummy PROC Object */ | ||
432 | if (!drv_datap || !drv_datap->mgr_object) { | ||
433 | status = -ENODATA; | ||
434 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | ||
435 | goto func_end; | ||
436 | } else { | ||
437 | hmgr_obj = drv_datap->mgr_object; | ||
438 | } | ||
439 | |||
440 | p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL); | ||
441 | if (p_proc_object == NULL) { | ||
442 | status = -ENOMEM; | ||
443 | goto func_end; | ||
444 | } | ||
445 | p_proc_object->dev_obj = hdev_obj; | ||
446 | p_proc_object->mgr_obj = hmgr_obj; | ||
447 | status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns); | ||
448 | if (!status) | ||
449 | status = dev_get_bridge_context(hdev_obj, | ||
450 | &p_proc_object->bridge_context); | ||
451 | if (status) | ||
452 | goto func_cont; | ||
453 | |||
454 | /* Stop the Device, put it into standby mode */ | ||
455 | status = proc_stop(p_proc_object); | ||
456 | |||
457 | if (status) | ||
458 | goto func_cont; | ||
459 | |||
460 | /* Get the default executable for this board... */ | ||
461 | dev_get_dev_type(hdev_obj, (u8 *) &dev_type); | ||
462 | p_proc_object->processor_id = dev_type; | ||
463 | status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file), | ||
464 | sz_exec_file); | ||
465 | if (!status) { | ||
466 | argv[0] = sz_exec_file; | ||
467 | argv[1] = NULL; | ||
468 | /* ...and try to load it: */ | ||
469 | status = proc_load(p_proc_object, 1, (const char **)argv, NULL); | ||
470 | if (!status) | ||
471 | status = proc_start(p_proc_object); | ||
472 | } | ||
473 | kfree(p_proc_object->last_coff); | ||
474 | p_proc_object->last_coff = NULL; | ||
475 | func_cont: | ||
476 | kfree(p_proc_object); | ||
477 | func_end: | ||
478 | return status; | ||
479 | } | ||
480 | |||
481 | /* | ||
482 | * ======== proc_ctrl ======== | ||
483 | * Purpose: | ||
484 | * Pass control information to the GPP device driver managing the | ||
485 | * DSP processor. | ||
486 | * | ||
487 | * This will be an OEM-only function, and not part of the DSP/BIOS Bridge | ||
488 | * application developer's API. | ||
489 | * Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous | ||
490 | * Operation. arg can be null. | ||
491 | */ | ||
492 | int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata *arg) | ||
493 | { | ||
494 | int status = 0; | ||
495 | struct proc_object *p_proc_object = hprocessor; | ||
496 | u32 timeout = 0; | ||
497 | |||
498 | if (p_proc_object) { | ||
499 | /* intercept PWR deep sleep command */ | ||
500 | if (dw_cmd == BRDIOCTL_DEEPSLEEP) { | ||
501 | timeout = arg->cb_data; | ||
502 | status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout); | ||
503 | } | ||
504 | /* intercept PWR emergency sleep command */ | ||
505 | else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) { | ||
506 | timeout = arg->cb_data; | ||
507 | status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout); | ||
508 | } else if (dw_cmd == PWR_DEEPSLEEP) { | ||
509 | /* timeout = arg->cb_data; */ | ||
510 | status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout); | ||
511 | } | ||
512 | /* intercept PWR wake commands */ | ||
513 | else if (dw_cmd == BRDIOCTL_WAKEUP) { | ||
514 | timeout = arg->cb_data; | ||
515 | status = pwr_wake_dsp(timeout); | ||
516 | } else if (dw_cmd == PWR_WAKEUP) { | ||
517 | /* timeout = arg->cb_data; */ | ||
518 | status = pwr_wake_dsp(timeout); | ||
519 | } else | ||
520 | if (!((*p_proc_object->intf_fxns->dev_cntrl) | ||
521 | (p_proc_object->bridge_context, dw_cmd, | ||
522 | arg))) { | ||
523 | status = 0; | ||
524 | } else { | ||
525 | status = -EPERM; | ||
526 | } | ||
527 | } else { | ||
528 | status = -EFAULT; | ||
529 | } | ||
530 | |||
531 | return status; | ||
532 | } | ||
533 | |||
534 | /* | ||
535 | * ======== proc_detach ======== | ||
536 | * Purpose: | ||
537 | * Destroys the Processor Object. Removes the notification from the Dev | ||
538 | * List. | ||
539 | */ | ||
540 | int proc_detach(struct process_context *pr_ctxt) | ||
541 | { | ||
542 | int status = 0; | ||
543 | struct proc_object *p_proc_object = NULL; | ||
544 | |||
545 | p_proc_object = (struct proc_object *)pr_ctxt->processor; | ||
546 | |||
547 | if (p_proc_object) { | ||
548 | /* Notify the Client */ | ||
549 | ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH); | ||
550 | /* Remove the notification memory */ | ||
551 | if (p_proc_object->ntfy_obj) { | ||
552 | ntfy_delete(p_proc_object->ntfy_obj); | ||
553 | kfree(p_proc_object->ntfy_obj); | ||
554 | } | ||
555 | |||
556 | kfree(p_proc_object->last_coff); | ||
557 | p_proc_object->last_coff = NULL; | ||
558 | /* Remove the Proc from the DEV List */ | ||
559 | (void)dev_remove_proc_object(p_proc_object->dev_obj, | ||
560 | (u32) p_proc_object); | ||
561 | /* Free the Processor Object */ | ||
562 | kfree(p_proc_object); | ||
563 | pr_ctxt->processor = NULL; | ||
564 | } else { | ||
565 | status = -EFAULT; | ||
566 | } | ||
567 | |||
568 | return status; | ||
569 | } | ||
570 | |||
571 | /* | ||
572 | * ======== proc_enum_nodes ======== | ||
573 | * Purpose: | ||
574 | * Enumerate and get configuration information about nodes allocated | ||
575 | * on a DSP processor. | ||
576 | */ | ||
577 | int proc_enum_nodes(void *hprocessor, void **node_tab, | ||
578 | u32 node_tab_size, u32 *pu_num_nodes, | ||
579 | u32 *pu_allocated) | ||
580 | { | ||
581 | int status = -EPERM; | ||
582 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
583 | struct node_mgr *hnode_mgr = NULL; | ||
584 | |||
585 | if (p_proc_object) { | ||
586 | if (!(dev_get_node_manager(p_proc_object->dev_obj, | ||
587 | &hnode_mgr))) { | ||
588 | if (hnode_mgr) { | ||
589 | status = node_enum_nodes(hnode_mgr, node_tab, | ||
590 | node_tab_size, | ||
591 | pu_num_nodes, | ||
592 | pu_allocated); | ||
593 | } | ||
594 | } | ||
595 | } else { | ||
596 | status = -EFAULT; | ||
597 | } | ||
598 | |||
599 | return status; | ||
600 | } | ||
601 | |||
602 | /* Cache operations act on kernel addresses instead of user addresses */ | ||
603 | static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start, | ||
604 | ssize_t len, int pg_i) | ||
605 | { | ||
606 | struct page *page; | ||
607 | unsigned long offset; | ||
608 | ssize_t rest; | ||
609 | int ret = 0, i = 0; | ||
610 | struct scatterlist *sg = map_obj->dma_info.sg; | ||
611 | |||
612 | while (len) { | ||
613 | page = get_mapping_page(map_obj, pg_i); | ||
614 | if (!page) { | ||
615 | pr_err("%s: no page for %08lx\n", __func__, start); | ||
616 | ret = -EINVAL; | ||
617 | goto out; | ||
618 | } else if (IS_ERR(page)) { | ||
619 | pr_err("%s: err page for %08lx(%lu)\n", __func__, start, | ||
620 | PTR_ERR(page)); | ||
621 | ret = PTR_ERR(page); | ||
622 | goto out; | ||
623 | } | ||
624 | |||
625 | offset = start & ~PAGE_MASK; | ||
626 | rest = min_t(ssize_t, PAGE_SIZE - offset, len); | ||
627 | |||
628 | sg_set_page(&sg[i], page, rest, offset); | ||
629 | |||
630 | len -= rest; | ||
631 | start += rest; | ||
632 | pg_i++, i++; | ||
633 | } | ||
634 | |||
635 | if (i != map_obj->dma_info.num_pages) { | ||
636 | pr_err("%s: bad number of sg iterations\n", __func__); | ||
637 | ret = -EFAULT; | ||
638 | goto out; | ||
639 | } | ||
640 | |||
641 | out: | ||
642 | return ret; | ||
643 | } | ||
644 | |||
645 | static int memory_regain_ownership(struct dmm_map_object *map_obj, | ||
646 | unsigned long start, ssize_t len, enum dma_data_direction dir) | ||
647 | { | ||
648 | int ret = 0; | ||
649 | unsigned long first_data_page = start >> PAGE_SHIFT; | ||
650 | unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT); | ||
651 | /* calculating the number of pages this area spans */ | ||
652 | unsigned long num_pages = last_data_page - first_data_page + 1; | ||
653 | struct bridge_dma_map_info *dma_info = &map_obj->dma_info; | ||
654 | |||
655 | if (!dma_info->sg) | ||
656 | goto out; | ||
657 | |||
658 | if (dma_info->dir != dir || dma_info->num_pages != num_pages) { | ||
659 | pr_err("%s: dma info doesn't match given params\n", __func__); | ||
660 | return -EINVAL; | ||
661 | } | ||
662 | |||
663 | dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir); | ||
664 | |||
665 | pr_debug("%s: dma_map_sg unmapped\n", __func__); | ||
666 | |||
667 | kfree(dma_info->sg); | ||
668 | |||
669 | map_obj->dma_info.sg = NULL; | ||
670 | |||
671 | out: | ||
672 | return ret; | ||
673 | } | ||
674 | |||
675 | /* Cache operations act on kernel addresses instead of user addresses */ | ||
676 | static int memory_give_ownership(struct dmm_map_object *map_obj, | ||
677 | unsigned long start, ssize_t len, enum dma_data_direction dir) | ||
678 | { | ||
679 | int pg_i, ret, sg_num; | ||
680 | struct scatterlist *sg; | ||
681 | unsigned long first_data_page = start >> PAGE_SHIFT; | ||
682 | unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT); | ||
683 | /* calculating the number of pages this area spans */ | ||
684 | unsigned long num_pages = last_data_page - first_data_page + 1; | ||
685 | |||
686 | pg_i = find_first_page_in_cache(map_obj, start); | ||
687 | if (pg_i < 0) { | ||
688 | pr_err("%s: failed to find first page in cache\n", __func__); | ||
689 | ret = -EINVAL; | ||
690 | goto out; | ||
691 | } | ||
692 | |||
693 | sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL); | ||
694 | if (!sg) { | ||
695 | ret = -ENOMEM; | ||
696 | goto out; | ||
697 | } | ||
698 | |||
699 | sg_init_table(sg, num_pages); | ||
700 | |||
701 | /* cleanup a previous sg allocation */ | ||
702 | /* this may happen if the application doesn't signal end-of-DMA */ | ||
703 | kfree(map_obj->dma_info.sg); | ||
704 | |||
705 | map_obj->dma_info.sg = sg; | ||
706 | map_obj->dma_info.dir = dir; | ||
707 | map_obj->dma_info.num_pages = num_pages; | ||
708 | |||
709 | ret = build_dma_sg(map_obj, start, len, pg_i); | ||
710 | if (ret) | ||
711 | goto kfree_sg; | ||
712 | |||
713 | sg_num = dma_map_sg(bridge, sg, num_pages, dir); | ||
714 | if (sg_num < 1) { | ||
715 | pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num); | ||
716 | ret = -EFAULT; | ||
717 | goto kfree_sg; | ||
718 | } | ||
719 | |||
720 | pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num); | ||
721 | map_obj->dma_info.sg_num = sg_num; | ||
722 | |||
723 | return 0; | ||
724 | |||
725 | kfree_sg: | ||
726 | kfree(sg); | ||
727 | map_obj->dma_info.sg = NULL; | ||
728 | out: | ||
729 | return ret; | ||
730 | } | ||
731 | |||
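memory_give_ownership() and build_dma_sg() above split an arbitrary, possibly unaligned user buffer into per-page scatterlist entries: the page count comes from the first and last page numbers of the span, and each entry covers at most one page starting at the buffer's offset within that page. A minimal standalone sketch of just that page-span arithmetic, with a hypothetical address and length; the DMA mapping itself is kernel-only and omitted:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long start = 0x10000f00UL;   /* hypothetical user address */
        long len = 6000;                      /* spans three 4 KiB pages   */
        unsigned long first = start >> PAGE_SHIFT;
        unsigned long last  = (start + len - 1) >> PAGE_SHIFT;

        printf("pages needed: %lu\n", last - first + 1);

        while (len > 0) {
            unsigned long offset = start & ~PAGE_MASK;   /* offset within page */
            long chunk = PAGE_SIZE - offset;             /* at most one page   */
            if (chunk > len)
                chunk = len;
            printf("entry: page 0x%lx offset %lu len %ld\n",
                   start >> PAGE_SHIFT, offset, chunk);
            len -= chunk;
            start += chunk;
        }
        return 0;
    }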
732 | int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, | ||
733 | enum dma_data_direction dir) | ||
734 | { | ||
735 | /* Keep STATUS here for future additions to this function */ | ||
736 | int status = 0; | ||
737 | struct process_context *pr_ctxt = (struct process_context *) hprocessor; | ||
738 | struct dmm_map_object *map_obj; | ||
739 | |||
740 | if (!pr_ctxt) { | ||
741 | status = -EFAULT; | ||
742 | goto err_out; | ||
743 | } | ||
744 | |||
745 | pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__, | ||
746 | (u32)pmpu_addr, | ||
747 | ul_size, dir); | ||
748 | |||
749 | mutex_lock(&proc_lock); | ||
750 | |||
751 | /* find the requested memory area in the cached mapping information */ | ||
752 | map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); | ||
753 | if (!map_obj) { | ||
754 | pr_err("%s: find_containing_mapping failed\n", __func__); | ||
755 | status = -EFAULT; | ||
756 | goto no_map; | ||
757 | } | ||
758 | |||
759 | if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { | ||
760 | pr_err("%s: InValid address parameters %p %x\n", | ||
761 | __func__, pmpu_addr, ul_size); | ||
762 | status = -EFAULT; | ||
763 | } | ||
764 | |||
765 | no_map: | ||
766 | mutex_unlock(&proc_lock); | ||
767 | err_out: | ||
768 | |||
769 | return status; | ||
770 | } | ||
771 | |||
772 | int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, | ||
773 | enum dma_data_direction dir) | ||
774 | { | ||
775 | /* Keep STATUS here for future additions to this function */ | ||
776 | int status = 0; | ||
777 | struct process_context *pr_ctxt = (struct process_context *) hprocessor; | ||
778 | struct dmm_map_object *map_obj; | ||
779 | |||
780 | if (!pr_ctxt) { | ||
781 | status = -EFAULT; | ||
782 | goto err_out; | ||
783 | } | ||
784 | |||
785 | pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__, | ||
786 | (u32)pmpu_addr, | ||
787 | ul_size, dir); | ||
788 | |||
789 | mutex_lock(&proc_lock); | ||
790 | |||
791 | /* find requested memory area in cached mapping information */ | ||
792 | map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); | ||
793 | if (!map_obj) { | ||
794 | pr_err("%s: find_containing_mapping failed\n", __func__); | ||
795 | status = -EFAULT; | ||
796 | goto no_map; | ||
797 | } | ||
798 | |||
799 | if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { | ||
800 | pr_err("%s: Invalid address parameters %p %x\n", | ||
801 | __func__, pmpu_addr, ul_size); | ||
802 | status = -EFAULT; | ||
803 | } | ||
804 | |||
805 | no_map: | ||
806 | mutex_unlock(&proc_lock); | ||
807 | err_out: | ||
808 | return status; | ||
809 | } | ||
810 | |||
811 | /* | ||
812 | * ======== proc_flush_memory ======== | ||
813 | * Purpose: | ||
814 | * Flush cache | ||
815 | */ | ||
816 | int proc_flush_memory(void *hprocessor, void *pmpu_addr, | ||
817 | u32 ul_size, u32 ul_flags) | ||
818 | { | ||
819 | enum dma_data_direction dir = DMA_BIDIRECTIONAL; | ||
820 | |||
821 | return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir); | ||
822 | } | ||
823 | |||
824 | /* | ||
825 | * ======== proc_invalidate_memory ======== | ||
826 | * Purpose: | ||
827 | * Invalidates the memory specified | ||
828 | */ | ||
829 | int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size) | ||
830 | { | ||
831 | enum dma_data_direction dir = DMA_FROM_DEVICE; | ||
832 | |||
833 | return proc_begin_dma(hprocessor, pmpu_addr, size, dir); | ||
834 | } | ||
835 | |||
836 | /* | ||
837 | * ======== proc_get_resource_info ======== | ||
838 | * Purpose: | ||
839 | * Enumerate the resources currently available on a processor. | ||
840 | */ | ||
841 | int proc_get_resource_info(void *hprocessor, u32 resource_type, | ||
842 | struct dsp_resourceinfo *resource_info, | ||
843 | u32 resource_info_size) | ||
844 | { | ||
845 | int status = -EPERM; | ||
846 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
847 | struct node_mgr *hnode_mgr = NULL; | ||
848 | struct nldr_object *nldr_obj = NULL; | ||
849 | struct rmm_target_obj *rmm = NULL; | ||
850 | struct io_mgr *hio_mgr = NULL; /* IO manager handle */ | ||
851 | |||
852 | if (!p_proc_object) { | ||
853 | status = -EFAULT; | ||
854 | goto func_end; | ||
855 | } | ||
856 | switch (resource_type) { | ||
857 | case DSP_RESOURCE_DYNDARAM: | ||
858 | case DSP_RESOURCE_DYNSARAM: | ||
859 | case DSP_RESOURCE_DYNEXTERNAL: | ||
860 | case DSP_RESOURCE_DYNSRAM: | ||
861 | status = dev_get_node_manager(p_proc_object->dev_obj, | ||
862 | &hnode_mgr); | ||
863 | if (!hnode_mgr) { | ||
864 | status = -EFAULT; | ||
865 | goto func_end; | ||
866 | } | ||
867 | |||
868 | status = node_get_nldr_obj(hnode_mgr, &nldr_obj); | ||
869 | if (!status) { | ||
870 | status = nldr_get_rmm_manager(nldr_obj, &rmm); | ||
871 | if (rmm) { | ||
872 | if (!rmm_stat(rmm, | ||
873 | (enum dsp_memtype)resource_type, | ||
874 | (struct dsp_memstat *) | ||
875 | &(resource_info->result. | ||
876 | mem_stat))) | ||
877 | status = -EINVAL; | ||
878 | } else { | ||
879 | status = -EFAULT; | ||
880 | } | ||
881 | } | ||
882 | break; | ||
883 | case DSP_RESOURCE_PROCLOAD: | ||
884 | status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr); | ||
885 | if (hio_mgr) | ||
886 | status = | ||
887 | p_proc_object->intf_fxns-> | ||
888 | io_get_proc_load(hio_mgr, | ||
889 | (struct dsp_procloadstat *) | ||
890 | &(resource_info->result. | ||
891 | proc_load_stat)); | ||
892 | else | ||
893 | status = -EFAULT; | ||
894 | break; | ||
895 | default: | ||
896 | status = -EPERM; | ||
897 | break; | ||
898 | } | ||
899 | func_end: | ||
900 | return status; | ||
901 | } | ||
902 | |||
903 | /* | ||
904 | * ======== proc_get_dev_object ======== | ||
905 | * Purpose: | ||
906 | * Return the Dev Object handle for a given Processor. | ||
907 | * | ||
908 | */ | ||
909 | int proc_get_dev_object(void *hprocessor, | ||
910 | struct dev_object **device_obj) | ||
911 | { | ||
912 | int status = -EPERM; | ||
913 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
914 | |||
915 | if (p_proc_object) { | ||
916 | *device_obj = p_proc_object->dev_obj; | ||
917 | status = 0; | ||
918 | } else { | ||
919 | *device_obj = NULL; | ||
920 | status = -EFAULT; | ||
921 | } | ||
922 | |||
923 | return status; | ||
924 | } | ||
925 | |||
926 | /* | ||
927 | * ======== proc_get_state ======== | ||
928 | * Purpose: | ||
929 | * Report the state of the specified DSP processor. | ||
930 | */ | ||
931 | int proc_get_state(void *hprocessor, | ||
932 | struct dsp_processorstate *proc_state_obj, | ||
933 | u32 state_info_size) | ||
934 | { | ||
935 | int status = 0; | ||
936 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
937 | int brd_status; | ||
938 | |||
939 | if (p_proc_object) { | ||
940 | /* First, retrieve BRD state information */ | ||
941 | status = (*p_proc_object->intf_fxns->brd_status) | ||
942 | (p_proc_object->bridge_context, &brd_status); | ||
943 | if (!status) { | ||
944 | switch (brd_status) { | ||
945 | case BRD_STOPPED: | ||
946 | proc_state_obj->proc_state = PROC_STOPPED; | ||
947 | break; | ||
948 | case BRD_SLEEP_TRANSITION: | ||
949 | case BRD_DSP_HIBERNATION: | ||
950 | /* Fall through */ | ||
951 | case BRD_RUNNING: | ||
952 | proc_state_obj->proc_state = PROC_RUNNING; | ||
953 | break; | ||
954 | case BRD_LOADED: | ||
955 | proc_state_obj->proc_state = PROC_LOADED; | ||
956 | break; | ||
957 | case BRD_ERROR: | ||
958 | proc_state_obj->proc_state = PROC_ERROR; | ||
959 | break; | ||
960 | default: | ||
961 | proc_state_obj->proc_state = 0xFF; | ||
962 | status = -EPERM; | ||
963 | break; | ||
964 | } | ||
965 | } | ||
966 | } else { | ||
967 | status = -EFAULT; | ||
968 | } | ||
969 | dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n", | ||
970 | __func__, status, proc_state_obj->proc_state); | ||
971 | return status; | ||
972 | } | ||
973 | |||
974 | /* | ||
975 | * ======== proc_get_trace ======== | ||
976 | * Purpose: | ||
977 | * Retrieve the current contents of the trace buffer, located on the | ||
978 | * Processor. Predefined symbols for the trace buffer must have been | ||
979 | * configured into the DSP executable. | ||
980 | * Details: | ||
981 | * We only support using the symbols SYS_PUTCBEG and SYS_PUTCEND to | ||
982 | * define a trace buffer. Treat it as an undocumented feature. | ||
983 | * This call is destructive, meaning the processor is placed in the monitor | ||
984 | * state as a result of this function. | ||
985 | */ | ||
986 | int proc_get_trace(void *hprocessor, u8 *pbuf, u32 max_size) | ||
987 | { | ||
988 | int status; | ||
989 | |||
990 | status = -ENOSYS; | ||
991 | return status; | ||
992 | } | ||
993 | |||
994 | /* | ||
995 | * ======== proc_load ======== | ||
996 | * Purpose: | ||
997 | * Reset a processor and load a new base program image. | ||
998 | * This will be an OEM-only function, and not part of the DSP/BIOS Bridge | ||
999 | * application developer's API. | ||
1000 | */ | ||
1001 | int proc_load(void *hprocessor, const s32 argc_index, | ||
1002 | const char **user_args, const char **user_envp) | ||
1003 | { | ||
1004 | int status = 0; | ||
1005 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
1006 | struct io_mgr *hio_mgr; /* IO manager handle */ | ||
1007 | struct msg_mgr *hmsg_mgr; | ||
1008 | struct cod_manager *cod_mgr; /* Code manager handle */ | ||
1009 | char *pargv0; /* temp argv[0] ptr */ | ||
1010 | char **new_envp; /* Updated envp[] array. */ | ||
1011 | char sz_proc_id[MAXPROCIDLEN]; /* Buffer for "PROC_ID=<n>" */ | ||
1012 | s32 envp_elems; /* Num elements in envp[]. */ | ||
1013 | s32 cnew_envp; /* Num elements in new_envp[] */ | ||
1014 | s32 nproc_id = 0; /* Anticipate MP version. */ | ||
1015 | struct dcd_manager *hdcd_handle; | ||
1016 | struct dmm_object *dmm_mgr; | ||
1017 | u32 dw_ext_end; | ||
1018 | u32 proc_id; | ||
1019 | int brd_state; | ||
1020 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | ||
1021 | |||
1022 | #ifdef OPT_LOAD_TIME_INSTRUMENTATION | ||
1023 | struct timeval tv1; | ||
1024 | struct timeval tv2; | ||
1025 | #endif | ||
1026 | |||
1027 | #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) | ||
1028 | struct dspbridge_platform_data *pdata = | ||
1029 | omap_dspbridge_dev->dev.platform_data; | ||
1030 | #endif | ||
1031 | |||
1032 | #ifdef OPT_LOAD_TIME_INSTRUMENTATION | ||
1033 | do_gettimeofday(&tv1); | ||
1034 | #endif | ||
1035 | if (!p_proc_object) { | ||
1036 | status = -EFAULT; | ||
1037 | goto func_end; | ||
1038 | } | ||
1039 | dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr); | ||
1040 | if (!cod_mgr) { | ||
1041 | status = -EPERM; | ||
1042 | goto func_end; | ||
1043 | } | ||
1044 | status = proc_stop(hprocessor); | ||
1045 | if (status) | ||
1046 | goto func_end; | ||
1047 | |||
1048 | /* Place the board in the monitor state. */ | ||
1049 | status = proc_monitor(hprocessor); | ||
1050 | if (status) | ||
1051 | goto func_end; | ||
1052 | |||
1053 | /* Save ptr to original argv[0]. */ | ||
1054 | pargv0 = (char *)user_args[0]; | ||
1055 | /* Prepend "PROC_ID=<nproc_id>" to envp array for target. */ | ||
1056 | envp_elems = get_envp_count((char **)user_envp); | ||
1057 | cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2)); | ||
1058 | new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL); | ||
1059 | if (new_envp) { | ||
1060 | status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID, | ||
1061 | nproc_id); | ||
1062 | if (status == -1) { | ||
1063 | dev_dbg(bridge, "%s: Proc ID string overflow\n", | ||
1064 | __func__); | ||
1065 | status = -EPERM; | ||
1066 | } else { | ||
1067 | new_envp = | ||
1068 | prepend_envp(new_envp, (char **)user_envp, | ||
1069 | envp_elems, cnew_envp, sz_proc_id); | ||
1070 | /* Get the DCD Handle */ | ||
1071 | status = mgr_get_dcd_handle(p_proc_object->mgr_obj, | ||
1072 | (u32 *) &hdcd_handle); | ||
1073 | if (!status) { | ||
1074 | /* Before proceeding with new load, | ||
1075 | * check if a previously registered COFF | ||
1076 | * exists. | ||
1077 | * If yes, unregister nodes in previously | ||
1078 | * registered COFF. If any error occurred, | ||
1079 | * set previously registered COFF to NULL. */ | ||
1080 | if (p_proc_object->last_coff != NULL) { | ||
1081 | status = | ||
1082 | dcd_auto_unregister(hdcd_handle, | ||
1083 | p_proc_object-> | ||
1084 | last_coff); | ||
1085 | /* Regardless of auto unregister status, | ||
1086 | * free previously allocated | ||
1087 | * memory. */ | ||
1088 | kfree(p_proc_object->last_coff); | ||
1089 | p_proc_object->last_coff = NULL; | ||
1090 | } | ||
1091 | } | ||
1092 | /* On success, do cod_open_base() */ | ||
1093 | status = cod_open_base(cod_mgr, (char *)user_args[0], | ||
1094 | COD_SYMB); | ||
1095 | } | ||
1096 | } else { | ||
1097 | status = -ENOMEM; | ||
1098 | } | ||
1099 | if (!status) { | ||
1100 | /* Auto-register data base */ | ||
1101 | /* Get the DCD Handle */ | ||
1102 | status = mgr_get_dcd_handle(p_proc_object->mgr_obj, | ||
1103 | (u32 *) &hdcd_handle); | ||
1104 | if (!status) { | ||
1105 | /* Auto register nodes in specified COFF | ||
1106 | * file. If registration did not fail, | ||
1107 | * (status = 0 or -EACCES) | ||
1108 | * save the name of the COFF file for | ||
1109 | * de-registration in the future. */ | ||
1110 | status = | ||
1111 | dcd_auto_register(hdcd_handle, | ||
1112 | (char *)user_args[0]); | ||
1113 | if (status == -EACCES) | ||
1114 | status = 0; | ||
1115 | |||
1116 | if (status) { | ||
1117 | status = -EPERM; | ||
1118 | } else { | ||
1119 | /* Allocate memory for pszLastCoff */ | ||
1120 | p_proc_object->last_coff = | ||
1121 | kzalloc((strlen(user_args[0]) + | ||
1122 | 1), GFP_KERNEL); | ||
1123 | /* If memory allocated, save COFF file name */ | ||
1124 | if (p_proc_object->last_coff) { | ||
1125 | strncpy(p_proc_object->last_coff, | ||
1126 | (char *)user_args[0], | ||
1127 | (strlen((char *)user_args[0]) + | ||
1128 | 1)); | ||
1129 | } | ||
1130 | } | ||
1131 | } | ||
1132 | } | ||
1133 | /* Update shared memory address and size */ | ||
1134 | if (!status) { | ||
1135 | /* Create the message manager. This must be done | ||
1136 | * before calling the IOOnLoaded function. */ | ||
1137 | dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr); | ||
1138 | if (!hmsg_mgr) { | ||
1139 | status = msg_create(&hmsg_mgr, p_proc_object->dev_obj, | ||
1140 | (msg_onexit) node_on_exit); | ||
1141 | dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr); | ||
1142 | } | ||
1143 | } | ||
1144 | if (!status) { | ||
1145 | /* Set the Device object's message manager */ | ||
1146 | status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr); | ||
1147 | if (hio_mgr) | ||
1148 | status = (*p_proc_object->intf_fxns->io_on_loaded) | ||
1149 | (hio_mgr); | ||
1150 | else | ||
1151 | status = -EFAULT; | ||
1152 | } | ||
1153 | if (!status) { | ||
1154 | /* Now, attempt to load an exec: */ | ||
1155 | |||
1156 | /* Boost the OPP level to Maximum level supported by baseport */ | ||
1157 | #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) | ||
1158 | if (pdata->cpu_set_freq) | ||
1159 | (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]); | ||
1160 | #endif | ||
1161 | status = cod_load_base(cod_mgr, argc_index, (char **)user_args, | ||
1162 | dev_brd_write_fxn, | ||
1163 | p_proc_object->dev_obj, NULL); | ||
1164 | if (status) { | ||
1165 | if (status == -EBADF) { | ||
1166 | dev_dbg(bridge, "%s: Failure to Load the EXE\n", | ||
1167 | __func__); | ||
1168 | } | ||
1169 | if (status == -ESPIPE) { | ||
1170 | pr_err("%s: Couldn't parse the file\n", | ||
1171 | __func__); | ||
1172 | } | ||
1173 | } | ||
1174 | /* Requesting the lowest opp supported */ | ||
1175 | #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ) | ||
1176 | if (pdata->cpu_set_freq) | ||
1177 | (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]); | ||
1178 | #endif | ||
1179 | |||
1180 | } | ||
1181 | if (!status) { | ||
1182 | /* Update the Processor status to loaded */ | ||
1183 | status = (*p_proc_object->intf_fxns->brd_set_state) | ||
1184 | (p_proc_object->bridge_context, BRD_LOADED); | ||
1185 | if (!status) { | ||
1186 | p_proc_object->proc_state = PROC_LOADED; | ||
1187 | if (p_proc_object->ntfy_obj) | ||
1188 | proc_notify_clients(p_proc_object, | ||
1189 | DSP_PROCESSORSTATECHANGE); | ||
1190 | } | ||
1191 | } | ||
1192 | if (!status) { | ||
1193 | status = proc_get_processor_id(hprocessor, &proc_id); | ||
1194 | if (proc_id == DSP_UNIT) { | ||
1195 | /* Use all available DSP address space after EXTMEM | ||
1196 | * for DMM */ | ||
1197 | if (!status) | ||
1198 | status = cod_get_sym_value(cod_mgr, EXTEND, | ||
1199 | &dw_ext_end); | ||
1200 | |||
1201 | /* Reset DMM structs and add an initial free chunk */ | ||
1202 | if (!status) { | ||
1203 | status = | ||
1204 | dev_get_dmm_mgr(p_proc_object->dev_obj, | ||
1205 | &dmm_mgr); | ||
1206 | if (dmm_mgr) { | ||
1207 | /* Set dw_ext_end to the DMM start | ||
1208 | * byte address */ | ||
1209 | dw_ext_end = | ||
1210 | (dw_ext_end + 1) * DSPWORDSIZE; | ||
1211 | /* DMM memory is from EXT_END */ | ||
1212 | status = dmm_create_tables(dmm_mgr, | ||
1213 | dw_ext_end, | ||
1214 | DMMPOOLSIZE); | ||
1215 | } else { | ||
1216 | status = -EFAULT; | ||
1217 | } | ||
1218 | } | ||
1219 | } | ||
1220 | } | ||
1221 | /* Restore the original argv[0] */ | ||
1222 | kfree(new_envp); | ||
1223 | user_args[0] = pargv0; | ||
1224 | if (!status) { | ||
1225 | if (!((*p_proc_object->intf_fxns->brd_status) | ||
1226 | (p_proc_object->bridge_context, &brd_state))) { | ||
1227 | pr_info("%s: Processor Loaded %s\n", __func__, pargv0); | ||
1228 | kfree(drv_datap->base_img); | ||
1229 | drv_datap->base_img = kstrdup(pargv0, GFP_KERNEL); | ||
1230 | if (!drv_datap->base_img) | ||
1231 | status = -ENOMEM; | ||
1232 | } | ||
1233 | } | ||
1234 | |||
1235 | func_end: | ||
1236 | if (status) { | ||
1237 | pr_err("%s: Processor failed to load\n", __func__); | ||
1238 | proc_stop(p_proc_object); | ||
1239 | } | ||
1240 | #ifdef OPT_LOAD_TIME_INSTRUMENTATION | ||
1241 | do_gettimeofday(&tv2); | ||
1242 | if (tv2.tv_usec < tv1.tv_usec) { | ||
1243 | tv2.tv_usec += 1000000; | ||
1244 | tv2.tv_sec--; | ||
1245 | } | ||
1246 | dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__, | ||
1247 | tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec); | ||
1248 | #endif | ||
1249 | return status; | ||
1250 | } | ||
1251 | |||
1252 | /* | ||
1253 | * ======== proc_map ======== | ||
1254 | * Purpose: | ||
1255 | * Maps a MPU buffer to DSP address space. | ||
1256 | */ | ||
1257 | int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, | ||
1258 | void *req_addr, void **pp_map_addr, u32 ul_map_attr, | ||
1259 | struct process_context *pr_ctxt) | ||
1260 | { | ||
1261 | u32 va_align; | ||
1262 | u32 pa_align; | ||
1263 | struct dmm_object *dmm_mgr; | ||
1264 | u32 size_align; | ||
1265 | int status = 0; | ||
1266 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
1267 | struct dmm_map_object *map_obj; | ||
1268 | u32 tmp_addr = 0; | ||
1269 | |||
1270 | #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK | ||
1271 | if ((ul_map_attr & BUFMODE_MASK) != RBUF) { | ||
1272 | if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) || | ||
1273 | !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) { | ||
1274 | pr_err("%s: not aligned: 0x%x (%d)\n", __func__, | ||
1275 | (u32)pmpu_addr, ul_size); | ||
1276 | return -EFAULT; | ||
1277 | } | ||
1278 | } | ||
1279 | #endif | ||
1280 | |||
1281 | /* Calculate the page-aligned PA, VA and size */ | ||
1282 | va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K); | ||
1283 | pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K); | ||
1284 | size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align, | ||
1285 | PG_SIZE4K); | ||
1286 | |||
1287 | if (!p_proc_object) { | ||
1288 | status = -EFAULT; | ||
1289 | goto func_end; | ||
1290 | } | ||
1291 | /* Critical section */ | ||
1292 | mutex_lock(&proc_lock); | ||
1293 | dmm_get_handle(p_proc_object, &dmm_mgr); | ||
1294 | if (dmm_mgr) | ||
1295 | status = dmm_map_memory(dmm_mgr, va_align, size_align); | ||
1296 | else | ||
1297 | status = -EFAULT; | ||
1298 | |||
1299 | /* Add mapping to the page tables. */ | ||
1300 | if (!status) { | ||
1301 | |||
1302 | /* Mapped address = MSB of VA | LSB of PA */ | ||
1303 | tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1))); | ||
1304 | /* mapped memory resource tracking */ | ||
1305 | map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr, | ||
1306 | size_align); | ||
1307 | if (!map_obj) | ||
1308 | status = -ENOMEM; | ||
1309 | else | ||
1310 | status = (*p_proc_object->intf_fxns->brd_mem_map) | ||
1311 | (p_proc_object->bridge_context, pa_align, va_align, | ||
1312 | size_align, ul_map_attr, map_obj->pages); | ||
1313 | } | ||
1314 | if (!status) { | ||
1315 | /* Mapped address = MSB of VA | LSB of PA */ | ||
1316 | *pp_map_addr = (void *) tmp_addr; | ||
1317 | } else { | ||
1318 | remove_mapping_information(pr_ctxt, tmp_addr, size_align); | ||
1319 | dmm_un_map_memory(dmm_mgr, va_align, &size_align); | ||
1320 | } | ||
1321 | mutex_unlock(&proc_lock); | ||
1322 | |||
1323 | if (status) | ||
1324 | goto func_end; | ||
1325 | |||
1326 | func_end: | ||
1327 | dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, " | ||
1328 | "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, " | ||
1329 | "pa_align %x, size_align %x status 0x%x\n", __func__, | ||
1330 | hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr, | ||
1331 | pp_map_addr, va_align, pa_align, size_align, status); | ||
1332 | |||
1333 | return status; | ||
1334 | } | ||
1335 | |||
1336 | /* | ||
1337 | * ======== proc_register_notify ======== | ||
1338 | * Purpose: | ||
1339 | * Register to be notified of specific processor events. | ||
1340 | */ | ||
1341 | int proc_register_notify(void *hprocessor, u32 event_mask, | ||
1342 | u32 notify_type, struct dsp_notification | ||
1343 | *hnotification) | ||
1344 | { | ||
1345 | int status = 0; | ||
1346 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
1347 | struct deh_mgr *hdeh_mgr; | ||
1348 | |||
1349 | /* Check processor handle */ | ||
1350 | if (!p_proc_object) { | ||
1351 | status = -EFAULT; | ||
1352 | goto func_end; | ||
1353 | } | ||
1354 | /* Check if event mask is a valid processor related event */ | ||
1355 | if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH | | ||
1356 | DSP_PROCESSORDETACH | DSP_PROCESSORRESTART | | ||
1357 | DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR | | ||
1358 | DSP_WDTOVERFLOW)) | ||
1359 | status = -EINVAL; | ||
1360 | |||
1361 | /* Check if notify type is valid */ | ||
1362 | if (notify_type != DSP_SIGNALEVENT) | ||
1363 | status = -EINVAL; | ||
1364 | |||
1365 | if (!status) { | ||
1366 | /* If event mask is not DSP_SYSERROR, DSP_MMUFAULT, | ||
1367 | * or DSP_PWRERROR then register event immediately. */ | ||
1368 | if (event_mask & | ||
1369 | ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR | | ||
1370 | DSP_WDTOVERFLOW)) { | ||
1371 | status = ntfy_register(p_proc_object->ntfy_obj, | ||
1372 | hnotification, event_mask, | ||
1373 | notify_type); | ||
1374 | /* Special case alert, special case alert! | ||
1375 | * If we're trying to *deregister* (i.e. event_mask | ||
1376 | * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification, | ||
1377 | * we have to deregister with the DEH manager. | ||
1378 | * There's no way to know, based on event_mask, which | ||
1379 | * manager the notification event was registered with, | ||
1380 | * so if we're trying to deregister and ntfy_register | ||
1381 | * failed, we'll give the deh manager a shot. | ||
1382 | */ | ||
1383 | if ((event_mask == 0) && status) { | ||
1384 | status = | ||
1385 | dev_get_deh_mgr(p_proc_object->dev_obj, | ||
1386 | &hdeh_mgr); | ||
1387 | status = | ||
1388 | bridge_deh_register_notify(hdeh_mgr, | ||
1389 | event_mask, | ||
1390 | notify_type, | ||
1391 | hnotification); | ||
1392 | } | ||
1393 | } else { | ||
1394 | status = dev_get_deh_mgr(p_proc_object->dev_obj, | ||
1395 | &hdeh_mgr); | ||
1396 | status = | ||
1397 | bridge_deh_register_notify(hdeh_mgr, | ||
1398 | event_mask, | ||
1399 | notify_type, | ||
1400 | hnotification); | ||
1401 | |||
1402 | } | ||
1403 | } | ||
1404 | func_end: | ||
1405 | return status; | ||
1406 | } | ||
1407 | |||
1408 | /* | ||
1409 | * ======== proc_reserve_memory ======== | ||
1410 | * Purpose: | ||
1411 | * Reserve a virtually contiguous region of DSP address space. | ||
1412 | */ | ||
1413 | int proc_reserve_memory(void *hprocessor, u32 ul_size, | ||
1414 | void **pp_rsv_addr, | ||
1415 | struct process_context *pr_ctxt) | ||
1416 | { | ||
1417 | struct dmm_object *dmm_mgr; | ||
1418 | int status = 0; | ||
1419 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
1420 | struct dmm_rsv_object *rsv_obj; | ||
1421 | |||
1422 | if (!p_proc_object) { | ||
1423 | status = -EFAULT; | ||
1424 | goto func_end; | ||
1425 | } | ||
1426 | |||
1427 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | ||
1428 | if (!dmm_mgr) { | ||
1429 | status = -EFAULT; | ||
1430 | goto func_end; | ||
1431 | } | ||
1432 | |||
1433 | status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr); | ||
1434 | if (status != 0) | ||
1435 | goto func_end; | ||
1436 | |||
1437 | /* | ||
1438 | * A successful reserve should be followed by insertion of rsv_obj | ||
1439 | * into dmm_rsv_list, so that reserved memory resource tracking | ||
1440 | * remains up to date | ||
1441 | */ | ||
1442 | rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL); | ||
1443 | if (rsv_obj) { | ||
1444 | rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr; | ||
1445 | spin_lock(&pr_ctxt->dmm_rsv_lock); | ||
1446 | list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list); | ||
1447 | spin_unlock(&pr_ctxt->dmm_rsv_lock); | ||
1448 | } | ||
1449 | |||
1450 | func_end: | ||
1451 | dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p " | ||
1452 | "status 0x%x\n", __func__, hprocessor, | ||
1453 | ul_size, pp_rsv_addr, status); | ||
1454 | return status; | ||
1455 | } | ||
1456 | |||
1457 | /* | ||
1458 | * ======== proc_start ======== | ||
1459 | * Purpose: | ||
1460 | * Start a processor running. | ||
1461 | */ | ||
1462 | int proc_start(void *hprocessor) | ||
1463 | { | ||
1464 | int status = 0; | ||
1465 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
1466 | struct cod_manager *cod_mgr; /* Code manager handle */ | ||
1467 | u32 dw_dsp_addr; /* Loaded code's entry point. */ | ||
1468 | int brd_state; | ||
1469 | |||
1470 | if (!p_proc_object) { | ||
1471 | status = -EFAULT; | ||
1472 | goto func_end; | ||
1473 | } | ||
1474 | /* Call the bridge_brd_start */ | ||
1475 | if (p_proc_object->proc_state != PROC_LOADED) { | ||
1476 | status = -EBADR; | ||
1477 | goto func_end; | ||
1478 | } | ||
1479 | status = dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr); | ||
1480 | if (!cod_mgr) { | ||
1481 | status = -EFAULT; | ||
1482 | goto func_cont; | ||
1483 | } | ||
1484 | |||
1485 | status = cod_get_entry(cod_mgr, &dw_dsp_addr); | ||
1486 | if (status) | ||
1487 | goto func_cont; | ||
1488 | |||
1489 | status = (*p_proc_object->intf_fxns->brd_start) | ||
1490 | (p_proc_object->bridge_context, dw_dsp_addr); | ||
1491 | if (status) | ||
1492 | goto func_cont; | ||
1493 | |||
1494 | /* Call dev_create2 */ | ||
1495 | status = dev_create2(p_proc_object->dev_obj); | ||
1496 | if (!status) { | ||
1497 | p_proc_object->proc_state = PROC_RUNNING; | ||
1498 | /* Deep sleep switches off the peripheral clocks. | ||
1499 | * We just put the DSP CPU in idle in the idle loop, | ||
1500 | * so there is no need to send a command to the DSP. */ | ||
1501 | |||
1502 | if (p_proc_object->ntfy_obj) { | ||
1503 | proc_notify_clients(p_proc_object, | ||
1504 | DSP_PROCESSORSTATECHANGE); | ||
1505 | } | ||
1506 | } else { | ||
1507 | /* Failed to Create Node Manager and DISP Object | ||
1508 | * Stop the Processor from running. Put it in STOPPED State */ | ||
1509 | (void)(*p_proc_object->intf_fxns-> | ||
1510 | brd_stop) (p_proc_object->bridge_context); | ||
1511 | p_proc_object->proc_state = PROC_STOPPED; | ||
1512 | } | ||
1513 | func_cont: | ||
1514 | if (!status) { | ||
1515 | if (!((*p_proc_object->intf_fxns->brd_status) | ||
1516 | (p_proc_object->bridge_context, &brd_state))) { | ||
1517 | pr_info("%s: dsp in running state\n", __func__); | ||
1518 | } | ||
1519 | } else { | ||
1520 | pr_err("%s: Failed to start the dsp\n", __func__); | ||
1521 | proc_stop(p_proc_object); | ||
1522 | } | ||
1523 | |||
1524 | func_end: | ||
1525 | return status; | ||
1526 | } | ||
1527 | |||
1528 | /* | ||
1529 | * ======== proc_stop ======== | ||
1530 | * Purpose: | ||
1531 | * Stop a processor running. | ||
1532 | */ | ||
1533 | int proc_stop(void *hprocessor) | ||
1534 | { | ||
1535 | int status = 0; | ||
1536 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
1537 | struct msg_mgr *hmsg_mgr; | ||
1538 | struct node_mgr *hnode_mgr; | ||
1539 | void *hnode; | ||
1540 | u32 node_tab_size = 1; | ||
1541 | u32 num_nodes = 0; | ||
1542 | u32 nodes_allocated = 0; | ||
1543 | |||
1544 | if (!p_proc_object) { | ||
1545 | status = -EFAULT; | ||
1546 | goto func_end; | ||
1547 | } | ||
1548 | /* check if there are any running nodes */ | ||
1549 | status = dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr); | ||
1550 | if (!status && hnode_mgr) { | ||
1551 | status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size, | ||
1552 | &num_nodes, &nodes_allocated); | ||
1553 | if ((status == -EINVAL) || (nodes_allocated > 0)) { | ||
1554 | pr_err("%s: Can't stop device, active nodes = %d\n", | ||
1555 | __func__, nodes_allocated); | ||
1556 | return -EBADR; | ||
1557 | } | ||
1558 | } | ||
1559 | /* Call the bridge_brd_stop */ | ||
1560 | /* It is OK to stop a device that doesn't have nodes or hasn't been started */ | ||
1561 | status = | ||
1562 | (*p_proc_object->intf_fxns-> | ||
1563 | brd_stop) (p_proc_object->bridge_context); | ||
1564 | if (!status) { | ||
1565 | dev_dbg(bridge, "%s: processor in standby mode\n", __func__); | ||
1566 | p_proc_object->proc_state = PROC_STOPPED; | ||
1567 | /* Destroy the Node Manager, msg_ctrl Manager */ | ||
1568 | if (!(dev_destroy2(p_proc_object->dev_obj))) { | ||
1569 | /* Destroy the msg_ctrl by calling msg_delete */ | ||
1570 | dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr); | ||
1571 | if (hmsg_mgr) { | ||
1572 | msg_delete(hmsg_mgr); | ||
1573 | dev_set_msg_mgr(p_proc_object->dev_obj, NULL); | ||
1574 | } | ||
1575 | } | ||
1576 | } else { | ||
1577 | pr_err("%s: Failed to stop the processor\n", __func__); | ||
1578 | } | ||
1579 | func_end: | ||
1580 | |||
1581 | return status; | ||
1582 | } | ||
1583 | |||
1584 | /* | ||
1585 | * ======== proc_un_map ======== | ||
1586 | * Purpose: | ||
1587 | * Removes a MPU buffer mapping from the DSP address space. | ||
1588 | */ | ||
1589 | int proc_un_map(void *hprocessor, void *map_addr, | ||
1590 | struct process_context *pr_ctxt) | ||
1591 | { | ||
1592 | int status = 0; | ||
1593 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
1594 | struct dmm_object *dmm_mgr; | ||
1595 | u32 va_align; | ||
1596 | u32 size_align; | ||
1597 | |||
1598 | va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); | ||
1599 | if (!p_proc_object) { | ||
1600 | status = -EFAULT; | ||
1601 | goto func_end; | ||
1602 | } | ||
1603 | |||
1604 | status = dmm_get_handle(hprocessor, &dmm_mgr); | ||
1605 | if (!dmm_mgr) { | ||
1606 | status = -EFAULT; | ||
1607 | goto func_end; | ||
1608 | } | ||
1609 | |||
1610 | /* Critical section */ | ||
1611 | mutex_lock(&proc_lock); | ||
1612 | /* | ||
1613 | * Update DMM structures. Get the size to unmap. | ||
1614 | * This function returns error if the VA is not mapped | ||
1615 | */ | ||
1616 | status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align); | ||
1617 | /* Remove mapping from the page tables. */ | ||
1618 | if (!status) { | ||
1619 | status = (*p_proc_object->intf_fxns->brd_mem_un_map) | ||
1620 | (p_proc_object->bridge_context, va_align, size_align); | ||
1621 | } | ||
1622 | |||
1623 | if (status) | ||
1624 | goto unmap_failed; | ||
1625 | |||
1626 | /* | ||
1627 | * A successful unmap should be followed by removal of map_obj | ||
1628 | * from dmm_map_list, so that mapped memory resource tracking | ||
1629 | * remains up to date | ||
1630 | */ | ||
1631 | remove_mapping_information(pr_ctxt, (u32) map_addr, size_align); | ||
1632 | |||
1633 | unmap_failed: | ||
1634 | mutex_unlock(&proc_lock); | ||
1635 | |||
1636 | func_end: | ||
1637 | dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", | ||
1638 | __func__, hprocessor, map_addr, status); | ||
1639 | return status; | ||
1640 | } | ||
1641 | |||
1642 | /* | ||
1643 | * ======== proc_un_reserve_memory ======== | ||
1644 | * Purpose: | ||
1645 | * Frees a previously reserved region of DSP address space. | ||
1646 | */ | ||
1647 | int proc_un_reserve_memory(void *hprocessor, void *prsv_addr, | ||
1648 | struct process_context *pr_ctxt) | ||
1649 | { | ||
1650 | struct dmm_object *dmm_mgr; | ||
1651 | int status = 0; | ||
1652 | struct proc_object *p_proc_object = (struct proc_object *)hprocessor; | ||
1653 | struct dmm_rsv_object *rsv_obj; | ||
1654 | |||
1655 | if (!p_proc_object) { | ||
1656 | status = -EFAULT; | ||
1657 | goto func_end; | ||
1658 | } | ||
1659 | |||
1660 | status = dmm_get_handle(p_proc_object, &dmm_mgr); | ||
1661 | if (!dmm_mgr) { | ||
1662 | status = -EFAULT; | ||
1663 | goto func_end; | ||
1664 | } | ||
1665 | |||
1666 | status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr); | ||
1667 | if (status != 0) | ||
1668 | goto func_end; | ||
1669 | |||
1670 | /* | ||
1671 | * A successful unreserve should be followed by removal of rsv_obj | ||
1672 | * from dmm_rsv_list, so that reserved memory resource tracking | ||
1673 | * remains up to date | ||
1674 | */ | ||
1675 | spin_lock(&pr_ctxt->dmm_rsv_lock); | ||
1676 | list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) { | ||
1677 | if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) { | ||
1678 | list_del(&rsv_obj->link); | ||
1679 | kfree(rsv_obj); | ||
1680 | break; | ||
1681 | } | ||
1682 | } | ||
1683 | spin_unlock(&pr_ctxt->dmm_rsv_lock); | ||
1684 | |||
1685 | func_end: | ||
1686 | dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n", | ||
1687 | __func__, hprocessor, prsv_addr, status); | ||
1688 | return status; | ||
1689 | } | ||
1690 | |||
1691 | /* | ||
1692 | * ======== proc_monitor ======== | ||
1693 | * Purpose: | ||
1694 | * Place the Processor in Monitor State. This is an internal | ||
1695 | * function and a requirement before Processor is loaded. | ||
1696 | * This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor. | ||
1697 | * In dev_destroy2 we delete the node manager. | ||
1698 | * Parameters: | ||
1699 | * p_proc_object: Pointer to Processor Object | ||
1700 | * Returns: | ||
1701 | * 0: Processor placed in monitor mode. | ||
1702 | * !0: Failed to place processor in monitor mode. | ||
1703 | * Requires: | ||
1704 | * Valid Processor Handle | ||
1705 | * Ensures: | ||
1706 | * Success: ProcObject state is PROC_IDLE | ||
1707 | */ | ||
1708 | static int proc_monitor(struct proc_object *proc_obj) | ||
1709 | { | ||
1710 | int status = -EPERM; | ||
1711 | struct msg_mgr *hmsg_mgr; | ||
1712 | |||
1713 | /* This is needed only when the Device is loaded while it is | ||
1714 | * already 'ACTIVE' */ | ||
1715 | /* Destroy the Node Manager, msg_ctrl Manager */ | ||
1716 | if (!dev_destroy2(proc_obj->dev_obj)) { | ||
1717 | /* Destroy the msg_ctrl by calling msg_delete */ | ||
1718 | dev_get_msg_mgr(proc_obj->dev_obj, &hmsg_mgr); | ||
1719 | if (hmsg_mgr) { | ||
1720 | msg_delete(hmsg_mgr); | ||
1721 | dev_set_msg_mgr(proc_obj->dev_obj, NULL); | ||
1722 | } | ||
1723 | } | ||
1724 | /* Place the Board in the Monitor State */ | ||
1725 | if (!((*proc_obj->intf_fxns->brd_monitor) | ||
1726 | (proc_obj->bridge_context))) { | ||
1727 | status = 0; | ||
1728 | } | ||
1729 | |||
1730 | return status; | ||
1731 | } | ||
1732 | |||
1733 | /* | ||
1734 | * ======== get_envp_count ======== | ||
1735 | * Purpose: | ||
1736 | * Return the number of elements in the envp array, including the | ||
1737 | * terminating NULL element. | ||
1738 | */ | ||
1739 | static s32 get_envp_count(char **envp) | ||
1740 | { | ||
1741 | s32 ret = 0; | ||
1742 | |||
1743 | if (envp) { | ||
1744 | while (*envp++) | ||
1745 | ret++; | ||
1746 | |||
1747 | ret += 1; /* Include the terminating NULL in the count. */ | ||
1748 | } | ||
1749 | |||
1750 | return ret; | ||
1751 | } | ||
1752 | |||
1753 | /* | ||
1754 | * ======== prepend_envp ======== | ||
1755 | * Purpose: | ||
1756 | * Prepend an environment variable=value pair to the new envp array, and | ||
1757 | * copy in the existing var=value pairs in the old envp array. | ||
1758 | */ | ||
1759 | static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems, | ||
1760 | s32 cnew_envp, char *sz_var) | ||
1761 | { | ||
1762 | char **pp_envp = new_envp; | ||
1763 | |||
1764 | /* Prepend new environ var=value string */ | ||
1765 | *new_envp++ = sz_var; | ||
1766 | |||
1767 | /* Copy user's environment into our own. */ | ||
1768 | while (envp_elems--) | ||
1769 | *new_envp++ = *envp++; | ||
1770 | |||
1771 | /* Ensure NULL terminates the new environment strings array. */ | ||
1772 | if (envp_elems == 0) | ||
1773 | *new_envp = NULL; | ||
1774 | |||
1775 | return pp_envp; | ||
1776 | } | ||
1777 | |||
1778 | /* | ||
1779 | * ======== proc_notify_clients ======== | ||
1780 | * Purpose: | ||
1781 | * Notify registered clients of processor events. | ||
1782 | */ | ||
1783 | int proc_notify_clients(void *proc, u32 events) | ||
1784 | { | ||
1785 | int status = 0; | ||
1786 | struct proc_object *p_proc_object = (struct proc_object *)proc; | ||
1787 | |||
1788 | if (!p_proc_object) { | ||
1789 | status = -EFAULT; | ||
1790 | goto func_end; | ||
1791 | } | ||
1792 | |||
1793 | ntfy_notify(p_proc_object->ntfy_obj, events); | ||
1794 | func_end: | ||
1795 | return status; | ||
1796 | } | ||
1797 | |||
1798 | /* | ||
1799 | * ======== proc_notify_all_clients ======== | ||
1800 | * Purpose: | ||
1801 | * Notify clients of processor events. This includes notifying all clients | ||
1802 | * attached to a particular DSP. | ||
1803 | */ | ||
1804 | int proc_notify_all_clients(void *proc, u32 events) | ||
1805 | { | ||
1806 | int status = 0; | ||
1807 | struct proc_object *p_proc_object = (struct proc_object *)proc; | ||
1808 | |||
1809 | if (!p_proc_object) { | ||
1810 | status = -EFAULT; | ||
1811 | goto func_end; | ||
1812 | } | ||
1813 | |||
1814 | dev_notify_clients(p_proc_object->dev_obj, events); | ||
1815 | |||
1816 | func_end: | ||
1817 | return status; | ||
1818 | } | ||
1819 | |||
1820 | /* | ||
1821 | * ======== proc_get_processor_id ======== | ||
1822 | * Purpose: | ||
1823 | * Retrieves the processor ID. | ||
1824 | */ | ||
1825 | int proc_get_processor_id(void *proc, u32 *proc_id) | ||
1826 | { | ||
1827 | int status = 0; | ||
1828 | struct proc_object *p_proc_object = (struct proc_object *)proc; | ||
1829 | |||
1830 | if (p_proc_object) | ||
1831 | *proc_id = p_proc_object->processor_id; | ||
1832 | else | ||
1833 | status = -EFAULT; | ||
1834 | |||
1835 | return status; | ||
1836 | } | ||
diff --git a/drivers/staging/tidspbridge/rmgr/pwr.c b/drivers/staging/tidspbridge/rmgr/pwr.c deleted file mode 100644 index 17748df351b9..000000000000 --- a/drivers/staging/tidspbridge/rmgr/pwr.c +++ /dev/null | |||
@@ -1,176 +0,0 @@ | |||
1 | /* | ||
2 | * pwr.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * PWR API for controlling DSP power states. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | /* ----------------------------------- Host OS */ | ||
20 | #include <dspbridge/host_os.h> | ||
21 | |||
22 | /* ----------------------------------- This */ | ||
23 | #include <dspbridge/pwr.h> | ||
24 | |||
25 | /* ----------------------------------- Resource Manager */ | ||
26 | #include <dspbridge/devdefs.h> | ||
27 | #include <dspbridge/drv.h> | ||
28 | |||
29 | /* ----------------------------------- Platform Manager */ | ||
30 | #include <dspbridge/dev.h> | ||
31 | |||
32 | /* ----------------------------------- Link Driver */ | ||
33 | #include <dspbridge/dspioctl.h> | ||
34 | |||
35 | /* | ||
36 | * ======== pwr_sleep_dsp ======== | ||
37 | * Send command to DSP to enter sleep state. | ||
38 | */ | ||
39 | int pwr_sleep_dsp(const u32 sleep_code, const u32 timeout) | ||
40 | { | ||
41 | struct bridge_drv_interface *intf_fxns; | ||
42 | struct bridge_dev_context *dw_context; | ||
43 | int status = -EPERM; | ||
44 | struct dev_object *hdev_obj = NULL; | ||
45 | u32 ioctlcode = 0; | ||
46 | u32 arg = timeout; | ||
47 | |||
48 | for (hdev_obj = (struct dev_object *)drv_get_first_dev_object(); | ||
49 | hdev_obj != NULL; | ||
50 | hdev_obj = | ||
51 | (struct dev_object *)drv_get_next_dev_object((u32) hdev_obj)) { | ||
52 | if (dev_get_bridge_context(hdev_obj, | ||
53 | (struct bridge_dev_context **) | ||
54 | &dw_context)) { | ||
55 | continue; | ||
56 | } | ||
57 | if (dev_get_intf_fxns(hdev_obj, | ||
58 | (struct bridge_drv_interface **) | ||
59 | &intf_fxns)) { | ||
60 | continue; | ||
61 | } | ||
62 | if (sleep_code == PWR_DEEPSLEEP) | ||
63 | ioctlcode = BRDIOCTL_DEEPSLEEP; | ||
64 | else if (sleep_code == PWR_EMERGENCYDEEPSLEEP) | ||
65 | ioctlcode = BRDIOCTL_EMERGENCYSLEEP; | ||
66 | else | ||
67 | status = -EINVAL; | ||
68 | |||
69 | if (status != -EINVAL) { | ||
70 | status = (*intf_fxns->dev_cntrl) (dw_context, | ||
71 | ioctlcode, | ||
72 | (void *)&arg); | ||
73 | } | ||
74 | } | ||
75 | return status; | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * ======== pwr_wake_dsp ======== | ||
80 | * Send command to DSP to wake it from sleep. | ||
81 | */ | ||
82 | int pwr_wake_dsp(const u32 timeout) | ||
83 | { | ||
84 | struct bridge_drv_interface *intf_fxns; | ||
85 | struct bridge_dev_context *dw_context; | ||
86 | int status = -EPERM; | ||
87 | struct dev_object *hdev_obj = NULL; | ||
88 | u32 arg = timeout; | ||
89 | |||
90 | for (hdev_obj = (struct dev_object *)drv_get_first_dev_object(); | ||
91 | hdev_obj != NULL; | ||
92 | hdev_obj = (struct dev_object *)drv_get_next_dev_object | ||
93 | ((u32) hdev_obj)) { | ||
94 | if (!(dev_get_bridge_context(hdev_obj, | ||
95 | (struct bridge_dev_context | ||
96 | **)&dw_context))) { | ||
97 | if (!(dev_get_intf_fxns(hdev_obj, | ||
98 | (struct bridge_drv_interface **)&intf_fxns))) { | ||
99 | status = | ||
100 | (*intf_fxns->dev_cntrl) (dw_context, | ||
101 | BRDIOCTL_WAKEUP, | ||
102 | (void *)&arg); | ||
103 | } | ||
104 | } | ||
105 | } | ||
106 | return status; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * ======== pwr_pm_pre_scale ======== | ||
111 | * Sends pre-notification message to DSP. | ||
112 | */ | ||
113 | int pwr_pm_pre_scale(u16 voltage_domain, u32 level) | ||
114 | { | ||
115 | struct bridge_drv_interface *intf_fxns; | ||
116 | struct bridge_dev_context *dw_context; | ||
117 | int status = -EPERM; | ||
118 | struct dev_object *hdev_obj = NULL; | ||
119 | u32 arg[2]; | ||
120 | |||
121 | arg[0] = voltage_domain; | ||
122 | arg[1] = level; | ||
123 | |||
124 | for (hdev_obj = (struct dev_object *)drv_get_first_dev_object(); | ||
125 | hdev_obj != NULL; | ||
126 | hdev_obj = (struct dev_object *)drv_get_next_dev_object | ||
127 | ((u32) hdev_obj)) { | ||
128 | if (!(dev_get_bridge_context(hdev_obj, | ||
129 | (struct bridge_dev_context | ||
130 | **)&dw_context))) { | ||
131 | if (!(dev_get_intf_fxns(hdev_obj, | ||
132 | (struct bridge_drv_interface **)&intf_fxns))) { | ||
133 | status = | ||
134 | (*intf_fxns->dev_cntrl) (dw_context, | ||
135 | BRDIOCTL_PRESCALE_NOTIFY, | ||
136 | (void *)&arg); | ||
137 | } | ||
138 | } | ||
139 | } | ||
140 | return status; | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * ======== pwr_pm_post_scale ======== | ||
145 | * Sends post-notification message to DSP. | ||
146 | */ | ||
147 | int pwr_pm_post_scale(u16 voltage_domain, u32 level) | ||
148 | { | ||
149 | struct bridge_drv_interface *intf_fxns; | ||
150 | struct bridge_dev_context *dw_context; | ||
151 | int status = -EPERM; | ||
152 | struct dev_object *hdev_obj = NULL; | ||
153 | u32 arg[2]; | ||
154 | |||
155 | arg[0] = voltage_domain; | ||
156 | arg[1] = level; | ||
157 | |||
158 | for (hdev_obj = (struct dev_object *)drv_get_first_dev_object(); | ||
159 | hdev_obj != NULL; | ||
160 | hdev_obj = (struct dev_object *)drv_get_next_dev_object | ||
161 | ((u32) hdev_obj)) { | ||
162 | if (!(dev_get_bridge_context(hdev_obj, | ||
163 | (struct bridge_dev_context | ||
164 | **)&dw_context))) { | ||
165 | if (!(dev_get_intf_fxns(hdev_obj, | ||
166 | (struct bridge_drv_interface **)&intf_fxns))) { | ||
167 | status = | ||
168 | (*intf_fxns->dev_cntrl) (dw_context, | ||
169 | BRDIOCTL_POSTSCALE_NOTIFY, | ||
170 | (void *)&arg); | ||
171 | } | ||
172 | } | ||
173 | } | ||
174 | return status; | ||
175 | |||
176 | } | ||
diff --git a/drivers/staging/tidspbridge/rmgr/rmm.c b/drivers/staging/tidspbridge/rmgr/rmm.c deleted file mode 100644 index 52187bd97729..000000000000 --- a/drivers/staging/tidspbridge/rmgr/rmm.c +++ /dev/null | |||
@@ -1,456 +0,0 @@ | |||
1 | /* | ||
2 | * rmm.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
7 | * | ||
8 | * This package is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
13 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
14 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | */ | ||
16 | |||
17 | /* | ||
18 | * This memory manager provides general heap management and arbitrary | ||
19 | * alignment for any number of memory segments. | ||
20 | * | ||
21 | * Notes: | ||
22 | * | ||
23 | * Memory blocks are allocated from the end of the first free memory | ||
24 | * block large enough to satisfy the request. Alignment requirements | ||
25 | * are satisfied by "sliding" the block forward until its base satisfies | ||
26 | * the alignment specification; if this is not possible then the next | ||
27 | * free block large enough to hold the request is tried. | ||
28 | * | ||
29 | * Since alignment can cause the creation of a new free block - the | ||
30 | * unused memory formed between the start of the original free block | ||
31 | * and the start of the allocated block - the memory manager must free | ||
32 | * this memory to prevent a memory leak. | ||
33 | * | ||
34 | * Overlay memory is managed by reserving through rmm_alloc, and freeing | ||
35 | * it through rmm_free. The memory manager prevents DSP code/data that is | ||
36 | * overlayed from being overwritten as long as the memory it runs at has | ||
37 | * been allocated, and not yet freed. | ||
38 | */ | ||
39 | |||
40 | #include <linux/types.h> | ||
41 | #include <linux/list.h> | ||
42 | |||
43 | /* ----------------------------------- Host OS */ | ||
44 | #include <dspbridge/host_os.h> | ||
45 | |||
46 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
47 | #include <dspbridge/dbdefs.h> | ||
48 | |||
49 | /* ----------------------------------- This */ | ||
50 | #include <dspbridge/rmm.h> | ||
51 | |||
52 | /* | ||
53 | * ======== rmm_header ======== | ||
54 | * This header is used to maintain a list of free memory blocks. | ||
55 | */ | ||
56 | struct rmm_header { | ||
57 | struct rmm_header *next; /* form a free memory link list */ | ||
58 | u32 size; /* size of the free memory */ | ||
59 | u32 addr; /* DSP address of memory block */ | ||
60 | }; | ||
61 | |||
62 | /* | ||
63 | * ======== rmm_ovly_sect ======== | ||
64 | * Keeps track of memory occupied by overlay section. | ||
65 | */ | ||
66 | struct rmm_ovly_sect { | ||
67 | struct list_head list_elem; | ||
68 | u32 addr; /* Start of memory section */ | ||
69 | u32 size; /* Length (target MAUs) of section */ | ||
70 | s32 page; /* Memory page */ | ||
71 | }; | ||
72 | |||
73 | /* | ||
74 | * ======== rmm_target_obj ======== | ||
75 | */ | ||
76 | struct rmm_target_obj { | ||
77 | struct rmm_segment *seg_tab; | ||
78 | struct rmm_header **free_list; | ||
79 | u32 num_segs; | ||
80 | struct list_head ovly_list; /* List of overlay memory in use */ | ||
81 | }; | ||
82 | |||
83 | static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size, | ||
84 | u32 align, u32 *dsp_address); | ||
85 | static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr, | ||
86 | u32 size); | ||
87 | |||
88 | /* | ||
89 | * ======== rmm_alloc ======== | ||
90 | */ | ||
91 | int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size, | ||
92 | u32 align, u32 *dsp_address, bool reserve) | ||
93 | { | ||
94 | struct rmm_ovly_sect *sect, *prev_sect = NULL; | ||
95 | struct rmm_ovly_sect *new_sect; | ||
96 | u32 addr; | ||
97 | int status = 0; | ||
98 | |||
99 | if (!reserve) { | ||
100 | if (!alloc_block(target, segid, size, align, dsp_address)) { | ||
101 | status = -ENOMEM; | ||
102 | } else { | ||
103 | /* Increment the number of allocated blocks in this | ||
104 | * segment */ | ||
105 | target->seg_tab[segid].number++; | ||
106 | } | ||
107 | goto func_end; | ||
108 | } | ||
109 | /* An overlay section - see if the block is already in use. If not, | ||
110 | * insert into the list in ascending address order. */ | ||
111 | addr = *dsp_address; | ||
112 | /* Find place to insert new list element. List is sorted from | ||
113 | * smallest to largest address. */ | ||
114 | list_for_each_entry(sect, &target->ovly_list, list_elem) { | ||
115 | if (addr <= sect->addr) { | ||
116 | /* Check for overlap with sect */ | ||
117 | if ((addr + size > sect->addr) || (prev_sect && | ||
118 | (prev_sect->addr + | ||
119 | prev_sect->size > | ||
120 | addr))) { | ||
121 | status = -ENXIO; | ||
122 | } | ||
123 | break; | ||
124 | } | ||
125 | prev_sect = sect; | ||
126 | } | ||
127 | if (!status) { | ||
128 | /* No overlap - allocate list element for new section. */ | ||
129 | new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL); | ||
130 | if (new_sect == NULL) { | ||
131 | status = -ENOMEM; | ||
132 | } else { | ||
133 | new_sect->addr = addr; | ||
134 | new_sect->size = size; | ||
135 | new_sect->page = segid; | ||
136 | if (list_is_last(&sect->list_elem, &target->ovly_list)) | ||
137 | /* Put new section at the end of the list */ | ||
138 | list_add_tail(&new_sect->list_elem, | ||
139 | &target->ovly_list); | ||
140 | else | ||
141 | /* Put new section just before sect */ | ||
142 | list_add_tail(&new_sect->list_elem, | ||
143 | &sect->list_elem); | ||
144 | } | ||
145 | } | ||
146 | func_end: | ||
147 | return status; | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * ======== rmm_create ======== | ||
152 | */ | ||
153 | int rmm_create(struct rmm_target_obj **target_obj, | ||
154 | struct rmm_segment seg_tab[], u32 num_segs) | ||
155 | { | ||
156 | struct rmm_header *hptr; | ||
157 | struct rmm_segment *sptr, *tmp; | ||
158 | struct rmm_target_obj *target; | ||
159 | s32 i; | ||
160 | int status = 0; | ||
161 | |||
162 | /* Allocate RMM target object */ | ||
163 | target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL); | ||
164 | |||
165 | if (target == NULL) | ||
166 | status = -ENOMEM; | ||
167 | |||
168 | if (status) | ||
169 | goto func_cont; | ||
170 | |||
171 | target->num_segs = num_segs; | ||
172 | if (!(num_segs > 0)) | ||
173 | goto func_cont; | ||
174 | |||
175 | /* Allocate the memory for freelist from host's memory */ | ||
176 | target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *), | ||
177 | GFP_KERNEL); | ||
178 | if (target->free_list == NULL) { | ||
179 | status = -ENOMEM; | ||
180 | } else { | ||
181 | /* Allocate headers for each element on the free list */ | ||
182 | for (i = 0; i < (s32) num_segs; i++) { | ||
183 | target->free_list[i] = | ||
184 | kzalloc(sizeof(struct rmm_header), GFP_KERNEL); | ||
185 | if (target->free_list[i] == NULL) { | ||
186 | status = -ENOMEM; | ||
187 | break; | ||
188 | } | ||
189 | } | ||
190 | /* Allocate memory for initial segment table */ | ||
191 | target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment), | ||
192 | GFP_KERNEL); | ||
193 | if (target->seg_tab == NULL) { | ||
194 | status = -ENOMEM; | ||
195 | } else { | ||
196 | /* Initialize segment table and free list */ | ||
197 | sptr = target->seg_tab; | ||
198 | for (i = 0, tmp = seg_tab; num_segs > 0; | ||
199 | num_segs--, i++) { | ||
200 | *sptr = *tmp; | ||
201 | hptr = target->free_list[i]; | ||
202 | hptr->addr = tmp->base; | ||
203 | hptr->size = tmp->length; | ||
204 | hptr->next = NULL; | ||
205 | tmp++; | ||
206 | sptr++; | ||
207 | } | ||
208 | } | ||
209 | } | ||
210 | func_cont: | ||
211 | /* Initialize overlay memory list */ | ||
212 | if (!status) | ||
213 | INIT_LIST_HEAD(&target->ovly_list); | ||
214 | |||
215 | if (!status) { | ||
216 | *target_obj = target; | ||
217 | } else { | ||
218 | *target_obj = NULL; | ||
219 | if (target) | ||
220 | rmm_delete(target); | ||
221 | |||
222 | } | ||
223 | |||
224 | return status; | ||
225 | } | ||
226 | |||
227 | /* | ||
228 | * ======== rmm_delete ======== | ||
229 | */ | ||
230 | void rmm_delete(struct rmm_target_obj *target) | ||
231 | { | ||
232 | struct rmm_ovly_sect *sect, *tmp; | ||
233 | struct rmm_header *hptr; | ||
234 | struct rmm_header *next; | ||
235 | u32 i; | ||
236 | |||
237 | kfree(target->seg_tab); | ||
238 | |||
239 | list_for_each_entry_safe(sect, tmp, &target->ovly_list, list_elem) { | ||
240 | list_del(&sect->list_elem); | ||
241 | kfree(sect); | ||
242 | } | ||
243 | |||
244 | if (target->free_list != NULL) { | ||
245 | /* Free elements on freelist */ | ||
246 | for (i = 0; i < target->num_segs; i++) { | ||
247 | hptr = next = target->free_list[i]; | ||
248 | while (next) { | ||
249 | hptr = next; | ||
250 | next = hptr->next; | ||
251 | kfree(hptr); | ||
252 | } | ||
253 | } | ||
254 | kfree(target->free_list); | ||
255 | } | ||
256 | |||
257 | kfree(target); | ||
258 | } | ||
259 | |||
260 | /* | ||
261 | * ======== rmm_free ======== | ||
262 | */ | ||
263 | bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size, | ||
264 | bool reserved) | ||
265 | { | ||
266 | struct rmm_ovly_sect *sect, *tmp; | ||
267 | bool ret = false; | ||
268 | |||
269 | /* | ||
270 | * Free or unreserve memory. | ||
271 | */ | ||
272 | if (!reserved) { | ||
273 | ret = free_block(target, segid, dsp_addr, size); | ||
274 | if (ret) | ||
275 | target->seg_tab[segid].number--; | ||
276 | |||
277 | } else { | ||
278 | /* Unreserve memory */ | ||
279 | list_for_each_entry_safe(sect, tmp, &target->ovly_list, | ||
280 | list_elem) { | ||
281 | if (dsp_addr == sect->addr) { | ||
282 | /* Remove from list */ | ||
283 | list_del(&sect->list_elem); | ||
284 | kfree(sect); | ||
285 | return true; | ||
286 | } | ||
287 | } | ||
288 | } | ||
289 | return ret; | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * ======== rmm_stat ======== | ||
294 | */ | ||
295 | bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid, | ||
296 | struct dsp_memstat *mem_stat_buf) | ||
297 | { | ||
298 | struct rmm_header *head; | ||
299 | bool ret = false; | ||
300 | u32 max_free_size = 0; | ||
301 | u32 total_free_size = 0; | ||
302 | u32 free_blocks = 0; | ||
303 | |||
304 | if ((u32) segid < target->num_segs) { | ||
305 | head = target->free_list[segid]; | ||
306 | |||
307 | /* Collect data from free_list */ | ||
308 | while (head != NULL) { | ||
309 | max_free_size = max(max_free_size, head->size); | ||
310 | total_free_size += head->size; | ||
311 | free_blocks++; | ||
312 | head = head->next; | ||
313 | } | ||
314 | |||
315 | /* ul_size */ | ||
316 | mem_stat_buf->size = target->seg_tab[segid].length; | ||
317 | |||
318 | /* num_free_blocks */ | ||
319 | mem_stat_buf->num_free_blocks = free_blocks; | ||
320 | |||
321 | /* total_free_size */ | ||
322 | mem_stat_buf->total_free_size = total_free_size; | ||
323 | |||
324 | /* len_max_free_block */ | ||
325 | mem_stat_buf->len_max_free_block = max_free_size; | ||
326 | |||
327 | /* num_alloc_blocks */ | ||
328 | mem_stat_buf->num_alloc_blocks = | ||
329 | target->seg_tab[segid].number; | ||
330 | |||
331 | ret = true; | ||
332 | } | ||
333 | |||
334 | return ret; | ||
335 | } | ||
336 | |||
337 | /* | ||
338 | * ======== alloc_block ======== | ||
339 | * This allocation function allocates memory from the lowest addresses | ||
340 | * first. | ||
341 | */ | ||
342 | static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size, | ||
343 | u32 align, u32 *dsp_address) | ||
344 | { | ||
345 | struct rmm_header *head; | ||
346 | struct rmm_header *prevhead = NULL; | ||
347 | struct rmm_header *next; | ||
348 | u32 tmpalign; | ||
349 | u32 alignbytes; | ||
350 | u32 hsize; | ||
351 | u32 allocsize; | ||
352 | u32 addr; | ||
353 | |||
354 | alignbytes = (align == 0) ? 1 : align; | ||
355 | prevhead = NULL; | ||
356 | head = target->free_list[segid]; | ||
357 | |||
358 | do { | ||
359 | hsize = head->size; | ||
360 | next = head->next; | ||
361 | |||
362 | addr = head->addr; /* alloc from the bottom */ | ||
363 | |||
364 | /* align allocation */ | ||
365 | tmpalign = (u32) addr % alignbytes; | ||
366 | if (tmpalign != 0) | ||
367 | tmpalign = alignbytes - tmpalign; | ||
368 | |||
369 | allocsize = size + tmpalign; | ||
370 | |||
371 | if (hsize >= allocsize) { /* big enough */ | ||
372 | if (hsize == allocsize && prevhead != NULL) { | ||
373 | prevhead->next = next; | ||
374 | kfree(head); | ||
375 | } else { | ||
376 | head->size = hsize - allocsize; | ||
377 | head->addr += allocsize; | ||
378 | } | ||
379 | |||
380 | /* free up any hole created by alignment */ | ||
381 | if (tmpalign) | ||
382 | free_block(target, segid, addr, tmpalign); | ||
383 | |||
384 | *dsp_address = addr + tmpalign; | ||
385 | return true; | ||
386 | } | ||
387 | |||
388 | prevhead = head; | ||
389 | head = next; | ||
390 | |||
391 | } while (head != NULL); | ||
392 | |||
393 | return false; | ||
394 | } | ||
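The subtle part of alloc_block() is the alignment handling: the request is carved from the bottom of the first sufficiently large free block, and any bytes skipped for alignment are handed straight back to the free list. Below is a minimal, hedged sketch of just that arithmetic in standalone C; the names are illustrative, not the driver's.

#include <stdio.h>

/*
 * First-fit carve with alignment, in the spirit of alloc_block() above:
 * given a free block [addr, addr + size) and a request of req_size bytes
 * aligned to 'align' (0 means byte-aligned), compute the aligned start
 * and the alignment hole that would go back onto the free list.
 * Returns 1 on success, 0 if the block is too small.
 */
static int carve_aligned(unsigned int addr, unsigned int size,
			 unsigned int req_size, unsigned int align,
			 unsigned int *out_addr, unsigned int *out_pad)
{
	unsigned int bytes = align ? align : 1;
	unsigned int pad = addr % bytes;

	if (pad)
		pad = bytes - pad;
	if (size < req_size + pad)
		return 0;

	*out_addr = addr + pad;
	*out_pad = pad;
	return 1;
}

int main(void)
{
	unsigned int at, pad;

	if (carve_aligned(0x1003, 0x100, 0x40, 16, &at, &pad))
		printf("allocated at %#x, alignment hole %u bytes\n", at, pad);
	return 0;
}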
395 | |||
396 | /* | ||
397 | * ======== free_block ======== | ||
398 | * TO DO: free_block() allocates memory, which could result in failure. | ||
399 | * Could allocate an rmm_header in rmm_alloc(), to be kept in a pool. | ||
400 | * free_block() could use an rmm_header from the pool, freeing as blocks | ||
401 | * are coalesced. | ||
402 | */ | ||
403 | static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr, | ||
404 | u32 size) | ||
405 | { | ||
406 | struct rmm_header *head; | ||
407 | struct rmm_header *thead; | ||
408 | struct rmm_header *rhead; | ||
409 | bool ret = true; | ||
410 | |||
411 | /* Create a memory header to hold the newly freed block. */ | ||
412 | rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL); | ||
413 | if (rhead == NULL) { | ||
414 | ret = false; | ||
415 | } else { | ||
416 | /* search down the free list to find the right place for addr */ | ||
417 | head = target->free_list[segid]; | ||
418 | |||
419 | if (addr >= head->addr) { | ||
420 | while (head->next != NULL && addr > head->next->addr) | ||
421 | head = head->next; | ||
422 | |||
423 | thead = head->next; | ||
424 | |||
425 | head->next = rhead; | ||
426 | rhead->next = thead; | ||
427 | rhead->addr = addr; | ||
428 | rhead->size = size; | ||
429 | } else { | ||
430 | *rhead = *head; | ||
431 | head->next = rhead; | ||
432 | head->addr = addr; | ||
433 | head->size = size; | ||
434 | thead = rhead->next; | ||
435 | } | ||
436 | |||
437 | /* join with upper block, if possible */ | ||
438 | if (thead != NULL && (rhead->addr + rhead->size) == | ||
439 | thead->addr) { | ||
440 | head->next = rhead->next; | ||
441 | thead->size = size + thead->size; | ||
442 | thead->addr = addr; | ||
443 | kfree(rhead); | ||
444 | rhead = thead; | ||
445 | } | ||
446 | |||
447 | /* join with the lower block, if possible */ | ||
448 | if ((head->addr + head->size) == rhead->addr) { | ||
449 | head->next = rhead->next; | ||
450 | head->size = head->size + rhead->size; | ||
451 | kfree(rhead); | ||
452 | } | ||
453 | } | ||
454 | |||
455 | return ret; | ||
456 | } | ||
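free_block() keeps each free list sorted by address and merges the newly freed range with whichever neighbours touch it. The sketch below is a hedged, simplified restatement of that insert-and-coalesce idea in standalone C; it uses a dummy head node and invented types rather than the driver's rmm_header handling.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the driver's free-list header. */
struct hdr {
	struct hdr *next;
	unsigned int addr;
	unsigned int size;
};

/*
 * Insert a freed range into an address-ordered list and merge it with
 * adjacent neighbours.  A dummy head node removes the front-of-list
 * special case.
 */
static void insert_coalesce(struct hdr *dummy, unsigned int addr,
			    unsigned int size)
{
	struct hdr *prev = dummy, *n;

	while (prev->next && prev->next->addr < addr)
		prev = prev->next;
	n = prev->next;

	/* merge with the following block if they touch */
	if (n && addr + size == n->addr) {
		n->addr = addr;
		n->size += size;
	} else {
		struct hdr *h = malloc(sizeof(*h));

		if (!h)
			return;
		h->addr = addr;
		h->size = size;
		h->next = n;
		prev->next = h;
		n = h;
	}

	/* merge with the preceding block if they touch */
	if (prev != dummy && prev->addr + prev->size == n->addr) {
		prev->size += n->size;
		prev->next = n->next;
		free(n);
	}
}

int main(void)
{
	struct hdr dummy = { NULL, 0, 0 };

	insert_coalesce(&dummy, 0x1000, 0x100);
	insert_coalesce(&dummy, 0x1200, 0x100);
	insert_coalesce(&dummy, 0x1100, 0x100);	/* bridges the two above */

	for (struct hdr *h = dummy.next; h; h = h->next)
		printf("free: %#x..%#x\n", h->addr, h->addr + h->size);
	return 0;
}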
diff --git a/drivers/staging/tidspbridge/rmgr/strm.c b/drivers/staging/tidspbridge/rmgr/strm.c deleted file mode 100644 index b88b27bbe2e7..000000000000 --- a/drivers/staging/tidspbridge/rmgr/strm.c +++ /dev/null | |||
@@ -1,733 +0,0 @@ | |||
1 | /* | ||
2 | * strm.c | ||
3 | * | ||
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | ||
5 | * | ||
6 | * DSP/BIOS Bridge Stream Manager. | ||
7 | * | ||
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | ||
9 | * | ||
10 | * This package is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | ||
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | ||
16 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | ||
17 | */ | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | |||
21 | /* ----------------------------------- Host OS */ | ||
22 | #include <dspbridge/host_os.h> | ||
23 | |||
24 | /* ----------------------------------- DSP/BIOS Bridge */ | ||
25 | #include <dspbridge/dbdefs.h> | ||
26 | |||
27 | /* ----------------------------------- OS Adaptation Layer */ | ||
28 | #include <dspbridge/sync.h> | ||
29 | |||
30 | /* ----------------------------------- Bridge Driver */ | ||
31 | #include <dspbridge/dspdefs.h> | ||
32 | |||
33 | /* ----------------------------------- Resource Manager */ | ||
34 | #include <dspbridge/nodepriv.h> | ||
35 | |||
36 | /* ----------------------------------- Others */ | ||
37 | #include <dspbridge/cmm.h> | ||
38 | |||
39 | /* ----------------------------------- This */ | ||
40 | #include <dspbridge/strm.h> | ||
41 | |||
42 | #include <dspbridge/resourcecleanup.h> | ||
43 | |||
44 | /* ----------------------------------- Defines, Data Structures, Typedefs */ | ||
45 | #define DEFAULTTIMEOUT 10000 | ||
46 | #define DEFAULTNUMBUFS 2 | ||
47 | |||
48 | /* | ||
49 | * ======== strm_mgr ======== | ||
50 | * The strm_mgr contains device information needed to open the underlying | ||
51 | * channels of a stream. | ||
52 | */ | ||
53 | struct strm_mgr { | ||
54 | struct dev_object *dev_obj; /* Device for this processor */ | ||
55 | struct chnl_mgr *chnl_mgr; /* Channel manager */ | ||
56 | /* Function interface to Bridge driver */ | ||
57 | struct bridge_drv_interface *intf_fxns; | ||
58 | }; | ||
59 | |||
60 | /* | ||
61 | * ======== strm_object ======== | ||
62 | * This object is allocated in strm_open(). | ||
63 | */ | ||
64 | struct strm_object { | ||
65 | struct strm_mgr *strm_mgr_obj; | ||
66 | struct chnl_object *chnl_obj; | ||
67 | u32 dir; /* DSP_TONODE or DSP_FROMNODE */ | ||
68 | u32 timeout; | ||
69 | u32 num_bufs; /* Max # of bufs allowed in stream */ | ||
70 | u32 bufs_in_strm; /* Current # of bufs in stream */ | ||
71 | u32 bytes; /* bytes transferred since idled */ | ||
72 | /* STREAM_IDLE, STREAM_READY, ... */ | ||
73 | enum dsp_streamstate strm_state; | ||
74 | void *user_event; /* Saved for strm_get_info() */ | ||
75 | enum dsp_strmmode strm_mode; /* STRMMODE_[PROCCOPY][ZEROCOPY]... */ | ||
76 | u32 dma_chnl_id; /* DMA chnl id */ | ||
77 | u32 dma_priority; /* DMA priority:DMAPRI_[LOW][HIGH] */ | ||
78 | u32 segment_id; /* >0 is SM segment; =0 is local heap */ | ||
79 | u32 buf_alignment; /* Alignment for stream bufs */ | ||
80 | /* Stream's SM address translator */ | ||
81 | struct cmm_xlatorobject *xlator; | ||
82 | }; | ||
83 | |||
84 | /* ----------------------------------- Function Prototypes */ | ||
85 | static int delete_strm(struct strm_object *stream_obj); | ||
86 | |||
87 | /* | ||
88 | * ======== strm_allocate_buffer ======== | ||
89 | * Purpose: | ||
90 | * Allocates buffers for a stream. | ||
91 | */ | ||
92 | int strm_allocate_buffer(struct strm_res_object *strmres, u32 usize, | ||
93 | u8 **ap_buffer, u32 num_bufs, | ||
94 | struct process_context *pr_ctxt) | ||
95 | { | ||
96 | int status = 0; | ||
97 | u32 alloc_cnt = 0; | ||
98 | u32 i; | ||
99 | struct strm_object *stream_obj = strmres->stream; | ||
100 | |||
101 | if (stream_obj) { | ||
102 | /* | ||
103 | * Allocate from segment specified at time of stream open. | ||
104 | */ | ||
105 | if (usize == 0) | ||
106 | status = -EINVAL; | ||
107 | |||
108 | } else { | ||
109 | status = -EFAULT; | ||
110 | } | ||
111 | |||
112 | if (status) | ||
113 | goto func_end; | ||
114 | |||
115 | for (i = 0; i < num_bufs; i++) { | ||
116 | (void)cmm_xlator_alloc_buf(stream_obj->xlator, &ap_buffer[i], | ||
117 | usize); | ||
118 | if (ap_buffer[i] == NULL) { | ||
119 | status = -ENOMEM; | ||
120 | alloc_cnt = i; | ||
121 | break; | ||
122 | } | ||
123 | } | ||
124 | if (status) | ||
125 | strm_free_buffer(strmres, ap_buffer, alloc_cnt, pr_ctxt); | ||
126 | |||
127 | if (status) | ||
128 | goto func_end; | ||
129 | |||
130 | drv_proc_update_strm_res(num_bufs, strmres); | ||
131 | |||
132 | func_end: | ||
133 | return status; | ||
134 | } | ||
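strm_allocate_buffer() follows an all-or-nothing pattern: it allocates buffer by buffer, and on the first failure hands back the ones already obtained (via strm_free_buffer() with alloc_cnt). A hedged standalone sketch of that pattern, with malloc()/free() standing in for the cmm translator calls:

#include <stdio.h>
#include <stdlib.h>

/*
 * Allocate n buffers of 'size' bytes into bufs[]; on a partial failure,
 * free whatever was already obtained and fail as a whole.
 */
static int alloc_all_or_nothing(void **bufs, unsigned int n, size_t size)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i]) {
			while (i--) {
				free(bufs[i]);
				bufs[i] = NULL;
			}
			return -1;	/* the driver returns -ENOMEM here */
		}
	}
	return 0;
}

int main(void)
{
	void *bufs[4] = { NULL };

	if (alloc_all_or_nothing(bufs, 4, 4096) == 0) {
		printf("all 4 buffers allocated\n");
		for (unsigned int i = 0; i < 4; i++)
			free(bufs[i]);
	}
	return 0;
}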
135 | |||
136 | /* | ||
137 | * ======== strm_close ======== | ||
138 | * Purpose: | ||
139 | * Close a stream opened with strm_open(). | ||
140 | */ | ||
141 | int strm_close(struct strm_res_object *strmres, | ||
142 | struct process_context *pr_ctxt) | ||
143 | { | ||
144 | struct bridge_drv_interface *intf_fxns; | ||
145 | struct chnl_info chnl_info_obj; | ||
146 | int status = 0; | ||
147 | struct strm_object *stream_obj = strmres->stream; | ||
148 | |||
149 | if (!stream_obj) { | ||
150 | status = -EFAULT; | ||
151 | } else { | ||
152 | /* Have all buffers been reclaimed? If not, return | ||
153 | * -EPIPE */ | ||
154 | intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; | ||
155 | status = | ||
156 | (*intf_fxns->chnl_get_info) (stream_obj->chnl_obj, | ||
157 | &chnl_info_obj); | ||
158 | |||
159 | if (chnl_info_obj.cio_cs > 0 || chnl_info_obj.cio_reqs > 0) | ||
160 | status = -EPIPE; | ||
161 | else | ||
162 | status = delete_strm(stream_obj); | ||
163 | } | ||
164 | |||
165 | if (status) | ||
166 | goto func_end; | ||
167 | |||
168 | idr_remove(pr_ctxt->stream_id, strmres->id); | ||
169 | func_end: | ||
170 | dev_dbg(bridge, "%s: stream_obj: %p, status 0x%x\n", __func__, | ||
171 | stream_obj, status); | ||
172 | return status; | ||
173 | } | ||
174 | |||
175 | /* | ||
176 | * ======== strm_create ======== | ||
177 | * Purpose: | ||
178 | * Create a STRM manager object. | ||
179 | */ | ||
180 | int strm_create(struct strm_mgr **strm_man, | ||
181 | struct dev_object *dev_obj) | ||
182 | { | ||
183 | struct strm_mgr *strm_mgr_obj; | ||
184 | int status = 0; | ||
185 | |||
186 | *strm_man = NULL; | ||
187 | /* Allocate STRM manager object */ | ||
188 | strm_mgr_obj = kzalloc(sizeof(struct strm_mgr), GFP_KERNEL); | ||
189 | if (strm_mgr_obj == NULL) | ||
190 | status = -ENOMEM; | ||
191 | else | ||
192 | strm_mgr_obj->dev_obj = dev_obj; | ||
193 | |||
194 | /* Get Channel manager and Bridge function interface */ | ||
195 | if (!status) { | ||
196 | status = dev_get_chnl_mgr(dev_obj, &(strm_mgr_obj->chnl_mgr)); | ||
197 | if (!status) { | ||
198 | (void)dev_get_intf_fxns(dev_obj, | ||
199 | &(strm_mgr_obj->intf_fxns)); | ||
200 | } | ||
201 | } | ||
202 | |||
203 | if (!status) | ||
204 | *strm_man = strm_mgr_obj; | ||
205 | else | ||
206 | kfree(strm_mgr_obj); | ||
207 | |||
208 | return status; | ||
209 | } | ||
210 | |||
211 | /* | ||
212 | * ======== strm_delete ======== | ||
213 | * Purpose: | ||
214 | * Delete the STRM Manager Object. | ||
215 | */ | ||
216 | void strm_delete(struct strm_mgr *strm_mgr_obj) | ||
217 | { | ||
218 | kfree(strm_mgr_obj); | ||
219 | } | ||
220 | |||
221 | /* | ||
222 | * ======== strm_free_buffer ======== | ||
223 | * Purpose: | ||
224 | * Frees the buffers allocated for a stream. | ||
225 | */ | ||
226 | int strm_free_buffer(struct strm_res_object *strmres, u8 **ap_buffer, | ||
227 | u32 num_bufs, struct process_context *pr_ctxt) | ||
228 | { | ||
229 | int status = 0; | ||
230 | u32 i = 0; | ||
231 | struct strm_object *stream_obj = strmres->stream; | ||
232 | |||
233 | if (!stream_obj) | ||
234 | status = -EFAULT; | ||
235 | |||
236 | if (!status) { | ||
237 | for (i = 0; i < num_bufs; i++) { | ||
238 | status = | ||
239 | cmm_xlator_free_buf(stream_obj->xlator, | ||
240 | ap_buffer[i]); | ||
241 | if (status) | ||
242 | break; | ||
243 | ap_buffer[i] = NULL; | ||
244 | } | ||
245 | } | ||
246 | drv_proc_update_strm_res(num_bufs - i, strmres); | ||
247 | |||
248 | return status; | ||
249 | } | ||
250 | |||
251 | /* | ||
252 | * ======== strm_get_info ======== | ||
253 | * Purpose: | ||
254 | * Retrieves information about a stream. | ||
255 | */ | ||
256 | int strm_get_info(struct strm_object *stream_obj, | ||
257 | struct stream_info *stream_info, | ||
258 | u32 stream_info_size) | ||
259 | { | ||
260 | struct bridge_drv_interface *intf_fxns; | ||
261 | struct chnl_info chnl_info_obj; | ||
262 | int status = 0; | ||
263 | void *virt_base = NULL; /* NULL if no SM used */ | ||
264 | |||
265 | if (!stream_obj) { | ||
266 | status = -EFAULT; | ||
267 | } else { | ||
268 | if (stream_info_size < sizeof(struct stream_info)) { | ||
269 | /* size of users info */ | ||
270 | status = -EINVAL; | ||
271 | } | ||
272 | } | ||
273 | if (status) | ||
274 | goto func_end; | ||
275 | |||
276 | intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; | ||
277 | status = | ||
278 | (*intf_fxns->chnl_get_info) (stream_obj->chnl_obj, | ||
279 | &chnl_info_obj); | ||
280 | if (status) | ||
281 | goto func_end; | ||
282 | |||
283 | if (stream_obj->xlator) { | ||
284 | /* We have a translator */ | ||
285 | cmm_xlator_info(stream_obj->xlator, (u8 **) &virt_base, 0, | ||
286 | stream_obj->segment_id, false); | ||
287 | } | ||
288 | stream_info->segment_id = stream_obj->segment_id; | ||
289 | stream_info->strm_mode = stream_obj->strm_mode; | ||
290 | stream_info->virt_base = virt_base; | ||
291 | stream_info->user_strm->number_bufs_allowed = stream_obj->num_bufs; | ||
292 | stream_info->user_strm->number_bufs_in_stream = chnl_info_obj.cio_cs + | ||
293 | chnl_info_obj.cio_reqs; | ||
294 | /* # of bytes transferred since last call to DSPStream_Idle() */ | ||
295 | stream_info->user_strm->number_bytes = chnl_info_obj.bytes_tx; | ||
296 | stream_info->user_strm->sync_object_handle = chnl_info_obj.event_obj; | ||
297 | /* Determine stream state based on channel state and info */ | ||
298 | if (chnl_info_obj.state & CHNL_STATEEOS) { | ||
299 | stream_info->user_strm->ss_stream_state = STREAM_DONE; | ||
300 | } else { | ||
301 | if (chnl_info_obj.cio_cs > 0) | ||
302 | stream_info->user_strm->ss_stream_state = STREAM_READY; | ||
303 | else if (chnl_info_obj.cio_reqs > 0) | ||
304 | stream_info->user_strm->ss_stream_state = | ||
305 | STREAM_PENDING; | ||
306 | else | ||
307 | stream_info->user_strm->ss_stream_state = STREAM_IDLE; | ||
308 | |||
309 | } | ||
310 | func_end: | ||
311 | return status; | ||
312 | } | ||
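The stream state computed at the end of strm_get_info() is derived purely from the channel info: end-of-stream first, then completed I/O, then outstanding requests, else idle. Restated as a hedged standalone helper (the enum here is illustrative, not the driver's dsp_streamstate):

#include <stdio.h>
#include <stdbool.h>

enum stream_state { STATE_IDLE, STATE_READY, STATE_PENDING, STATE_DONE };

/*
 * Derive a stream state from channel info: end-of-stream wins, then
 * completed I/O (READY), then outstanding requests (PENDING), else IDLE.
 */
static enum stream_state stream_state_from_chnl(bool eos,
						unsigned int completed,
						unsigned int requests)
{
	if (eos)
		return STATE_DONE;
	if (completed > 0)
		return STATE_READY;
	if (requests > 0)
		return STATE_PENDING;
	return STATE_IDLE;
}

int main(void)
{
	printf("%d\n", stream_state_from_chnl(false, 0, 3)); /* STATE_PENDING */
	return 0;
}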
313 | |||
314 | /* | ||
315 | * ======== strm_idle ======== | ||
316 | * Purpose: | ||
317 | * Idles a particular stream. | ||
318 | */ | ||
319 | int strm_idle(struct strm_object *stream_obj, bool flush_data) | ||
320 | { | ||
321 | struct bridge_drv_interface *intf_fxns; | ||
322 | int status = 0; | ||
323 | |||
324 | if (!stream_obj) { | ||
325 | status = -EFAULT; | ||
326 | } else { | ||
327 | intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; | ||
328 | |||
329 | status = (*intf_fxns->chnl_idle) (stream_obj->chnl_obj, | ||
330 | stream_obj->timeout, | ||
331 | flush_data); | ||
332 | } | ||
333 | |||
334 | dev_dbg(bridge, "%s: stream_obj: %p flush_data: 0x%x status: 0x%x\n", | ||
335 | __func__, stream_obj, flush_data, status); | ||
336 | return status; | ||
337 | } | ||
338 | |||
339 | /* | ||
340 | * ======== strm_issue ======== | ||
341 | * Purpose: | ||
342 | * Issues a buffer on a stream | ||
343 | */ | ||
344 | int strm_issue(struct strm_object *stream_obj, u8 *pbuf, u32 ul_bytes, | ||
345 | u32 ul_buf_size, u32 dw_arg) | ||
346 | { | ||
347 | struct bridge_drv_interface *intf_fxns; | ||
348 | int status = 0; | ||
349 | void *tmp_buf = NULL; | ||
350 | |||
351 | if (!stream_obj) { | ||
352 | status = -EFAULT; | ||
353 | } else { | ||
354 | intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; | ||
355 | |||
356 | if (stream_obj->segment_id != 0) { | ||
357 | tmp_buf = cmm_xlator_translate(stream_obj->xlator, | ||
358 | (void *)pbuf, | ||
359 | CMM_VA2DSPPA); | ||
360 | if (tmp_buf == NULL) | ||
361 | status = -ESRCH; | ||
362 | |||
363 | } | ||
364 | if (!status) { | ||
365 | status = (*intf_fxns->chnl_add_io_req) | ||
366 | (stream_obj->chnl_obj, pbuf, ul_bytes, ul_buf_size, | ||
367 | (u32) tmp_buf, dw_arg); | ||
368 | } | ||
369 | if (status == -EIO) | ||
370 | status = -ENOSR; | ||
371 | } | ||
372 | |||
373 | dev_dbg(bridge, "%s: stream_obj: %p pbuf: %p ul_bytes: 0x%x dw_arg:" | ||
374 | " 0x%x status: 0x%x\n", __func__, stream_obj, pbuf, | ||
375 | ul_bytes, dw_arg, status); | ||
376 | return status; | ||
377 | } | ||
378 | |||
379 | /* | ||
380 | * ======== strm_open ======== | ||
381 | * Purpose: | ||
382 | * Open a stream for sending/receiving data buffers to/from a task or | ||
383 | * XDAIS socket node on the DSP. | ||
384 | */ | ||
385 | int strm_open(struct node_object *hnode, u32 dir, u32 index, | ||
386 | struct strm_attr *pattr, | ||
387 | struct strm_res_object **strmres, | ||
388 | struct process_context *pr_ctxt) | ||
389 | { | ||
390 | struct strm_mgr *strm_mgr_obj; | ||
391 | struct bridge_drv_interface *intf_fxns; | ||
392 | u32 ul_chnl_id; | ||
393 | struct strm_object *strm_obj = NULL; | ||
394 | s8 chnl_mode; | ||
395 | struct chnl_attr chnl_attr_obj; | ||
396 | int status = 0; | ||
397 | struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */ | ||
398 | |||
399 | void *stream_res; | ||
400 | |||
401 | *strmres = NULL; | ||
402 | if (dir != DSP_TONODE && dir != DSP_FROMNODE) { | ||
403 | status = -EPERM; | ||
404 | } else { | ||
405 | /* Get the channel id from the node (set in node_connect()) */ | ||
406 | status = node_get_channel_id(hnode, dir, index, &ul_chnl_id); | ||
407 | } | ||
408 | if (!status) | ||
409 | status = node_get_strm_mgr(hnode, &strm_mgr_obj); | ||
410 | |||
411 | if (!status) { | ||
412 | strm_obj = kzalloc(sizeof(struct strm_object), GFP_KERNEL); | ||
413 | if (strm_obj == NULL) { | ||
414 | status = -ENOMEM; | ||
415 | } else { | ||
416 | strm_obj->strm_mgr_obj = strm_mgr_obj; | ||
417 | strm_obj->dir = dir; | ||
418 | strm_obj->strm_state = STREAM_IDLE; | ||
419 | strm_obj->user_event = pattr->user_event; | ||
420 | if (pattr->stream_attr_in != NULL) { | ||
421 | strm_obj->timeout = | ||
422 | pattr->stream_attr_in->timeout; | ||
423 | strm_obj->num_bufs = | ||
424 | pattr->stream_attr_in->num_bufs; | ||
425 | strm_obj->strm_mode = | ||
426 | pattr->stream_attr_in->strm_mode; | ||
427 | strm_obj->segment_id = | ||
428 | pattr->stream_attr_in->segment_id; | ||
429 | strm_obj->buf_alignment = | ||
430 | pattr->stream_attr_in->buf_alignment; | ||
431 | strm_obj->dma_chnl_id = | ||
432 | pattr->stream_attr_in->dma_chnl_id; | ||
433 | strm_obj->dma_priority = | ||
434 | pattr->stream_attr_in->dma_priority; | ||
435 | chnl_attr_obj.uio_reqs = | ||
436 | pattr->stream_attr_in->num_bufs; | ||
437 | } else { | ||
438 | strm_obj->timeout = DEFAULTTIMEOUT; | ||
439 | strm_obj->num_bufs = DEFAULTNUMBUFS; | ||
440 | strm_obj->strm_mode = STRMMODE_PROCCOPY; | ||
441 | strm_obj->segment_id = 0; /* local mem */ | ||
442 | strm_obj->buf_alignment = 0; | ||
443 | strm_obj->dma_chnl_id = 0; | ||
444 | strm_obj->dma_priority = 0; | ||
445 | chnl_attr_obj.uio_reqs = DEFAULTNUMBUFS; | ||
446 | } | ||
447 | chnl_attr_obj.reserved1 = NULL; | ||
448 | /* DMA chnl flush timeout */ | ||
449 | chnl_attr_obj.reserved2 = strm_obj->timeout; | ||
450 | chnl_attr_obj.event_obj = NULL; | ||
451 | if (pattr->user_event != NULL) | ||
452 | chnl_attr_obj.event_obj = pattr->user_event; | ||
453 | |||
454 | } | ||
455 | } | ||
456 | if (status) | ||
457 | goto func_cont; | ||
458 | |||
459 | if ((pattr->virt_base == NULL) || !(pattr->virt_size > 0)) | ||
460 | goto func_cont; | ||
461 | |||
462 | /* No System DMA */ | ||
463 | /* Get the shared mem mgr for this stream's dev object */ | ||
464 | status = dev_get_cmm_mgr(strm_mgr_obj->dev_obj, &hcmm_mgr); | ||
465 | if (!status) { | ||
466 | /* Allocate an SM addr translator for this strm. */ | ||
467 | status = cmm_xlator_create(&strm_obj->xlator, hcmm_mgr, NULL); | ||
468 | if (!status) { | ||
469 | /* Set translators Virt Addr attributes */ | ||
470 | status = cmm_xlator_info(strm_obj->xlator, | ||
471 | (u8 **) &pattr->virt_base, | ||
472 | pattr->virt_size, | ||
473 | strm_obj->segment_id, true); | ||
474 | } | ||
475 | } | ||
476 | func_cont: | ||
477 | if (!status) { | ||
478 | /* Open channel */ | ||
479 | chnl_mode = (dir == DSP_TONODE) ? | ||
480 | CHNL_MODETODSP : CHNL_MODEFROMDSP; | ||
481 | intf_fxns = strm_mgr_obj->intf_fxns; | ||
482 | status = (*intf_fxns->chnl_open) (&(strm_obj->chnl_obj), | ||
483 | strm_mgr_obj->chnl_mgr, | ||
484 | chnl_mode, ul_chnl_id, | ||
485 | &chnl_attr_obj); | ||
486 | if (status) { | ||
487 | /* | ||
488 | * override non-returnable status codes so we return | ||
489 | * something documented | ||
490 | */ | ||
491 | if (status != -ENOMEM && status != | ||
492 | -EINVAL && status != -EPERM) { | ||
493 | /* | ||
494 | * We got a status that's not return-able. | ||
495 | * Assert that we got something we were | ||
496 | * expecting (-EFAULT isn't acceptable, | ||
497 | * strm_mgr_obj->chnl_mgr better be valid or we | ||
498 | * assert here), and then return -EPERM. | ||
499 | */ | ||
500 | status = -EPERM; | ||
501 | } | ||
502 | } | ||
503 | } | ||
504 | if (!status) { | ||
505 | status = drv_proc_insert_strm_res_element(strm_obj, | ||
506 | &stream_res, pr_ctxt); | ||
507 | if (status) | ||
508 | delete_strm(strm_obj); | ||
509 | else | ||
510 | *strmres = (struct strm_res_object *)stream_res; | ||
511 | } else { | ||
512 | (void)delete_strm(strm_obj); | ||
513 | } | ||
514 | |||
515 | dev_dbg(bridge, "%s: hnode: %p dir: 0x%x index: 0x%x pattr: %p " | ||
516 | "strmres: %p status: 0x%x\n", __func__, | ||
517 | hnode, dir, index, pattr, strmres, status); | ||
518 | return status; | ||
519 | } | ||
520 | |||
521 | /* | ||
522 | * ======== strm_reclaim ======== | ||
523 | * Purpose: | ||
524 | * Reclaims a buffer from a stream. | ||
525 | */ | ||
526 | int strm_reclaim(struct strm_object *stream_obj, u8 **buf_ptr, | ||
527 | u32 *nbytes, u32 *buff_size, u32 *pdw_arg) | ||
528 | { | ||
529 | struct bridge_drv_interface *intf_fxns; | ||
530 | struct chnl_ioc chnl_ioc_obj; | ||
531 | int status = 0; | ||
532 | void *tmp_buf = NULL; | ||
533 | |||
534 | if (!stream_obj) { | ||
535 | status = -EFAULT; | ||
536 | goto func_end; | ||
537 | } | ||
538 | intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; | ||
539 | |||
540 | status = | ||
541 | (*intf_fxns->chnl_get_ioc) (stream_obj->chnl_obj, | ||
542 | stream_obj->timeout, | ||
543 | &chnl_ioc_obj); | ||
544 | if (!status) { | ||
545 | *nbytes = chnl_ioc_obj.byte_size; | ||
546 | if (buff_size) | ||
547 | *buff_size = chnl_ioc_obj.buf_size; | ||
548 | |||
549 | *pdw_arg = chnl_ioc_obj.arg; | ||
550 | if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) { | ||
551 | if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) { | ||
552 | status = -ETIME; | ||
553 | } else { | ||
554 | /* Allow reclaims after idle to succeed */ | ||
555 | if (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj)) | ||
556 | status = -EPERM; | ||
557 | |||
558 | } | ||
559 | } | ||
560 | /* Translate zerocopy buffer if channel not canceled. */ | ||
561 | if (!status | ||
562 | && (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj)) | ||
563 | && (stream_obj->strm_mode == STRMMODE_ZEROCOPY)) { | ||
564 | /* | ||
565 | * This is a zero-copy channel so chnl_ioc_obj.buf | ||
566 | * contains the DSP address of SM. We need to | ||
567 | * translate it to a virtual address for the user | ||
568 | * thread to access. | ||
569 | * Note: Could add CMM_DSPPA2VA to CMM in the future. | ||
570 | */ | ||
571 | tmp_buf = cmm_xlator_translate(stream_obj->xlator, | ||
572 | chnl_ioc_obj.buf, | ||
573 | CMM_DSPPA2PA); | ||
574 | if (tmp_buf != NULL) { | ||
575 | /* now convert this GPP Pa to Va */ | ||
576 | tmp_buf = cmm_xlator_translate(stream_obj-> | ||
577 | xlator, | ||
578 | tmp_buf, | ||
579 | CMM_PA2VA); | ||
580 | } | ||
581 | if (tmp_buf == NULL) | ||
582 | status = -ESRCH; | ||
583 | |||
584 | chnl_ioc_obj.buf = tmp_buf; | ||
585 | } | ||
586 | *buf_ptr = chnl_ioc_obj.buf; | ||
587 | } | ||
588 | func_end: | ||
589 | dev_dbg(bridge, "%s: stream_obj: %p buf_ptr: %p nbytes: %p " | ||
590 | "pdw_arg: %p status 0x%x\n", __func__, stream_obj, | ||
591 | buf_ptr, nbytes, pdw_arg, status); | ||
592 | return status; | ||
593 | } | ||
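The zero-copy branch of strm_reclaim() translates the returned buffer in two hops: DSP physical address to GPP physical address, then GPP physical to user virtual. The cmm translator itself lives elsewhere, so the following is only a hedged sketch of an offset-based translator of that general shape, with invented types and base addresses:

#include <stdio.h>
#include <stdint.h>

/* Illustrative translator state: one shared-memory segment, three views. */
struct seg_xlator {
	uint32_t dsp_pa_base;	/* base as the DSP addresses it */
	uint32_t gpp_pa_base;	/* base as a host physical address */
	uintptr_t gpp_va_base;	/* base as mapped into the user process */
	uint32_t size;
};

/* Hop 1: DSP physical -> host physical (CMM_DSPPA2PA in the driver). */
static int dsp_pa_to_gpp_pa(const struct seg_xlator *x, uint32_t dsp_pa,
			    uint32_t *gpp_pa)
{
	if (dsp_pa < x->dsp_pa_base || dsp_pa - x->dsp_pa_base >= x->size)
		return -1;
	*gpp_pa = x->gpp_pa_base + (dsp_pa - x->dsp_pa_base);
	return 0;
}

/* Hop 2: host physical -> user virtual (CMM_PA2VA in the driver). */
static int gpp_pa_to_va(const struct seg_xlator *x, uint32_t gpp_pa,
			uintptr_t *va)
{
	if (gpp_pa < x->gpp_pa_base || gpp_pa - x->gpp_pa_base >= x->size)
		return -1;
	*va = x->gpp_va_base + (gpp_pa - x->gpp_pa_base);
	return 0;
}

int main(void)
{
	struct seg_xlator x = { 0x20000000, 0x80000000, 0x40000000, 0x100000 };
	uint32_t gpp_pa;
	uintptr_t va;

	if (!dsp_pa_to_gpp_pa(&x, 0x20001000, &gpp_pa) &&
	    !gpp_pa_to_va(&x, gpp_pa, &va))
		printf("va = %#lx\n", (unsigned long)va);
	return 0;
}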
594 | |||
595 | /* | ||
596 | * ======== strm_register_notify ======== | ||
597 | * Purpose: | ||
598 | * Register to be notified on specific events for this stream. | ||
599 | */ | ||
600 | int strm_register_notify(struct strm_object *stream_obj, u32 event_mask, | ||
601 | u32 notify_type, struct dsp_notification | ||
602 | *hnotification) | ||
603 | { | ||
604 | struct bridge_drv_interface *intf_fxns; | ||
605 | int status = 0; | ||
606 | |||
607 | if (!stream_obj) { | ||
608 | status = -EFAULT; | ||
609 | } else if ((event_mask & ~((DSP_STREAMIOCOMPLETION) | | ||
610 | DSP_STREAMDONE)) != 0) { | ||
611 | status = -EINVAL; | ||
612 | } else { | ||
613 | if (notify_type != DSP_SIGNALEVENT) | ||
614 | status = -ENOSYS; | ||
615 | |||
616 | } | ||
617 | if (!status) { | ||
618 | intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; | ||
619 | |||
620 | status = | ||
621 | (*intf_fxns->chnl_register_notify) (stream_obj-> | ||
622 | chnl_obj, | ||
623 | event_mask, | ||
624 | notify_type, | ||
625 | hnotification); | ||
626 | } | ||
627 | |||
628 | return status; | ||
629 | } | ||
630 | |||
631 | /* | ||
632 | * ======== strm_select ======== | ||
633 | * Purpose: | ||
634 | * Selects a ready stream. | ||
635 | */ | ||
636 | int strm_select(struct strm_object **strm_tab, u32 strms, | ||
637 | u32 *pmask, u32 utimeout) | ||
638 | { | ||
639 | u32 index; | ||
640 | struct chnl_info chnl_info_obj; | ||
641 | struct bridge_drv_interface *intf_fxns; | ||
642 | struct sync_object **sync_events = NULL; | ||
643 | u32 i; | ||
644 | int status = 0; | ||
645 | |||
646 | *pmask = 0; | ||
647 | for (i = 0; i < strms; i++) { | ||
648 | if (!strm_tab[i]) { | ||
649 | status = -EFAULT; | ||
650 | break; | ||
651 | } | ||
652 | } | ||
653 | if (status) | ||
654 | goto func_end; | ||
655 | |||
656 | /* Determine which channels have IO ready */ | ||
657 | for (i = 0; i < strms; i++) { | ||
658 | intf_fxns = strm_tab[i]->strm_mgr_obj->intf_fxns; | ||
659 | status = (*intf_fxns->chnl_get_info) (strm_tab[i]->chnl_obj, | ||
660 | &chnl_info_obj); | ||
661 | if (status) { | ||
662 | break; | ||
663 | } else { | ||
664 | if (chnl_info_obj.cio_cs > 0) | ||
665 | *pmask |= (1 << i); | ||
666 | |||
667 | } | ||
668 | } | ||
669 | if (!status && utimeout > 0 && *pmask == 0) { | ||
670 | /* Non-zero timeout */ | ||
671 | sync_events = kmalloc(strms * sizeof(struct sync_object *), | ||
672 | GFP_KERNEL); | ||
673 | |||
674 | if (sync_events == NULL) { | ||
675 | status = -ENOMEM; | ||
676 | } else { | ||
677 | for (i = 0; i < strms; i++) { | ||
678 | intf_fxns = | ||
679 | strm_tab[i]->strm_mgr_obj->intf_fxns; | ||
680 | status = (*intf_fxns->chnl_get_info) | ||
681 | (strm_tab[i]->chnl_obj, &chnl_info_obj); | ||
682 | if (status) | ||
683 | break; | ||
684 | else | ||
685 | sync_events[i] = | ||
686 | chnl_info_obj.sync_event; | ||
687 | |||
688 | } | ||
689 | } | ||
690 | if (!status) { | ||
691 | status = | ||
692 | sync_wait_on_multiple_events(sync_events, strms, | ||
693 | utimeout, &index); | ||
694 | if (!status) { | ||
695 | /* Since we waited on the event, we have to | ||
696 | * reset it */ | ||
697 | sync_set_event(sync_events[index]); | ||
698 | *pmask = 1 << index; | ||
699 | } | ||
700 | } | ||
701 | } | ||
702 | func_end: | ||
703 | kfree(sync_events); | ||
704 | |||
705 | return status; | ||
706 | } | ||
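strm_select() first polls each stream's channel and records the ready ones as bits in *pmask (bit i for stream i), only falling back to waiting on sync events when nothing is ready and a timeout was given. The mask-building step, as a hedged standalone sketch:

#include <stdio.h>

/*
 * Build a bitmask of "ready" streams the way strm_select() does:
 * bit i is set when stream i has at least one completed I/O.
 */
static unsigned int ready_mask(const unsigned int *completed,
			       unsigned int nstreams)
{
	unsigned int mask = 0, i;

	for (i = 0; i < nstreams; i++)
		if (completed[i] > 0)
			mask |= 1u << i;
	return mask;
}

int main(void)
{
	unsigned int completed[] = { 0, 2, 0, 1 };

	printf("mask %#x\n", ready_mask(completed, 4));	/* prints 0xa */
	return 0;
}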
707 | |||
708 | /* | ||
709 | * ======== delete_strm ======== | ||
710 | * Purpose: | ||
711 | * Frees the resources allocated for a stream. | ||
712 | */ | ||
713 | static int delete_strm(struct strm_object *stream_obj) | ||
714 | { | ||
715 | struct bridge_drv_interface *intf_fxns; | ||
716 | int status = 0; | ||
717 | |||
718 | if (stream_obj) { | ||
719 | if (stream_obj->chnl_obj) { | ||
720 | intf_fxns = stream_obj->strm_mgr_obj->intf_fxns; | ||
721 | /* Channel close can fail only if the channel handle | ||
722 | * is invalid. */ | ||
723 | status = (*intf_fxns->chnl_close) | ||
724 | (stream_obj->chnl_obj); | ||
725 | } | ||
726 | /* Free all SM address translator resources */ | ||
727 | kfree(stream_obj->xlator); | ||
728 | kfree(stream_obj); | ||
729 | } else { | ||
730 | status = -EFAULT; | ||
731 | } | ||
732 | return status; | ||
733 | } | ||