200 files changed, 26934 insertions, 8 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX index 0f3e8bbab8d7..45b3df936d2f 100644 --- a/Documentation/00-INDEX +++ b/Documentation/00-INDEX | |||
@@ -299,6 +299,8 @@ memory-hotplug.txt | |||
299 | - Hotpluggable memory support, how to use and current status. | 299 | - Hotpluggable memory support, how to use and current status. |
300 | memory.txt | 300 | memory.txt |
301 | - info on typical Linux memory problems. | 301 | - info on typical Linux memory problems. |
302 | metag/ | ||
303 | - directory with info about Linux on Meta architecture. | ||
302 | mips/ | 304 | mips/ |
303 | - directory with info about Linux on MIPS architecture. | 305 | - directory with info about Linux on MIPS architecture. |
304 | misc-devices/ | 306 | misc-devices/ |
diff --git a/Documentation/devicetree/bindings/metag/meta-intc.txt b/Documentation/devicetree/bindings/metag/meta-intc.txt new file mode 100644 index 000000000000..8c47dcbfabc6 --- /dev/null +++ b/Documentation/devicetree/bindings/metag/meta-intc.txt | |||
@@ -0,0 +1,82 @@ | |||
1 | * Meta External Trigger Controller Binding | ||
2 | |||
3 | This binding specifies what properties must be available in the device tree | ||
4 | representation of a Meta external trigger controller. | ||
5 | |||
6 | Required properties: | ||
7 | |||
8 | - compatible: Specifies the compatibility list for the interrupt controller. | ||
9 | The type shall be <string> and the value shall include "img,meta-intc". | ||
10 | |||
11 | - num-banks: Specifies the number of interrupt banks (each of which can | ||
12 | handle 32 interrupt sources). | ||
13 | |||
14 | - interrupt-controller: The presence of this property identifies the node | ||
14 | as an interrupt controller. No property value shall be defined. | ||
16 | |||
17 | - #interrupt-cells: Specifies the number of cells needed to encode an | ||
18 | interrupt source. The type shall be a <u32> and the value shall be 2. | ||
19 | |||
20 | - #address-cells: Specifies the number of cells needed to encode an | ||
21 | address. The type shall be <u32> and the value shall be 0. As such, | ||
22 | 'interrupt-map' nodes do not have to specify a parent unit address. | ||
23 | |||
24 | Optional properties: | ||
25 | |||
26 | - no-mask: The controller doesn't have any mask registers. | ||
27 | |||
28 | * Interrupt Specifier Definition | ||
29 | |||
30 | Interrupt specifiers consist of 2 cells encoded as follows: | ||
31 | |||
32 | - <1st-cell>: The interrupt-number that identifies the interrupt source. | ||
33 | |||
34 | - <2nd-cell>: The Linux interrupt flags containing level-sense information, | ||
35 | encoded as follows: | ||
36 | 1 = edge-triggered | ||
37 | 4 = level-sensitive | ||
38 | |||
39 | * Examples | ||
40 | |||
41 | Example 1: | ||
42 | |||
43 | /* | ||
44 | * Meta external trigger block | ||
45 | */ | ||
46 | intc: intc { | ||
47 | // This is an interrupt controller node. | ||
48 | interrupt-controller; | ||
49 | |||
50 | // No address cells so that 'interrupt-map' nodes which | ||
51 | // reference this interrupt controller node do not need a parent | ||
52 | // address specifier. | ||
53 | #address-cells = <0>; | ||
54 | |||
55 | // Two cells to encode interrupt sources. | ||
56 | #interrupt-cells = <2>; | ||
57 | |||
58 | // Number of interrupt banks | ||
59 | num-banks = <2>; | ||
60 | |||
61 | // No HWMASKEXT is available (specify this on Chorus2 and Comet ES1) | ||
62 | no-mask; | ||
63 | |||
64 | // Compatible with Meta hardware trigger block. | ||
65 | compatible = "img,meta-intc"; | ||
66 | }; | ||
67 | |||
68 | Example 2: | ||
69 | |||
70 | /* | ||
71 | * An interrupt generating device that is wired to a Meta external | ||
72 | * trigger block. | ||
73 | */ | ||
74 | uart1: uart@0x02004c00 { | ||
75 | // Interrupt source '5' that is level-sensitive. | ||
76 | // Note that there are only two cells as specified in the | ||
77 | // interrupt parent's '#interrupt-cells' property. | ||
78 | interrupts = <5 4 /* level */>; | ||
79 | |||
80 | // The interrupt controller that this device is wired to. | ||
81 | interrupt-parent = <&intc>; | ||
82 | }; | ||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 3a54fca730c0..4609e81dbc37 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -978,6 +978,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
978 | If specified, z/VM IUCV HVC accepts connections | 978 | If specified, z/VM IUCV HVC accepts connections |
979 | from listed z/VM user IDs only. | 979 | from listed z/VM user IDs only. |
980 | 980 | ||
981 | hwthread_map= [METAG] Comma-separated list of Linux CPU ID to | ||
982 | hardware thread ID mappings. | ||
983 | Format: <cpu>:<hwthread> | ||
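			For example, a hypothetical mapping (values chosen
			purely for illustration) of Linux CPU 0 to hardware
			thread 1 and Linux CPU 1 to hardware thread 2:
			hwthread_map=0:1,1:2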
984 | |||
981 | keep_bootcon [KNL] | 985 | keep_bootcon [KNL] |
982 | Do not unregister boot console at start. This is only | 986 | Do not unregister boot console at start. This is only |
983 | useful for debugging when something happens in the window | 987 | useful for debugging when something happens in the window |
diff --git a/Documentation/metag/00-INDEX b/Documentation/metag/00-INDEX new file mode 100644 index 000000000000..db11c513bd5c --- /dev/null +++ b/Documentation/metag/00-INDEX | |||
@@ -0,0 +1,4 @@ | |||
1 | 00-INDEX | ||
2 | - this file | ||
3 | kernel-ABI.txt | ||
4 | - Documents metag ABI details | ||
diff --git a/Documentation/metag/kernel-ABI.txt b/Documentation/metag/kernel-ABI.txt new file mode 100644 index 000000000000..7b8dee83b9c1 --- /dev/null +++ b/Documentation/metag/kernel-ABI.txt | |||
@@ -0,0 +1,256 @@ | |||
1 | ========================== | ||
2 | KERNEL ABIS FOR METAG ARCH | ||
3 | ========================== | ||
4 | |||
5 | This document describes the Linux ABIs for the metag architecture, and has the | ||
6 | following sections: | ||
7 | |||
8 | (*) Outline of registers | ||
9 | (*) Userland registers | ||
10 | (*) Kernel registers | ||
11 | (*) System call ABI | ||
12 | (*) Calling conventions | ||
13 | |||
14 | |||
15 | ==================== | ||
16 | OUTLINE OF REGISTERS | ||
17 | ==================== | ||
18 | |||
19 | The main Meta core registers are arranged in units: | ||
20 | |||
21 | UNIT TYPE DESCRIPTION GP EXT PRIV GLOBAL | ||
22 | ======= ======= =============== ======= ======= ======= ======= | ||
23 | CT Special Control unit | ||
24 | D0 General Data unit 0 0-7 8-15 16-31 16-31 | ||
25 | D1 General Data unit 1 0-7 8-15 16-31 16-31 | ||
26 | A0 General Address unit 0 0-3 4-7 8-15 8-15 | ||
27 | A1 General Address unit 1 0-3 4-7 8-15 8-15 | ||
28 | PC Special PC unit 0 1 | ||
29 | PORT Special Ports | ||
30 | TR Special Trigger unit 0-7 | ||
31 | TT Special Trace unit 0-5 | ||
32 | FX General FP unit 0-15 | ||
33 | |||
34 | GP registers form part of the main context. | ||
35 | |||
36 | Extended context registers (EXT) may not be present on all hardware threads and | ||
37 | can be context switched if support is enabled and the appropriate bits are set | ||
38 | in e.g. the D0.8 register to indicate what extended state to preserve. | ||
39 | |||
40 | Global registers are shared between threads and are privilege protected. | ||
41 | |||
42 | See arch/metag/include/asm/metag_regs.h for definitions relating to core | ||
43 | registers and the fields and bits they contain. See the TRMs for further details | ||
44 | about special registers. | ||
45 | |||
46 | Several special registers are preserved in the main context; these are the | ||
47 | interesting ones: | ||
48 | |||
49 | REG (ALIAS) PURPOSE | ||
50 | ======================= =============================================== | ||
51 | CT.1 (TXMODE) Processor mode bits (particularly for DSP) | ||
52 | CT.2 (TXSTATUS) Condition flags and LSM_STEP (MGET/MSET step) | ||
53 | CT.3 (TXRPT) Branch repeat counter | ||
54 | PC.0 (PC) Program counter | ||
55 | |||
56 | Some of the general registers have special purposes in the ABI and therefore | ||
57 | have aliases: | ||
58 | |||
59 | D0 REG (ALIAS) PURPOSE D1 REG (ALIAS) PURPOSE | ||
60 | =============== =============== =============== ======================= | ||
61 | D0.0 (D0Re0) 32bit result D1.0 (D1Re0) Top half of 64bit result | ||
62 | D0.1 (D0Ar6) Argument 6 D1.1 (D1Ar5) Argument 5 | ||
63 | D0.2 (D0Ar4) Argument 4 D1.2 (D1Ar3) Argument 3 | ||
64 | D0.3 (D0Ar2) Argument 2 D1.3 (D1Ar1) Argument 1 | ||
65 | D0.4 (D0FrT) Frame temp D1.4 (D1RtP) Return pointer | ||
66 | D0.5 Call preserved D1.5 Call preserved | ||
67 | D0.6 Call preserved D1.6 Call preserved | ||
68 | D0.7 Call preserved D1.7 Call preserved | ||
69 | |||
70 | A0 REG (ALIAS) PURPOSE A1 REG (ALIAS) PURPOSE | ||
71 | =============== =============== =============== ======================= | ||
72 | A0.0 (A0StP) Stack pointer A1.0 (A1GbP) Global base pointer | ||
73 | A0.1 (A0FrP) Frame pointer A1.1 (A1LbP) Local base pointer | ||
74 | A0.2 A1.2 | ||
75 | A0.3 A1.3 | ||
76 | |||
77 | |||
78 | ================== | ||
79 | USERLAND REGISTERS | ||
80 | ================== | ||
81 | |||
82 | All the general-purpose D0, D1, A0, and A1 registers are preserved when | ||
83 | entering the kernel (including on asynchronous events such as interrupts and | ||
84 | timer ticks), except the following, which have special purposes in the ABI: | ||
85 | |||
86 | REGISTERS WHEN STATUS PURPOSE | ||
87 | =============== ======= =============== =============================== | ||
88 | D0.8 DSP Preserved ECH, determines what extended | ||
89 | DSP state to preserve. | ||
90 | A0.0 (A0StP) ALWAYS Preserved Stack >= A0StP may be clobbered | ||
91 | at any time by the creation of a | ||
92 | signal frame. | ||
93 | A1.0 (A1GbP) SMP Clobbered Used as temporary for loading | ||
94 | kernel stack pointer and saving | ||
95 | core context. | ||
96 | A0.15 !SMP Protected Stores kernel stack pointer. | ||
97 | A1.15 ALWAYS Protected Stores kernel base pointer. | ||
98 | |||
99 | On UP, A0.15 is used to store the kernel stack pointer for saving the userland | ||
100 | context. A0.15 is global between hardware threads, however, which means it | ||
101 | cannot be used for this purpose on SMP. Since no protected local registers are | ||
102 | available, A1GbP is reserved for use as a temporary so that a per-CPU stack | ||
103 | pointer can be loaded for saving the rest of the context. | ||
104 | |||
105 | |||
106 | ================ | ||
107 | KERNEL REGISTERS | ||
108 | ================ | ||
109 | |||
110 | When in the kernel the following registers have special purposes in the ABI: | ||
111 | |||
112 | REGISTERS WHEN STATUS PURPOSE | ||
113 | =============== ======= =============== =============================== | ||
114 | A0.0 (A0StP) ALWAYS Preserved Stack >= A0StP may be clobbered | ||
115 | at any time by the creation of | ||
116 | an irq signal frame. | ||
117 | A1.0 (A1GbP) ALWAYS Preserved Reserved (kernel base pointer). | ||
118 | |||
119 | |||
120 | =============== | ||
121 | SYSTEM CALL ABI | ||
122 | =============== | ||
123 | |||
124 | When a system call is made, the following registers are effective: | ||
125 | |||
126 | REGISTERS CALL RETURN | ||
127 | =============== ======================= =============================== | ||
128 | D0.0 (D0Re0) Return value (or -errno) | ||
129 | D1.0 (D1Re0) System call number Clobbered | ||
130 | D0.1 (D0Ar6) Syscall arg #6 Preserved | ||
131 | D1.1 (D1Ar5) Syscall arg #5 Preserved | ||
132 | D0.2 (D0Ar4) Syscall arg #4 Preserved | ||
133 | D1.2 (D1Ar3) Syscall arg #3 Preserved | ||
134 | D0.3 (D0Ar2) Syscall arg #2 Preserved | ||
135 | D1.3 (D1Ar1) Syscall arg #1 Preserved | ||
136 | |||
137 | Due to the limited number of argument registers and some system calls with badly | ||
138 | aligned 64-bit arguments, 64-bit values are always packed in consecutive | ||
139 | arguments, even if this is contrary to the normal calling conventions (where the | ||
140 | two halves would go in a matching pair of data registers). | ||
141 | |||
142 | For example fadvise64_64 usually has the signature: | ||
143 | |||
144 | long sys_fadvise64_64(i32 fd, i64 offs, i64 len, i32 advice); | ||
145 | |||
146 | But for metag fadvise64_64 is wrapped so that the 64-bit arguments are packed: | ||
147 | |||
148 | long sys_fadvise64_64_metag(i32 fd, i32 offs_lo, | ||
149 | i32 offs_hi, i32 len_lo, | ||
150 | i32 len_hi, i32 advice) | ||
151 | |||
152 | So the arguments are packed in the registers like this: | ||
153 | |||
154 | D0 REG (ALIAS) VALUE D1 REG (ALIAS) VALUE | ||
155 | =============== =============== =============== ======================= | ||
156 | D0.1 (D0Ar6) advice D1.1 (D1Ar5) hi(len) | ||
157 | D0.2 (D0Ar4) lo(len) D1.2 (D1Ar3) hi(offs) | ||
158 | D0.3 (D0Ar2) lo(offs) D1.3 (D1Ar1) fd | ||
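
As an illustrative sketch (not part of this patch; the wrapper name and the
use of libc's syscall() and SYS_fadvise64_64 are assumptions), a userspace
caller could perform this packing by splitting each 64-bit value into
consecutive 32-bit halves:

	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	/* Hypothetical helpers: extract the 32-bit halves of a 64-bit value. */
	static inline uint32_t lo32(uint64_t v) { return (uint32_t)v; }
	static inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

	long my_fadvise64_64(int fd, uint64_t offs, uint64_t len, int advice)
	{
		/* Arguments land as D1Ar1=fd, D0Ar2=lo(offs), D1Ar3=hi(offs),
		   D0Ar4=lo(len), D1Ar5=hi(len), D0Ar6=advice, as tabulated
		   above. */
		return syscall(SYS_fadvise64_64, fd,
			       lo32(offs), hi32(offs),
			       lo32(len), hi32(len), advice);
	}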
159 | |||
160 | |||
161 | =================== | ||
162 | CALLING CONVENTIONS | ||
163 | =================== | ||
164 | |||
165 | These calling conventions apply to both user and kernel code. The stack grows | ||
166 | from low addresses to high addresses in the metag ABI. The stack pointer (A0StP) | ||
167 | should always point to the next free address on the stack and should at all | ||
168 | times be 64-bit aligned. The following registers are effective at the point of a | ||
169 | call: | ||
170 | |||
171 | REGISTERS CALL RETURN | ||
172 | =============== ======================= =============================== | ||
173 | D0.0 (D0Re0) 32bit return value | ||
174 | D1.0 (D1Re0) Upper half of 64bit return value | ||
175 | D0.1 (D0Ar6) 32bit argument #6 Clobbered | ||
176 | D1.1 (D1Ar5) 32bit argument #5 Clobbered | ||
177 | D0.2 (D0Ar4) 32bit argument #4 Clobbered | ||
178 | D1.2 (D1Ar3) 32bit argument #3 Clobbered | ||
179 | D0.3 (D0Ar2) 32bit argument #2 Clobbered | ||
180 | D1.3 (D1Ar1) 32bit argument #1 Clobbered | ||
181 | D0.4 (D0FrT) Clobbered | ||
182 | D1.4 (D1RtP) Return pointer Clobbered | ||
183 | D{0-1}.{5-7} Preserved | ||
184 | A0.0 (A0StP) Stack pointer Preserved | ||
185 | A1.0 (A1GbP) Preserved | ||
186 | A0.1 (A0FrP) Frame pointer Preserved | ||
187 | A1.1 (A1LbP) Preserved | ||
188 | A{0-1}.{2-3} Clobbered | ||
189 | |||
190 | 64-bit arguments are placed in matching pairs of registers (i.e. the same | ||
191 | register number in both D0 and D1 units), with the least significant half in D0 | ||
192 | and the most significant half in D1, leaving a gap where necessary. Further | ||
193 | arguments are stored on the stack in reverse order (earlier arguments at higher | ||
194 | addresses): | ||
195 | |||
196 | ADDRESS 0 1 2 3 4 5 6 7 | ||
197 | =============== ===== ===== ===== ===== ===== ===== ===== ===== | ||
198 | A0StP --> | ||
199 | A0StP-0x08 32bit argument #8 32bit argument #7 | ||
200 | A0StP-0x10 32bit argument #10 32bit argument #9 | ||
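
For instance (an illustrative prototype, not taken from this patch), a call to

	long f(long a1, long a2, long a3, long a4, long a5,
	       long a6, long a7, long a8, long a9, long a10);

would pass a1..a6 in the argument registers (D1Ar1 holding a1 through D0Ar6
holding a6) and store a7..a10 on the stack as drawn above, with a7 occupying
the highest-addressed slot below A0StP.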
201 | |||
202 | Function prologues tend to look a bit like this: | ||
203 | |||
204 | /* If frame pointer in use, move it to frame temp register so it can be | ||
205 | easily pushed onto stack */ | ||
206 | MOV D0FrT,A0FrP | ||
207 | |||
208 | /* If frame pointer in use, set it to stack pointer */ | ||
209 | ADD A0FrP,A0StP,#0 | ||
210 | |||
211 | /* Preserve D0FrT, D1RtP, D{0-1}.{5-7} on stack, incrementing A0StP */ | ||
212 | MSETL [A0StP++],D0FrT,D0.5,D0.6,D0.7 | ||
213 | |||
214 | /* Allocate some stack space for local variables */ | ||
215 | ADD A0StP,A0StP,#0x10 | ||
216 | |||
217 | At this point the stack would look like this: | ||
218 | |||
219 | ADDRESS 0 1 2 3 4 5 6 7 | ||
220 | =============== ===== ===== ===== ===== ===== ===== ===== ===== | ||
221 | A0StP --> | ||
222 | A0StP-0x08 | ||
223 | A0StP-0x10 | ||
224 | A0StP-0x18 Old D0.7 Old D1.7 | ||
225 | A0StP-0x20 Old D0.6 Old D1.6 | ||
226 | A0StP-0x28 Old D0.5 Old D1.5 | ||
227 | A0FrP --> Old A0FrP (frame ptr) Old D1RtP (return ptr) | ||
228 | A0FrP-0x08 32bit argument #8 32bit argument #7 | ||
229 | A0FrP-0x10 32bit argument #10 32bit argument #9 | ||
230 | |||
231 | Function epilogues tend to differ depending on the use of a frame pointer. An | ||
232 | example of a frame pointer epilogue: | ||
233 | |||
234 | /* Restore D0FrT, D1RtP, D{0-1}.{5-7} from stack, incrementing A0FrP */ | ||
235 | MGETL D0FrT,D0.5,D0.6,D0.7,[A0FrP++] | ||
236 | /* Restore stack pointer to where frame pointer was before increment */ | ||
237 | SUB A0StP,A0FrP,#0x20 | ||
238 | /* Restore frame pointer from frame temp */ | ||
239 | MOV A0FrP,D0FrT | ||
240 | /* Return to caller via restored return pointer */ | ||
241 | MOV PC,D1RtP | ||
242 | |||
243 | If the function hasn't touched the frame pointer, MGETL cannot be safely used | ||
244 | with A0StP as it always increments and that would expose the stack to clobbering | ||
245 | by interrupts (kernel) or signals (user). Therefore it's common to see the MGETL | ||
246 | split into separate GETL instructions: | ||
247 | |||
248 | /* Restore D0FrT, D1RtP, D{0-1}.{5-7} from stack */ | ||
249 | GETL D0FrT,D1RtP,[A0StP+#-0x30] | ||
250 | GETL D0.5,D1.5,[A0StP+#-0x28] | ||
251 | GETL D0.6,D1.6,[A0StP+#-0x20] | ||
252 | GETL D0.7,D1.7,[A0StP+#-0x18] | ||
253 | /* Restore stack pointer */ | ||
254 | SUB A0StP,A0StP,#0x30 | ||
255 | /* Return to caller via restored return pointer */ | ||
256 | MOV PC,D1RtP | ||
diff --git a/MAINTAINERS b/MAINTAINERS index aea0adf414dc..e95b1e944eb7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -5204,6 +5204,18 @@ F: drivers/mtd/ | |||
5204 | F: include/linux/mtd/ | 5204 | F: include/linux/mtd/ |
5205 | F: include/uapi/mtd/ | 5205 | F: include/uapi/mtd/ |
5206 | 5206 | ||
5207 | METAG ARCHITECTURE | ||
5208 | M: James Hogan <james.hogan@imgtec.com> | ||
5209 | S: Supported | ||
5210 | F: arch/metag/ | ||
5211 | F: Documentation/metag/ | ||
5212 | F: Documentation/devicetree/bindings/metag/ | ||
5213 | F: drivers/clocksource/metag_generic.c | ||
5214 | F: drivers/irqchip/irq-metag.c | ||
5215 | F: drivers/irqchip/irq-metag-ext.c | ||
5216 | F: drivers/tty/metag_da.c | ||
5217 | F: fs/imgdafs/ | ||
5218 | |||
5207 | MICROBLAZE ARCHITECTURE | 5219 | MICROBLAZE ARCHITECTURE |
5208 | M: Michal Simek <monstr@monstr.eu> | 5220 | M: Michal Simek <monstr@monstr.eu> |
5209 | L: microblaze-uclinux@itee.uq.edu.au (moderated for non-subscribers) | 5221 | L: microblaze-uclinux@itee.uq.edu.au (moderated for non-subscribers) |
diff --git a/arch/Kconfig b/arch/Kconfig index dcd91a85536a..5a1779c93940 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -103,6 +103,22 @@ config UPROBES | |||
103 | 103 | ||
104 | If in doubt, say "N". | 104 | If in doubt, say "N". |
105 | 105 | ||
106 | config HAVE_64BIT_ALIGNED_ACCESS | ||
107 | def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
108 | help | ||
109 | Some architectures require 64 bit accesses to be 64 bit | ||
110 | aligned, which also requires structs containing 64 bit values | ||
111 | to be 64 bit aligned too. This includes some 32 bit | ||
112 | architectures which can do 64 bit accesses, as well as 64 bit | ||
113 | architectures without unaligned access. | ||
114 | |||
115 | This symbol should be selected by an architecture if 64 bit | ||
116 | accesses are required to be 64 bit aligned in this way even | ||
117 | though it is not a 64 bit architecture. | ||
118 | |||
119 | See Documentation/unaligned-memory-access.txt for more | ||
120 | information on the topic of unaligned memory accesses. | ||
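
As a minimal illustration (not part of this patch, and assuming an ABI
that gives 64-bit types 64-bit alignment), the compiler pads and aligns
structures so that 64-bit members land on 64-bit boundaries:

	#include <stdint.h>

	struct sample {
		uint32_t flags;   /* bytes 0-3 */
		/* 4 bytes of padding inserted here */
		uint64_t count;   /* bytes 8-15, 8-byte aligned */
	};

	/* Holds on such ABIs: the struct inherits its member's alignment. */
	_Static_assert(_Alignof(struct sample) == 8, "64-bit aligned");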
121 | |||
106 | config HAVE_EFFICIENT_UNALIGNED_ACCESS | 122 | config HAVE_EFFICIENT_UNALIGNED_ACCESS |
107 | bool | 123 | bool |
108 | help | 124 | help |
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig new file mode 100644 index 000000000000..afc8973d1488 --- /dev/null +++ b/arch/metag/Kconfig | |||
@@ -0,0 +1,290 @@ | |||
1 | config SYMBOL_PREFIX | ||
2 | string | ||
3 | default "_" | ||
4 | |||
5 | config METAG | ||
6 | def_bool y | ||
7 | select EMBEDDED | ||
8 | select GENERIC_ATOMIC64 | ||
9 | select GENERIC_CLOCKEVENTS | ||
10 | select GENERIC_IRQ_SHOW | ||
11 | select GENERIC_SMP_IDLE_THREAD | ||
12 | select HAVE_64BIT_ALIGNED_ACCESS | ||
13 | select HAVE_ARCH_TRACEHOOK | ||
14 | select HAVE_C_RECORDMCOUNT | ||
15 | select HAVE_DEBUG_KMEMLEAK | ||
16 | select HAVE_DYNAMIC_FTRACE | ||
17 | select HAVE_FTRACE_MCOUNT_RECORD | ||
18 | select HAVE_FUNCTION_TRACER | ||
19 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
20 | select HAVE_GENERIC_HARDIRQS | ||
21 | select HAVE_KERNEL_BZIP2 | ||
22 | select HAVE_KERNEL_GZIP | ||
23 | select HAVE_KERNEL_LZO | ||
24 | select HAVE_KERNEL_XZ | ||
25 | select HAVE_MEMBLOCK | ||
26 | select HAVE_MEMBLOCK_NODE_MAP | ||
27 | select HAVE_MOD_ARCH_SPECIFIC | ||
28 | select HAVE_PERF_EVENTS | ||
29 | select HAVE_SYSCALL_TRACEPOINTS | ||
30 | select IRQ_DOMAIN | ||
31 | select MODULES_USE_ELF_RELA | ||
32 | select OF | ||
33 | select OF_EARLY_FLATTREE | ||
34 | select SPARSE_IRQ | ||
35 | |||
36 | config STACKTRACE_SUPPORT | ||
37 | def_bool y | ||
38 | |||
39 | config LOCKDEP_SUPPORT | ||
40 | def_bool y | ||
41 | |||
42 | config HAVE_LATENCYTOP_SUPPORT | ||
43 | def_bool y | ||
44 | |||
45 | config RWSEM_GENERIC_SPINLOCK | ||
46 | def_bool y | ||
47 | |||
48 | config RWSEM_XCHGADD_ALGORITHM | ||
49 | bool | ||
50 | |||
51 | config GENERIC_HWEIGHT | ||
52 | def_bool y | ||
53 | |||
54 | config GENERIC_CALIBRATE_DELAY | ||
55 | def_bool y | ||
56 | |||
57 | config GENERIC_GPIO | ||
58 | def_bool n | ||
59 | |||
60 | config NO_IOPORT | ||
61 | def_bool y | ||
62 | |||
63 | source "init/Kconfig" | ||
64 | |||
65 | source "kernel/Kconfig.freezer" | ||
66 | |||
67 | menu "Processor type and features" | ||
68 | |||
69 | config MMU | ||
70 | def_bool y | ||
71 | |||
72 | config STACK_GROWSUP | ||
73 | def_bool y | ||
74 | |||
75 | config HOTPLUG_CPU | ||
76 | bool "Enable CPU hotplug support" | ||
77 | depends on SMP | ||
78 | help | ||
79 | Say Y here to allow turning CPUs off and on. CPUs can be | ||
80 | controlled through /sys/devices/system/cpu. | ||
81 | |||
82 | Say N if you want to disable CPU hotplug. | ||
83 | |||
84 | config HIGHMEM | ||
85 | bool "High Memory Support" | ||
86 | help | ||
87 | The address space of Meta processors is only 4 Gigabytes, and it | ||
88 | has to accommodate user address space, kernel address space, as | ||
89 | well as some memory-mapped IO. That means that, if you | ||
90 | have a large amount of physical memory and/or IO, not all of the | ||
91 | memory can be "permanently mapped" by the kernel. The physical | ||
92 | memory that is not permanently mapped is called "high memory". | ||
93 | |||
94 | Depending on the selected kernel/user memory split, minimum | ||
95 | vmalloc space and actual amount of RAM, you may not need this | ||
96 | option which should result in a slightly faster kernel. | ||
97 | |||
98 | If unsure, say n. | ||
99 | |||
100 | source "arch/metag/mm/Kconfig" | ||
101 | |||
102 | source "arch/metag/Kconfig.soc" | ||
103 | |||
104 | config METAG_META12 | ||
105 | bool | ||
106 | help | ||
107 | Select this from the SoC config symbol to indicate that it contains a | ||
108 | Meta 1.2 core. | ||
109 | |||
110 | config METAG_META21 | ||
111 | bool | ||
112 | help | ||
113 | Select this from the SoC config symbol to indicate that it contains a | ||
114 | Meta 2.1 core. | ||
115 | |||
116 | config SMP | ||
117 | bool "Symmetric multi-processing support" | ||
118 | depends on METAG_META21 && METAG_META21_MMU | ||
119 | select USE_GENERIC_SMP_HELPERS | ||
120 | help | ||
121 | This enables support for systems with more than one thread running | ||
122 | Linux. If you have a system with only one thread running Linux, | ||
123 | say N. Otherwise, say Y. | ||
124 | |||
125 | config NR_CPUS | ||
126 | int "Maximum number of CPUs (2-4)" if SMP | ||
127 | range 2 4 if SMP | ||
128 | default "1" if !SMP | ||
129 | default "4" if SMP | ||
130 | |||
131 | config METAG_SMP_WRITE_REORDERING | ||
132 | bool | ||
133 | help | ||
134 | This attempts to prevent cache-memory incoherence due to external | ||
135 | reordering of writes from different hardware threads when SMP is | ||
136 | enabled. It adds fences (system event 0) to smp_mb and smp_rmb in an | ||
137 | attempt to catch some of the cases, and also before writes to shared | ||
138 | memory in LOCK1 protected atomics and spinlocks. | ||
139 | This will not completely prevent cache incoherency on affected cores. | ||
140 | |||
141 | config METAG_LNKGET_AROUND_CACHE | ||
142 | bool | ||
143 | depends on METAG_META21 | ||
144 | help | ||
145 | This indicates that the LNKGET/LNKSET instructions go around the | ||
146 | cache, which requires some extra cache flushes when the memory needs | ||
147 | to be accessed by normal GET/SET instructions too. | ||
148 | |||
149 | choice | ||
150 | prompt "Atomicity primitive" | ||
151 | default METAG_ATOMICITY_LNKGET | ||
152 | help | ||
153 | This option selects the mechanism for performing atomic operations. | ||
154 | |||
155 | config METAG_ATOMICITY_IRQSOFF | ||
156 | depends on !SMP | ||
157 | bool "irqsoff" | ||
158 | help | ||
159 | This option disables interrupts to achieve atomicity. This mechanism | ||
160 | is not SMP-safe. | ||
161 | |||
162 | config METAG_ATOMICITY_LNKGET | ||
163 | depends on METAG_META21 | ||
164 | bool "lnkget/lnkset" | ||
165 | help | ||
166 | This option uses the LNKGET and LNKSET instructions to achieve | ||
167 | atomicity. LNKGET/LNKSET are load-link/store-conditional instructions. | ||
168 | Choose this option if your system requires low latency. | ||
169 | |||
170 | config METAG_ATOMICITY_LOCK1 | ||
171 | depends on SMP | ||
172 | bool "lock1" | ||
173 | help | ||
174 | This option uses the LOCK1 instruction for atomicity. This is mainly | ||
175 | provided as a debugging aid if the lnkget/lnkset atomicity primitive | ||
176 | isn't working properly. | ||
177 | |||
178 | endchoice | ||
179 | |||
180 | config METAG_FPU | ||
181 | bool "FPU Support" | ||
182 | depends on METAG_META21 | ||
183 | default y | ||
184 | help | ||
185 | This option allows processes to use FPU hardware available with this | ||
186 | CPU. If this option is not enabled, FPU registers will not be saved | ||
187 | and restored on context-switch. | ||
188 | |||
189 | If you plan on running programs which are compiled to use hard floats, | ||
190 | say Y here. | ||
191 | |||
192 | config METAG_DSP | ||
193 | bool "DSP Support" | ||
194 | help | ||
195 | This option allows processes to use DSP hardware available | ||
196 | with this CPU. If this option is not enabled, DSP registers | ||
197 | will not be saved and restored on context-switch. | ||
198 | |||
199 | If you plan on running DSP programs say Y here. | ||
200 | |||
201 | config METAG_PERFCOUNTER_IRQS | ||
202 | bool "PerfCounters interrupt support" | ||
203 | depends on METAG_META21 | ||
204 | help | ||
205 | This option enables using interrupts to collect information from | ||
206 | Performance Counters. This option is supported in new META21 | ||
207 | (starting from HTP265). | ||
208 | |||
209 | When disabled, performance counter information will be collected | ||
210 | based on the timer interrupt. | ||
211 | |||
212 | config METAG_DA | ||
213 | bool "DA support" | ||
214 | help | ||
215 | Say Y if you plan to use a DA debug adapter with Linux. The presence | ||
216 | of the DA will be detected automatically at boot, so it is safe to say | ||
217 | Y to this option even when booting without a DA. | ||
218 | |||
219 | This enables support for services provided by DA JTAG debug adapters, | ||
220 | such as: | ||
221 | - communication over DA channels (such as the console driver). | ||
222 | - use of the DA filesystem. | ||
223 | |||
224 | menu "Boot options" | ||
225 | |||
226 | config METAG_BUILTIN_DTB | ||
227 | bool "Embed DTB in kernel image" | ||
228 | default y | ||
229 | help | ||
230 | Embeds a device tree binary in the kernel image. | ||
231 | |||
232 | config METAG_BUILTIN_DTB_NAME | ||
233 | string "Built in DTB" | ||
234 | depends on METAG_BUILTIN_DTB | ||
235 | help | ||
236 | Set the name of the DTB to embed (leave blank to pick one | ||
237 | automatically based on kernel configuration). | ||
238 | |||
239 | config CMDLINE_BOOL | ||
240 | bool "Default bootloader kernel arguments" | ||
241 | |||
242 | config CMDLINE | ||
243 | string "Kernel command line" | ||
244 | depends on CMDLINE_BOOL | ||
245 | help | ||
246 | On some architectures there is currently no way for the boot loader | ||
247 | to pass arguments to the kernel. For these architectures, you should | ||
248 | supply some command-line options at build time by entering them | ||
249 | here. | ||
250 | |||
251 | config CMDLINE_FORCE | ||
252 | bool "Force default kernel command string" | ||
253 | depends on CMDLINE_BOOL | ||
254 | help | ||
255 | Set this to have arguments from the default kernel command string | ||
256 | override those passed by the boot loader. | ||
257 | |||
258 | endmenu | ||
259 | |||
260 | source "kernel/Kconfig.preempt" | ||
261 | |||
262 | source kernel/Kconfig.hz | ||
263 | |||
264 | endmenu | ||
265 | |||
266 | menu "Power management options" | ||
267 | |||
268 | source kernel/power/Kconfig | ||
269 | |||
270 | endmenu | ||
271 | |||
272 | menu "Executable file formats" | ||
273 | |||
274 | source "fs/Kconfig.binfmt" | ||
275 | |||
276 | endmenu | ||
277 | |||
278 | source "net/Kconfig" | ||
279 | |||
280 | source "drivers/Kconfig" | ||
281 | |||
282 | source "fs/Kconfig" | ||
283 | |||
284 | source "arch/metag/Kconfig.debug" | ||
285 | |||
286 | source "security/Kconfig" | ||
287 | |||
288 | source "crypto/Kconfig" | ||
289 | |||
290 | source "lib/Kconfig" | ||
diff --git a/arch/metag/Kconfig.debug b/arch/metag/Kconfig.debug new file mode 100644 index 000000000000..e45bbf6a7a5d --- /dev/null +++ b/arch/metag/Kconfig.debug | |||
@@ -0,0 +1,40 @@ | |||
1 | menu "Kernel hacking" | ||
2 | |||
3 | config TRACE_IRQFLAGS_SUPPORT | ||
4 | bool | ||
5 | default y | ||
6 | |||
7 | source "lib/Kconfig.debug" | ||
8 | |||
9 | config DEBUG_STACKOVERFLOW | ||
10 | bool "Check for stack overflows" | ||
11 | depends on DEBUG_KERNEL | ||
12 | help | ||
13 | This option will cause messages to be printed if free stack space | ||
14 | drops below a certain limit. | ||
15 | |||
16 | config 4KSTACKS | ||
17 | bool "Use 4Kb for kernel stacks instead of 8Kb" | ||
18 | depends on DEBUG_KERNEL | ||
19 | help | ||
20 | If you say Y here the kernel will use a 4Kb stack size for the | ||
21 | kernel stack attached to each process/thread. This facilitates | ||
22 | running more threads on a system and also reduces the pressure | ||
23 | on the VM subsystem for higher order allocations. This option | ||
24 | will also use IRQ stacks to compensate for the reduced stackspace. | ||
25 | |||
26 | config METAG_FUNCTION_TRACE | ||
27 | bool "Output Meta real-time trace data for function entry/exit" | ||
28 | help | ||
29 | If you say Y here the kernel will use the Meta hardware trace | ||
30 | unit to output information about function entry and exit that | ||
31 | can be used by a debugger for profiling and call-graphs. | ||
32 | |||
33 | config METAG_POISON_CATCH_BUFFERS | ||
34 | bool "Poison catch buffer contents on kernel entry" | ||
35 | help | ||
36 | If you say Y here the kernel will write poison data to the | ||
37 | catch buffer registers on kernel entry. This will make any | ||
38 | problem with catch buffer handling much more apparent. | ||
39 | |||
40 | endmenu | ||
diff --git a/arch/metag/Kconfig.soc b/arch/metag/Kconfig.soc new file mode 100644 index 000000000000..ec079cfb7c6a --- /dev/null +++ b/arch/metag/Kconfig.soc | |||
@@ -0,0 +1,55 @@ | |||
1 | choice | ||
2 | prompt "SoC Type" | ||
3 | default META21_FPGA | ||
4 | |||
5 | config META12_FPGA | ||
6 | bool "Meta 1.2 FPGA" | ||
7 | select METAG_META12 | ||
8 | help | ||
9 | This is a Meta 1.2 FPGA bitstream, just a bare CPU. | ||
10 | |||
11 | config META21_FPGA | ||
12 | bool "Meta 2.1 FPGA" | ||
13 | select METAG_META21 | ||
14 | help | ||
15 | This is a Meta 2.1 FPGA bitstream, just a bare CPU. | ||
16 | |||
17 | endchoice | ||
18 | |||
19 | menu "SoC configuration" | ||
20 | |||
21 | if METAG_META21 | ||
22 | |||
23 | # Meta 2.x specific options | ||
24 | |||
25 | config METAG_META21_MMU | ||
26 | bool "Meta 2.x MMU mode" | ||
27 | default y | ||
28 | help | ||
29 | Use the Meta 2.x MMU in extended mode. | ||
30 | |||
31 | config METAG_UNALIGNED | ||
32 | bool "Meta 2.x unaligned access checking" | ||
33 | default y | ||
34 | help | ||
35 | All memory accesses will be checked for alignment and an exception | ||
36 | raised on unaligned accesses. This feature costs performance, | ||
37 | but without it there will be no notification of this type of error. | ||
38 | |||
39 | config METAG_USER_TCM | ||
40 | bool "Meta on-chip memory support for userland" | ||
41 | select GENERIC_ALLOCATOR | ||
42 | default y | ||
43 | help | ||
44 | Allow the on-chip memories of Meta SoCs to be used by user | ||
45 | applications. | ||
46 | |||
47 | endif | ||
48 | |||
49 | config METAG_HALT_ON_PANIC | ||
50 | bool "Halt the core on panic" | ||
51 | help | ||
52 | Halt the core when a panic occurs. This is useful when running | ||
53 | pre-production silicon or in an FPGA environment. | ||
54 | |||
55 | endmenu | ||
diff --git a/arch/metag/Makefile b/arch/metag/Makefile new file mode 100644 index 000000000000..81bd6a1c7483 --- /dev/null +++ b/arch/metag/Makefile | |||
@@ -0,0 +1,87 @@ | |||
1 | # | ||
2 | # metag/Makefile | ||
3 | # | ||
4 | # This file is included by the global makefile so that you can add your own | ||
5 | # architecture-specific flags and dependencies. Remember to do have actions | ||
6 | # for "archclean" cleaning up for this architecture. | ||
7 | # | ||
8 | # This file is subject to the terms and conditions of the GNU General Public | ||
9 | # License. See the file "COPYING" in the main directory of this archive | ||
10 | # for more details. | ||
11 | # | ||
12 | # Copyright (C) 1994 by Linus Torvalds | ||
13 | # 2007,2008,2012 by Imagination Technologies Ltd. | ||
14 | # | ||
15 | |||
16 | LDFLAGS := | ||
17 | OBJCOPYFLAGS := -O binary -R .note -R .comment -S | ||
18 | |||
19 | checkflags-$(CONFIG_METAG_META12) += -DMETAC_1_2 | ||
20 | checkflags-$(CONFIG_METAG_META21) += -DMETAC_2_1 | ||
21 | CHECKFLAGS += -D__metag__ $(checkflags-y) | ||
22 | |||
23 | KBUILD_DEFCONFIG := meta2_defconfig | ||
24 | |||
25 | sflags-$(CONFIG_METAG_META12) += -mmetac=1.2 | ||
26 | ifeq ($(CONFIG_METAG_META12),y) | ||
27 | # Only use TBI API 1.4 if DSP is enabled for META12 cores | ||
28 | sflags-$(CONFIG_METAG_DSP) += -DTBI_1_4 | ||
29 | endif | ||
30 | sflags-$(CONFIG_METAG_META21) += -mmetac=2.1 -DTBI_1_4 | ||
31 | |||
32 | cflags-$(CONFIG_METAG_FUNCTION_TRACE) += -mhwtrace-leaf -mhwtrace-retpc | ||
33 | cflags-$(CONFIG_METAG_META21) += -mextensions=bex | ||
34 | |||
35 | KBUILD_CFLAGS += -pipe | ||
36 | KBUILD_CFLAGS += -ffunction-sections | ||
37 | |||
38 | KBUILD_CFLAGS += $(sflags-y) $(cflags-y) | ||
39 | KBUILD_AFLAGS += $(sflags-y) | ||
40 | |||
41 | LDFLAGS_vmlinux := $(ldflags-y) | ||
42 | |||
43 | head-y := arch/metag/kernel/head.o | ||
44 | |||
45 | core-y += arch/metag/boot/dts/ | ||
46 | core-y += arch/metag/kernel/ | ||
47 | core-y += arch/metag/mm/ | ||
48 | |||
49 | libs-y += arch/metag/lib/ | ||
50 | libs-y += arch/metag/tbx/ | ||
51 | |||
52 | boot := arch/metag/boot | ||
53 | |||
54 | boot_targets += uImage | ||
55 | boot_targets += uImage.gz | ||
56 | boot_targets += uImage.bz2 | ||
57 | boot_targets += uImage.xz | ||
58 | boot_targets += uImage.lzo | ||
59 | boot_targets += uImage.bin | ||
60 | boot_targets += vmlinux.bin | ||
61 | |||
62 | PHONY += $(boot_targets) | ||
63 | |||
64 | all: vmlinux.bin | ||
65 | |||
66 | $(boot_targets): vmlinux | ||
67 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | ||
68 | |||
69 | %.dtb %.dtb.S %.dtb.o: scripts | ||
70 | $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ | ||
71 | |||
72 | dtbs: scripts | ||
73 | $(Q)$(MAKE) $(build)=$(boot)/dts dtbs | ||
74 | |||
75 | archclean: | ||
76 | $(Q)$(MAKE) $(clean)=$(boot) | ||
77 | |||
78 | define archhelp | ||
79 | echo '* vmlinux.bin - Binary kernel image (arch/$(ARCH)/boot/vmlinux.bin)' | ||
80 | @echo ' uImage - Alias to bootable U-Boot image' | ||
81 | @echo ' uImage.bin - Kernel-only image for U-Boot (bin)' | ||
82 | @echo ' uImage.gz - Kernel-only image for U-Boot (gzip)' | ||
83 | @echo ' uImage.bz2 - Kernel-only image for U-Boot (bzip2)' | ||
84 | @echo ' uImage.xz - Kernel-only image for U-Boot (xz)' | ||
85 | @echo ' uImage.lzo - Kernel-only image for U-Boot (lzo)' | ||
86 | @echo ' dtbs - Build device tree blobs for enabled boards' | ||
87 | endef | ||
diff --git a/arch/metag/boot/.gitignore b/arch/metag/boot/.gitignore new file mode 100644 index 000000000000..a021da201156 --- /dev/null +++ b/arch/metag/boot/.gitignore | |||
@@ -0,0 +1,4 @@ | |||
1 | vmlinux* | ||
2 | uImage* | ||
3 | ramdisk.* | ||
4 | *.dtb | ||
diff --git a/arch/metag/boot/Makefile b/arch/metag/boot/Makefile new file mode 100644 index 000000000000..5a1f88cf91e3 --- /dev/null +++ b/arch/metag/boot/Makefile | |||
@@ -0,0 +1,68 @@ | |||
1 | # | ||
2 | # This file is subject to the terms and conditions of the GNU General Public | ||
3 | # License. See the file "COPYING" in the main directory of this archive | ||
4 | # for more details. | ||
5 | # | ||
6 | # Copyright (C) 2007,2012 Imagination Technologies Ltd. | ||
7 | # | ||
8 | |||
9 | suffix-y := bin | ||
10 | suffix-$(CONFIG_KERNEL_GZIP) := gz | ||
11 | suffix-$(CONFIG_KERNEL_BZIP2) := bz2 | ||
12 | suffix-$(CONFIG_KERNEL_XZ) := xz | ||
13 | suffix-$(CONFIG_KERNEL_LZO) := lzo | ||
14 | |||
15 | targets += vmlinux.bin | ||
16 | targets += uImage | ||
17 | targets += uImage.gz | ||
18 | targets += uImage.bz2 | ||
19 | targets += uImage.xz | ||
20 | targets += uImage.lzo | ||
21 | targets += uImage.bin | ||
22 | |||
23 | extra-y += vmlinux.bin | ||
24 | extra-y += vmlinux.bin.gz | ||
25 | extra-y += vmlinux.bin.bz2 | ||
26 | extra-y += vmlinux.bin.xz | ||
27 | extra-y += vmlinux.bin.lzo | ||
28 | |||
29 | UIMAGE_LOADADDR = $(CONFIG_PAGE_OFFSET) | ||
30 | |||
31 | ifeq ($(CONFIG_FUNCTION_TRACER),y) | ||
32 | orig_cflags := $(KBUILD_CFLAGS) | ||
33 | KBUILD_CFLAGS = $(subst -pg, , $(orig_cflags)) | ||
34 | endif | ||
35 | |||
36 | $(obj)/vmlinux.bin: vmlinux FORCE | ||
37 | $(call if_changed,objcopy) | ||
38 | |||
39 | $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE | ||
40 | $(call if_changed,gzip) | ||
41 | |||
42 | $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE | ||
43 | $(call if_changed,bzip2) | ||
44 | |||
45 | $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE | ||
46 | $(call if_changed,xzkern) | ||
47 | |||
48 | $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE | ||
49 | $(call if_changed,lzo) | ||
50 | |||
51 | $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE | ||
52 | $(call if_changed,uimage,gzip) | ||
53 | |||
54 | $(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE | ||
55 | $(call if_changed,uimage,bzip2) | ||
56 | |||
57 | $(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE | ||
58 | $(call if_changed,uimage,xz) | ||
59 | |||
60 | $(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE | ||
61 | $(call if_changed,uimage,lzo) | ||
62 | |||
63 | $(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE | ||
64 | $(call if_changed,uimage,none) | ||
65 | |||
66 | $(obj)/uImage: $(obj)/uImage.$(suffix-y) | ||
67 | @ln -sf $(notdir $<) $@ | ||
68 | @echo ' Image $@ is ready' | ||
diff --git a/arch/metag/boot/dts/Makefile b/arch/metag/boot/dts/Makefile new file mode 100644 index 000000000000..e0b5afd8bde8 --- /dev/null +++ b/arch/metag/boot/dts/Makefile | |||
@@ -0,0 +1,16 @@ | |||
1 | dtb-y += skeleton.dtb | ||
2 | |||
3 | # Built-in dtb | ||
4 | builtindtb-y := skeleton | ||
5 | |||
6 | ifneq ($(CONFIG_METAG_BUILTIN_DTB_NAME),"") | ||
7 | builtindtb-y := $(CONFIG_METAG_BUILTIN_DTB_NAME) | ||
8 | endif | ||
9 | obj-$(CONFIG_METAG_BUILTIN_DTB) += $(patsubst "%",%,$(builtindtb-y)).dtb.o | ||
10 | |||
11 | targets += dtbs | ||
12 | targets += $(dtb-y) | ||
13 | |||
14 | dtbs: $(addprefix $(obj)/, $(dtb-y)) | ||
15 | |||
16 | clean-files += *.dtb | ||
diff --git a/arch/metag/boot/dts/skeleton.dts b/arch/metag/boot/dts/skeleton.dts new file mode 100644 index 000000000000..7244d1f0d555 --- /dev/null +++ b/arch/metag/boot/dts/skeleton.dts | |||
@@ -0,0 +1,10 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | /dts-v1/; | ||
9 | |||
10 | /include/ "skeleton.dtsi" | ||
diff --git a/arch/metag/boot/dts/skeleton.dtsi b/arch/metag/boot/dts/skeleton.dtsi new file mode 100644 index 000000000000..78229eacced7 --- /dev/null +++ b/arch/metag/boot/dts/skeleton.dtsi | |||
@@ -0,0 +1,14 @@ | |||
1 | /* | ||
2 | * Skeleton device tree; the bare minimum needed to boot; just include and | ||
3 | * add a compatible value. The bootloader will typically populate the memory | ||
4 | * node. | ||
5 | */ | ||
6 | |||
7 | / { | ||
8 | compatible = "img,meta"; | ||
9 | #address-cells = <1>; | ||
10 | #size-cells = <1>; | ||
11 | chosen { }; | ||
12 | aliases { }; | ||
13 | memory { device_type = "memory"; reg = <0 0>; }; | ||
14 | }; | ||
diff --git a/arch/metag/configs/meta1_defconfig b/arch/metag/configs/meta1_defconfig new file mode 100644 index 000000000000..c35a75e8ecfe --- /dev/null +++ b/arch/metag/configs/meta1_defconfig | |||
@@ -0,0 +1,40 @@ | |||
1 | # CONFIG_LOCALVERSION_AUTO is not set | ||
2 | # CONFIG_SWAP is not set | ||
3 | CONFIG_LOG_BUF_SHIFT=13 | ||
4 | CONFIG_SYSFS_DEPRECATED=y | ||
5 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
6 | CONFIG_KALLSYMS_ALL=y | ||
7 | # CONFIG_ELF_CORE is not set | ||
8 | CONFIG_SLAB=y | ||
9 | # CONFIG_BLK_DEV_BSG is not set | ||
10 | CONFIG_PARTITION_ADVANCED=y | ||
11 | # CONFIG_MSDOS_PARTITION is not set | ||
12 | # CONFIG_IOSCHED_DEADLINE is not set | ||
13 | # CONFIG_IOSCHED_CFQ is not set | ||
14 | CONFIG_FLATMEM_MANUAL=y | ||
15 | CONFIG_META12_FPGA=y | ||
16 | CONFIG_METAG_DA=y | ||
17 | CONFIG_HZ_100=y | ||
18 | CONFIG_DEVTMPFS=y | ||
19 | CONFIG_DEVTMPFS_MOUNT=y | ||
20 | # CONFIG_STANDALONE is not set | ||
21 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | ||
22 | # CONFIG_FW_LOADER is not set | ||
23 | CONFIG_BLK_DEV_RAM=y | ||
24 | CONFIG_BLK_DEV_RAM_COUNT=1 | ||
25 | CONFIG_BLK_DEV_RAM_SIZE=16384 | ||
26 | # CONFIG_INPUT is not set | ||
27 | # CONFIG_SERIO is not set | ||
28 | # CONFIG_VT is not set | ||
29 | # CONFIG_LEGACY_PTYS is not set | ||
30 | CONFIG_DA_TTY=y | ||
31 | CONFIG_DA_CONSOLE=y | ||
32 | # CONFIG_DEVKMEM is not set | ||
33 | # CONFIG_HW_RANDOM is not set | ||
34 | # CONFIG_HWMON is not set | ||
35 | # CONFIG_USB_SUPPORT is not set | ||
36 | # CONFIG_DNOTIFY is not set | ||
37 | CONFIG_TMPFS=y | ||
38 | # CONFIG_MISC_FILESYSTEMS is not set | ||
39 | # CONFIG_SCHED_DEBUG is not set | ||
40 | CONFIG_DEBUG_INFO=y | ||
diff --git a/arch/metag/configs/meta2_defconfig b/arch/metag/configs/meta2_defconfig new file mode 100644 index 000000000000..fb3148410183 --- /dev/null +++ b/arch/metag/configs/meta2_defconfig | |||
@@ -0,0 +1,41 @@ | |||
1 | # CONFIG_LOCALVERSION_AUTO is not set | ||
2 | # CONFIG_SWAP is not set | ||
3 | CONFIG_SYSVIPC=y | ||
4 | CONFIG_LOG_BUF_SHIFT=13 | ||
5 | CONFIG_SYSFS_DEPRECATED=y | ||
6 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
7 | CONFIG_KALLSYMS_ALL=y | ||
8 | # CONFIG_ELF_CORE is not set | ||
9 | CONFIG_SLAB=y | ||
10 | # CONFIG_BLK_DEV_BSG is not set | ||
11 | CONFIG_PARTITION_ADVANCED=y | ||
12 | # CONFIG_MSDOS_PARTITION is not set | ||
13 | # CONFIG_IOSCHED_DEADLINE is not set | ||
14 | # CONFIG_IOSCHED_CFQ is not set | ||
15 | CONFIG_METAG_L2C=y | ||
16 | CONFIG_FLATMEM_MANUAL=y | ||
17 | CONFIG_METAG_HALT_ON_PANIC=y | ||
18 | CONFIG_METAG_DA=y | ||
19 | CONFIG_HZ_100=y | ||
20 | CONFIG_DEVTMPFS=y | ||
21 | # CONFIG_STANDALONE is not set | ||
22 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | ||
23 | # CONFIG_FW_LOADER is not set | ||
24 | CONFIG_BLK_DEV_RAM=y | ||
25 | CONFIG_BLK_DEV_RAM_COUNT=1 | ||
26 | CONFIG_BLK_DEV_RAM_SIZE=16384 | ||
27 | # CONFIG_INPUT is not set | ||
28 | # CONFIG_SERIO is not set | ||
29 | # CONFIG_VT is not set | ||
30 | # CONFIG_LEGACY_PTYS is not set | ||
31 | CONFIG_DA_TTY=y | ||
32 | CONFIG_DA_CONSOLE=y | ||
33 | # CONFIG_DEVKMEM is not set | ||
34 | # CONFIG_HW_RANDOM is not set | ||
35 | # CONFIG_HWMON is not set | ||
36 | # CONFIG_USB_SUPPORT is not set | ||
37 | # CONFIG_DNOTIFY is not set | ||
38 | CONFIG_TMPFS=y | ||
39 | # CONFIG_MISC_FILESYSTEMS is not set | ||
40 | # CONFIG_SCHED_DEBUG is not set | ||
41 | CONFIG_DEBUG_INFO=y | ||
diff --git a/arch/metag/configs/meta2_smp_defconfig b/arch/metag/configs/meta2_smp_defconfig new file mode 100644 index 000000000000..6c7b777ac276 --- /dev/null +++ b/arch/metag/configs/meta2_smp_defconfig | |||
@@ -0,0 +1,42 @@ | |||
1 | # CONFIG_LOCALVERSION_AUTO is not set | ||
2 | # CONFIG_SWAP is not set | ||
3 | CONFIG_SYSVIPC=y | ||
4 | CONFIG_LOG_BUF_SHIFT=13 | ||
5 | CONFIG_SYSFS_DEPRECATED=y | ||
6 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
7 | CONFIG_KALLSYMS_ALL=y | ||
8 | # CONFIG_ELF_CORE is not set | ||
9 | CONFIG_SLAB=y | ||
10 | # CONFIG_BLK_DEV_BSG is not set | ||
11 | CONFIG_PARTITION_ADVANCED=y | ||
12 | # CONFIG_MSDOS_PARTITION is not set | ||
13 | # CONFIG_IOSCHED_DEADLINE is not set | ||
14 | # CONFIG_IOSCHED_CFQ is not set | ||
15 | CONFIG_METAG_L2C=y | ||
16 | CONFIG_FLATMEM_MANUAL=y | ||
17 | CONFIG_METAG_HALT_ON_PANIC=y | ||
18 | CONFIG_SMP=y | ||
19 | CONFIG_METAG_DA=y | ||
20 | CONFIG_HZ_100=y | ||
21 | CONFIG_DEVTMPFS=y | ||
22 | # CONFIG_STANDALONE is not set | ||
23 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | ||
24 | # CONFIG_FW_LOADER is not set | ||
25 | CONFIG_BLK_DEV_RAM=y | ||
26 | CONFIG_BLK_DEV_RAM_COUNT=1 | ||
27 | CONFIG_BLK_DEV_RAM_SIZE=16384 | ||
28 | # CONFIG_INPUT is not set | ||
29 | # CONFIG_SERIO is not set | ||
30 | # CONFIG_VT is not set | ||
31 | # CONFIG_LEGACY_PTYS is not set | ||
32 | CONFIG_DA_TTY=y | ||
33 | CONFIG_DA_CONSOLE=y | ||
34 | # CONFIG_DEVKMEM is not set | ||
35 | # CONFIG_HW_RANDOM is not set | ||
36 | # CONFIG_HWMON is not set | ||
37 | # CONFIG_USB_SUPPORT is not set | ||
38 | # CONFIG_DNOTIFY is not set | ||
39 | CONFIG_TMPFS=y | ||
40 | # CONFIG_MISC_FILESYSTEMS is not set | ||
41 | # CONFIG_SCHED_DEBUG is not set | ||
42 | CONFIG_DEBUG_INFO=y | ||
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild new file mode 100644 index 000000000000..6ae0ccb632cb --- /dev/null +++ b/arch/metag/include/asm/Kbuild | |||
@@ -0,0 +1,54 @@ | |||
1 | generic-y += auxvec.h | ||
2 | generic-y += bitsperlong.h | ||
3 | generic-y += bugs.h | ||
4 | generic-y += clkdev.h | ||
5 | generic-y += cputime.h | ||
6 | generic-y += current.h | ||
7 | generic-y += device.h | ||
8 | generic-y += dma.h | ||
9 | generic-y += emergency-restart.h | ||
10 | generic-y += errno.h | ||
11 | generic-y += exec.h | ||
12 | generic-y += fb.h | ||
13 | generic-y += fcntl.h | ||
14 | generic-y += futex.h | ||
15 | generic-y += hardirq.h | ||
16 | generic-y += hw_irq.h | ||
17 | generic-y += ioctl.h | ||
18 | generic-y += ioctls.h | ||
19 | generic-y += ipcbuf.h | ||
20 | generic-y += irq_regs.h | ||
21 | generic-y += kdebug.h | ||
22 | generic-y += kmap_types.h | ||
23 | generic-y += kvm_para.h | ||
24 | generic-y += local.h | ||
25 | generic-y += local64.h | ||
26 | generic-y += msgbuf.h | ||
27 | generic-y += mutex.h | ||
28 | generic-y += param.h | ||
29 | generic-y += pci.h | ||
30 | generic-y += percpu.h | ||
31 | generic-y += poll.h | ||
32 | generic-y += posix_types.h | ||
33 | generic-y += scatterlist.h | ||
34 | generic-y += sections.h | ||
35 | generic-y += sembuf.h | ||
36 | generic-y += serial.h | ||
37 | generic-y += shmbuf.h | ||
38 | generic-y += shmparam.h | ||
39 | generic-y += signal.h | ||
40 | generic-y += socket.h | ||
41 | generic-y += sockios.h | ||
42 | generic-y += stat.h | ||
43 | generic-y += statfs.h | ||
44 | generic-y += switch_to.h | ||
45 | generic-y += termbits.h | ||
46 | generic-y += termios.h | ||
47 | generic-y += timex.h | ||
48 | generic-y += trace_clock.h | ||
49 | generic-y += types.h | ||
50 | generic-y += ucontext.h | ||
51 | generic-y += unaligned.h | ||
52 | generic-y += user.h | ||
53 | generic-y += vga.h | ||
54 | generic-y += xor.h | ||
diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h new file mode 100644 index 000000000000..307ecd2bd9a1 --- /dev/null +++ b/arch/metag/include/asm/atomic.h | |||
@@ -0,0 +1,53 @@ | |||
1 | #ifndef __ASM_METAG_ATOMIC_H | ||
2 | #define __ASM_METAG_ATOMIC_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <asm/cmpxchg.h> | ||
7 | |||
8 | #if defined(CONFIG_METAG_ATOMICITY_IRQSOFF) | ||
9 | /* The simple UP case. */ | ||
10 | #include <asm-generic/atomic.h> | ||
11 | #else | ||
12 | |||
13 | #if defined(CONFIG_METAG_ATOMICITY_LOCK1) | ||
14 | #include <asm/atomic_lock1.h> | ||
15 | #else | ||
16 | #include <asm/atomic_lnkget.h> | ||
17 | #endif | ||
18 | |||
19 | #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) | ||
20 | |||
21 | #define atomic_dec_return(v) atomic_sub_return(1, (v)) | ||
22 | #define atomic_inc_return(v) atomic_add_return(1, (v)) | ||
23 | |||
24 | /* | ||
25 | * atomic_inc_and_test - increment and test | ||
26 | * @v: pointer of type atomic_t | ||
27 | * | ||
28 | * Atomically increments @v by 1 | ||
29 | * and returns true if the result is zero, or false for all | ||
30 | * other cases. | ||
31 | */ | ||
32 | #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) | ||
33 | |||
34 | #define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) | ||
35 | #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) | ||
36 | |||
37 | #define atomic_inc(v) atomic_add(1, (v)) | ||
38 | #define atomic_dec(v) atomic_sub(1, (v)) | ||
39 | |||
40 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
41 | |||
42 | #define smp_mb__before_atomic_dec() barrier() | ||
43 | #define smp_mb__after_atomic_dec() barrier() | ||
44 | #define smp_mb__before_atomic_inc() barrier() | ||
45 | #define smp_mb__after_atomic_inc() barrier() | ||
46 | |||
47 | #endif | ||
48 | |||
49 | #define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) | ||
50 | |||
51 | #include <asm-generic/atomic64.h> | ||
52 | |||
53 | #endif /* __ASM_METAG_ATOMIC_H */ | ||
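
As a brief usage sketch of the atomic API these definitions back (kernel-style,
illustrative only; free_object() is a hypothetical cleanup hook):

	static atomic_t refs = ATOMIC_INIT(1);

	static void get_ref(void)
	{
		atomic_inc(&refs);		/* refs += 1, atomically */
	}

	static void put_ref(void)
	{
		/* atomic_dec_and_test() returns true when refs reaches zero */
		if (atomic_dec_and_test(&refs))
			free_object();
	}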
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h new file mode 100644 index 000000000000..d2e60a18986c --- /dev/null +++ b/arch/metag/include/asm/atomic_lnkget.h | |||
@@ -0,0 +1,234 @@ | |||
1 | #ifndef __ASM_METAG_ATOMIC_LNKGET_H | ||
2 | #define __ASM_METAG_ATOMIC_LNKGET_H | ||
3 | |||
4 | #define ATOMIC_INIT(i) { (i) } | ||
5 | |||
6 | #define atomic_set(v, i) ((v)->counter = (i)) | ||
7 | |||
8 | #include <linux/compiler.h> | ||
9 | |||
10 | #include <asm/barrier.h> | ||
11 | |||
12 | /* | ||
13 | * None of these asm statements clobber memory, as LNKSET writes around | ||
14 | * the cache, so the memory it modifies cannot safely be read by any means | ||
15 | * other than these accessors. | ||
16 | */ | ||
17 | |||
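/*
 * Each read-modify-write loop below follows the same pattern: LNKGETD
 * performs a linked load of the counter, LNKSETD attempts the conditional
 * store, then DEFR reads back TXSTAT and the ANDT/CMPT pair compares the
 * LNKSET status bits (mask 0x3f000000) against the success value
 * (0x02000000), branching back to retry if the store did not succeed.
 */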
18 | static inline int atomic_read(const atomic_t *v) | ||
19 | { | ||
20 | int temp; | ||
21 | |||
22 | asm volatile ( | ||
23 | "LNKGETD %0, [%1]\n" | ||
24 | : "=da" (temp) | ||
25 | : "da" (&v->counter)); | ||
26 | |||
27 | return temp; | ||
28 | } | ||
29 | |||
30 | static inline void atomic_add(int i, atomic_t *v) | ||
31 | { | ||
32 | int temp; | ||
33 | |||
34 | asm volatile ( | ||
35 | "1: LNKGETD %0, [%1]\n" | ||
36 | " ADD %0, %0, %2\n" | ||
37 | " LNKSETD [%1], %0\n" | ||
38 | " DEFR %0, TXSTAT\n" | ||
39 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
40 | " CMPT %0, #HI(0x02000000)\n" | ||
41 | " BNZ 1b\n" | ||
42 | : "=&d" (temp) | ||
43 | : "da" (&v->counter), "bd" (i) | ||
44 | : "cc"); | ||
45 | } | ||
46 | |||
47 | static inline void atomic_sub(int i, atomic_t *v) | ||
48 | { | ||
49 | int temp; | ||
50 | |||
51 | asm volatile ( | ||
52 | "1: LNKGETD %0, [%1]\n" | ||
53 | " SUB %0, %0, %2\n" | ||
54 | " LNKSETD [%1], %0\n" | ||
55 | " DEFR %0, TXSTAT\n" | ||
56 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
57 | " CMPT %0, #HI(0x02000000)\n" | ||
58 | " BNZ 1b\n" | ||
59 | : "=&d" (temp) | ||
60 | : "da" (&v->counter), "bd" (i) | ||
61 | : "cc"); | ||
62 | } | ||
63 | |||
64 | static inline int atomic_add_return(int i, atomic_t *v) | ||
65 | { | ||
66 | int result, temp; | ||
67 | |||
68 | smp_mb(); | ||
69 | |||
70 | asm volatile ( | ||
71 | "1: LNKGETD %1, [%2]\n" | ||
72 | " ADD %1, %1, %3\n" | ||
73 | " LNKSETD [%2], %1\n" | ||
74 | " DEFR %0, TXSTAT\n" | ||
75 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
76 | " CMPT %0, #HI(0x02000000)\n" | ||
77 | " BNZ 1b\n" | ||
78 | : "=&d" (temp), "=&da" (result) | ||
79 | : "da" (&v->counter), "bd" (i) | ||
80 | : "cc"); | ||
81 | |||
82 | smp_mb(); | ||
83 | |||
84 | return result; | ||
85 | } | ||
86 | |||
87 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
88 | { | ||
89 | int result, temp; | ||
90 | |||
91 | smp_mb(); | ||
92 | |||
93 | asm volatile ( | ||
94 | "1: LNKGETD %1, [%2]\n" | ||
95 | " SUB %1, %1, %3\n" | ||
96 | " LNKSETD [%2], %1\n" | ||
97 | " DEFR %0, TXSTAT\n" | ||
98 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
99 | " CMPT %0, #HI(0x02000000)\n" | ||
100 | " BNZ 1b\n" | ||
101 | : "=&d" (temp), "=&da" (result) | ||
102 | : "da" (&v->counter), "bd" (i) | ||
103 | : "cc"); | ||
104 | |||
105 | smp_mb(); | ||
106 | |||
107 | return result; | ||
108 | } | ||
109 | |||
110 | static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) | ||
111 | { | ||
112 | int temp; | ||
113 | |||
114 | asm volatile ( | ||
115 | "1: LNKGETD %0, [%1]\n" | ||
116 | " AND %0, %0, %2\n" | ||
117 | " LNKSETD [%1] %0\n" | ||
118 | " DEFR %0, TXSTAT\n" | ||
119 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
120 | " CMPT %0, #HI(0x02000000)\n" | ||
121 | " BNZ 1b\n" | ||
122 | : "=&d" (temp) | ||
123 | : "da" (&v->counter), "bd" (~mask) | ||
124 | : "cc"); | ||
125 | } | ||
126 | |||
127 | static inline void atomic_set_mask(unsigned int mask, atomic_t *v) | ||
128 | { | ||
129 | int temp; | ||
130 | |||
131 | asm volatile ( | ||
132 | "1: LNKGETD %0, [%1]\n" | ||
133 | " OR %0, %0, %2\n" | ||
134 | " LNKSETD [%1], %0\n" | ||
135 | " DEFR %0, TXSTAT\n" | ||
136 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
137 | " CMPT %0, #HI(0x02000000)\n" | ||
138 | " BNZ 1b\n" | ||
139 | : "=&d" (temp) | ||
140 | : "da" (&v->counter), "bd" (mask) | ||
141 | : "cc"); | ||
142 | } | ||
143 | |||
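/*
 * The conditional LNKSET variants used below (LNKSETDEQ, LNKSETDNE,
 * LNKSETDGE) only perform the linked store when the condition set by the
 * preceding CMP/SUBS holds, so the compare and the store together form a
 * single atomic step.
 */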
144 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | ||
145 | { | ||
146 | int result, temp; | ||
147 | |||
148 | smp_mb(); | ||
149 | |||
150 | asm volatile ( | ||
151 | "1: LNKGETD %1, [%2]\n" | ||
152 | " CMP %1, %3\n" | ||
153 | " LNKSETDEQ [%2], %4\n" | ||
154 | " BNE 2f\n" | ||
155 | " DEFR %0, TXSTAT\n" | ||
156 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
157 | " CMPT %0, #HI(0x02000000)\n" | ||
158 | " BNZ 1b\n" | ||
159 | "2:\n" | ||
160 | : "=&d" (temp), "=&d" (result) | ||
161 | : "da" (&v->counter), "bd" (old), "da" (new) | ||
162 | : "cc"); | ||
163 | |||
164 | smp_mb(); | ||
165 | |||
166 | return result; | ||
167 | } | ||
168 | |||
169 | static inline int atomic_xchg(atomic_t *v, int new) | ||
170 | { | ||
171 | int temp, old; | ||
172 | |||
173 | asm volatile ( | ||
174 | "1: LNKGETD %1, [%2]\n" | ||
175 | " LNKSETD [%2], %3\n" | ||
176 | " DEFR %0, TXSTAT\n" | ||
177 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
178 | " CMPT %0, #HI(0x02000000)\n" | ||
179 | " BNZ 1b\n" | ||
180 | : "=&d" (temp), "=&d" (old) | ||
181 | : "da" (&v->counter), "da" (new) | ||
182 | : "cc"); | ||
183 | |||
184 | return old; | ||
185 | } | ||
186 | |||
187 | static inline int __atomic_add_unless(atomic_t *v, int a, int u) | ||
188 | { | ||
189 | int result, temp; | ||
190 | |||
191 | smp_mb(); | ||
192 | |||
193 | asm volatile ( | ||
194 | "1: LNKGETD %1, [%2]\n" | ||
195 | " CMP %1, %3\n" | ||
196 | " ADD %0, %1, %4\n" | ||
197 | " LNKSETDNE [%2], %0\n" | ||
198 | " BEQ 2f\n" | ||
199 | " DEFR %0, TXSTAT\n" | ||
200 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
201 | " CMPT %0, #HI(0x02000000)\n" | ||
202 | " BNZ 1b\n" | ||
203 | "2:\n" | ||
204 | : "=&d" (temp), "=&d" (result) | ||
205 | : "da" (&v->counter), "bd" (u), "bd" (a) | ||
206 | : "cc"); | ||
207 | |||
208 | smp_mb(); | ||
209 | |||
210 | return result; | ||
211 | } | ||
212 | |||
213 | static inline int atomic_sub_if_positive(int i, atomic_t *v) | ||
214 | { | ||
215 | int result, temp; | ||
216 | |||
217 | asm volatile ( | ||
218 | "1: LNKGETD %1, [%2]\n" | ||
219 | " SUBS %1, %1, %3\n" | ||
220 | " LNKSETDGE [%2], %1\n" | ||
221 | " BLT 2f\n" | ||
222 | " DEFR %0, TXSTAT\n" | ||
223 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
224 | " CMPT %0, #HI(0x02000000)\n" | ||
225 | " BNZ 1b\n" | ||
226 | "2:\n" | ||
227 | : "=&d" (temp), "=&da" (result) | ||
228 | : "da" (&v->counter), "bd" (i) | ||
229 | : "cc"); | ||
230 | |||
231 | return result; | ||
232 | } | ||
233 | |||
234 | #endif /* __ASM_METAG_ATOMIC_LNKGET_H */ | ||
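The LNKGETD/LNKSETD pairs above form load-linked/store-conditional loops: DEFR pulls the LNKSET status out of TXSTAT, the ANDT/CMPT sequence isolates the success code, and BNZ retries on failure. As a sketch of how a caller typically composes the exported atomic_cmpxchg() primitive — the helper name and ceiling semantics below are invented for illustration, not part of this header:

    static inline int atomic_inc_below(atomic_t *v, int ceiling)
    {
            int old, new;

            do {
                    old = atomic_read(v);
                    if (old >= ceiling)
                            return old;     /* ceiling reached, no change */
                    new = old + 1;
                    /* retry if another thread changed v in the meantime */
            } while (atomic_cmpxchg(v, old, new) != old);

            return new;
    }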
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h new file mode 100644 index 000000000000..e578955e674b --- /dev/null +++ b/arch/metag/include/asm/atomic_lock1.h | |||
@@ -0,0 +1,160 @@ | |||
1 | #ifndef __ASM_METAG_ATOMIC_LOCK1_H | ||
2 | #define __ASM_METAG_ATOMIC_LOCK1_H | ||
3 | |||
4 | #define ATOMIC_INIT(i) { (i) } | ||
5 | |||
6 | #include <linux/compiler.h> | ||
7 | |||
8 | #include <asm/barrier.h> | ||
9 | #include <asm/global_lock.h> | ||
10 | |||
11 | static inline int atomic_read(const atomic_t *v) | ||
12 | { | ||
13 | return (v)->counter; | ||
14 | } | ||
15 | |||
16 | /* | ||
17 | * atomic_set needs to take the lock to protect atomic_add_unless from a | ||
18 | * possible race, as it reads the counter twice: | ||
19 | * | ||
20 | * CPU0 CPU1 | ||
21 | * atomic_add_unless(1, 0) | ||
22 | * ret = v->counter (non-zero) | ||
23 | * if (ret != u) v->counter = 0 | ||
24 | * v->counter += 1 (counter set to 1) | ||
25 | * | ||
26 | * Making atomic_set take the lock ensures that ordering and logical | ||
27 | * consistency are preserved. | ||
28 | */ | ||
29 | static inline int atomic_set(atomic_t *v, int i) | ||
30 | { | ||
31 | unsigned long flags; | ||
32 | |||
33 | __global_lock1(flags); | ||
34 | fence(); | ||
35 | v->counter = i; | ||
36 | __global_unlock1(flags); | ||
37 | return i; | ||
38 | } | ||
39 | |||
40 | static inline void atomic_add(int i, atomic_t *v) | ||
41 | { | ||
42 | unsigned long flags; | ||
43 | |||
44 | __global_lock1(flags); | ||
45 | fence(); | ||
46 | v->counter += i; | ||
47 | __global_unlock1(flags); | ||
48 | } | ||
49 | |||
50 | static inline void atomic_sub(int i, atomic_t *v) | ||
51 | { | ||
52 | unsigned long flags; | ||
53 | |||
54 | __global_lock1(flags); | ||
55 | fence(); | ||
56 | v->counter -= i; | ||
57 | __global_unlock1(flags); | ||
58 | } | ||
59 | |||
60 | static inline int atomic_add_return(int i, atomic_t *v) | ||
61 | { | ||
62 | unsigned long result; | ||
63 | unsigned long flags; | ||
64 | |||
65 | __global_lock1(flags); | ||
66 | result = v->counter; | ||
67 | result += i; | ||
68 | fence(); | ||
69 | v->counter = result; | ||
70 | __global_unlock1(flags); | ||
71 | |||
72 | return result; | ||
73 | } | ||
74 | |||
75 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
76 | { | ||
77 | unsigned long result; | ||
78 | unsigned long flags; | ||
79 | |||
80 | __global_lock1(flags); | ||
81 | result = v->counter; | ||
82 | result -= i; | ||
83 | fence(); | ||
84 | v->counter = result; | ||
85 | __global_unlock1(flags); | ||
86 | |||
87 | return result; | ||
88 | } | ||
89 | |||
90 | static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) | ||
91 | { | ||
92 | unsigned long flags; | ||
93 | |||
94 | __global_lock1(flags); | ||
95 | fence(); | ||
96 | v->counter &= ~mask; | ||
97 | __global_unlock1(flags); | ||
98 | } | ||
99 | |||
100 | static inline void atomic_set_mask(unsigned int mask, atomic_t *v) | ||
101 | { | ||
102 | unsigned long flags; | ||
103 | |||
104 | __global_lock1(flags); | ||
105 | fence(); | ||
106 | v->counter |= mask; | ||
107 | __global_unlock1(flags); | ||
108 | } | ||
109 | |||
110 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | ||
111 | { | ||
112 | int ret; | ||
113 | unsigned long flags; | ||
114 | |||
115 | __global_lock1(flags); | ||
116 | ret = v->counter; | ||
117 | if (ret == old) { | ||
118 | fence(); | ||
119 | v->counter = new; | ||
120 | } | ||
121 | __global_unlock1(flags); | ||
122 | |||
123 | return ret; | ||
124 | } | ||
125 | |||
126 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | ||
127 | |||
128 | static inline int __atomic_add_unless(atomic_t *v, int a, int u) | ||
129 | { | ||
130 | int ret; | ||
131 | unsigned long flags; | ||
132 | |||
133 | __global_lock1(flags); | ||
134 | ret = v->counter; | ||
135 | if (ret != u) { | ||
136 | fence(); | ||
137 | v->counter += a; | ||
138 | } | ||
139 | __global_unlock1(flags); | ||
140 | |||
141 | return ret; | ||
142 | } | ||
143 | |||
144 | static inline int atomic_sub_if_positive(int i, atomic_t *v) | ||
145 | { | ||
146 | int ret; | ||
147 | unsigned long flags; | ||
148 | |||
149 | __global_lock1(flags); | ||
150 | ret = v->counter - i; | ||
151 | if (ret >= 0) { | ||
152 | fence(); | ||
153 | v->counter = ret; | ||
154 | } | ||
155 | __global_unlock1(flags); | ||
156 | |||
157 | return ret; | ||
158 | } | ||
159 | |||
160 | #endif /* __ASM_METAG_ATOMIC_LOCK1_H */ | ||
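The comment at the top of this file shows why atomic_set() must take the lock. For a sense of how __atomic_add_unless() is consumed, a sketch of the standard kernel mapping (the wrapper name here is illustrative; the generic definition lives in linux/atomic.h):

    /* Increment v unless it is currently 0; returns non-zero iff v was
     * incremented. __atomic_add_unless() returns the value seen before any
     * update, so comparing against u (here 0) tells us whether it happened. */
    static inline int example_inc_not_zero(atomic_t *v)
    {
            return __atomic_add_unless(v, 1, 0) != 0;
    }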
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h new file mode 100644 index 000000000000..c90bfc6bf648 --- /dev/null +++ b/arch/metag/include/asm/barrier.h | |||
@@ -0,0 +1,85 @@ | |||
1 | #ifndef _ASM_METAG_BARRIER_H | ||
2 | #define _ASM_METAG_BARRIER_H | ||
3 | |||
4 | #include <asm/metag_mem.h> | ||
5 | |||
6 | #define nop() asm volatile ("NOP") | ||
7 | #define mb() wmb() | ||
8 | #define rmb() barrier() | ||
9 | |||
10 | #ifdef CONFIG_METAG_META21 | ||
11 | |||
12 | /* HTP and above have a system event to fence writes */ | ||
13 | static inline void wr_fence(void) | ||
14 | { | ||
15 | volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE; | ||
16 | barrier(); | ||
17 | *flushptr = 0; | ||
18 | } | ||
19 | |||
20 | #else /* CONFIG_METAG_META21 */ | ||
21 | |||
22 | /* | ||
23 | * ATP doesn't have a system event to fence writes, so it is necessary to flush | ||
24 | * the processor write queues as well as possibly the write combiner (depending | ||
25 | * on the page being written). | ||
26 | * To ensure the write queues are flushed we do 4 writes to a system event | ||
27 | * register (in this case write combiner flush) which will also flush the write | ||
28 | * combiner. | ||
29 | */ | ||
30 | static inline void wr_fence(void) | ||
31 | { | ||
32 | volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH; | ||
33 | barrier(); | ||
34 | *flushptr = 0; | ||
35 | *flushptr = 0; | ||
36 | *flushptr = 0; | ||
37 | *flushptr = 0; | ||
38 | } | ||
39 | |||
40 | #endif /* !CONFIG_METAG_META21 */ | ||
41 | |||
42 | static inline void wmb(void) | ||
43 | { | ||
44 | /* flush writes through the write combiner */ | ||
45 | wr_fence(); | ||
46 | } | ||
47 | |||
48 | #define read_barrier_depends() do { } while (0) | ||
49 | |||
50 | #ifndef CONFIG_SMP | ||
51 | #define fence() do { } while (0) | ||
52 | #define smp_mb() barrier() | ||
53 | #define smp_rmb() barrier() | ||
54 | #define smp_wmb() barrier() | ||
55 | #else | ||
56 | |||
57 | #ifdef CONFIG_METAG_SMP_WRITE_REORDERING | ||
58 | /* | ||
59 | * Write to the atomic memory unlock system event register (command 0). This is | ||
60 | * needed before a write to shared memory in a critical section, to prevent | ||
61 | * external reordering of writes before the fence on other threads with writes | ||
62 | * after the fence on this thread (and to prevent the ensuing cache-memory | ||
63 | * incoherence). It is therefore ineffective if issued on the same thread | ||
64 | * after the write it is intended to order. | ||
65 | */ | ||
66 | static inline void fence(void) | ||
67 | { | ||
68 | volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK; | ||
69 | barrier(); | ||
70 | *flushptr = 0; | ||
71 | } | ||
72 | #define smp_mb() fence() | ||
73 | #define smp_rmb() fence() | ||
74 | #define smp_wmb() barrier() | ||
75 | #else | ||
76 | #define fence() do { } while (0) | ||
77 | #define smp_mb() barrier() | ||
78 | #define smp_rmb() barrier() | ||
79 | #define smp_wmb() barrier() | ||
80 | #endif | ||
81 | #endif | ||
82 | #define smp_read_barrier_depends() do { } while (0) | ||
83 | #define set_mb(var, value) do { var = value; smp_mb(); } while (0) | ||
84 | |||
85 | #endif /* _ASM_METAG_BARRIER_H */ | ||
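A sketch of the classic publish/consume pairing these macros support (the globals and flag protocol are illustrative; cpu_relax() comes from asm/processor.h):

    static int payload;
    static int ready;

    static void producer(void)
    {
            payload = 42;
            smp_wmb();              /* order the data write before the flag write */
            ready = 1;
    }

    static void consumer(void)
    {
            while (!ready)
                    cpu_relax();    /* spin until published */
            smp_rmb();              /* order the flag read before the data read */
            /* payload is now guaranteed to read as 42 */
    }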
diff --git a/arch/metag/include/asm/bitops.h b/arch/metag/include/asm/bitops.h new file mode 100644 index 000000000000..c0d0df0d1378 --- /dev/null +++ b/arch/metag/include/asm/bitops.h | |||
@@ -0,0 +1,132 @@ | |||
1 | #ifndef __ASM_METAG_BITOPS_H | ||
2 | #define __ASM_METAG_BITOPS_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <asm/barrier.h> | ||
6 | #include <asm/global_lock.h> | ||
7 | |||
8 | /* | ||
9 | * clear_bit() doesn't provide any barrier for the compiler. | ||
10 | */ | ||
11 | #define smp_mb__before_clear_bit() barrier() | ||
12 | #define smp_mb__after_clear_bit() barrier() | ||
13 | |||
14 | #ifdef CONFIG_SMP | ||
15 | /* | ||
16 | * These functions are the basis of our bit ops. | ||
17 | */ | ||
18 | static inline void set_bit(unsigned int bit, volatile unsigned long *p) | ||
19 | { | ||
20 | unsigned long flags; | ||
21 | unsigned long mask = 1UL << (bit & 31); | ||
22 | |||
23 | p += bit >> 5; | ||
24 | |||
25 | __global_lock1(flags); | ||
26 | fence(); | ||
27 | *p |= mask; | ||
28 | __global_unlock1(flags); | ||
29 | } | ||
30 | |||
31 | static inline void clear_bit(unsigned int bit, volatile unsigned long *p) | ||
32 | { | ||
33 | unsigned long flags; | ||
34 | unsigned long mask = 1UL << (bit & 31); | ||
35 | |||
36 | p += bit >> 5; | ||
37 | |||
38 | __global_lock1(flags); | ||
39 | fence(); | ||
40 | *p &= ~mask; | ||
41 | __global_unlock1(flags); | ||
42 | } | ||
43 | |||
44 | static inline void change_bit(unsigned int bit, volatile unsigned long *p) | ||
45 | { | ||
46 | unsigned long flags; | ||
47 | unsigned long mask = 1UL << (bit & 31); | ||
48 | |||
49 | p += bit >> 5; | ||
50 | |||
51 | __global_lock1(flags); | ||
52 | fence(); | ||
53 | *p ^= mask; | ||
54 | __global_unlock1(flags); | ||
55 | } | ||
56 | |||
57 | static inline int test_and_set_bit(unsigned int bit, volatile unsigned long *p) | ||
58 | { | ||
59 | unsigned long flags; | ||
60 | unsigned long old; | ||
61 | unsigned long mask = 1UL << (bit & 31); | ||
62 | |||
63 | p += bit >> 5; | ||
64 | |||
65 | __global_lock1(flags); | ||
66 | old = *p; | ||
67 | if (!(old & mask)) { | ||
68 | fence(); | ||
69 | *p = old | mask; | ||
70 | } | ||
71 | __global_unlock1(flags); | ||
72 | |||
73 | return (old & mask) != 0; | ||
74 | } | ||
75 | |||
76 | static inline int test_and_clear_bit(unsigned int bit, | ||
77 | volatile unsigned long *p) | ||
78 | { | ||
79 | unsigned long flags; | ||
80 | unsigned long old; | ||
81 | unsigned long mask = 1UL << (bit & 31); | ||
82 | |||
83 | p += bit >> 5; | ||
84 | |||
85 | __global_lock1(flags); | ||
86 | old = *p; | ||
87 | if (old & mask) { | ||
88 | fence(); | ||
89 | *p = old & ~mask; | ||
90 | } | ||
91 | __global_unlock1(flags); | ||
92 | |||
93 | return (old & mask) != 0; | ||
94 | } | ||
95 | |||
96 | static inline int test_and_change_bit(unsigned int bit, | ||
97 | volatile unsigned long *p) | ||
98 | { | ||
99 | unsigned long flags; | ||
100 | unsigned long old; | ||
101 | unsigned long mask = 1UL << (bit & 31); | ||
102 | |||
103 | p += bit >> 5; | ||
104 | |||
105 | __global_lock1(flags); | ||
106 | fence(); | ||
107 | old = *p; | ||
108 | *p = old ^ mask; | ||
109 | __global_unlock1(flags); | ||
110 | |||
111 | return (old & mask) != 0; | ||
112 | } | ||
113 | |||
114 | #else | ||
115 | #include <asm-generic/bitops/atomic.h> | ||
116 | #endif /* CONFIG_SMP */ | ||
117 | |||
118 | #include <asm-generic/bitops/non-atomic.h> | ||
119 | #include <asm-generic/bitops/find.h> | ||
120 | #include <asm-generic/bitops/ffs.h> | ||
121 | #include <asm-generic/bitops/__ffs.h> | ||
122 | #include <asm-generic/bitops/ffz.h> | ||
123 | #include <asm-generic/bitops/fls.h> | ||
124 | #include <asm-generic/bitops/__fls.h> | ||
125 | #include <asm-generic/bitops/fls64.h> | ||
126 | #include <asm-generic/bitops/hweight.h> | ||
127 | #include <asm-generic/bitops/lock.h> | ||
128 | #include <asm-generic/bitops/sched.h> | ||
129 | #include <asm-generic/bitops/le.h> | ||
130 | #include <asm-generic/bitops/ext2-atomic.h> | ||
131 | |||
132 | #endif /* __ASM_METAG_BITOPS_H */ | ||
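These bitops are frequently used as tiny lock-free flags. A sketch (the bit index and flag word are invented for the example) showing the test_and_set_bit()/clear_bit() pattern together with the smp_mb__before_clear_bit() barrier defined above:

    #define EXAMPLE_BUSY_BIT        0
    static unsigned long example_flags;

    static int example_try_claim(void)
    {
            /* returns the old bit value: non-zero means somebody beat us */
            if (test_and_set_bit(EXAMPLE_BUSY_BIT, &example_flags))
                    return 0;       /* already claimed elsewhere */
            return 1;               /* we own it now */
    }

    static void example_release(void)
    {
            smp_mb__before_clear_bit();     /* order critical-section accesses */
            clear_bit(EXAMPLE_BUSY_BIT, &example_flags);
    }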
diff --git a/arch/metag/include/asm/bug.h b/arch/metag/include/asm/bug.h new file mode 100644 index 000000000000..d04b48cefecc --- /dev/null +++ b/arch/metag/include/asm/bug.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _ASM_METAG_BUG_H | ||
2 | #define _ASM_METAG_BUG_H | ||
3 | |||
4 | #include <asm-generic/bug.h> | ||
5 | |||
6 | struct pt_regs; | ||
7 | |||
8 | extern const char *trap_name(int trapno); | ||
9 | extern void die(const char *str, struct pt_regs *regs, long err, | ||
10 | unsigned long addr) __attribute__ ((noreturn)); | ||
11 | |||
12 | #endif | ||
diff --git a/arch/metag/include/asm/cache.h b/arch/metag/include/asm/cache.h new file mode 100644 index 000000000000..a43b650cfdc0 --- /dev/null +++ b/arch/metag/include/asm/cache.h | |||
@@ -0,0 +1,23 @@ | |||
1 | #ifndef __ASM_METAG_CACHE_H | ||
2 | #define __ASM_METAG_CACHE_H | ||
3 | |||
4 | /* L1 cache line size (64 bytes) */ | ||
5 | #define L1_CACHE_SHIFT 6 | ||
6 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | ||
7 | |||
8 | /* Meta requires large data items to be 8 byte aligned. */ | ||
9 | #define ARCH_SLAB_MINALIGN 8 | ||
10 | |||
11 | /* | ||
12 | * With an L2 cache, we may invalidate dirty lines, so we need to ensure DMA | ||
13 | * buffers have cache line alignment. | ||
14 | */ | ||
15 | #ifdef CONFIG_METAG_L2C | ||
16 | #define ARCH_DMA_MINALIGN L1_CACHE_BYTES | ||
17 | #else | ||
18 | #define ARCH_DMA_MINALIGN 8 | ||
19 | #endif | ||
20 | |||
21 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) | ||
22 | |||
23 | #endif | ||
diff --git a/arch/metag/include/asm/cacheflush.h b/arch/metag/include/asm/cacheflush.h new file mode 100644 index 000000000000..7787ec5e3ed0 --- /dev/null +++ b/arch/metag/include/asm/cacheflush.h | |||
@@ -0,0 +1,250 @@ | |||
1 | #ifndef _METAG_CACHEFLUSH_H | ||
2 | #define _METAG_CACHEFLUSH_H | ||
3 | |||
4 | #include <linux/mm.h> | ||
5 | #include <linux/sched.h> | ||
6 | #include <linux/io.h> | ||
7 | |||
8 | #include <asm/l2cache.h> | ||
9 | #include <asm/metag_isa.h> | ||
10 | #include <asm/metag_mem.h> | ||
11 | |||
12 | void metag_cache_probe(void); | ||
13 | |||
14 | void metag_data_cache_flush_all(const void *start); | ||
15 | void metag_code_cache_flush_all(const void *start); | ||
16 | |||
17 | /* | ||
18 | * Routines to flush physical cache lines that may be used to cache data or code | ||
19 | * normally accessed via the linear address range supplied. The region flushed | ||
20 | * must lie entirely in either local or global address space, as determined by | ||
21 | * the top bit of the start address. If bytes is >= 4K then the whole of the | ||
22 | * related cache state will be flushed rather than a limited range. | ||
23 | */ | ||
24 | void metag_data_cache_flush(const void *start, int bytes); | ||
25 | void metag_code_cache_flush(const void *start, int bytes); | ||
26 | |||
27 | #ifdef CONFIG_METAG_META12 | ||
28 | |||
29 | /* Write through, virtually tagged, split I/D cache. */ | ||
30 | |||
31 | static inline void __flush_cache_all(void) | ||
32 | { | ||
33 | metag_code_cache_flush_all((void *) PAGE_OFFSET); | ||
34 | metag_data_cache_flush_all((void *) PAGE_OFFSET); | ||
35 | } | ||
36 | |||
37 | #define flush_cache_all() __flush_cache_all() | ||
38 | |||
39 | /* flush the entire user address space referenced in this mm structure */ | ||
40 | static inline void flush_cache_mm(struct mm_struct *mm) | ||
41 | { | ||
42 | if (mm == current->mm) | ||
43 | __flush_cache_all(); | ||
44 | } | ||
45 | |||
46 | #define flush_cache_dup_mm(mm) flush_cache_mm(mm) | ||
47 | |||
48 | /* flush a range of addresses from this mm */ | ||
49 | static inline void flush_cache_range(struct vm_area_struct *vma, | ||
50 | unsigned long start, unsigned long end) | ||
51 | { | ||
52 | flush_cache_mm(vma->vm_mm); | ||
53 | } | ||
54 | |||
55 | static inline void flush_cache_page(struct vm_area_struct *vma, | ||
56 | unsigned long vmaddr, unsigned long pfn) | ||
57 | { | ||
58 | flush_cache_mm(vma->vm_mm); | ||
59 | } | ||
60 | |||
61 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 | ||
62 | static inline void flush_dcache_page(struct page *page) | ||
63 | { | ||
64 | metag_data_cache_flush_all((void *) PAGE_OFFSET); | ||
65 | } | ||
66 | |||
67 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | ||
68 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | ||
69 | |||
70 | static inline void flush_icache_page(struct vm_area_struct *vma, | ||
71 | struct page *page) | ||
72 | { | ||
73 | metag_code_cache_flush(page_to_virt(page), PAGE_SIZE); | ||
74 | } | ||
75 | |||
76 | static inline void flush_cache_vmap(unsigned long start, unsigned long end) | ||
77 | { | ||
78 | metag_data_cache_flush_all((void *) PAGE_OFFSET); | ||
79 | } | ||
80 | |||
81 | static inline void flush_cache_vunmap(unsigned long start, unsigned long end) | ||
82 | { | ||
83 | metag_data_cache_flush_all((void *) PAGE_OFFSET); | ||
84 | } | ||
85 | |||
86 | #else | ||
87 | |||
88 | /* Write through, physically tagged, split I/D cache. */ | ||
89 | |||
90 | #define flush_cache_all() do { } while (0) | ||
91 | #define flush_cache_mm(mm) do { } while (0) | ||
92 | #define flush_cache_dup_mm(mm) do { } while (0) | ||
93 | #define flush_cache_range(vma, start, end) do { } while (0) | ||
94 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | ||
95 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | ||
96 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | ||
97 | #define flush_icache_page(vma, pg) do { } while (0) | ||
98 | #define flush_cache_vmap(start, end) do { } while (0) | ||
99 | #define flush_cache_vunmap(start, end) do { } while (0) | ||
100 | |||
101 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 | ||
102 | static inline void flush_dcache_page(struct page *page) | ||
103 | { | ||
104 | /* FIXME: We can do better than this. All we are trying to do is | ||
105 | * make the i-cache coherent; we should use the PG_arch_1 bit like | ||
106 | * e.g. powerpc. | ||
107 | */ | ||
108 | #ifdef CONFIG_SMP | ||
109 | metag_out32(1, SYSC_ICACHE_FLUSH); | ||
110 | #else | ||
111 | metag_code_cache_flush_all((void *) PAGE_OFFSET); | ||
112 | #endif | ||
113 | } | ||
114 | |||
115 | #endif | ||
116 | |||
117 | /* Flush the instruction cache over a kernel virtual address range */ | ||
118 | static inline void flush_icache_range(unsigned long address, | ||
119 | unsigned long endaddr) | ||
120 | { | ||
121 | #ifdef CONFIG_SMP | ||
122 | metag_out32(1, SYSC_ICACHE_FLUSH); | ||
123 | #else | ||
124 | metag_code_cache_flush((void *) address, endaddr - address); | ||
125 | #endif | ||
126 | } | ||
127 | |||
128 | static inline void flush_cache_sigtramp(unsigned long addr, int size) | ||
129 | { | ||
130 | /* | ||
131 | * Flush the icache in case there was previously some code | ||
132 | * fetched from this address, perhaps a previous sigtramp. | ||
133 | * | ||
134 | * We don't need to flush the dcache, it's write through and | ||
135 | * we just wrote the sigtramp code through it. | ||
136 | */ | ||
137 | #ifdef CONFIG_SMP | ||
138 | metag_out32(1, SYSC_ICACHE_FLUSH); | ||
139 | #else | ||
140 | metag_code_cache_flush((void *) addr, size); | ||
141 | #endif | ||
142 | } | ||
143 | |||
144 | #ifdef CONFIG_METAG_L2C | ||
145 | |||
146 | /* | ||
147 | * Perform a single specific CACHEWD operation on an address, masking lower bits | ||
148 | * of address first. | ||
149 | */ | ||
150 | static inline void cachewd_line(void *addr, unsigned int data) | ||
151 | { | ||
152 | unsigned long masked = (unsigned long)addr & -0x40; | ||
153 | __builtin_meta2_cachewd((void *)masked, data); | ||
154 | } | ||
155 | |||
156 | /* Perform a certain CACHEW op on each cache line in a range */ | ||
157 | static inline void cachew_region_op(void *start, unsigned long size, | ||
158 | unsigned int op) | ||
159 | { | ||
160 | unsigned long offset = (unsigned long)start & 0x3f; | ||
161 | int i; | ||
162 | if (offset) { | ||
163 | size += offset; | ||
164 | start -= offset; | ||
165 | } | ||
166 | i = (size - 1) >> 6; | ||
167 | do { | ||
168 | __builtin_meta2_cachewd(start, op); | ||
169 | start += 0x40; | ||
170 | } while (i--); | ||
171 | } | ||
172 | |||
173 | /* prevent write fence and flushbacks being reordered in L2 */ | ||
174 | static inline void l2c_fence_flush(void *addr) | ||
175 | { | ||
176 | /* | ||
177 | * Synchronise by reading back and re-flushing. | ||
178 | * It is assumed this access will miss, as the caller should have just | ||
179 | * flushed the cache line. | ||
180 | */ | ||
181 | (void)*(volatile u8 *)addr; | ||
182 | cachewd_line(addr, CACHEW_FLUSH_L1D_L2); | ||
183 | } | ||
184 | |||
185 | /* prevent write fence and writebacks being reordered in L2 */ | ||
186 | static inline void l2c_fence(void *addr) | ||
187 | { | ||
188 | /* | ||
189 | * A write back has occurred, but not necessarily an invalidate, so the | ||
190 | * readback in l2c_fence_flush() would hit in the cache and have no | ||
191 | * effect. Therefore fully flush the line first. | ||
192 | */ | ||
193 | cachewd_line(addr, CACHEW_FLUSH_L1D_L2); | ||
194 | l2c_fence_flush(addr); | ||
195 | } | ||
196 | |||
197 | /* Used to keep memory consistent when doing DMA. */ | ||
198 | static inline void flush_dcache_region(void *start, unsigned long size) | ||
199 | { | ||
200 | /* metag_data_cache_flush won't flush L2 cache lines if size >= 4096 */ | ||
201 | if (meta_l2c_is_enabled()) { | ||
202 | cachew_region_op(start, size, CACHEW_FLUSH_L1D_L2); | ||
203 | if (meta_l2c_is_writeback()) | ||
204 | l2c_fence_flush(start + size - 1); | ||
205 | } else { | ||
206 | metag_data_cache_flush(start, size); | ||
207 | } | ||
208 | } | ||
209 | |||
210 | /* Write back dirty lines to memory (or do nothing if no writeback caches) */ | ||
211 | static inline void writeback_dcache_region(void *start, unsigned long size) | ||
212 | { | ||
213 | if (meta_l2c_is_enabled() && meta_l2c_is_writeback()) { | ||
214 | cachew_region_op(start, size, CACHEW_WRITEBACK_L1D_L2); | ||
215 | l2c_fence(start + size - 1); | ||
216 | } | ||
217 | } | ||
218 | |||
219 | /* Invalidate (may also write back if necessary) */ | ||
220 | static inline void invalidate_dcache_region(void *start, unsigned long size) | ||
221 | { | ||
222 | if (meta_l2c_is_enabled()) | ||
223 | cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2); | ||
224 | else | ||
225 | metag_data_cache_flush(start, size); | ||
226 | } | ||
227 | #else | ||
228 | #define flush_dcache_region(s, l) metag_data_cache_flush((s), (l)) | ||
229 | #define writeback_dcache_region(s, l) do {} while (0) | ||
230 | #define invalidate_dcache_region(s, l) flush_dcache_region((s), (l)) | ||
231 | #endif | ||
232 | |||
233 | static inline void copy_to_user_page(struct vm_area_struct *vma, | ||
234 | struct page *page, unsigned long vaddr, | ||
235 | void *dst, const void *src, | ||
236 | unsigned long len) | ||
237 | { | ||
238 | memcpy(dst, src, len); | ||
239 | flush_icache_range((unsigned long)dst, (unsigned long)dst + len); | ||
240 | } | ||
241 | |||
242 | static inline void copy_from_user_page(struct vm_area_struct *vma, | ||
243 | struct page *page, unsigned long vaddr, | ||
244 | void *dst, const void *src, | ||
245 | unsigned long len) | ||
246 | { | ||
247 | memcpy(dst, src, len); | ||
248 | } | ||
249 | |||
250 | #endif /* _METAG_CACHEFLUSH_H */ | ||
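A sketch of how the three DMA maintenance helpers at the end of this header are meant to be used around a transfer (the device start/completion calls are hypothetical placeholders):

    static void example_dma_out(void *buf, unsigned long len)
    {
            /* CPU filled buf; push dirty lines so the device sees them */
            writeback_dcache_region(buf, len);
            /* start_device_read(buf, len);  -- hypothetical */
    }

    static void example_dma_in(void *buf, unsigned long len)
    {
            /* drop any CPU-cached copies before the device writes buf */
            invalidate_dcache_region(buf, len);
            /* start_device_write(buf, len); wait_for_device();  -- hypothetical */
            /* CPU reads of buf now come from memory, not stale cache lines */
    }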
diff --git a/arch/metag/include/asm/cachepart.h b/arch/metag/include/asm/cachepart.h new file mode 100644 index 000000000000..cf6b44e916b5 --- /dev/null +++ b/arch/metag/include/asm/cachepart.h | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * Meta cache partition manipulation. | ||
3 | * | ||
4 | * Copyright 2010 Imagination Technologies Ltd. | ||
5 | */ | ||
6 | |||
7 | #ifndef _METAG_CACHEPART_H_ | ||
8 | #define _METAG_CACHEPART_H_ | ||
9 | |||
10 | /** | ||
11 | * get_dcache_size() - Get size of data cache. | ||
12 | */ | ||
13 | unsigned int get_dcache_size(void); | ||
14 | |||
15 | /** | ||
16 | * get_icache_size() - Get size of code cache. | ||
17 | */ | ||
18 | unsigned int get_icache_size(void); | ||
19 | |||
20 | /** | ||
21 | * get_global_dcache_size() - Get the thread's global dcache. | ||
22 | * | ||
23 | * Returns the size of the current thread's global dcache partition. | ||
24 | */ | ||
25 | unsigned int get_global_dcache_size(void); | ||
26 | |||
27 | /** | ||
28 | * get_global_icache_size() - Get the thread's global icache. | ||
29 | * | ||
30 | * Returns the size of the current thread's global icache partition. | ||
31 | */ | ||
32 | unsigned int get_global_icache_size(void); | ||
33 | |||
34 | /** | ||
35 | * check_for_cache_aliasing() - Ensure that the bootloader has configured the | ||
36 | * dcache and icache properly to avoid aliasing | ||
37 | * @thread_id: Hardware thread ID | ||
38 | * | ||
39 | */ | ||
40 | void check_for_cache_aliasing(int thread_id); | ||
41 | |||
42 | #endif | ||
diff --git a/arch/metag/include/asm/checksum.h b/arch/metag/include/asm/checksum.h new file mode 100644 index 000000000000..999bf761a732 --- /dev/null +++ b/arch/metag/include/asm/checksum.h | |||
@@ -0,0 +1,92 @@ | |||
1 | #ifndef _METAG_CHECKSUM_H | ||
2 | #define _METAG_CHECKSUM_H | ||
3 | |||
4 | /* | ||
5 | * computes the checksum of a memory block at buff, length len, | ||
6 | * and adds in "sum" (32-bit) | ||
7 | * | ||
8 | * returns a 32-bit number suitable for feeding into itself | ||
9 | * or csum_tcpudp_magic | ||
10 | * | ||
11 | * this function must be called with even lengths, except | ||
12 | * for the last fragment, which may be odd | ||
13 | * | ||
14 | * it's best to have buff aligned on a 32-bit boundary | ||
15 | */ | ||
16 | extern __wsum csum_partial(const void *buff, int len, __wsum sum); | ||
17 | |||
18 | /* | ||
19 | * the same as csum_partial, but copies from src while it | ||
20 | * checksums | ||
21 | * | ||
22 | * here it is even more important to align src and dst on a 32-bit (or even | ||
23 | * better 64-bit) boundary | ||
24 | */ | ||
25 | extern __wsum csum_partial_copy(const void *src, void *dst, int len, | ||
26 | __wsum sum); | ||
27 | |||
28 | /* | ||
29 | * the same as csum_partial_copy, but copies from user space. | ||
30 | * | ||
31 | * here it is even more important to align src and dst on a 32-bit (or even | ||
32 | * better 64-bit) boundary | ||
33 | */ | ||
34 | extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, | ||
35 | int len, __wsum sum, int *csum_err); | ||
36 | |||
37 | #define csum_partial_copy_nocheck(src, dst, len, sum) \ | ||
38 | csum_partial_copy((src), (dst), (len), (sum)) | ||
39 | |||
40 | /* | ||
41 | * Fold a partial checksum | ||
42 | */ | ||
43 | static inline __sum16 csum_fold(__wsum csum) | ||
44 | { | ||
45 | u32 sum = (__force u32)csum; | ||
46 | sum = (sum & 0xffff) + (sum >> 16); | ||
47 | sum = (sum & 0xffff) + (sum >> 16); | ||
48 | return (__force __sum16)~sum; | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * This is a version of ip_compute_csum() optimized for IP headers, | ||
53 | * which always checksum on 4 octet boundaries. | ||
54 | */ | ||
55 | extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); | ||
56 | |||
57 | /* | ||
58 | * computes the checksum of the TCP/UDP pseudo-header | ||
59 | * returns a 16-bit checksum, already complemented | ||
60 | */ | ||
61 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | ||
62 | unsigned short len, | ||
63 | unsigned short proto, | ||
64 | __wsum sum) | ||
65 | { | ||
66 | unsigned long len_proto = (proto + len) << 8; | ||
67 | asm ("ADD %0, %0, %1\n" | ||
68 | "ADDS %0, %0, %2\n" | ||
69 | "ADDCS %0, %0, #1\n" | ||
70 | "ADDS %0, %0, %3\n" | ||
71 | "ADDCS %0, %0, #1\n" | ||
72 | : "=d" (sum) | ||
73 | : "d" (daddr), "d" (saddr), "d" (len_proto), | ||
74 | "0" (sum) | ||
75 | : "cc"); | ||
76 | return sum; | ||
77 | } | ||
78 | |||
79 | static inline __sum16 | ||
80 | csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, | ||
81 | unsigned short proto, __wsum sum) | ||
82 | { | ||
83 | return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * this routine is used for miscellaneous IP-like checksums, mainly | ||
88 | * in icmp.c | ||
89 | */ | ||
90 | extern __sum16 ip_compute_csum(const void *buff, int len); | ||
91 | |||
92 | #endif /* _METAG_CHECKSUM_H */ | ||
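A sketch of how these pieces compose for a real checksum: csum_partial() over the header and payload, then csum_tcpudp_magic() to fold in the pseudo-header. The function name and arguments are illustrative; IPPROTO_UDP comes from <linux/in.h>.

    static __sum16 example_udp_csum(__be32 saddr, __be32 daddr,
                                    const void *udph, unsigned short len)
    {
            /* 32-bit running sum over the UDP header + payload */
            __wsum csum = csum_partial(udph, len, 0);

            /* add the pseudo-header and fold to a complemented 16-bit sum */
            return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, csum);
    }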
diff --git a/arch/metag/include/asm/clock.h b/arch/metag/include/asm/clock.h new file mode 100644 index 000000000000..3e2915a280c7 --- /dev/null +++ b/arch/metag/include/asm/clock.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * arch/metag/include/asm/clock.h | ||
3 | * | ||
4 | * Copyright (C) 2012 Imagination Technologies Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifndef _METAG_CLOCK_H_ | ||
12 | #define _METAG_CLOCK_H_ | ||
13 | |||
14 | #include <asm/mach/arch.h> | ||
15 | |||
16 | /** | ||
17 | * struct meta_clock_desc - Meta Core clock callbacks. | ||
18 | * @get_core_freq: Get the frequency of the Meta core. If this is NULL, the | ||
19 | * core frequency will be determined like this: | ||
20 | * Meta 1: based on loops_per_jiffy. | ||
21 | * Meta 2: (EXPAND_TIMER_DIV + 1) MHz. | ||
22 | */ | ||
23 | struct meta_clock_desc { | ||
24 | unsigned long (*get_core_freq)(void); | ||
25 | }; | ||
26 | |||
27 | extern struct meta_clock_desc _meta_clock; | ||
28 | |||
29 | /* | ||
30 | * Set up the default clock, ensuring all callbacks are valid - only accessible | ||
31 | * during boot. | ||
32 | */ | ||
33 | void setup_meta_clocks(struct meta_clock_desc *desc); | ||
34 | |||
35 | /** | ||
36 | * get_coreclock() - Get the frequency of the Meta core clock. | ||
37 | * | ||
38 | * Returns: The Meta core clock frequency in Hz. | ||
39 | */ | ||
40 | static inline unsigned long get_coreclock(void) | ||
41 | { | ||
42 | /* | ||
43 | * Use the current clock callback. If set correctly, this will provide | ||
44 | * the most accurate frequency, as it can be calculated directly from the | ||
45 | * PLL configuration. Otherwise a default callback will have been set | ||
46 | * instead. | ||
47 | */ | ||
48 | return _meta_clock.get_core_freq(); | ||
49 | } | ||
50 | |||
51 | #endif /* _METAG_CLOCK_H_ */ | ||
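A sketch of the registration side, as a machine/SoC file might provide it during early boot (the board name and fixed rate are invented for the example; a real callback would derive the rate from the PLL registers):

    static unsigned long example_board_core_freq(void)
    {
            return 400000000;       /* fixed 400 MHz for illustration */
    }

    static struct meta_clock_desc example_board_clock = {
            .get_core_freq = example_board_core_freq,
    };

    static void __init example_board_setup_clocks(void)
    {
            setup_meta_clocks(&example_board_clock);
            /* get_coreclock() now returns 400000000 */
    }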
diff --git a/arch/metag/include/asm/cmpxchg.h b/arch/metag/include/asm/cmpxchg.h new file mode 100644 index 000000000000..b1bc1be8540f --- /dev/null +++ b/arch/metag/include/asm/cmpxchg.h | |||
@@ -0,0 +1,65 @@ | |||
1 | #ifndef __ASM_METAG_CMPXCHG_H | ||
2 | #define __ASM_METAG_CMPXCHG_H | ||
3 | |||
4 | #include <asm/barrier.h> | ||
5 | |||
6 | #if defined(CONFIG_METAG_ATOMICITY_IRQSOFF) | ||
7 | #include <asm/cmpxchg_irq.h> | ||
8 | #elif defined(CONFIG_METAG_ATOMICITY_LOCK1) | ||
9 | #include <asm/cmpxchg_lock1.h> | ||
10 | #elif defined(CONFIG_METAG_ATOMICITY_LNKGET) | ||
11 | #include <asm/cmpxchg_lnkget.h> | ||
12 | #endif | ||
13 | |||
14 | extern void __xchg_called_with_bad_pointer(void); | ||
15 | |||
16 | #define __xchg(ptr, x, size) \ | ||
17 | ({ \ | ||
18 | unsigned long __xchg__res; \ | ||
19 | volatile void *__xchg_ptr = (ptr); \ | ||
20 | switch (size) { \ | ||
21 | case 4: \ | ||
22 | __xchg__res = xchg_u32(__xchg_ptr, x); \ | ||
23 | break; \ | ||
24 | case 1: \ | ||
25 | __xchg__res = xchg_u8(__xchg_ptr, x); \ | ||
26 | break; \ | ||
27 | default: \ | ||
28 | __xchg_called_with_bad_pointer(); \ | ||
29 | __xchg__res = x; \ | ||
30 | break; \ | ||
31 | } \ | ||
32 | \ | ||
33 | __xchg__res; \ | ||
34 | }) | ||
35 | |||
36 | #define xchg(ptr, x) \ | ||
37 | ((__typeof__(*(ptr)))__xchg((ptr), (unsigned long)(x), sizeof(*(ptr)))) | ||
38 | |||
39 | /* This function doesn't exist, so you'll get a linker error | ||
40 | * if something tries to do an invalid cmpxchg(). */ | ||
41 | extern void __cmpxchg_called_with_bad_pointer(void); | ||
42 | |||
43 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | ||
44 | unsigned long new, int size) | ||
45 | { | ||
46 | switch (size) { | ||
47 | case 4: | ||
48 | return __cmpxchg_u32(ptr, old, new); | ||
49 | } | ||
50 | __cmpxchg_called_with_bad_pointer(); | ||
51 | return old; | ||
52 | } | ||
53 | |||
54 | #define __HAVE_ARCH_CMPXCHG 1 | ||
55 | |||
56 | #define cmpxchg(ptr, o, n) \ | ||
57 | ({ \ | ||
58 | __typeof__(*(ptr)) _o_ = (o); \ | ||
59 | __typeof__(*(ptr)) _n_ = (n); \ | ||
60 | (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ | ||
61 | (unsigned long)_n_, \ | ||
62 | sizeof(*(ptr))); \ | ||
63 | }) | ||
64 | |||
65 | #endif /* __ASM_METAG_CMPXCHG_H */ | ||
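The type-generic cmpxchg() above dispatches on operand size and returns the value observed before any store, so equality with the expected value signals success. A sketch of the common run-once idiom built on it (the state variable is illustrative):

    static unsigned int example_initialised;

    static void example_init_once(void)
    {
            /* only the caller that wins the 0 -> 1 transition does the setup */
            if (cmpxchg(&example_initialised, 0, 1) == 0) {
                    /* one-time initialisation goes here */
            }
    }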
diff --git a/arch/metag/include/asm/cmpxchg_irq.h b/arch/metag/include/asm/cmpxchg_irq.h new file mode 100644 index 000000000000..649573168b05 --- /dev/null +++ b/arch/metag/include/asm/cmpxchg_irq.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef __ASM_METAG_CMPXCHG_IRQ_H | ||
2 | #define __ASM_METAG_CMPXCHG_IRQ_H | ||
3 | |||
4 | #include <linux/irqflags.h> | ||
5 | |||
6 | static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) | ||
7 | { | ||
8 | unsigned long flags, retval; | ||
9 | |||
10 | local_irq_save(flags); | ||
11 | retval = *m; | ||
12 | *m = val; | ||
13 | local_irq_restore(flags); | ||
14 | return retval; | ||
15 | } | ||
16 | |||
17 | static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) | ||
18 | { | ||
19 | unsigned long flags, retval; | ||
20 | |||
21 | local_irq_save(flags); | ||
22 | retval = *m; | ||
23 | *m = val & 0xff; | ||
24 | local_irq_restore(flags); | ||
25 | return retval; | ||
26 | } | ||
27 | |||
28 | static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, | ||
29 | unsigned long new) | ||
30 | { | ||
31 | __u32 retval; | ||
32 | unsigned long flags; | ||
33 | |||
34 | local_irq_save(flags); | ||
35 | retval = *m; | ||
36 | if (retval == old) | ||
37 | *m = new; | ||
38 | local_irq_restore(flags); /* implies memory barrier */ | ||
39 | return retval; | ||
40 | } | ||
41 | |||
42 | #endif /* __ASM_METAG_CMPXCHG_IRQ_H */ | ||
diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h new file mode 100644 index 000000000000..0154e2807ebb --- /dev/null +++ b/arch/metag/include/asm/cmpxchg_lnkget.h | |||
@@ -0,0 +1,86 @@ | |||
1 | #ifndef __ASM_METAG_CMPXCHG_LNKGET_H | ||
2 | #define __ASM_METAG_CMPXCHG_LNKGET_H | ||
3 | |||
4 | static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) | ||
5 | { | ||
6 | int temp, old; | ||
7 | |||
8 | smp_mb(); | ||
9 | |||
10 | asm volatile ( | ||
11 | "1: LNKGETD %1, [%2]\n" | ||
12 | " LNKSETD [%2], %3\n" | ||
13 | " DEFR %0, TXSTAT\n" | ||
14 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
15 | " CMPT %0, #HI(0x02000000)\n" | ||
16 | " BNZ 1b\n" | ||
17 | #ifdef CONFIG_METAG_LNKGET_AROUND_CACHE | ||
18 | " DCACHE [%2], %0\n" | ||
19 | #endif | ||
20 | : "=&d" (temp), "=&d" (old) | ||
21 | : "da" (m), "da" (val) | ||
22 | : "cc" | ||
23 | ); | ||
24 | |||
25 | smp_mb(); | ||
26 | |||
27 | return old; | ||
28 | } | ||
29 | |||
30 | static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) | ||
31 | { | ||
32 | int temp, old; | ||
33 | |||
34 | smp_mb(); | ||
35 | |||
36 | asm volatile ( | ||
37 | "1: LNKGETD %1, [%2]\n" | ||
38 | " LNKSETD [%2], %3\n" | ||
39 | " DEFR %0, TXSTAT\n" | ||
40 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
41 | " CMPT %0, #HI(0x02000000)\n" | ||
42 | " BNZ 1b\n" | ||
43 | #ifdef CONFIG_METAG_LNKGET_AROUND_CACHE | ||
44 | " DCACHE [%2], %0\n" | ||
45 | #endif | ||
46 | : "=&d" (temp), "=&d" (old) | ||
47 | : "da" (m), "da" (val & 0xff) | ||
48 | : "cc" | ||
49 | ); | ||
50 | |||
51 | smp_mb(); | ||
52 | |||
53 | return old; | ||
54 | } | ||
55 | |||
56 | static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, | ||
57 | unsigned long new) | ||
58 | { | ||
59 | __u32 retval, temp; | ||
60 | |||
61 | smp_mb(); | ||
62 | |||
63 | asm volatile ( | ||
64 | "1: LNKGETD %1, [%2]\n" | ||
65 | " CMP %1, %3\n" | ||
66 | " LNKSETDEQ [%2], %4\n" | ||
67 | " BNE 2f\n" | ||
68 | " DEFR %0, TXSTAT\n" | ||
69 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
70 | " CMPT %0, #HI(0x02000000)\n" | ||
71 | " BNZ 1b\n" | ||
72 | #ifdef CONFIG_METAG_LNKGET_AROUND_CACHE | ||
73 | " DCACHE [%2], %0\n" | ||
74 | #endif | ||
75 | "2:\n" | ||
76 | : "=&d" (temp), "=&da" (retval) | ||
77 | : "da" (m), "bd" (old), "da" (new) | ||
78 | : "cc" | ||
79 | ); | ||
80 | |||
81 | smp_mb(); | ||
82 | |||
83 | return retval; | ||
84 | } | ||
85 | |||
86 | #endif /* __ASM_METAG_CMPXCHG_LNKGET_H */ | ||
diff --git a/arch/metag/include/asm/cmpxchg_lock1.h b/arch/metag/include/asm/cmpxchg_lock1.h new file mode 100644 index 000000000000..fd6850474969 --- /dev/null +++ b/arch/metag/include/asm/cmpxchg_lock1.h | |||
@@ -0,0 +1,48 @@ | |||
1 | #ifndef __ASM_METAG_CMPXCHG_LOCK1_H | ||
2 | #define __ASM_METAG_CMPXCHG_LOCK1_H | ||
3 | |||
4 | #include <asm/global_lock.h> | ||
5 | |||
6 | /* Use LOCK2 as these have to be atomic w.r.t. ordinary accesses. */ | ||
7 | |||
8 | static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) | ||
9 | { | ||
10 | unsigned long flags, retval; | ||
11 | |||
12 | __global_lock2(flags); | ||
13 | fence(); | ||
14 | retval = *m; | ||
15 | *m = val; | ||
16 | __global_unlock2(flags); | ||
17 | return retval; | ||
18 | } | ||
19 | |||
20 | static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) | ||
21 | { | ||
22 | unsigned long flags, retval; | ||
23 | |||
24 | __global_lock2(flags); | ||
25 | fence(); | ||
26 | retval = *m; | ||
27 | *m = val & 0xff; | ||
28 | __global_unlock2(flags); | ||
29 | return retval; | ||
30 | } | ||
31 | |||
32 | static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, | ||
33 | unsigned long new) | ||
34 | { | ||
35 | __u32 retval; | ||
36 | unsigned long flags; | ||
37 | |||
38 | __global_lock2(flags); | ||
39 | retval = *m; | ||
40 | if (retval == old) { | ||
41 | fence(); | ||
42 | *m = new; | ||
43 | } | ||
44 | __global_unlock2(flags); | ||
45 | return retval; | ||
46 | } | ||
47 | |||
48 | #endif /* __ASM_METAG_CMPXCHG_LOCK1_H */ | ||
diff --git a/arch/metag/include/asm/core_reg.h b/arch/metag/include/asm/core_reg.h new file mode 100644 index 000000000000..bdbc3a51f31c --- /dev/null +++ b/arch/metag/include/asm/core_reg.h | |||
@@ -0,0 +1,35 @@ | |||
1 | #ifndef __ASM_METAG_CORE_REG_H_ | ||
2 | #define __ASM_METAG_CORE_REG_H_ | ||
3 | |||
4 | #include <asm/metag_regs.h> | ||
5 | |||
6 | extern void core_reg_write(int unit, int reg, int thread, unsigned int val); | ||
7 | extern unsigned int core_reg_read(int unit, int reg, int thread); | ||
8 | |||
9 | /* | ||
10 | * These macros allow direct access from C to any register known to the | ||
11 | * assembler. Example candidates are TXTACTCYC, TXIDLECYC, and TXPRIVEXT. | ||
12 | */ | ||
13 | |||
14 | #define __core_reg_get(reg) ({ \ | ||
15 | unsigned int __grvalue; \ | ||
16 | asm volatile("MOV %0," #reg \ | ||
17 | : "=r" (__grvalue)); \ | ||
18 | __grvalue; \ | ||
19 | }) | ||
20 | |||
21 | #define __core_reg_set(reg, value) do { \ | ||
22 | unsigned int __srvalue = (value); \ | ||
23 | asm volatile("MOV " #reg ",%0" \ | ||
24 | : \ | ||
25 | : "r" (__srvalue)); \ | ||
26 | } while (0) | ||
27 | |||
28 | #define __core_reg_swap(reg, value) do { \ | ||
29 | unsigned int __srvalue = (value); \ | ||
30 | asm volatile("SWAP " #reg ",%0" \ | ||
31 | : "+r" (__srvalue)); \ | ||
32 | (value) = __srvalue; \ | ||
33 | } while (0) | ||
34 | |||
35 | #endif | ||
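A sketch using __core_reg_get() with TXTACTCYC, one of the candidate registers named in the comment above, to count active cycles across a call (the helper name is invented; counter wrap-around is ignored):

    static unsigned int example_cycles(void (*fn)(void))
    {
            unsigned int before, after;

            before = __core_reg_get(TXTACTCYC);
            fn();
            after = __core_reg_get(TXTACTCYC);

            return after - before;  /* active cycles spent inside fn() */
    }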
diff --git a/arch/metag/include/asm/cpu.h b/arch/metag/include/asm/cpu.h new file mode 100644 index 000000000000..decf12969268 --- /dev/null +++ b/arch/metag/include/asm/cpu.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef _ASM_METAG_CPU_H | ||
2 | #define _ASM_METAG_CPU_H | ||
3 | |||
4 | #include <linux/percpu.h> | ||
5 | |||
6 | struct cpuinfo_metag { | ||
7 | struct cpu cpu; | ||
8 | #ifdef CONFIG_SMP | ||
9 | unsigned long loops_per_jiffy; | ||
10 | #endif | ||
11 | }; | ||
12 | |||
13 | DECLARE_PER_CPU(struct cpuinfo_metag, cpu_data); | ||
14 | #endif /* _ASM_METAG_CPU_H */ | ||
diff --git a/arch/metag/include/asm/da.h b/arch/metag/include/asm/da.h new file mode 100644 index 000000000000..81bd5212fb03 --- /dev/null +++ b/arch/metag/include/asm/da.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * Meta DA JTAG debugger control. | ||
3 | * | ||
4 | * Copyright 2012 Imagination Technologies Ltd. | ||
5 | */ | ||
6 | |||
7 | #ifndef _METAG_DA_H_ | ||
8 | #define _METAG_DA_H_ | ||
9 | |||
10 | #ifdef CONFIG_METAG_DA | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/types.h> | ||
14 | |||
15 | extern bool _metag_da_present; | ||
16 | |||
17 | /** | ||
18 | * metag_da_enabled() - Find whether a DA is currently enabled. | ||
19 | * | ||
20 | * Returns: true if a DA was detected, false if not. | ||
21 | */ | ||
22 | static inline bool metag_da_enabled(void) | ||
23 | { | ||
24 | return _metag_da_present; | ||
25 | } | ||
26 | |||
27 | /** | ||
28 | * metag_da_probe() - Try and detect a connected DA. | ||
29 | * | ||
30 | * This is used at start up to detect whether a DA is active. | ||
31 | * | ||
32 | * Returns: 0 on detection, -err otherwise. | ||
33 | */ | ||
34 | int __init metag_da_probe(void); | ||
35 | |||
36 | #else /* !CONFIG_METAG_DA */ | ||
37 | |||
38 | #define metag_da_enabled() false | ||
39 | #define metag_da_probe() (0) | ||
40 | |||
41 | #endif | ||
42 | |||
43 | #endif /* _METAG_DA_H_ */ | ||
diff --git a/arch/metag/include/asm/delay.h b/arch/metag/include/asm/delay.h new file mode 100644 index 000000000000..9c92f996957a --- /dev/null +++ b/arch/metag/include/asm/delay.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef _METAG_DELAY_H | ||
2 | #define _METAG_DELAY_H | ||
3 | |||
4 | /* | ||
5 | * Copyright (C) 1993 Linus Torvalds | ||
6 | * | ||
7 | * Delay routines calling functions in arch/metag/lib/delay.c | ||
8 | */ | ||
9 | |||
10 | /* Undefined functions to get compile-time errors */ | ||
11 | extern void __bad_udelay(void); | ||
12 | extern void __bad_ndelay(void); | ||
13 | |||
14 | extern void __udelay(unsigned long usecs); | ||
15 | extern void __ndelay(unsigned long nsecs); | ||
16 | extern void __const_udelay(unsigned long xloops); | ||
17 | extern void __delay(unsigned long loops); | ||
18 | |||
19 | /* 0x10c7 is 2**32 / 1000000 (rounded up) */ | ||
20 | #define udelay(n) (__builtin_constant_p(n) ? \ | ||
21 | ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ | ||
22 | __udelay(n)) | ||
23 | |||
24 | /* 0x5 is 2**32 / 1000000000 (rounded up) */ | ||
25 | #define ndelay(n) (__builtin_constant_p(n) ? \ | ||
26 | ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ | ||
27 | __ndelay(n)) | ||
28 | |||
29 | #endif /* _METAG_DELAY_H */ | ||
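For constant arguments the macros fold the unit conversion into a single multiply at compile time; a sketch of what a call site amounts to (2^32 / 10^6 = 4294.97, rounded up to 4295 = 0x10c7):

    static void example_pause(void)
    {
            udelay(10);     /* constant: __const_udelay(10 * 0x10c7) */
            ndelay(300);    /* constant: __const_udelay(300 * 5) */

            /* the multiplier turns the count into a 32-bit binary fraction
             * of a second, which the delay loop scales by the calibrated
             * rate */
    }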
diff --git a/arch/metag/include/asm/div64.h b/arch/metag/include/asm/div64.h new file mode 100644 index 000000000000..0fdd11676212 --- /dev/null +++ b/arch/metag/include/asm/div64.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef __ASM_DIV64_H__ | ||
2 | #define __ASM_DIV64_H__ | ||
3 | |||
4 | #include <asm-generic/div64.h> | ||
5 | |||
6 | extern u64 div_u64(u64 dividend, u64 divisor); | ||
7 | extern s64 div_s64(s64 dividend, s64 divisor); | ||
8 | |||
9 | #define div_u64 div_u64 | ||
10 | #define div_s64 div_s64 | ||
11 | |||
12 | #endif | ||
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h new file mode 100644 index 000000000000..14b23efd9b7a --- /dev/null +++ b/arch/metag/include/asm/dma-mapping.h | |||
@@ -0,0 +1,190 @@ | |||
1 | #ifndef _ASM_METAG_DMA_MAPPING_H | ||
2 | #define _ASM_METAG_DMA_MAPPING_H | ||
3 | |||
4 | #include <linux/mm.h> | ||
5 | |||
6 | #include <asm/cache.h> | ||
7 | #include <asm/io.h> | ||
8 | #include <linux/scatterlist.h> | ||
9 | #include <asm/bug.h> | ||
10 | |||
11 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
12 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
13 | |||
14 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
15 | dma_addr_t *dma_handle, gfp_t flag); | ||
16 | |||
17 | void dma_free_coherent(struct device *dev, size_t size, | ||
18 | void *vaddr, dma_addr_t dma_handle); | ||
19 | |||
20 | void dma_sync_for_device(void *vaddr, size_t size, int dma_direction); | ||
21 | void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction); | ||
22 | |||
23 | int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | ||
24 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
25 | |||
26 | int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, | ||
27 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | ||
28 | |||
29 | static inline dma_addr_t | ||
30 | dma_map_single(struct device *dev, void *ptr, size_t size, | ||
31 | enum dma_data_direction direction) | ||
32 | { | ||
33 | BUG_ON(!valid_dma_direction(direction)); | ||
34 | WARN_ON(size == 0); | ||
35 | dma_sync_for_device(ptr, size, direction); | ||
36 | return virt_to_phys(ptr); | ||
37 | } | ||
38 | |||
39 | static inline void | ||
40 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
41 | enum dma_data_direction direction) | ||
42 | { | ||
43 | BUG_ON(!valid_dma_direction(direction)); | ||
44 | dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction); | ||
45 | } | ||
46 | |||
47 | static inline int | ||
48 | dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | ||
49 | enum dma_data_direction direction) | ||
50 | { | ||
51 | struct scatterlist *sg; | ||
52 | int i; | ||
53 | |||
54 | BUG_ON(!valid_dma_direction(direction)); | ||
55 | WARN_ON(nents == 0 || sglist[0].length == 0); | ||
56 | |||
57 | for_each_sg(sglist, sg, nents, i) { | ||
58 | BUG_ON(!sg_page(sg)); | ||
59 | |||
60 | sg->dma_address = sg_phys(sg); | ||
61 | dma_sync_for_device(sg_virt(sg), sg->length, direction); | ||
62 | } | ||
63 | |||
64 | return nents; | ||
65 | } | ||
66 | |||
67 | static inline dma_addr_t | ||
68 | dma_map_page(struct device *dev, struct page *page, unsigned long offset, | ||
69 | size_t size, enum dma_data_direction direction) | ||
70 | { | ||
71 | BUG_ON(!valid_dma_direction(direction)); | ||
72 | dma_sync_for_device((void *)(page_to_phys(page) + offset), size, | ||
73 | direction); | ||
74 | return page_to_phys(page) + offset; | ||
75 | } | ||
76 | |||
77 | static inline void | ||
78 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
79 | enum dma_data_direction direction) | ||
80 | { | ||
81 | BUG_ON(!valid_dma_direction(direction)); | ||
82 | dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); | ||
83 | } | ||
84 | |||
85 | |||
86 | static inline void | ||
87 | dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries, | ||
88 | enum dma_data_direction direction) | ||
89 | { | ||
90 | struct scatterlist *sg; | ||
91 | int i; | ||
92 | |||
93 | BUG_ON(!valid_dma_direction(direction)); | ||
94 | WARN_ON(nhwentries == 0 || sglist[0].length == 0); | ||
95 | |||
96 | for_each_sg(sglist, sg, nhwentries, i) { | ||
97 | BUG_ON(!sg_page(sg)); | ||
98 | |||
99 | sg->dma_address = sg_phys(sg); | ||
100 | dma_sync_for_cpu(sg_virt(sg), sg->length, direction); | ||
101 | } | ||
102 | } | ||
103 | |||
104 | static inline void | ||
105 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
106 | enum dma_data_direction direction) | ||
107 | { | ||
108 | dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction); | ||
109 | } | ||
110 | |||
111 | static inline void | ||
112 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||
113 | size_t size, enum dma_data_direction direction) | ||
114 | { | ||
115 | dma_sync_for_device(phys_to_virt(dma_handle), size, direction); | ||
116 | } | ||
117 | |||
118 | static inline void | ||
119 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
120 | unsigned long offset, size_t size, | ||
121 | enum dma_data_direction direction) | ||
122 | { | ||
123 | dma_sync_for_cpu(phys_to_virt(dma_handle)+offset, size, | ||
124 | direction); | ||
125 | } | ||
126 | |||
127 | static inline void | ||
128 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
129 | unsigned long offset, size_t size, | ||
130 | enum dma_data_direction direction) | ||
131 | { | ||
132 | dma_sync_for_device(phys_to_virt(dma_handle)+offset, size, | ||
133 | direction); | ||
134 | } | ||
135 | |||
136 | static inline void | ||
137 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
138 | enum dma_data_direction direction) | ||
139 | { | ||
140 | int i; | ||
141 | for (i = 0; i < nelems; i++, sg++) | ||
142 | dma_sync_for_cpu(sg_virt(sg), sg->length, direction); | ||
143 | } | ||
144 | |||
145 | static inline void | ||
146 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
147 | enum dma_data_direction direction) | ||
148 | { | ||
149 | int i; | ||
150 | for (i = 0; i < nelems; i++, sg++) | ||
151 | dma_sync_for_device(sg_virt(sg), sg->length, direction); | ||
152 | } | ||
153 | |||
154 | static inline int | ||
155 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
156 | { | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | #define dma_supported(dev, mask) (1) | ||
161 | |||
162 | static inline int | ||
163 | dma_set_mask(struct device *dev, u64 mask) | ||
164 | { | ||
165 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
166 | return -EIO; | ||
167 | |||
168 | *dev->dma_mask = mask; | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to | ||
175 | * do any flushing here. | ||
176 | */ | ||
177 | static inline void | ||
178 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | ||
179 | enum dma_data_direction direction) | ||
180 | { | ||
181 | } | ||
182 | |||
183 | /* drivers/base/dma-mapping.c */ | ||
184 | extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
185 | void *cpu_addr, dma_addr_t dma_addr, | ||
186 | size_t size); | ||
187 | |||
188 | #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) | ||
189 | |||
190 | #endif | ||
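A sketch of the streaming-DMA lifecycle these helpers implement, as a driver would use it for a transmit buffer (device programming is a placeholder; note this implementation's dma_mapping_error() always reports success):

    static int example_send(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            /* writes the buffer back for the device, then hands out its
             * physical address (virt_to_phys(), per dma_map_single() above) */
            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -EIO;

            /* ...program the device with 'handle' and wait for completion... */

            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }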
diff --git a/arch/metag/include/asm/elf.h b/arch/metag/include/asm/elf.h new file mode 100644 index 000000000000..d63b9d0e57dd --- /dev/null +++ b/arch/metag/include/asm/elf.h | |||
@@ -0,0 +1,128 @@ | |||
1 | #ifndef __ASM_METAG_ELF_H | ||
2 | #define __ASM_METAG_ELF_H | ||
3 | |||
4 | #define EM_METAG 174 | ||
5 | |||
6 | /* Meta relocations */ | ||
7 | #define R_METAG_HIADDR16 0 | ||
8 | #define R_METAG_LOADDR16 1 | ||
9 | #define R_METAG_ADDR32 2 | ||
10 | #define R_METAG_NONE 3 | ||
11 | #define R_METAG_RELBRANCH 4 | ||
12 | #define R_METAG_GETSETOFF 5 | ||
13 | |||
14 | /* Backward compatibility */ | ||
15 | #define R_METAG_REG32OP1 6 | ||
16 | #define R_METAG_REG32OP2 7 | ||
17 | #define R_METAG_REG32OP3 8 | ||
18 | #define R_METAG_REG16OP1 9 | ||
19 | #define R_METAG_REG16OP2 10 | ||
20 | #define R_METAG_REG16OP3 11 | ||
21 | #define R_METAG_REG32OP4 12 | ||
22 | |||
23 | #define R_METAG_HIOG 13 | ||
24 | #define R_METAG_LOOG 14 | ||
25 | |||
26 | /* GNU */ | ||
27 | #define R_METAG_GNU_VTINHERIT 30 | ||
28 | #define R_METAG_GNU_VTENTRY 31 | ||
29 | |||
30 | /* PIC relocations */ | ||
31 | #define R_METAG_HI16_GOTOFF 32 | ||
32 | #define R_METAG_LO16_GOTOFF 33 | ||
33 | #define R_METAG_GETSET_GOTOFF 34 | ||
34 | #define R_METAG_GETSET_GOT 35 | ||
35 | #define R_METAG_HI16_GOTPC 36 | ||
36 | #define R_METAG_LO16_GOTPC 37 | ||
37 | #define R_METAG_HI16_PLT 38 | ||
38 | #define R_METAG_LO16_PLT 39 | ||
39 | #define R_METAG_RELBRANCH_PLT 40 | ||
40 | #define R_METAG_GOTOFF 41 | ||
41 | #define R_METAG_PLT 42 | ||
42 | #define R_METAG_COPY 43 | ||
43 | #define R_METAG_JMP_SLOT 44 | ||
44 | #define R_METAG_RELATIVE 45 | ||
45 | #define R_METAG_GLOB_DAT 46 | ||
46 | |||
47 | /* | ||
48 | * ELF register definitions. | ||
49 | */ | ||
50 | |||
51 | #include <asm/page.h> | ||
52 | #include <asm/processor.h> | ||
53 | #include <asm/ptrace.h> | ||
54 | #include <asm/user.h> | ||
55 | |||
56 | typedef unsigned long elf_greg_t; | ||
57 | |||
58 | #define ELF_NGREG (sizeof(struct user_gp_regs) / sizeof(elf_greg_t)) | ||
59 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
60 | |||
61 | typedef unsigned long elf_fpregset_t; | ||
62 | |||
63 | /* | ||
64 | * This is used to ensure we don't load something for the wrong architecture. | ||
65 | */ | ||
66 | #define elf_check_arch(x) ((x)->e_machine == EM_METAG) | ||
67 | |||
68 | /* | ||
69 | * These are used to set parameters in the core dumps. | ||
70 | */ | ||
71 | #define ELF_CLASS ELFCLASS32 | ||
72 | #define ELF_DATA ELFDATA2LSB | ||
73 | #define ELF_ARCH EM_METAG | ||
74 | |||
75 | #define ELF_PLAT_INIT(_r, load_addr) \ | ||
76 | do { _r->ctx.AX[0].U0 = 0; } while (0) | ||
77 | |||
78 | #define USE_ELF_CORE_DUMP | ||
79 | #define CORE_DUMP_USE_REGSET | ||
80 | #define ELF_EXEC_PAGESIZE PAGE_SIZE | ||
81 | |||
82 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | ||
83 | use of this is to invoke "./ld.so someprog" to test out a new version of | ||
84 | the loader. We need to make sure that it is out of the way of the program | ||
85 | that it will "exec", and that there is sufficient room for the brk. */ | ||
86 | |||
87 | #define ELF_ET_DYN_BASE 0x08000000UL | ||
88 | |||
89 | #define ELF_CORE_COPY_REGS(_dest, _regs) \ | ||
90 | memcpy((char *)&_dest, (char *)_regs, sizeof(struct pt_regs)); | ||
91 | |||
92 | /* This yields a mask that user programs can use to figure out what | ||
93 | instruction set this cpu supports. */ | ||
94 | |||
95 | #define ELF_HWCAP (0) | ||
96 | |||
97 | /* This yields a string that ld.so will use to load implementation | ||
98 | specific libraries for optimization. This is more specific in | ||
99 | intent than poking at uname or /proc/cpuinfo. */ | ||
100 | |||
101 | #define ELF_PLATFORM (NULL) | ||
102 | |||
103 | #define SET_PERSONALITY(ex) \ | ||
104 | set_personality(PER_LINUX | (current->personality & (~PER_MASK))) | ||
105 | |||
106 | #define STACK_RND_MASK (0) | ||
107 | |||
108 | #ifdef CONFIG_METAG_USER_TCM | ||
109 | |||
110 | struct elf32_phdr; | ||
111 | struct file; | ||
112 | |||
113 | unsigned long __metag_elf_map(struct file *filep, unsigned long addr, | ||
114 | struct elf32_phdr *eppnt, int prot, int type, | ||
115 | unsigned long total_size); | ||
116 | |||
117 | static inline unsigned long metag_elf_map(struct file *filep, | ||
118 | unsigned long addr, | ||
119 | struct elf32_phdr *eppnt, int prot, | ||
120 | int type, unsigned long total_size) | ||
121 | { | ||
122 | return __metag_elf_map(filep, addr, eppnt, prot, type, total_size); | ||
123 | } | ||
124 | #define elf_map metag_elf_map | ||
125 | |||
126 | #endif | ||
127 | |||
128 | #endif | ||
diff --git a/arch/metag/include/asm/fixmap.h b/arch/metag/include/asm/fixmap.h new file mode 100644 index 000000000000..33312751c92b --- /dev/null +++ b/arch/metag/include/asm/fixmap.h | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * fixmap.h: compile-time virtual memory allocation | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1998 Ingo Molnar | ||
9 | * | ||
10 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_FIXMAP_H | ||
14 | #define _ASM_FIXMAP_H | ||
15 | |||
16 | #include <asm/pgtable.h> | ||
17 | #ifdef CONFIG_HIGHMEM | ||
18 | #include <linux/threads.h> | ||
19 | #include <asm/kmap_types.h> | ||
20 | #endif | ||
21 | |||
22 | /* | ||
23 | * Here we define all the compile-time 'special' virtual | ||
24 | * addresses. The point is to have a constant address at | ||
25 | * compile time, but to set the physical address only | ||
26 | * in the boot process. We allocate these special addresses | ||
27 | * from the end of the consistent memory region backwards. | ||
28 | * Also this lets us do fail-safe vmalloc(), since we | ||
29 | * can guarantee that these special addresses and | ||
30 | * vmalloc()-ed addresses never overlap. | ||
31 | * | ||
32 | * these 'compile-time allocated' memory buffers are | ||
33 | * fixed-size 4k pages. (or larger if used with an increment | ||
34 | * higher than 1) use fixmap_set(idx,phys) to associate | ||
35 | * physical memory with fixmap indices. | ||
36 | * | ||
37 | * TLB entries of such buffers will not be flushed across | ||
38 | * task switches. | ||
39 | */ | ||
40 | enum fixed_addresses { | ||
41 | #define FIX_N_COLOURS 8 | ||
42 | #ifdef CONFIG_HIGHMEM | ||
43 | /* reserved pte's for temporary kernel mappings */ | ||
44 | FIX_KMAP_BEGIN, | ||
45 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | ||
46 | #endif | ||
47 | __end_of_fixed_addresses | ||
48 | }; | ||
49 | |||
50 | #define FIXADDR_TOP (CONSISTENT_START - PAGE_SIZE) | ||
51 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | ||
52 | #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) | ||
53 | |||
54 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
55 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
56 | |||
57 | extern void __this_fixmap_does_not_exist(void); | ||
58 | /* | ||
59 | * 'index to address' translation. If anyone tries to use the idx | ||
60 | * directly without translation, we catch the bug with a NULL-dereference | ||
61 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
62 | */ | ||
63 | static inline unsigned long fix_to_virt(const unsigned int idx) | ||
64 | { | ||
65 | /* | ||
66 | * this branch gets completely eliminated after inlining, | ||
67 | * except when someone tries to use fixaddr indices in an | ||
68 | * illegal way. (such as mixing up address types or using | ||
69 | * out-of-range indices). | ||
70 | * | ||
71 | * If it doesn't get removed, the linker will complain | ||
72 | * loudly with a reasonably clear error message. | ||
73 | */ | ||
74 | if (idx >= __end_of_fixed_addresses) | ||
75 | __this_fixmap_does_not_exist(); | ||
76 | |||
77 | return __fix_to_virt(idx); | ||
78 | } | ||
79 | |||
80 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
81 | { | ||
82 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
83 | return __virt_to_fix(vaddr); | ||
84 | } | ||
85 | |||
86 | #define kmap_get_fixmap_pte(vaddr) \ | ||
87 | pte_offset_kernel( \ | ||
88 | pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \ | ||
89 | (vaddr) \ | ||
90 | ) | ||
91 | |||
92 | /* | ||
93 | * Called from pgtable_init() | ||
94 | */ | ||
95 | extern void fixrange_init(unsigned long start, unsigned long end, | ||
96 | pgd_t *pgd_base); | ||
97 | |||
98 | |||
99 | #endif | ||
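A note on the fix_to_virt() construction above: __this_fixmap_does_not_exist() is declared but deliberately never defined, so any call to it that the compiler cannot prove dead survives to link time and the link fails. A minimal standalone sketch of the same trick, with invented names (it relies on optimisation, e.g. -O2, to fold the constant branch away):

    /* Sketch: link-time assertion via a deliberately undefined function. */
    extern void this_call_must_be_optimised_away(void);	/* no definition anywhere */

    static inline unsigned long checked_lookup(unsigned int idx)
    {
    	if (idx >= 8)	/* bound known at compile time for constant idx */
    		this_call_must_be_optimised_away();
    	return 0x1000UL * idx;
    }

    int main(void)
    {
    	/* idx is a constant: the branch folds away and the program links */
    	return (int)checked_lookup(3);
    }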
diff --git a/arch/metag/include/asm/ftrace.h b/arch/metag/include/asm/ftrace.h new file mode 100644 index 000000000000..2901f0f7d944 --- /dev/null +++ b/arch/metag/include/asm/ftrace.h | |||
@@ -0,0 +1,23 @@ | |||
1 | #ifndef _ASM_METAG_FTRACE | ||
2 | #define _ASM_METAG_FTRACE | ||
3 | |||
4 | #ifdef CONFIG_FUNCTION_TRACER | ||
5 | #define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */ | ||
6 | |||
7 | #ifndef __ASSEMBLY__ | ||
8 | extern void mcount_wrapper(void); | ||
9 | #define MCOUNT_ADDR ((long)(mcount_wrapper)) | ||
10 | |||
11 | static inline unsigned long ftrace_call_adjust(unsigned long addr) | ||
12 | { | ||
13 | return addr; | ||
14 | } | ||
15 | |||
16 | struct dyn_arch_ftrace { | ||
17 | /* No extra data needed on metag */ | ||
18 | }; | ||
19 | #endif /* __ASSEMBLY__ */ | ||
20 | |||
21 | #endif /* CONFIG_FUNCTION_TRACER */ | ||
22 | |||
23 | #endif /* _ASM_METAG_FTRACE */ | ||
diff --git a/arch/metag/include/asm/global_lock.h b/arch/metag/include/asm/global_lock.h new file mode 100644 index 000000000000..fc831c88c22a --- /dev/null +++ b/arch/metag/include/asm/global_lock.h | |||
@@ -0,0 +1,100 @@ | |||
1 | #ifndef __ASM_METAG_GLOBAL_LOCK_H | ||
2 | #define __ASM_METAG_GLOBAL_LOCK_H | ||
3 | |||
4 | #include <asm/metag_mem.h> | ||
5 | |||
6 | /** | ||
7 | * __global_lock1() - Acquire global voluntary lock (LOCK1). | ||
8 | * @flags: Variable to store flags into. | ||
9 | * | ||
10 | * Acquires the Meta global voluntary lock (LOCK1), also taking care to disable | ||
11 | * all triggers so we cannot be interrupted, and to enforce a compiler barrier | ||
12 | * so that the compiler cannot reorder memory accesses across the lock. | ||
13 | * | ||
14 | * No other hardware thread will be able to acquire the voluntary or exclusive | ||
15 | * locks until the voluntary lock is released with @__global_unlock1, but they | ||
16 | * may continue to execute as long as they aren't trying to acquire either of | ||
17 | * the locks. | ||
18 | */ | ||
19 | #define __global_lock1(flags) do { \ | ||
20 | unsigned int __trval; \ | ||
21 | asm volatile("MOV %0,#0\n\t" \ | ||
22 | "SWAP %0,TXMASKI\n\t" \ | ||
23 | "LOCK1" \ | ||
24 | : "=r" (__trval) \ | ||
25 | : \ | ||
26 | : "memory"); \ | ||
27 | (flags) = __trval; \ | ||
28 | } while (0) | ||
29 | |||
30 | /** | ||
31 | * __global_unlock1() - Release global voluntary lock (LOCK1). | ||
32 | * @flags: Variable to restore flags from. | ||
33 | * | ||
34 | * Releases the Meta global voluntary lock (LOCK1) acquired with | ||
35 | * @__global_lock1, also taking care to re-enable triggers, and to enforce a | ||
36 | * compiler barrier so that the compiler cannot reorder memory accesses across | ||
37 | * the unlock. | ||
38 | * | ||
39 | * This immediately allows another hardware thread to acquire the voluntary or | ||
40 | * exclusive locks. | ||
41 | */ | ||
42 | #define __global_unlock1(flags) do { \ | ||
43 | unsigned int __trval = (flags); \ | ||
44 | asm volatile("LOCK0\n\t" \ | ||
45 | "MOV TXMASKI,%0" \ | ||
46 | : \ | ||
47 | : "r" (__trval) \ | ||
48 | : "memory"); \ | ||
49 | } while (0) | ||
50 | |||
51 | /** | ||
52 | * __global_lock2() - Acquire global exclusive lock (LOCK2). | ||
53 | * @flags: Variable to store flags into. | ||
54 | * | ||
55 | * Acquires the Meta global voluntary lock and global exclusive lock (LOCK2), | ||
56 | * also taking care to disable all triggers so we cannot be interrupted, to take | ||
57 | * the atomic lock (system event) and to enforce a compiler barrier so that the | ||
58 | * compiler cannot reorder memory accesses across the lock. | ||
59 | * | ||
60 | * No other hardware thread will be able to execute code until the locks are | ||
61 | * released with @__global_unlock2. | ||
62 | */ | ||
63 | #define __global_lock2(flags) do { \ | ||
64 | unsigned int __trval; \ | ||
65 | unsigned int __alock_hi = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \ | ||
66 | asm volatile("MOV %0,#0\n\t" \ | ||
67 | "SWAP %0,TXMASKI\n\t" \ | ||
68 | "LOCK2\n\t" \ | ||
69 | "SETD [%1+#0x40],D1RtP" \ | ||
70 | : "=r&" (__trval) \ | ||
71 | : "u" (__alock_hi) \ | ||
72 | : "memory"); \ | ||
73 | (flags) = __trval; \ | ||
74 | } while (0) | ||
75 | |||
76 | /** | ||
77 | * __global_unlock2() - Release global exclusive lock (LOCK2). | ||
78 | * @flags: Variable to restore flags from. | ||
79 | * | ||
80 | * Releases the Meta global exclusive lock (LOCK2) and global voluntary lock | ||
81 | * acquired with @__global_lock2, also taking care to release the atomic lock | ||
82 | * (system event), re-enable triggers, and to enforce a compiler barrier so that | ||
83 | * the compiler cannot reorder memory accesses across the unlock. | ||
84 | * | ||
85 | * This immediately allows other hardware threads to continue executing and one | ||
86 | * of them to acquire locks. | ||
87 | */ | ||
88 | #define __global_unlock2(flags) do { \ | ||
89 | unsigned int __trval = (flags); \ | ||
90 | unsigned int __alock_hi = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \ | ||
91 | asm volatile("SETD [%1+#0x00],D1RtP\n\t" \ | ||
92 | "LOCK0\n\t" \ | ||
93 | "MOV TXMASKI,%0" \ | ||
94 | : \ | ||
95 | : "r" (__trval), \ | ||
96 | "u" (__alock_hi) \ | ||
97 | : "memory"); \ | ||
98 | } while (0) | ||
99 | |||
100 | #endif /* __ASM_METAG_GLOBAL_LOCK_H */ | ||
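As a usage illustration, the voluntary lock follows the usual save/restore-flags pattern; a hedged sketch with a hypothetical shared counter (meta_l2c_writeback() in l2cache.h below shows a real in-tree use of the same pattern):

    /* Sketch: protecting a hypothetical cross-thread counter with LOCK1. */
    static unsigned int shared_counter;	/* assumed shared between hw threads */

    static void increment_shared_counter(void)
    {
    	unsigned long flags;

    	__global_lock1(flags);		/* triggers masked, LOCK1 held */
    	shared_counter++;		/* no other hw thread can take LOCK1/LOCK2 */
    	__global_unlock1(flags);	/* LOCK1 released, trigger mask restored */
    }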
diff --git a/arch/metag/include/asm/gpio.h b/arch/metag/include/asm/gpio.h new file mode 100644 index 000000000000..b3799d88ffcf --- /dev/null +++ b/arch/metag/include/asm/gpio.h | |||
@@ -0,0 +1,4 @@ | |||
1 | #ifndef __LINUX_GPIO_H | ||
2 | #warning Include linux/gpio.h instead of asm/gpio.h | ||
3 | #include <linux/gpio.h> | ||
4 | #endif | ||
diff --git a/arch/metag/include/asm/highmem.h b/arch/metag/include/asm/highmem.h new file mode 100644 index 000000000000..6646a15c73dd --- /dev/null +++ b/arch/metag/include/asm/highmem.h | |||
@@ -0,0 +1,62 @@ | |||
1 | #ifndef _ASM_HIGHMEM_H | ||
2 | #define _ASM_HIGHMEM_H | ||
3 | |||
4 | #include <asm/cacheflush.h> | ||
5 | #include <asm/kmap_types.h> | ||
6 | #include <asm/fixmap.h> | ||
7 | |||
8 | /* | ||
9 | * Right now we initialize only a single pte table. It can be extended | ||
10 | * easily; subsequent pte tables have to be allocated in one physical | ||
11 | * chunk of RAM. | ||
12 | */ | ||
13 | /* | ||
14 | * Ordering is (from lower to higher memory addresses): | ||
15 | * | ||
16 | * high_memory | ||
17 | * Persistent kmap area | ||
18 | * PKMAP_BASE | ||
19 | * fixed_addresses | ||
20 | * FIXADDR_START | ||
21 | * FIXADDR_TOP | ||
22 | * Vmalloc area | ||
23 | * VMALLOC_START | ||
24 | * VMALLOC_END | ||
25 | */ | ||
26 | #define PKMAP_BASE (FIXADDR_START - PMD_SIZE) | ||
27 | #define LAST_PKMAP PTRS_PER_PTE | ||
28 | #define LAST_PKMAP_MASK (LAST_PKMAP - 1) | ||
29 | #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) | ||
30 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) | ||
31 | |||
32 | #define kmap_prot PAGE_KERNEL | ||
33 | |||
34 | static inline void flush_cache_kmaps(void) | ||
35 | { | ||
36 | flush_cache_all(); | ||
37 | } | ||
38 | |||
39 | /* declarations for highmem.c */ | ||
40 | extern unsigned long highstart_pfn, highend_pfn; | ||
41 | |||
42 | extern pte_t *pkmap_page_table; | ||
43 | |||
44 | extern void *kmap_high(struct page *page); | ||
45 | extern void kunmap_high(struct page *page); | ||
46 | |||
47 | extern void kmap_init(void); | ||
48 | |||
49 | /* | ||
50 | * The following functions are already defined by <linux/highmem.h> | ||
51 | * when CONFIG_HIGHMEM is not set. | ||
52 | */ | ||
53 | #ifdef CONFIG_HIGHMEM | ||
54 | extern void *kmap(struct page *page); | ||
55 | extern void kunmap(struct page *page); | ||
56 | extern void *kmap_atomic(struct page *page); | ||
57 | extern void __kunmap_atomic(void *kvaddr); | ||
58 | extern void *kmap_atomic_pfn(unsigned long pfn); | ||
59 | extern struct page *kmap_atomic_to_page(void *ptr); | ||
60 | #endif | ||
61 | |||
62 | #endif | ||
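The PKMAP_NR()/PKMAP_ADDR() pair above is plain index arithmetic over the persistent kmap window; a self-contained sketch using an assumed PKMAP_BASE (the real value is derived from FIXADDR_START above):

    /* Sketch: round-tripping the pkmap index arithmetic. */
    #include <assert.h>

    #define PAGE_SHIFT	12
    #define PKMAP_BASE	0x7E000000UL		/* illustrative assumption */
    #define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
    #define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

    int main(void)
    {
    	assert(PKMAP_ADDR(5) == PKMAP_BASE + 5 * 4096);
    	assert(PKMAP_NR(PKMAP_ADDR(5)) == 5);	/* round-trips */
    	return 0;
    }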
diff --git a/arch/metag/include/asm/hugetlb.h b/arch/metag/include/asm/hugetlb.h new file mode 100644 index 000000000000..f545477e61f3 --- /dev/null +++ b/arch/metag/include/asm/hugetlb.h | |||
@@ -0,0 +1,86 @@ | |||
1 | #ifndef _ASM_METAG_HUGETLB_H | ||
2 | #define _ASM_METAG_HUGETLB_H | ||
3 | |||
4 | #include <asm/page.h> | ||
5 | |||
6 | |||
7 | static inline int is_hugepage_only_range(struct mm_struct *mm, | ||
8 | unsigned long addr, | ||
9 | unsigned long len) { | ||
10 | return 0; | ||
11 | } | ||
12 | |||
13 | int prepare_hugepage_range(struct file *file, unsigned long addr, | ||
14 | unsigned long len); | ||
15 | |||
16 | static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) | ||
17 | { | ||
18 | } | ||
19 | |||
20 | static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, | ||
21 | unsigned long addr, unsigned long end, | ||
22 | unsigned long floor, | ||
23 | unsigned long ceiling) | ||
24 | { | ||
25 | free_pgd_range(tlb, addr, end, floor, ceiling); | ||
26 | } | ||
27 | |||
28 | static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | ||
29 | pte_t *ptep, pte_t pte) | ||
30 | { | ||
31 | set_pte_at(mm, addr, ptep, pte); | ||
32 | } | ||
33 | |||
34 | static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | ||
35 | unsigned long addr, pte_t *ptep) | ||
36 | { | ||
37 | return ptep_get_and_clear(mm, addr, ptep); | ||
38 | } | ||
39 | |||
40 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, | ||
41 | unsigned long addr, pte_t *ptep) | ||
42 | { | ||
43 | } | ||
44 | |||
45 | static inline int huge_pte_none(pte_t pte) | ||
46 | { | ||
47 | return pte_none(pte); | ||
48 | } | ||
49 | |||
50 | static inline pte_t huge_pte_wrprotect(pte_t pte) | ||
51 | { | ||
52 | return pte_wrprotect(pte); | ||
53 | } | ||
54 | |||
55 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | ||
56 | unsigned long addr, pte_t *ptep) | ||
57 | { | ||
58 | ptep_set_wrprotect(mm, addr, ptep); | ||
59 | } | ||
60 | |||
61 | static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, | ||
62 | unsigned long addr, pte_t *ptep, | ||
63 | pte_t pte, int dirty) | ||
64 | { | ||
65 | return ptep_set_access_flags(vma, addr, ptep, pte, dirty); | ||
66 | } | ||
67 | |||
68 | static inline pte_t huge_ptep_get(pte_t *ptep) | ||
69 | { | ||
70 | return *ptep; | ||
71 | } | ||
72 | |||
73 | static inline int arch_prepare_hugepage(struct page *page) | ||
74 | { | ||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | static inline void arch_release_hugepage(struct page *page) | ||
79 | { | ||
80 | } | ||
81 | |||
82 | static inline void arch_clear_hugepage_flags(struct page *page) | ||
83 | { | ||
84 | } | ||
85 | |||
86 | #endif /* _ASM_METAG_HUGETLB_H */ | ||
diff --git a/arch/metag/include/asm/hwthread.h b/arch/metag/include/asm/hwthread.h new file mode 100644 index 000000000000..8f9786619b1d --- /dev/null +++ b/arch/metag/include/asm/hwthread.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Imagination Technologies | ||
3 | */ | ||
4 | #ifndef __METAG_HWTHREAD_H | ||
5 | #define __METAG_HWTHREAD_H | ||
6 | |||
7 | #include <linux/bug.h> | ||
8 | #include <linux/io.h> | ||
9 | |||
10 | #include <asm/metag_mem.h> | ||
11 | |||
12 | #define BAD_HWTHREAD_ID (0xFFU) | ||
13 | #define BAD_CPU_ID (0xFFU) | ||
14 | |||
15 | extern u8 cpu_2_hwthread_id[]; | ||
16 | extern u8 hwthread_id_2_cpu[]; | ||
17 | |||
18 | /* | ||
19 | * Each hardware thread's Control Unit registers are memory-mapped | ||
20 | * and can therefore be accessed by any other hardware thread. | ||
21 | * | ||
22 | * This helper function returns the memory address where "thread"'s | ||
23 | * register "regnum" is mapped. | ||
24 | */ | ||
25 | static inline | ||
26 | void __iomem *__CU_addr(unsigned int thread, unsigned int regnum) | ||
27 | { | ||
28 | unsigned int base, thread_offset, thread_regnum; | ||
29 | |||
30 | WARN_ON(thread == BAD_HWTHREAD_ID); | ||
31 | |||
32 | base = T0UCTREG0; /* Control unit base */ | ||
33 | |||
34 | thread_offset = TnUCTRX_STRIDE * thread; | ||
35 | thread_regnum = TXUCTREGn_STRIDE * regnum; | ||
36 | |||
37 | return (void __iomem *)(base + thread_offset + thread_regnum); | ||
38 | } | ||
39 | |||
40 | #endif /* __METAG_HWTHREAD_H */ | ||
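To illustrate, __CU_addr() would typically be combined with the raw MMIO accessors; a hedged kernel-context sketch (register number 0 is an arbitrary choice for the example, not a documented register):

    /* Sketch: peek at control unit register 0 of another hardware thread. */
    static u32 peek_thread_ctrl_reg(unsigned int hwthread)
    {
    	void __iomem *reg = __CU_addr(hwthread, 0);	/* regnum 0: illustrative */

    	return __raw_readl(reg);	/* 32-bit read of the mapped register */
    }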
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h new file mode 100644 index 000000000000..9359e5048442 --- /dev/null +++ b/arch/metag/include/asm/io.h | |||
@@ -0,0 +1,165 @@ | |||
1 | #ifndef _ASM_METAG_IO_H | ||
2 | #define _ASM_METAG_IO_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | #define IO_SPACE_LIMIT 0 | ||
7 | |||
8 | #define page_to_bus page_to_phys | ||
9 | #define bus_to_page phys_to_page | ||
10 | |||
11 | /* | ||
12 | * Generic I/O | ||
13 | */ | ||
14 | |||
15 | #define __raw_readb __raw_readb | ||
16 | static inline u8 __raw_readb(const volatile void __iomem *addr) | ||
17 | { | ||
18 | u8 ret; | ||
19 | asm volatile("GETB %0,[%1]" | ||
20 | : "=da" (ret) | ||
21 | : "da" (addr) | ||
22 | : "memory"); | ||
23 | return ret; | ||
24 | } | ||
25 | |||
26 | #define __raw_readw __raw_readw | ||
27 | static inline u16 __raw_readw(const volatile void __iomem *addr) | ||
28 | { | ||
29 | u16 ret; | ||
30 | asm volatile("GETW %0,[%1]" | ||
31 | : "=da" (ret) | ||
32 | : "da" (addr) | ||
33 | : "memory"); | ||
34 | return ret; | ||
35 | } | ||
36 | |||
37 | #define __raw_readl __raw_readl | ||
38 | static inline u32 __raw_readl(const volatile void __iomem *addr) | ||
39 | { | ||
40 | u32 ret; | ||
41 | asm volatile("GETD %0,[%1]" | ||
42 | : "=da" (ret) | ||
43 | : "da" (addr) | ||
44 | : "memory"); | ||
45 | return ret; | ||
46 | } | ||
47 | |||
48 | #define __raw_readq __raw_readq | ||
49 | static inline u64 __raw_readq(const volatile void __iomem *addr) | ||
50 | { | ||
51 | u64 ret; | ||
52 | asm volatile("GETL %0,%t0,[%1]" | ||
53 | : "=da" (ret) | ||
54 | : "da" (addr) | ||
55 | : "memory"); | ||
56 | return ret; | ||
57 | } | ||
58 | |||
59 | #define __raw_writeb __raw_writeb | ||
60 | static inline void __raw_writeb(u8 b, volatile void __iomem *addr) | ||
61 | { | ||
62 | asm volatile("SETB [%0],%1" | ||
63 | : | ||
64 | : "da" (addr), | ||
65 | "da" (b) | ||
66 | : "memory"); | ||
67 | } | ||
68 | |||
69 | #define __raw_writew __raw_writew | ||
70 | static inline void __raw_writew(u16 b, volatile void __iomem *addr) | ||
71 | { | ||
72 | asm volatile("SETW [%0],%1" | ||
73 | : | ||
74 | : "da" (addr), | ||
75 | "da" (b) | ||
76 | : "memory"); | ||
77 | } | ||
78 | |||
79 | #define __raw_writel __raw_writel | ||
80 | static inline void __raw_writel(u32 b, volatile void __iomem *addr) | ||
81 | { | ||
82 | asm volatile("SETD [%0],%1" | ||
83 | : | ||
84 | : "da" (addr), | ||
85 | "da" (b) | ||
86 | : "memory"); | ||
87 | } | ||
88 | |||
89 | #define __raw_writeq __raw_writeq | ||
90 | static inline void __raw_writeq(u64 b, volatile void __iomem *addr) | ||
91 | { | ||
92 | asm volatile("SETL [%0],%1,%t1" | ||
93 | : | ||
94 | : "da" (addr), | ||
95 | "da" (b) | ||
96 | : "memory"); | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * The generic io.h can define all the other generic accessors | ||
101 | */ | ||
102 | |||
103 | #include <asm-generic/io.h> | ||
104 | |||
105 | /* | ||
106 | * Despite being a 32bit architecture, Meta can do 64bit memory accesses | ||
107 | * (assuming the bus supports it). | ||
108 | */ | ||
109 | |||
110 | #define readq __raw_readq | ||
111 | #define writeq __raw_writeq | ||
112 | |||
113 | /* | ||
114 | * Meta specific I/O for accessing non-MMU areas. | ||
115 | * | ||
116 | * These can be provided with a physical address rather than an __iomem pointer | ||
117 | * and should only be used by core architecture code for accessing fixed core | ||
118 | * registers. Generic drivers should use ioremap and the generic I/O accessors. | ||
119 | */ | ||
120 | |||
121 | #define metag_in8(addr) __raw_readb((volatile void __iomem *)(addr)) | ||
122 | #define metag_in16(addr) __raw_readw((volatile void __iomem *)(addr)) | ||
123 | #define metag_in32(addr) __raw_readl((volatile void __iomem *)(addr)) | ||
124 | #define metag_in64(addr) __raw_readq((volatile void __iomem *)(addr)) | ||
125 | |||
126 | #define metag_out8(b, addr) __raw_writeb(b, (volatile void __iomem *)(addr)) | ||
127 | #define metag_out16(b, addr) __raw_writew(b, (volatile void __iomem *)(addr)) | ||
128 | #define metag_out32(b, addr) __raw_writel(b, (volatile void __iomem *)(addr)) | ||
129 | #define metag_out64(b, addr) __raw_writeq(b, (volatile void __iomem *)(addr)) | ||
130 | |||
131 | /* | ||
132 | * io remapping functions | ||
133 | */ | ||
134 | |||
135 | extern void __iomem *__ioremap(unsigned long offset, | ||
136 | size_t size, unsigned long flags); | ||
137 | extern void __iounmap(void __iomem *addr); | ||
138 | |||
139 | /** | ||
140 | * ioremap - map bus memory into CPU space | ||
141 | * @offset: bus address of the memory | ||
142 | * @size: size of the resource to map | ||
143 | * | ||
144 | * ioremap performs a platform specific sequence of operations to | ||
145 | * make bus memory CPU accessible via the readb/readw/readl/writeb/ | ||
146 | * writew/writel functions and the other mmio helpers. The returned | ||
147 | * address is not guaranteed to be usable directly as a virtual | ||
148 | * address. | ||
149 | */ | ||
150 | #define ioremap(offset, size) \ | ||
151 | __ioremap((offset), (size), 0) | ||
152 | |||
153 | #define ioremap_nocache(offset, size) \ | ||
154 | __ioremap((offset), (size), 0) | ||
155 | |||
156 | #define ioremap_cached(offset, size) \ | ||
157 | __ioremap((offset), (size), _PAGE_CACHEABLE) | ||
158 | |||
159 | #define ioremap_wc(offset, size) \ | ||
160 | __ioremap((offset), (size), _PAGE_WR_COMBINE) | ||
161 | |||
162 | #define iounmap(addr) \ | ||
163 | __iounmap(addr) | ||
164 | |||
165 | #endif /* _ASM_METAG_IO_H */ | ||
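Putting the pieces together, a hedged driver-style sketch of the intended usage (the physical base, window size, and register offset are invented for the example):

    /* Sketch: map a hypothetical device window and read a status register. */
    #define EXAMPLE_DEV_PHYS	0x02004000	/* invented device base */
    #define EXAMPLE_DEV_SIZE	0x100
    #define EXAMPLE_STATUS		0x10		/* invented register offset */

    static u32 example_read_status(void)
    {
    	void __iomem *base = ioremap_nocache(EXAMPLE_DEV_PHYS, EXAMPLE_DEV_SIZE);
    	u32 status;

    	if (!base)
    		return 0;
    	status = readl(base + EXAMPLE_STATUS);	/* generic accessor from asm-generic/io.h */
    	iounmap(base);
    	return status;
    }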
diff --git a/arch/metag/include/asm/irq.h b/arch/metag/include/asm/irq.h new file mode 100644 index 000000000000..be0c8f3c5a5d --- /dev/null +++ b/arch/metag/include/asm/irq.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef __ASM_METAG_IRQ_H | ||
2 | #define __ASM_METAG_IRQ_H | ||
3 | |||
4 | #ifdef CONFIG_4KSTACKS | ||
5 | extern void irq_ctx_init(int cpu); | ||
6 | extern void irq_ctx_exit(int cpu); | ||
7 | # define __ARCH_HAS_DO_SOFTIRQ | ||
8 | #else | ||
9 | # define irq_ctx_init(cpu) do { } while (0) | ||
10 | # define irq_ctx_exit(cpu) do { } while (0) | ||
11 | #endif | ||
12 | |||
13 | void tbi_startup_interrupt(int); | ||
14 | void tbi_shutdown_interrupt(int); | ||
15 | |||
16 | struct pt_regs; | ||
17 | |||
18 | int tbisig_map(unsigned int hw); | ||
19 | extern void do_IRQ(int irq, struct pt_regs *regs); | ||
20 | |||
21 | #ifdef CONFIG_METAG_SUSPEND_MEM | ||
22 | int traps_save_context(void); | ||
23 | int traps_restore_context(void); | ||
24 | #endif | ||
25 | |||
26 | #include <asm-generic/irq.h> | ||
27 | |||
28 | #ifdef CONFIG_HOTPLUG_CPU | ||
29 | extern void migrate_irqs(void); | ||
30 | #endif | ||
31 | |||
32 | #endif /* __ASM_METAG_IRQ_H */ | ||
diff --git a/arch/metag/include/asm/irqflags.h b/arch/metag/include/asm/irqflags.h new file mode 100644 index 000000000000..339b16f062eb --- /dev/null +++ b/arch/metag/include/asm/irqflags.h | |||
@@ -0,0 +1,93 @@ | |||
1 | /* | ||
2 | * IRQ flags handling | ||
3 | * | ||
4 | * This file gets included from lowlevel asm headers too, to provide | ||
5 | * wrapped versions of the local_irq_*() APIs, based on the | ||
6 | * raw_local_irq_*() functions from the lowlevel headers. | ||
7 | */ | ||
8 | #ifndef _ASM_IRQFLAGS_H | ||
9 | #define _ASM_IRQFLAGS_H | ||
10 | |||
11 | #ifndef __ASSEMBLY__ | ||
12 | |||
13 | #include <asm/core_reg.h> | ||
14 | #include <asm/metag_regs.h> | ||
15 | |||
16 | #define INTS_OFF_MASK TXSTATI_BGNDHALT_BIT | ||
17 | |||
18 | #ifdef CONFIG_SMP | ||
19 | extern unsigned int get_trigger_mask(void); | ||
20 | #else | ||
21 | |||
22 | extern unsigned int global_trigger_mask; | ||
23 | |||
24 | static inline unsigned int get_trigger_mask(void) | ||
25 | { | ||
26 | return global_trigger_mask; | ||
27 | } | ||
28 | #endif | ||
29 | |||
30 | static inline unsigned long arch_local_save_flags(void) | ||
31 | { | ||
32 | return __core_reg_get(TXMASKI); | ||
33 | } | ||
34 | |||
35 | static inline int arch_irqs_disabled_flags(unsigned long flags) | ||
36 | { | ||
37 | return (flags & ~INTS_OFF_MASK) == 0; | ||
38 | } | ||
39 | |||
40 | static inline int arch_irqs_disabled(void) | ||
41 | { | ||
42 | unsigned long flags = arch_local_save_flags(); | ||
43 | |||
44 | return arch_irqs_disabled_flags(flags); | ||
45 | } | ||
46 | |||
47 | static inline unsigned long __irqs_disabled(void) | ||
48 | { | ||
49 | /* | ||
50 | * We shouldn't enable exceptions if they are not already | ||
51 | * enabled. This is required for chancalls to work correctly. | ||
52 | */ | ||
53 | return arch_local_save_flags() & INTS_OFF_MASK; | ||
54 | } | ||
55 | |||
56 | /* | ||
57 | * For spinlocks, etc: | ||
58 | */ | ||
59 | static inline unsigned long arch_local_irq_save(void) | ||
60 | { | ||
61 | unsigned long flags = __irqs_disabled(); | ||
62 | |||
63 | asm volatile("SWAP %0,TXMASKI\n" : "=r" (flags) : "0" (flags) | ||
64 | : "memory"); | ||
65 | |||
66 | return flags; | ||
67 | } | ||
68 | |||
69 | static inline void arch_local_irq_restore(unsigned long flags) | ||
70 | { | ||
71 | asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory"); | ||
72 | } | ||
73 | |||
74 | static inline void arch_local_irq_disable(void) | ||
75 | { | ||
76 | unsigned long flags = __irqs_disabled(); | ||
77 | |||
78 | asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory"); | ||
79 | } | ||
80 | |||
81 | #ifdef CONFIG_SMP | ||
82 | /* Avoid circular include dependencies through <linux/preempt.h> */ | ||
83 | void arch_local_irq_enable(void); | ||
84 | #else | ||
85 | static inline void arch_local_irq_enable(void) | ||
86 | { | ||
87 | arch_local_irq_restore(get_trigger_mask()); | ||
88 | } | ||
89 | #endif | ||
90 | |||
91 | #endif /* (__ASSEMBLY__) */ | ||
92 | |||
93 | #endif /* !(_ASM_IRQFLAGS_H) */ | ||
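These primitives are what the generic local_irq_save()/local_irq_restore() wrappers boil down to on Meta; a hedged sketch of the pattern (the critical-section body is illustrative):

    /* Sketch: open-coded trigger-masking critical section. */
    static void do_something_with_triggers_masked(void)
    {
    	unsigned long flags;

    	flags = arch_local_irq_save();	/* TXMASKI swapped for the 'disabled' value */
    	/* ... critical section: no interrupt triggers are delivered here ... */
    	arch_local_irq_restore(flags);	/* previous trigger mask restored */
    }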
diff --git a/arch/metag/include/asm/l2cache.h b/arch/metag/include/asm/l2cache.h new file mode 100644 index 000000000000..bffbeaa4d93b --- /dev/null +++ b/arch/metag/include/asm/l2cache.h | |||
@@ -0,0 +1,258 @@ | |||
1 | #ifndef _METAG_L2CACHE_H | ||
2 | #define _METAG_L2CACHE_H | ||
3 | |||
4 | #ifdef CONFIG_METAG_L2C | ||
5 | |||
6 | #include <asm/global_lock.h> | ||
7 | #include <asm/io.h> | ||
8 | |||
9 | /* | ||
10 | * Store the last known value of pfenable (we don't want prefetch enabled while | ||
11 | * L2 is off). | ||
12 | */ | ||
13 | extern int l2c_pfenable; | ||
14 | |||
15 | /* defined in arch/metag/drivers/core-sysfs.c */ | ||
16 | extern struct sysdev_class cache_sysclass; | ||
17 | |||
18 | static inline void wr_fence(void); | ||
19 | |||
20 | /* | ||
21 | * Functions for reading the L2 cache configuration. | ||
22 | */ | ||
23 | |||
24 | /* Get raw L2 config register (CORE_CONFIG3) */ | ||
25 | static inline unsigned int meta_l2c_config(void) | ||
26 | { | ||
27 | const unsigned int *corecfg3 = (const unsigned int *)METAC_CORE_CONFIG3; | ||
28 | return *corecfg3; | ||
29 | } | ||
30 | |||
31 | /* Get whether the L2 is present */ | ||
32 | static inline int meta_l2c_is_present(void) | ||
33 | { | ||
34 | return meta_l2c_config() & METAC_CORECFG3_L2C_HAVE_L2C_BIT; | ||
35 | } | ||
36 | |||
37 | /* Get whether the L2 is configured for write-back instead of write-through */ | ||
38 | static inline int meta_l2c_is_writeback(void) | ||
39 | { | ||
40 | return meta_l2c_config() & METAC_CORECFG3_L2C_MODE_BIT; | ||
41 | } | ||
42 | |||
43 | /* Get whether the L2 is unified instead of separated code/data */ | ||
44 | static inline int meta_l2c_is_unified(void) | ||
45 | { | ||
46 | return meta_l2c_config() & METAC_CORECFG3_L2C_UNIFIED_BIT; | ||
47 | } | ||
48 | |||
49 | /* Get the L2 cache size in bytes */ | ||
50 | static inline unsigned int meta_l2c_size(void) | ||
51 | { | ||
52 | unsigned int size_s; | ||
53 | if (!meta_l2c_is_present()) | ||
54 | return 0; | ||
55 | size_s = (meta_l2c_config() & METAC_CORECFG3_L2C_SIZE_BITS) | ||
56 | >> METAC_CORECFG3_L2C_SIZE_S; | ||
57 | /* L2CSIZE is in KiB */ | ||
58 | return 1024 << size_s; | ||
59 | } | ||
60 | |||
61 | /* Get the number of ways in the L2 cache */ | ||
62 | static inline unsigned int meta_l2c_ways(void) | ||
63 | { | ||
64 | unsigned int ways_s; | ||
65 | if (!meta_l2c_is_present()) | ||
66 | return 0; | ||
67 | ways_s = (meta_l2c_config() & METAC_CORECFG3_L2C_NUM_WAYS_BITS) | ||
68 | >> METAC_CORECFG3_L2C_NUM_WAYS_S; | ||
69 | return 0x1 << ways_s; | ||
70 | } | ||
71 | |||
72 | /* Get the line size of the L2 cache */ | ||
73 | static inline unsigned int meta_l2c_linesize(void) | ||
74 | { | ||
75 | unsigned int line_size; | ||
76 | if (!meta_l2c_is_present()) | ||
77 | return 0; | ||
78 | line_size = (meta_l2c_config() & METAC_CORECFG3_L2C_LINE_SIZE_BITS) | ||
79 | >> METAC_CORECFG3_L2C_LINE_SIZE_S; | ||
80 | switch (line_size) { | ||
81 | case METAC_CORECFG3_L2C_LINE_SIZE_64B: | ||
82 | return 64; | ||
83 | default: | ||
84 | return 0; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* Get the revision ID of the L2 cache */ | ||
89 | static inline unsigned int meta_l2c_revision(void) | ||
90 | { | ||
91 | return (meta_l2c_config() & METAC_CORECFG3_L2C_REV_ID_BITS) | ||
92 | >> METAC_CORECFG3_L2C_REV_ID_S; | ||
93 | } | ||
94 | |||
95 | |||
96 | /* | ||
97 | * Start an initialisation of the L2 cachelines and wait for completion. | ||
98 | * This should only be done in a LOCK1 or LOCK2 critical section while the L2 | ||
99 | * is disabled. | ||
100 | */ | ||
101 | static inline void _meta_l2c_init(void) | ||
102 | { | ||
103 | metag_out32(SYSC_L2C_INIT_INIT, SYSC_L2C_INIT); | ||
104 | while (metag_in32(SYSC_L2C_INIT) == SYSC_L2C_INIT_IN_PROGRESS) | ||
105 | /* do nothing */; | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * Start a writeback of dirty L2 cachelines and wait for completion. | ||
110 | * This should only be done in a LOCK1 or LOCK2 critical section. | ||
111 | */ | ||
112 | static inline void _meta_l2c_purge(void) | ||
113 | { | ||
114 | metag_out32(SYSC_L2C_PURGE_PURGE, SYSC_L2C_PURGE); | ||
115 | while (metag_in32(SYSC_L2C_PURGE) == SYSC_L2C_PURGE_IN_PROGRESS) | ||
116 | /* do nothing */; | ||
117 | } | ||
118 | |||
119 | /* Set whether the L2 cache is enabled. */ | ||
120 | static inline void _meta_l2c_enable(int enabled) | ||
121 | { | ||
122 | unsigned int enable; | ||
123 | |||
124 | enable = metag_in32(SYSC_L2C_ENABLE); | ||
125 | if (enabled) | ||
126 | enable |= SYSC_L2C_ENABLE_ENABLE_BIT; | ||
127 | else | ||
128 | enable &= ~SYSC_L2C_ENABLE_ENABLE_BIT; | ||
129 | metag_out32(enable, SYSC_L2C_ENABLE); | ||
130 | } | ||
131 | |||
132 | /* Set whether the L2 cache prefetch is enabled. */ | ||
133 | static inline void _meta_l2c_pf_enable(int pfenabled) | ||
134 | { | ||
135 | unsigned int enable; | ||
136 | |||
137 | enable = metag_in32(SYSC_L2C_ENABLE); | ||
138 | if (pfenabled) | ||
139 | enable |= SYSC_L2C_ENABLE_PFENABLE_BIT; | ||
140 | else | ||
141 | enable &= ~SYSC_L2C_ENABLE_PFENABLE_BIT; | ||
142 | metag_out32(enable, SYSC_L2C_ENABLE); | ||
143 | } | ||
144 | |||
145 | /* Return whether the L2 cache is enabled */ | ||
146 | static inline int _meta_l2c_is_enabled(void) | ||
147 | { | ||
148 | return metag_in32(SYSC_L2C_ENABLE) & SYSC_L2C_ENABLE_ENABLE_BIT; | ||
149 | } | ||
150 | |||
151 | /* Return whether the L2 cache prefetch is enabled */ | ||
152 | static inline int _meta_l2c_pf_is_enabled(void) | ||
153 | { | ||
154 | return metag_in32(SYSC_L2C_ENABLE) & SYSC_L2C_ENABLE_PFENABLE_BIT; | ||
155 | } | ||
156 | |||
157 | |||
158 | /* Return whether the L2 cache is enabled */ | ||
159 | static inline int meta_l2c_is_enabled(void) | ||
160 | { | ||
161 | int en; | ||
162 | |||
163 | /* | ||
164 | * There is no need to lock at the moment, as the enable bit is never | ||
165 | * intermediately changed, so we will never see an intermediate result. | ||
166 | */ | ||
167 | en = _meta_l2c_is_enabled(); | ||
168 | |||
169 | return en; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * Ensure the L2 cache is disabled. | ||
174 | * Return whether the L2 was previously disabled. | ||
175 | */ | ||
176 | int meta_l2c_disable(void); | ||
177 | |||
178 | /* | ||
179 | * Ensure the L2 cache is enabled. | ||
180 | * Return whether the L2 was previously enabled. | ||
181 | */ | ||
182 | int meta_l2c_enable(void); | ||
183 | |||
184 | /* Return whether the L2 cache prefetch is enabled */ | ||
185 | static inline int meta_l2c_pf_is_enabled(void) | ||
186 | { | ||
187 | return l2c_pfenable; | ||
188 | } | ||
189 | |||
190 | /* | ||
191 | * Set whether the L2 cache prefetch is enabled. | ||
192 | * Return whether the L2 prefetch was previously enabled. | ||
193 | */ | ||
194 | int meta_l2c_pf_enable(int pfenable); | ||
195 | |||
196 | /* | ||
197 | * Flush the L2 cache. | ||
198 | * Return 1 if the L2 is disabled. | ||
199 | */ | ||
200 | int meta_l2c_flush(void); | ||
201 | |||
202 | /* | ||
203 | * Write back all dirty cache lines in the L2 cache. | ||
204 | * Return 1 if the L2 is disabled or there isn't any writeback. | ||
205 | */ | ||
206 | static inline int meta_l2c_writeback(void) | ||
207 | { | ||
208 | unsigned long flags; | ||
209 | int en; | ||
210 | |||
211 | /* no need to purge if it's not a writeback cache */ | ||
212 | if (!meta_l2c_is_writeback()) | ||
213 | return 1; | ||
214 | |||
215 | /* | ||
216 | * Purge only works if the L2 is enabled, and involves reading back to | ||
217 | * detect completion, so keep this operation atomic with other threads. | ||
218 | */ | ||
219 | __global_lock1(flags); | ||
220 | en = meta_l2c_is_enabled(); | ||
221 | if (likely(en)) { | ||
222 | wr_fence(); | ||
223 | _meta_l2c_purge(); | ||
224 | } | ||
225 | __global_unlock1(flags); | ||
226 | |||
227 | return !en; | ||
228 | } | ||
229 | |||
230 | #else /* CONFIG_METAG_L2C */ | ||
231 | |||
232 | #define meta_l2c_config() 0 | ||
233 | #define meta_l2c_is_present() 0 | ||
234 | #define meta_l2c_is_writeback() 0 | ||
235 | #define meta_l2c_is_unified() 0 | ||
236 | #define meta_l2c_size() 0 | ||
237 | #define meta_l2c_ways() 0 | ||
238 | #define meta_l2c_linesize() 0 | ||
239 | #define meta_l2c_revision() 0 | ||
240 | |||
241 | #define meta_l2c_is_enabled() 0 | ||
242 | #define _meta_l2c_pf_is_enabled() 0 | ||
243 | #define meta_l2c_pf_is_enabled() 0 | ||
244 | #define meta_l2c_disable() 1 | ||
245 | #define meta_l2c_enable() 0 | ||
246 | #define meta_l2c_pf_enable(X) 0 | ||
247 | static inline int meta_l2c_flush(void) | ||
248 | { | ||
249 | return 1; | ||
250 | } | ||
251 | static inline int meta_l2c_writeback(void) | ||
252 | { | ||
253 | return 1; | ||
254 | } | ||
255 | |||
256 | #endif /* CONFIG_METAG_L2C */ | ||
257 | |||
258 | #endif /* _METAG_L2CACHE_H */ | ||
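A hedged sketch of how the query helpers above might be combined, e.g. to report the L2 geometry at boot (the pr_info() formatting is illustrative):

    /* Sketch: report L2 geometry using the query helpers above. */
    static void report_l2_config(void)
    {
    	if (!meta_l2c_is_present()) {
    		pr_info("L2 cache: not present\n");
    		return;
    	}
    	pr_info("L2 cache: %u bytes, %u ways, %u byte lines, %s, rev %u\n",
    		meta_l2c_size(), meta_l2c_ways(), meta_l2c_linesize(),
    		meta_l2c_is_writeback() ? "write-back" : "write-through",
    		meta_l2c_revision());
    }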
diff --git a/arch/metag/include/asm/linkage.h b/arch/metag/include/asm/linkage.h new file mode 100644 index 000000000000..73bf25ba4e18 --- /dev/null +++ b/arch/metag/include/asm/linkage.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef __ASM_LINKAGE_H | ||
2 | #define __ASM_LINKAGE_H | ||
3 | |||
4 | #define __ALIGN .p2align 2 | ||
5 | #define __ALIGN_STR ".p2align 2" | ||
6 | |||
7 | #endif | ||
diff --git a/arch/metag/include/asm/mach/arch.h b/arch/metag/include/asm/mach/arch.h new file mode 100644 index 000000000000..12c5664fea6e --- /dev/null +++ b/arch/metag/include/asm/mach/arch.h | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * arch/metag/include/asm/mach/arch.h | ||
3 | * | ||
4 | * Copyright (C) 2012 Imagination Technologies Ltd. | ||
5 | * | ||
6 | * based on the ARM version: | ||
7 | * Copyright (C) 2000 Russell King | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #ifndef _METAG_MACH_ARCH_H_ | ||
15 | #define _METAG_MACH_ARCH_H_ | ||
16 | |||
17 | #include <linux/stddef.h> | ||
18 | |||
19 | #include <asm/clock.h> | ||
20 | |||
21 | /** | ||
22 | * struct machine_desc - Describes a board controlled by a Meta. | ||
23 | * @name: Board/SoC name. | ||
24 | * @dt_compat: Array of device tree 'compatible' strings. | ||
25 | * @clocks: Clock callbacks. | ||
26 | * | ||
27 | * @nr_irqs: Maximum number of IRQs. | ||
28 | * If 0, defaults to NR_IRQS in asm-generic/irq.h. | ||
29 | * | ||
30 | * @init_early: Early init callback. | ||
31 | * @init_irq: IRQ init callback for setting up IRQ controllers. | ||
32 | * @init_machine: Arch init callback for setting up devices. | ||
33 | * @init_late: Late init callback. | ||
34 | * | ||
35 | * This structure is provided by each board which can be controlled by a Meta. | ||
36 | * It is chosen by matching the compatible strings in the device tree provided | ||
37 | * by the bootloader with the strings in @dt_compat, and sets up any aspects of | ||
38 | * the machine that aren't configured with device tree (yet). | ||
39 | */ | ||
40 | struct machine_desc { | ||
41 | const char *name; | ||
42 | const char **dt_compat; | ||
43 | struct meta_clock_desc *clocks; | ||
44 | |||
45 | unsigned int nr_irqs; | ||
46 | |||
47 | void (*init_early)(void); | ||
48 | void (*init_irq)(void); | ||
49 | void (*init_machine)(void); | ||
50 | void (*init_late)(void); | ||
51 | }; | ||
52 | |||
53 | /* | ||
54 | * Current machine - only accessible during boot. | ||
55 | */ | ||
56 | extern struct machine_desc *machine_desc; | ||
57 | |||
58 | /* | ||
59 | * Machine type table - also only accessible during boot | ||
60 | */ | ||
61 | extern struct machine_desc __arch_info_begin[], __arch_info_end[]; | ||
62 | #define for_each_machine_desc(p) \ | ||
63 | for (p = __arch_info_begin; p < __arch_info_end; p++) | ||
64 | |||
65 | static inline struct machine_desc *default_machine_desc(void) | ||
66 | { | ||
67 | /* the default machine is the last one linked in */ | ||
68 | if (__arch_info_end - 1 < __arch_info_begin) | ||
69 | return NULL; | ||
70 | return __arch_info_end - 1; | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * Set of macros to define architecture features. This is built into | ||
75 | * a table by the linker. | ||
76 | */ | ||
77 | #define MACHINE_START(_type, _name) \ | ||
78 | static const struct machine_desc __mach_desc_##_type \ | ||
79 | __used \ | ||
80 | __attribute__((__section__(".arch.info.init"))) = { \ | ||
81 | .name = _name, | ||
82 | |||
83 | #define MACHINE_END \ | ||
84 | }; | ||
85 | |||
86 | #endif /* _METAG_MACH_ARCH_H_ */ | ||
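Since MACHINE_START/MACHINE_END are meant to be used together by a board file, a hedged sketch of a board description (the board name, compatible string, and init callback are invented):

    /* Sketch: a hypothetical board file using the machine_desc macros. */
    static const char *example_board_compat[] = {
    	"vendor,example-board",		/* invented DT compatible string */
    	NULL,
    };

    static void __init example_board_init(void)
    {
    	/* register board devices not (yet) described in the device tree */
    }

    MACHINE_START(EXAMPLE_BOARD, "Example Board")
    	.dt_compat	= example_board_compat,
    	.init_machine	= example_board_init,
    MACHINE_END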
diff --git a/arch/metag/include/asm/metag_isa.h b/arch/metag/include/asm/metag_isa.h new file mode 100644 index 000000000000..c8aa2ae3899f --- /dev/null +++ b/arch/metag/include/asm/metag_isa.h | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * asm/metag_isa.h | ||
3 | * | ||
4 | * Copyright (C) 2000-2007, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Various defines for Meta instruction set. | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_METAG_ISA_H_ | ||
14 | #define _ASM_METAG_ISA_H_ | ||
15 | |||
16 | |||
17 | /* L1 cache layout */ | ||
18 | |||
19 | /* Data cache line size as bytes and shift */ | ||
20 | #define DCACHE_LINE_BYTES 64 | ||
21 | #define DCACHE_LINE_S 6 | ||
22 | |||
23 | /* Number of ways in the data cache */ | ||
24 | #define DCACHE_WAYS 4 | ||
25 | |||
26 | /* Instruction cache line size as bytes and shift */ | ||
27 | #define ICACHE_LINE_BYTES 64 | ||
28 | #define ICACHE_LINE_S 6 | ||
29 | |||
30 | /* Number of ways in the instruction cache */ | ||
31 | #define ICACHE_WAYS 4 | ||
32 | |||
33 | |||
34 | /* | ||
35 | * CACHEWD/CACHEWL instructions use the bottom 8 bits of the data presented to | ||
36 | * control the operation actually achieved. | ||
37 | */ | ||
38 | /* Use of these two bits should be discouraged since the bits don't have | ||
39 | * consistent meanings. | ||
40 | */ | ||
41 | #define CACHEW_ICACHE_BIT 0x01 | ||
42 | #define CACHEW_TLBFLUSH_BIT 0x02 | ||
43 | |||
44 | #define CACHEW_FLUSH_L1D_L2 0x0 | ||
45 | #define CACHEW_INVALIDATE_L1I 0x1 | ||
46 | #define CACHEW_INVALIDATE_L1DTLB 0x2 | ||
47 | #define CACHEW_INVALIDATE_L1ITLB 0x3 | ||
48 | #define CACHEW_WRITEBACK_L1D_L2 0x4 | ||
49 | #define CACHEW_INVALIDATE_L1D 0x8 | ||
50 | #define CACHEW_INVALIDATE_L1D_L2 0xC | ||
51 | |||
52 | /* | ||
53 | * CACHERD/CACHERL instructions use bits 3:5 of the address presented to | ||
54 | * control the operation achieved and hence the specific result. | ||
55 | */ | ||
56 | #define CACHER_ADDR_BITS 0xFFFFFFC0 | ||
57 | #define CACHER_OPER_BITS 0x00000030 | ||
58 | #define CACHER_OPER_S 4 | ||
59 | #define CACHER_OPER_LINPHY 0 | ||
60 | #define CACHER_ICACHE_BIT 0x00000008 | ||
61 | #define CACHER_ICACHE_S 3 | ||
62 | |||
63 | /* | ||
64 | * CACHERD/CACHERL LINPHY Oper result is one/two 32-bit words | ||
65 | * | ||
66 | * If CRLINPHY0_VAL_BIT (Bit 0) set then, | ||
67 | * Lower 32-bits corresponds to MMCU_ENTRY_* above. | ||
68 | * Upper 32-bits corresponds to CRLINPHY1_* values below (if requested). | ||
69 | * else | ||
70 | * Lower 32-bits corresponds to CRLINPHY0_* values below. | ||
71 | * Upper 32-bits undefined. | ||
72 | */ | ||
73 | #define CRLINPHY0_VAL_BIT 0x00000001 | ||
74 | #define CRLINPHY0_FIRST_BIT 0x00000004 /* Set if VAL=0 due to first level */ | ||
75 | |||
76 | #define CRLINPHY1_READ_BIT 0x00000001 /* Set if reads permitted */ | ||
77 | #define CRLINPHY1_SINGLE_BIT 0x00000004 /* Set if TLB does not cache entry */ | ||
78 | #define CRLINPHY1_PAGEMSK_BITS 0x0000FFF0 /* Set to ((2^n-1)>>12) value */ | ||
79 | #define CRLINPHY1_PAGEMSK_S 4 | ||
80 | |||
81 | #endif /* _ASM_METAG_ISA_H_ */ | ||
diff --git a/arch/metag/include/asm/metag_mem.h b/arch/metag/include/asm/metag_mem.h new file mode 100644 index 000000000000..3f7b54d8ccac --- /dev/null +++ b/arch/metag/include/asm/metag_mem.h | |||
@@ -0,0 +1,1106 @@ | |||
1 | /* | ||
2 | * asm/metag_mem.h | ||
3 | * | ||
4 | * Copyright (C) 2000-2007, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Various defines for Meta (memory-mapped) registers. | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_METAG_MEM_H_ | ||
14 | #define _ASM_METAG_MEM_H_ | ||
15 | |||
16 | /***************************************************************************** | ||
17 | * META MEMORY MAP LINEAR ADDRESS VALUES | ||
18 | ****************************************************************************/ | ||
19 | /* | ||
20 | * COMMON MEMORY MAP | ||
21 | * ----------------- | ||
22 | */ | ||
23 | |||
24 | #define LINSYSTEM_BASE 0x00200000 | ||
25 | #define LINSYSTEM_LIMIT 0x07FFFFFF | ||
26 | |||
27 | /* Linear cache flush now implemented via DCACHE instruction. These defines | ||
28 | relate to a special region that used to exist for achieving cache flushes. | ||
29 | */ | ||
30 | #define LINSYSLFLUSH_S 0 | ||
31 | |||
32 | #define LINSYSRES0_BASE 0x00200000 | ||
33 | #define LINSYSRES0_LIMIT 0x01FFFFFF | ||
34 | |||
35 | #define LINSYSCUSTOM_BASE 0x02000000 | ||
36 | #define LINSYSCUSTOM_LIMIT 0x02FFFFFF | ||
37 | |||
38 | #define LINSYSEXPAND_BASE 0x03000000 | ||
39 | #define LINSYSEXPAND_LIMIT 0x03FFFFFF | ||
40 | |||
41 | #define LINSYSEVENT_BASE 0x04000000 | ||
42 | #define LINSYSEVENT_WR_ATOMIC_UNLOCK 0x04000000 | ||
43 | #define LINSYSEVENT_WR_ATOMIC_LOCK 0x04000040 | ||
44 | #define LINSYSEVENT_WR_CACHE_DISABLE 0x04000080 | ||
45 | #define LINSYSEVENT_WR_CACHE_ENABLE 0x040000C0 | ||
46 | #define LINSYSEVENT_WR_COMBINE_FLUSH 0x04000100 | ||
47 | #define LINSYSEVENT_WR_FENCE 0x04000140 | ||
48 | #define LINSYSEVENT_LIMIT 0x04000FFF | ||
49 | |||
50 | #define LINSYSCFLUSH_BASE 0x04400000 | ||
51 | #define LINSYSCFLUSH_DCACHE_LINE 0x04400000 | ||
52 | #define LINSYSCFLUSH_ICACHE_LINE 0x04500000 | ||
53 | #define LINSYSCFLUSH_MMCU 0x04700000 | ||
54 | #ifndef METAC_1_2 | ||
55 | #define LINSYSCFLUSH_TxMMCU_BASE 0x04700020 | ||
56 | #define LINSYSCFLUSH_TxMMCU_STRIDE 0x00000008 | ||
57 | #endif | ||
58 | #define LINSYSCFLUSH_ADDR_BITS 0x000FFFFF | ||
59 | #define LINSYSCFLUSH_ADDR_S 0 | ||
60 | #define LINSYSCFLUSH_LIMIT 0x047FFFFF | ||
61 | |||
62 | #define LINSYSCTRL_BASE 0x04800000 | ||
63 | #define LINSYSCTRL_LIMIT 0x04FFFFFF | ||
64 | |||
65 | #define LINSYSMTABLE_BASE 0x05000000 | ||
66 | #define LINSYSMTABLE_LIMIT 0x05FFFFFF | ||
67 | |||
68 | #define LINSYSDIRECT_BASE 0x06000000 | ||
69 | #define LINSYSDIRECT_LIMIT 0x07FFFFFF | ||
70 | |||
71 | #define LINLOCAL_BASE 0x08000000 | ||
72 | #define LINLOCAL_LIMIT 0x7FFFFFFF | ||
73 | |||
74 | #define LINCORE_BASE 0x80000000 | ||
75 | #define LINCORE_LIMIT 0x87FFFFFF | ||
76 | |||
77 | #define LINCORE_CODE_BASE 0x80000000 | ||
78 | #define LINCORE_CODE_LIMIT 0x81FFFFFF | ||
79 | |||
80 | #define LINCORE_DATA_BASE 0x82000000 | ||
81 | #define LINCORE_DATA_LIMIT 0x83FFFFFF | ||
82 | |||
83 | |||
84 | /* The core can support locked icache lines in this region */ | ||
85 | #define LINCORE_ICACHE_BASE 0x84000000 | ||
86 | #define LINCORE_ICACHE_LIMIT 0x85FFFFFF | ||
87 | |||
88 | /* The core can support locked dcache lines in this region */ | ||
89 | #define LINCORE_DCACHE_BASE 0x86000000 | ||
90 | #define LINCORE_DCACHE_LIMIT 0x87FFFFFF | ||
91 | |||
92 | #define LINGLOBAL_BASE 0x88000000 | ||
93 | #define LINGLOBAL_LIMIT 0xFFFDFFFF | ||
94 | |||
95 | /* | ||
96 | * CHIP Core Register Map | ||
97 | * ---------------------- | ||
98 | */ | ||
99 | #define CORE_HWBASE 0x04800000 | ||
100 | #define PRIV_HWBASE 0x04810000 | ||
101 | #define TRIG_HWBASE 0x04820000 | ||
102 | #define SYSC_HWBASE 0x04830000 | ||
103 | |||
104 | /***************************************************************************** | ||
105 | * INTER-THREAD KICK REGISTERS FOR SOFTWARE EVENT GENERATION | ||
106 | ****************************************************************************/ | ||
107 | /* | ||
108 | * These values define memory mapped registers that can be used to supply | ||
109 | * kicks to threads that service arbitrary software events. | ||
110 | */ | ||
111 | |||
112 | #define T0KICK 0x04800800 /* Background kick 0 */ | ||
113 | #define TXXKICK_MAX 0xFFFF /* Maximum kicks */ | ||
114 | #define TnXKICK_STRIDE 0x00001000 /* Thread scale value */ | ||
115 | #define TnXKICK_STRIDE_S 12 | ||
116 | #define T0KICKI 0x04800808 /* Interrupt kick 0 */ | ||
117 | #define TXIKICK_OFFSET 0x00000008 /* Int level offset value */ | ||
118 | #define T1KICK 0x04801800 /* Background kick 1 */ | ||
119 | #define T1KICKI 0x04801808 /* Interrupt kick 1 */ | ||
120 | #define T2KICK 0x04802800 /* Background kick 2 */ | ||
121 | #define T2KICKI 0x04802808 /* Interrupt kick 2 */ | ||
122 | #define T3KICK 0x04803800 /* Background kick 3 */ | ||
123 | #define T3KICKI 0x04803808 /* Interrupt kick 3 */ | ||
124 | |||
125 | /***************************************************************************** | ||
126 | * GLOBAL REGISTER ACCESS RESOURCES | ||
127 | ****************************************************************************/ | ||
128 | /* | ||
129 | * These values define memory mapped registers that allow access to the | ||
130 | * internal state of all threads in order to allow global set-up of thread | ||
131 | * state and external handling of thread events, errors, or debugging. | ||
132 | * | ||
133 | * The actual unit and register index values needed to access individual | ||
134 | * registers are chip specific - see METAC_TXUXX_VALUES in metac_x_y.h. | ||
135 | * However two C array initialisers TXUXX_MASKS and TGUXX_MASKS will always be | ||
136 | * defined to allow arbitrary loading, display, and saving of all valid | ||
137 | * register states without detailed knowledge of their purpose - TXUXX sets | ||
138 | * bits for all valid registers and TGUXX sets bits for the sub-set which are | ||
139 | * global. | ||
140 | */ | ||
141 | |||
142 | #define T0UCTREG0 0x04800000 /* Access to all CT regs */ | ||
143 | #define TnUCTRX_STRIDE 0x00001000 /* Thread scale value */ | ||
144 | #define TXUCTREGn_STRIDE 0x00000008 /* Register scale value */ | ||
145 | |||
146 | #define TXUXXRXDT 0x0480FFF0 /* Data to/from any threads reg */ | ||
147 | #define TXUXXRXRQ 0x0480FFF8 | ||
148 | #define TXUXXRXRQ_DREADY_BIT 0x80000000 /* Poll for done */ | ||
149 | #define TXUXXRXRQ_DSPEXT_BIT 0x00020000 /* Addr DSP Regs */ | ||
150 | #define TXUXXRXRQ_RDnWR_BIT 0x00010000 /* Set for read */ | ||
151 | #define TXUXXRXRQ_TX_BITS 0x00003000 /* Thread number */ | ||
152 | #define TXUXXRXRQ_TX_S 12 | ||
153 | #define TXUXXRXRQ_RX_BITS 0x000001F0 /* Register num */ | ||
154 | #define TXUXXRXRQ_RX_S 4 | ||
155 | #define TXUXXRXRQ_DSPRARD0 0 /* DSP RAM A Read Pointer 0 */ | ||
156 | #define TXUXXRXRQ_DSPRARD1 1 /* DSP RAM A Read Pointer 1 */ | ||
157 | #define TXUXXRXRQ_DSPRAWR0 2 /* DSP RAM A Write Pointer 0 */ | ||
158 | #define TXUXXRXRQ_DSPRAWR2 3 /* DSP RAM A Write Pointer 1 */ | ||
159 | #define TXUXXRXRQ_DSPRBRD0 4 /* DSP RAM B Read Pointer 0 */ | ||
160 | #define TXUXXRXRQ_DSPRBRD1 5 /* DSP RAM B Read Pointer 1 */ | ||
161 | #define TXUXXRXRQ_DSPRBWR0 6 /* DSP RAM B Write Pointer 0 */ | ||
162 | #define TXUXXRXRQ_DSPRBWR1 7 /* DSP RAM B Write Pointer 1 */ | ||
163 | #define TXUXXRXRQ_DSPRARINC0 8 /* DSP RAM A Read Increment 0 */ | ||
164 | #define TXUXXRXRQ_DSPRARINC1 9 /* DSP RAM A Read Increment 1 */ | ||
165 | #define TXUXXRXRQ_DSPRAWINC0 10 /* DSP RAM A Write Increment 0 */ | ||
166 | #define TXUXXRXRQ_DSPRAWINC1 11 /* DSP RAM A Write Increment 1 */ | ||
167 | #define TXUXXRXRQ_DSPRBRINC0 12 /* DSP RAM B Read Increment 0 */ | ||
168 | #define TXUXXRXRQ_DSPRBRINC1 13 /* DSP RAM B Read Increment 1 */ | ||
169 | #define TXUXXRXRQ_DSPRBWINC0 14 /* DSP RAM B Write Increment 0 */ | ||
170 | #define TXUXXRXRQ_DSPRBWINC1 15 /* DSP RAM B Write Increment 1 */ | ||
171 | |||
172 | #define TXUXXRXRQ_ACC0L0 16 /* Accumulator 0 bottom 32-bits */ | ||
173 | #define TXUXXRXRQ_ACC1L0 17 /* Accumulator 1 bottom 32-bits */ | ||
174 | #define TXUXXRXRQ_ACC2L0 18 /* Accumulator 2 bottom 32-bits */ | ||
175 | #define TXUXXRXRQ_ACC3L0 19 /* Accumulator 3 bottom 32-bits */ | ||
176 | #define TXUXXRXRQ_ACC0HI 20 /* Accumulator 0 top 8-bits */ | ||
177 | #define TXUXXRXRQ_ACC1HI 21 /* Accumulator 1 top 8-bits */ | ||
178 | #define TXUXXRXRQ_ACC2HI 22 /* Accumulator 2 top 8-bits */ | ||
179 | #define TXUXXRXRQ_ACC3HI 23 /* Accumulator 3 top 8-bits */ | ||
180 | #define TXUXXRXRQ_UXX_BITS 0x0000000F /* Unit number */ | ||
181 | #define TXUXXRXRQ_UXX_S 0 | ||
182 | |||
183 | /***************************************************************************** | ||
184 | * PRIVILEGE CONTROL VALUES FOR MEMORY MAPPED RESOURCES | ||
185 | ****************************************************************************/ | ||
186 | /* | ||
187 | * These values define memory mapped registers that control, and set the | ||
188 | * privilege required to access, other memory mapped resources. These | ||
189 | * registers themselves always require privilege to update them. | ||
190 | */ | ||
191 | |||
192 | #define TXPRIVREG_STRIDE 0x8 /* Delta between per-thread regs */ | ||
193 | #define TXPRIVREG_STRIDE_S 3 | ||
194 | |||
195 | /* | ||
196 | * Each bit 0 to 15 defines privilege required to access internal register | ||
197 | * regions 0x04800000 to 0x048FFFFF in 64k chunks | ||
198 | */ | ||
199 | #define T0PIOREG 0x04810100 | ||
200 | #define T1PIOREG 0x04810108 | ||
201 | #define T2PIOREG 0x04810110 | ||
202 | #define T3PIOREG 0x04810118 | ||
203 | |||
204 | /* | ||
205 | * Each bit 0 to 31 defines privilege required to use the pair of | ||
206 | * system events implemented as writes in the region 0x04000000 to | ||
207 | * 0x04000FFF in 2*64 byte chunks. | ||
208 | */ | ||
209 | #define T0PSYREG 0x04810180 | ||
210 | #define T1PSYREG 0x04810188 | ||
211 | #define T2PSYREG 0x04810190 | ||
212 | #define T3PSYREG 0x04810198 | ||
213 | |||
214 | /* | ||
215 | * CHIP PRIV CONTROLS | ||
216 | * ------------------ | ||
217 | */ | ||
218 | |||
219 | /* The TXPIOREG register holds a bit mask directly mappable to | ||
220 | corresponding addresses in the range 0x04800000 to 0x049FFFFF */ | ||
221 | #define TXPIOREG_ADDR_BITS 0x1F0000 /* Up to 32x64K bytes */ | ||
222 | #define TXPIOREG_ADDR_S 16 | ||
223 | |||
224 | /* Hence based on the _HWBASE values ... */ | ||
225 | #define TXPIOREG_CORE_BIT (1<<((0x04800000>>16)&0x1F)) | ||
226 | #define TXPIOREG_PRIV_BIT (1<<((0x04810000>>16)&0x1F)) | ||
227 | #define TXPIOREG_TRIG_BIT (1<<((0x04820000>>16)&0x1F)) | ||
228 | #define TXPIOREG_SYSC_BIT (1<<((0x04830000>>16)&0x1F)) | ||
229 | |||
230 | #define TXPIOREG_WRC_BIT 0x00080000 /* Wr combiner reg priv */ | ||
231 | #define TXPIOREG_LOCALBUS_RW_BIT 0x00040000 /* Local bus rd/wr priv */ | ||
232 | #define TXPIOREG_SYSREGBUS_RD_BIT 0x00020000 /* Sys reg bus read priv */ | ||
233 | #define TXPIOREG_SYSREGBUS_WR_BIT 0x00010000 /* Sys reg bus write priv */ | ||
234 | |||
235 | /* CORE region privilege controls */ | ||
236 | #define T0PRIVCORE 0x04800828 | ||
237 | #define TXPRIVCORE_TXBKICK_BIT 0x001 /* Background kick priv */ | ||
238 | #define TXPRIVCORE_TXIKICK_BIT 0x002 /* Interrupt kick priv */ | ||
239 | #define TXPRIVCORE_TXAMAREGX_BIT 0x004 /* TXAMAREG4|5|6 priv */ | ||
240 | #define TnPRIVCORE_STRIDE 0x00001000 | ||
241 | |||
242 | #define T0PRIVSYSR 0x04810000 | ||
243 | #define TnPRIVSYSR_STRIDE 0x00000008 | ||
244 | #define TnPRIVSYSR_STRIDE_S 3 | ||
245 | #define TXPRIVSYSR_CFLUSH_BIT 0x01 | ||
246 | #define TXPRIVSYSR_MTABLE_BIT 0x02 | ||
247 | #define TXPRIVSYSR_DIRECT_BIT 0x04 | ||
248 | #ifdef METAC_1_2 | ||
249 | #define TXPRIVSYSR_ALL_BITS 0x07 | ||
250 | #else | ||
251 | #define TXPRIVSYSR_CORE_BIT 0x08 | ||
252 | #define TXPRIVSYSR_CORECODE_BIT 0x10 | ||
253 | #define TXPRIVSYSR_ALL_BITS 0x1F | ||
254 | #endif | ||
255 | #define T1PRIVSYSR 0x04810008 | ||
256 | #define T2PRIVSYSR 0x04810010 | ||
257 | #define T3PRIVSYSR 0x04810018 | ||
258 | |||
259 | /***************************************************************************** | ||
260 | * H/W TRIGGER STATE/LEVEL REGISTERS AND H/W TRIGGER VECTORS | ||
261 | ****************************************************************************/ | ||
262 | /* | ||
263 | * These values define memory mapped registers that control, and report | ||
264 | * the state of, hardware trigger sources both external to the META processor | ||
265 | * and internal to it. | ||
266 | */ | ||
267 | |||
268 | #define HWSTATMETA 0x04820000 /* Hardware status/clear META trig */ | ||
269 | #define HWSTATMETA_T0HALT_BITS 0xF | ||
270 | #define HWSTATMETA_T0HALT_S 0 | ||
271 | #define HWSTATMETA_T0BHALT_BIT 0x1 /* Background HALT */ | ||
272 | #define HWSTATMETA_T0IHALT_BIT 0x2 /* Interrupt HALT */ | ||
273 | #define HWSTATMETA_T0PHALT_BIT 0x4 /* PF/RO Memory HALT */ | ||
274 | #define HWSTATMETA_T0AMATR_BIT 0x8 /* AMA trigger */ | ||
275 | #define HWSTATMETA_TnINT_S 4 /* Shift by (thread*4) */ | ||
276 | #define HWSTATEXT 0x04820010 /* H/W status/clear external trigs 0-31 */ | ||
277 | #define HWSTATEXT2 0x04820018 /* H/W status/clear external trigs 32-63 */ | ||
278 | #define HWSTATEXT4 0x04820020 /* H/W status/clear external trigs 64-95 */ | ||
279 | #define HWSTATEXT6 0x04820028 /* H/W status/clear external trigs 96-127 */ | ||
280 | #define HWLEVELEXT 0x04820030 /* Edge/Level type of external trigs 0-31 */ | ||
281 | #define HWLEVELEXT2 0x04820038 /* Edge/Level type of external trigs 32-63 */ | ||
282 | #define HWLEVELEXT4 0x04820040 /* Edge/Level type of external trigs 64-95 */ | ||
283 | #define HWLEVELEXT6 0x04820048 /* Edge/Level type of external trigs 96-127 */ | ||
284 | #define HWLEVELEXT_XXX_LEVEL 1 /* Level sense logic in HWSTATEXTn */ | ||
285 | #define HWLEVELEXT_XXX_EDGE 0 | ||
286 | #define HWMASKEXT 0x04820050 /* Enable/disable of external trigs 0-31 */ | ||
287 | #define HWMASKEXT2 0x04820058 /* Enable/disable of external trigs 32-63 */ | ||
288 | #define HWMASKEXT4 0x04820060 /* Enable/disable of external trigs 64-95 */ | ||
289 | #define HWMASKEXT6 0x04820068 /* Enable/disable of external trigs 96-127 */ | ||
290 | #define T0VECINT_BHALT 0x04820500 /* Background HALT trigger vector */ | ||
291 | #define TXVECXXX_BITS 0xF /* Per-trigger vector vals 0,1,4-15 */ | ||
292 | #define TXVECXXX_S 0 | ||
293 | #define T0VECINT_IHALT 0x04820508 /* Interrupt HALT */ | ||
294 | #define T0VECINT_PHALT 0x04820510 /* PF/RO memory fault */ | ||
295 | #define T0VECINT_AMATR 0x04820518 /* AMA trigger */ | ||
296 | #define TnVECINT_STRIDE 0x00000020 /* Per thread stride */ | ||
297 | #define HWVEC0EXT 0x04820700 /* Vectors for external triggers 0-31 */ | ||
298 | #define HWVEC20EXT 0x04821700 /* Vectors for external triggers 32-63 */ | ||
299 | #define HWVEC40EXT 0x04822700 /* Vectors for external triggers 64-95 */ | ||
300 | #define HWVEC60EXT 0x04823700 /* Vectors for external triggers 96-127 */ | ||
301 | #define HWVECnEXT_STRIDE 0x00000008 /* Per trigger stride */ | ||
302 | #define HWVECnEXT_DEBUG 0x1 /* Redirect trigger to debug i/f */ | ||
303 | |||
304 | /* | ||
305 | * CORE HWCODE-BREAKPOINT REGISTERS/VALUES | ||
306 | * --------------------------------------- | ||
307 | */ | ||
308 | #define CODEB0ADDR 0x0480FF00 /* Address specifier */ | ||
309 | #define CODEBXADDR_MATCHX_BITS 0xFFFFFFFC | ||
310 | #define CODEBXADDR_MATCHX_S 2 | ||
311 | #define CODEB0CTRL 0x0480FF08 /* Control */ | ||
312 | #define CODEBXCTRL_MATEN_BIT 0x80000000 /* Match 'Enable' */ | ||
313 | #define CODEBXCTRL_MATTXEN_BIT 0x10000000 /* Match threadn enable */ | ||
314 | #define CODEBXCTRL_HITC_BITS 0x00FF0000 /* Hit counter */ | ||
315 | #define CODEBXCTRL_HITC_S 16 | ||
316 | #define CODEBXHITC_NEXT 0xFF /* Next 'hit' will trigger */ | ||
317 | #define CODEBXHITC_HIT1 0x00 /* No 'hits' after trigger */ | ||
318 | #define CODEBXCTRL_MMASK_BITS 0x0000FFFC /* Mask ADDR_MATCH bits */ | ||
319 | #define CODEBXCTRL_MMASK_S 2 | ||
320 | #define CODEBXCTRL_MATLTX_BITS 0x00000003 /* Match threadn LOCAL addr */ | ||
321 | #define CODEBXCTRL_MATLTX_S 0 /* Match threadn LOCAL addr */ | ||
322 | #define CODEBnXXXX_STRIDE 0x00000010 /* Stride between CODEB reg sets */ | ||
323 | #define CODEBnXXXX_STRIDE_S 4 | ||
324 | #define CODEBnXXXX_LIMIT 3 /* Sets 0-3 */ | ||
325 | |||
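As a worked example of the breakpoint fields above, the registers for set n sit CODEBnXXXX_STRIDE bytes above set 0, and a typical control word enables matching with a hit on the next fetch. A hedged sketch; meta_write() and codeb_arm() are hypothetical helpers, and the thread-enable encoding is simplified:

#include <stdint.h>

#define CODEB0ADDR             0x0480FF00
#define CODEB0CTRL             0x0480FF08
#define CODEBnXXXX_STRIDE      0x00000010
#define CODEBXCTRL_MATEN_BIT   0x80000000
#define CODEBXCTRL_MATTXEN_BIT 0x10000000
#define CODEBXCTRL_HITC_S      16
#define CODEBXHITC_NEXT        0xFF

static inline void meta_write(uint32_t addr, uint32_t val)
{
	*(volatile uint32_t *)(uintptr_t)addr = val; /* bare-metal MMIO */
}

/* Arm code breakpoint set 'n' on 'pc', firing on the next hit. */
static void codeb_arm(unsigned int n, uint32_t pc)
{
	meta_write(CODEB0ADDR + n * CODEBnXXXX_STRIDE, pc & 0xFFFFFFFC);
	meta_write(CODEB0CTRL + n * CODEBnXXXX_STRIDE,
		   CODEBXCTRL_MATEN_BIT | CODEBXCTRL_MATTXEN_BIT |
		   ((uint32_t)CODEBXHITC_NEXT << CODEBXCTRL_HITC_S));
}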
326 | /* | ||
327 | * CORE DATA-WATCHPOINT REGISTERS/VALUES | ||
328 | * ------------------------------------- | ||
329 | */ | ||
330 | #define DATAW0ADDR 0x0480FF40 /* Address specifier */ | ||
331 | #define DATAWXADDR_MATCHR_BITS 0xFFFFFFF8 | ||
332 | #define DATAWXADDR_MATCHR_S 3 | ||
333 | #define DATAWXADDR_MATCHW_BITS 0xFFFFFFFF | ||
334 | #define DATAWXADDR_MATCHW_S 0 | ||
335 | #define DATAW0CTRL 0x0480FF48 /* Control */ | ||
336 | #define DATAWXCTRL_MATRD_BIT 0x80000000 /* Match 'Read' */ | ||
337 | #ifndef METAC_1_2 | ||
338 | #define DATAWXCTRL_MATNOTTX_BIT 0x20000000 /* Invert threadn enable */ | ||
339 | #endif | ||
340 | #define DATAWXCTRL_MATWR_BIT 0x40000000 /* Match 'Write' */ | ||
341 | #define DATAWXCTRL_MATTXEN_BIT 0x10000000 /* Match threadn enable */ | ||
342 | #define DATAWXCTRL_WRSIZE_BITS 0x0F000000 /* Write Match Size */ | ||
343 | #define DATAWXCTRL_WRSIZE_S 24 | ||
344 | #define DATAWWRSIZE_ANY 0 /* Any size transaction matches */ | ||
345 | #define DATAWWRSIZE_8BIT 1 /* Specific sizes ... */ | ||
346 | #define DATAWWRSIZE_16BIT 2 | ||
347 | #define DATAWWRSIZE_32BIT 3 | ||
348 | #define DATAWWRSIZE_64BIT 4 | ||
349 | #define DATAWXCTRL_HITC_BITS 0x00FF0000 /* Hit counter */ | ||
350 | #define DATAWXCTRL_HITC_S 16 | ||
351 | #define DATAWXHITC_NEXT 0xFF /* Next 'hit' will trigger */ | ||
352 | #define DATAWXHITC_HIT1 0x00 /* No 'hits' after trigger */ | ||
353 | #define DATAWXCTRL_MMASK_BITS 0x0000FFF8 /* Mask ADDR_MATCH bits */ | ||
354 | #define DATAWXCTRL_MMASK_S 3 | ||
355 | #define DATAWXCTRL_MATLTX_BITS 0x00000003 /* Match threadn LOCAL addr */ | ||
356 | #define DATAWXCTRL_MATLTX_S 0 /* Match threadn LOCAL addr */ | ||
357 | #define DATAW0DMATCH0 0x0480FF50 /* Write match data */ | ||
358 | #define DATAW0DMATCH1 0x0480FF58 | ||
359 | #define DATAW0DMASK0 0x0480FF60 /* Write match data mask */ | ||
360 | #define DATAW0DMASK1 0x0480FF68 | ||
361 | #define DATAWnXXXX_STRIDE 0x00000040 /* Stride between DATAW reg sets */ | ||
362 | #define DATAWnXXXX_STRIDE_S 6 | ||
363 | #define DATAWnXXXX_LIMIT 1 /* Sets 0,1 */ | ||
364 | |||
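Similarly for the watchpoint fields above, a control word that watches 32-bit writes for the enabled thread and fires on the next matching access composes as in this illustrative sketch (the composition only; programming DATAW0ADDR/DATAW0CTRL follows the same MMIO pattern as the breakpoint example):

#include <stdint.h>

#define DATAWXCTRL_MATWR_BIT   0x40000000
#define DATAWXCTRL_MATTXEN_BIT 0x10000000
#define DATAWXCTRL_WRSIZE_S    24
#define DATAWWRSIZE_32BIT      3
#define DATAWXCTRL_HITC_S      16
#define DATAWXHITC_NEXT        0xFF

/* Watch 32-bit writes; fire on the next matching access. */
static const uint32_t dataw_ctrl =
	DATAWXCTRL_MATWR_BIT | DATAWXCTRL_MATTXEN_BIT |
	((uint32_t)DATAWWRSIZE_32BIT << DATAWXCTRL_WRSIZE_S) |
	((uint32_t)DATAWXHITC_NEXT << DATAWXCTRL_HITC_S);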
365 | /* | ||
366 | * CHIP Automatic Mips Allocation control registers | ||
367 | * ------------------------------------------------ | ||
368 | */ | ||
369 | |||
370 | /* CORE memory mapped AMA registers */ | ||
371 | #define T0AMAREG4 0x04800810 | ||
372 | #define TXAMAREG4_POOLSIZE_BITS 0x3FFFFF00 | ||
373 | #define TXAMAREG4_POOLSIZE_S 8 | ||
374 | #define TXAMAREG4_AVALUE_BITS 0x000000FF | ||
375 | #define TXAMAREG4_AVALUE_S 0 | ||
376 | #define T0AMAREG5 0x04800818 | ||
377 | #define TXAMAREG5_POOLC_BITS 0x07FFFFFF | ||
378 | #define TXAMAREG5_POOLC_S 0 | ||
379 | #define T0AMAREG6 0x04800820 | ||
380 | #define TXAMAREG6_DLINEDEF_BITS 0x00FFFFF0 | ||
381 | #define TXAMAREG6_DLINEDEF_S 0 | ||
382 | #define TnAMAREGX_STRIDE 0x00001000 | ||
383 | |||
384 | /* | ||
385 | * Memory Management Control Unit Table Entries | ||
386 | * -------------------------------------------- | ||
387 | */ | ||
388 | #define MMCU_ENTRY_S 4 /* -> Entry size */ | ||
389 | #define MMCU_ENTRY_ADDR_BITS 0xFFFFF000 /* Physical address */ | ||
390 | #define MMCU_ENTRY_ADDR_S 12 /* -> Page size */ | ||
391 | #define MMCU_ENTRY_CWIN_BITS 0x000000C0 /* Caching 'window' selection */ | ||
392 | #define MMCU_ENTRY_CWIN_S 6 | ||
393 | #define MMCU_CWIN_UNCACHED 0 /* May not be memory etc. */ | ||
394 | #define MMCU_CWIN_BURST 1 /* Cached but LRU unset */ | ||
395 | #define MMCU_CWIN_C1SET 2 /* Cached in 1 set only */ | ||
396 | #define MMCU_CWIN_CACHED 3 /* Fully cached */ | ||
397 | #define MMCU_ENTRY_CACHE_BIT 0x00000080 /* Set for cached region */ | ||
398 | #define MMCU_ECACHE1_FULL_BIT 0x00000040 /* Use all the sets */ | ||
399 | #define MMCU_ECACHE0_BURST_BIT 0x00000040 /* Match bursts */ | ||
400 | #define MMCU_ENTRY_SYS_BIT 0x00000010 /* Sys-coherent access required */ | ||
401 | #define MMCU_ENTRY_WRC_BIT 0x00000008 /* Write combining allowed */ | ||
402 | #define MMCU_ENTRY_PRIV_BIT 0x00000004 /* Privilege required */ | ||
403 | #define MMCU_ENTRY_WR_BIT 0x00000002 /* Writes allowed */ | ||
404 | #define MMCU_ENTRY_VAL_BIT 0x00000001 /* Entry is valid */ | ||
405 | |||
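Putting the entry fields together, a table entry mapping a 4K physical page as valid, writable and fully cached might be composed as in this sketch (mmcu_entry is an illustrative helper, not part of the header):

#include <stdint.h>

#define MMCU_ENTRY_ADDR_BITS 0xFFFFF000
#define MMCU_ENTRY_CWIN_S    6
#define MMCU_CWIN_CACHED     3
#define MMCU_ENTRY_WR_BIT    0x00000002
#define MMCU_ENTRY_VAL_BIT   0x00000001

static inline uint32_t mmcu_entry(uint32_t phys)
{
	return (phys & MMCU_ENTRY_ADDR_BITS)
	       | (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
	       | MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT;
}

Note that MMCU_CWIN_CACHED in the CWIN field (0xC0) subsumes both MMCU_ENTRY_CACHE_BIT (0x80) and MMCU_ECACHE1_FULL_BIT (0x40), so the two views of those bits agree.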
406 | #ifdef METAC_2_1 | ||
407 | /* | ||
408 | * Extended first-level/top table entries have extra/larger fields in later | ||
409 | * cores as bits 11:0 previously had no effect in such table entries. | ||
410 | */ | ||
411 | #define MMCU_E1ENT_ADDR_BITS 0xFFFFFFC0 /* Physical address */ | ||
412 | #define MMCU_E1ENT_ADDR_S 6 /* -> resolution < page size */ | ||
413 | #define MMCU_E1ENT_PGSZ_BITS 0x0000001E /* Page size for 2nd level */ | ||
414 | #define MMCU_E1ENT_PGSZ_S 1 | ||
415 | #define MMCU_E1ENT_PGSZ0_POWER 12 /* PgSz 0 -> 4K */ | ||
416 | #define MMCU_E1ENT_PGSZ_MAX 10 /* PgSz 10 -> 4M maximum */ | ||
417 | #define MMCU_E1ENT_MINIM_BIT 0x00000020 | ||
418 | #endif /* METAC_2_1 */ | ||
419 | |||
420 | /* MMCU control register in SYSC region */ | ||
421 | #define MMCU_TABLE_PHYS_ADDR 0x04830010 | ||
422 | #define MMCU_TABLE_PHYS_ADDR_BITS 0xFFFFFFFC | ||
423 | #ifdef METAC_2_1 | ||
424 | #define MMCU_TABLE_PHYS_EXTEND 0x00000001 /* See below */ | ||
425 | #endif | ||
426 | #define MMCU_DCACHE_CTRL_ADDR 0x04830018 | ||
427 | #define MMCU_xCACHE_CTRL_ENABLE_BIT 0x00000001 | ||
428 | #define MMCU_xCACHE_CTRL_PARTITION_BIT 0x00000000 /* See xCPART below */ | ||
429 | #define MMCU_ICACHE_CTRL_ADDR 0x04830020 | ||
430 | |||
431 | #ifdef METAC_2_1 | ||
432 | |||
433 | /* | ||
434 | * Allow direct access to physical memory used to implement MMU table. | ||
435 | * | ||
436 | * Each is based on a corresponding MMCU_TnLOCAL_TABLE_PHYSn or similar | ||
437 | * MMCU_TnGLOBAL_TABLE_PHYSn register pair (see next). | ||
438 | */ | ||
439 | #define LINSYSMEMT0L_BASE 0x05000000 | ||
440 | #define LINSYSMEMT0L_LIMIT 0x051FFFFF | ||
441 | #define LINSYSMEMTnX_STRIDE 0x00200000 /* 2MB Local per thread */ | ||
442 | #define LINSYSMEMTnX_STRIDE_S 21 | ||
443 | #define LINSYSMEMTXG_OFFSET 0x00800000 /* +2MB Global per thread */ | ||
444 | #define LINSYSMEMTXG_OFFSET_S 23 | ||
445 | #define LINSYSMEMT1L_BASE 0x05200000 | ||
446 | #define LINSYSMEMT1L_LIMIT 0x053FFFFF | ||
447 | #define LINSYSMEMT2L_BASE 0x05400000 | ||
448 | #define LINSYSMEMT2L_LIMIT 0x055FFFFF | ||
449 | #define LINSYSMEMT3L_BASE 0x05600000 | ||
450 | #define LINSYSMEMT3L_LIMIT 0x057FFFFF | ||
451 | #define LINSYSMEMT0G_BASE 0x05800000 | ||
452 | #define LINSYSMEMT0G_LIMIT 0x059FFFFF | ||
453 | #define LINSYSMEMT1G_BASE 0x05A00000 | ||
454 | #define LINSYSMEMT1G_LIMIT 0x05BFFFFF | ||
455 | #define LINSYSMEMT2G_BASE 0x05C00000 | ||
456 | #define LINSYSMEMT2G_LIMIT 0x05DFFFFF | ||
457 | #define LINSYSMEMT3G_BASE 0x05E00000 | ||
458 | #define LINSYSMEMT3G_LIMIT 0x05FFFFFF | ||
459 | |||
460 | /* | ||
461 | * Extended MMU table functionality allows a sparse or flat table to be | ||
462 | * described much more efficiently than before. | ||
463 | */ | ||
464 | #define MMCU_T0LOCAL_TABLE_PHYS0 0x04830700 | ||
465 | #define MMCU_TnX_TABLE_PHYSX_STRIDE 0x20 /* Offset per thread */ | ||
466 | #define MMCU_TnX_TABLE_PHYSX_STRIDE_S 5 | ||
467 | #define MMCU_TXG_TABLE_PHYSX_OFFSET 0x10 /* Global versus local */ | ||
468 | #define MMCU_TXG_TABLE_PHYSX_OFFSET_S 4 | ||
469 | #define MMCU_TBLPHYS0_DCCTRL_BITS 0x000000DF /* DC controls */ | ||
470 | #define MMCU_TBLPHYS0_ENTLB_BIT 0x00000020 /* Cache in TLB */ | ||
471 | #define MMCU_TBLPHYS0_TBLSZ_BITS 0x00000F00 /* Area supported */ | ||
472 | #define MMCU_TBLPHYS0_TBLSZ_S 8 | ||
473 | #define MMCU_TBLPHYS0_TBLSZ0_POWER 22 /* 0 -> 4M */ | ||
474 | #define MMCU_TBLPHYS0_TBLSZ_MAX 9 /* 9 -> 2G */ | ||
475 | #define MMCU_TBLPHYS0_LINBASE_BITS 0xFFC00000 /* Linear base */ | ||
476 | #define MMCU_TBLPHYS0_LINBASE_S 22 | ||
477 | |||
478 | #define MMCU_T0LOCAL_TABLE_PHYS1 0x04830708 | ||
479 | #define MMCU_TBLPHYS1_ADDR_BITS 0xFFFFFFFC /* Physical base */ | ||
480 | #define MMCU_TBLPHYS1_ADDR_S 2 | ||
481 | |||
482 | #define MMCU_T0GLOBAL_TABLE_PHYS0 0x04830710 | ||
483 | #define MMCU_T0GLOBAL_TABLE_PHYS1 0x04830718 | ||
484 | #define MMCU_T1LOCAL_TABLE_PHYS0 0x04830720 | ||
485 | #define MMCU_T1LOCAL_TABLE_PHYS1 0x04830728 | ||
486 | #define MMCU_T1GLOBAL_TABLE_PHYS0 0x04830730 | ||
487 | #define MMCU_T1GLOBAL_TABLE_PHYS1 0x04830738 | ||
488 | #define MMCU_T2LOCAL_TABLE_PHYS0 0x04830740 | ||
489 | #define MMCU_T2LOCAL_TABLE_PHYS1 0x04830748 | ||
490 | #define MMCU_T2GLOBAL_TABLE_PHYS0 0x04830750 | ||
491 | #define MMCU_T2GLOBAL_TABLE_PHYS1 0x04830758 | ||
492 | #define MMCU_T3LOCAL_TABLE_PHYS0 0x04830760 | ||
493 | #define MMCU_T3LOCAL_TABLE_PHYS1 0x04830768 | ||
494 | #define MMCU_T3GLOBAL_TABLE_PHYS0 0x04830770 | ||
495 | #define MMCU_T3GLOBAL_TABLE_PHYS1 0x04830778 | ||
496 | |||
497 | #define MMCU_T0EBWCCTRL 0x04830640 | ||
498 | #define MMCU_TnEBWCCTRL_BITS 0x00000007 | ||
499 | #define MMCU_TnEBWCCTRL_S 0 | ||
500 | #define MMCU_TnEBWCCCTRL_DISABLE_ALL 0 | ||
501 | #define MMCU_TnEBWCCCTRL_ABIT25 1 | ||
502 | #define MMCU_TnEBWCCCTRL_ABIT26 2 | ||
503 | #define MMCU_TnEBWCCCTRL_ABIT27 3 | ||
504 | #define MMCU_TnEBWCCCTRL_ABIT28 4 | ||
505 | #define MMCU_TnEBWCCCTRL_ABIT29 5 | ||
506 | #define MMCU_TnEBWCCCTRL_ABIT30 6 | ||
507 | #define MMCU_TnEBWCCCTRL_ENABLE_ALL 7 | ||
508 | #define MMCU_TnEBWCCTRL_STRIDE 8 | ||
509 | |||
510 | #endif /* METAC_2_1 */ | ||
511 | |||
512 | |||
513 | /* Registers within the SYSC register region */ | ||
514 | #define METAC_ID 0x04830000 | ||
515 | #define METAC_ID_MAJOR_BITS 0xFF000000 | ||
516 | #define METAC_ID_MAJOR_S 24 | ||
517 | #define METAC_ID_MINOR_BITS 0x00FF0000 | ||
518 | #define METAC_ID_MINOR_S 16 | ||
519 | #define METAC_ID_REV_BITS 0x0000FF00 | ||
520 | #define METAC_ID_REV_S 8 | ||
521 | #define METAC_ID_MAINT_BITS 0x000000FF | ||
522 | #define METAC_ID_MAINT_S 0 | ||
523 | |||
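All of the ID fields follow the same _BITS/_S mask-and-shift pattern, so decoding a value read from METAC_ID reduces to four extractions; a small sketch (metac_id_print is an illustrative helper, and the raw value is assumed to have been read from the register by other means):

#include <stdint.h>
#include <stdio.h>

#define METAC_ID_MAJOR_BITS 0xFF000000
#define METAC_ID_MAJOR_S    24
#define METAC_ID_MINOR_BITS 0x00FF0000
#define METAC_ID_MINOR_S    16
#define METAC_ID_REV_BITS   0x0000FF00
#define METAC_ID_REV_S      8
#define METAC_ID_MAINT_BITS 0x000000FF

static void metac_id_print(uint32_t id) /* id as read from METAC_ID */
{
	printf("Meta core %u.%u.%u.%u\n",
	       (id & METAC_ID_MAJOR_BITS) >> METAC_ID_MAJOR_S,
	       (id & METAC_ID_MINOR_BITS) >> METAC_ID_MINOR_S,
	       (id & METAC_ID_REV_BITS) >> METAC_ID_REV_S,
	       id & METAC_ID_MAINT_BITS);
}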
524 | #ifdef METAC_2_1 | ||
525 | /* Use of this section is strongly deprecated */ | ||
526 | #define METAC_ID2 0x04830008 | ||
527 | #define METAC_ID2_DESIGNER_BITS 0xFFFF0000 /* Modified by customer */ | ||
528 | #define METAC_ID2_DESIGNER_S 16 | ||
529 | #define METAC_ID2_MINOR2_BITS 0x00000F00 /* 3rd digit of prod rev */ | ||
530 | #define METAC_ID2_MINOR2_S 8 | ||
531 | #define METAC_ID2_CONFIG_BITS 0x000000FF /* Wrapper configuration */ | ||
532 | #define METAC_ID2_CONFIG_S 0 | ||
533 | |||
534 | /* Primary core identification and configuration information */ | ||
535 | #define METAC_CORE_ID 0x04831000 | ||
536 | #define METAC_COREID_GROUP_BITS 0xFF000000 | ||
537 | #define METAC_COREID_GROUP_S 24 | ||
538 | #define METAC_COREID_GROUP_METAG 0x14 | ||
539 | #define METAC_COREID_ID_BITS 0x00FF0000 | ||
540 | #define METAC_COREID_ID_S 16 | ||
541 | #define METAC_COREID_ID_W32 0x10 /* >= for 32-bit pipeline */ | ||
542 | #define METAC_COREID_CONFIG_BITS 0x0000FFFF | ||
543 | #define METAC_COREID_CONFIG_S 0 | ||
544 | #define METAC_COREID_CFGCACHE_BITS 0x0007 | ||
545 | #define METAC_COREID_CFGCACHE_S 0 | ||
546 | #define METAC_COREID_CFGCACHE_NOM 0 | ||
547 | #define METAC_COREID_CFGCACHE_TYPE0 1 | ||
548 | #define METAC_COREID_CFGCACHE_NOMMU 1 /* Alias for TYPE0 */ | ||
549 | #define METAC_COREID_CFGCACHE_NOCACHE 2 | ||
550 | #define METAC_COREID_CFGCACHE_PRIVNOMMU 3 | ||
551 | #define METAC_COREID_CFGDSP_BITS 0x0038 | ||
552 | #define METAC_COREID_CFGDSP_S 3 | ||
553 | #define METAC_COREID_CFGDSP_NOM 0 | ||
554 | #define METAC_COREID_CFGDSP_MIN 1 | ||
555 | #define METAC_COREID_NOFPACC_BIT 0x0040 /* Set if no FPU accum */ | ||
556 | #define METAC_COREID_CFGFPU_BITS 0x0180 | ||
557 | #define METAC_COREID_CFGFPU_S 7 | ||
558 | #define METAC_COREID_CFGFPU_NOM 0 | ||
559 | #define METAC_COREID_CFGFPU_SNGL 1 | ||
560 | #define METAC_COREID_CFGFPU_DBL 2 | ||
561 | #define METAC_COREID_NOAMA_BIT 0x0200 /* Set if no AMA present */ | ||
562 | #define METAC_COREID_NOCOH_BIT 0x0400 /* Set if no Gbl coherency */ | ||
563 | |||
564 | /* Core revision information */ | ||
565 | #define METAC_CORE_REV 0x04831008 | ||
566 | #define METAC_COREREV_DESIGN_BITS 0xFF000000 | ||
567 | #define METAC_COREREV_DESIGN_S 24 | ||
568 | #define METAC_COREREV_MAJOR_BITS 0x00FF0000 | ||
569 | #define METAC_COREREV_MAJOR_S 16 | ||
570 | #define METAC_COREREV_MINOR_BITS 0x0000FF00 | ||
571 | #define METAC_COREREV_MINOR_S 8 | ||
572 | #define METAC_COREREV_MAINT_BITS 0x000000FF | ||
573 | #define METAC_COREREV_MAINT_S 0 | ||
574 | |||
575 | /* Configuration information control outside the core */ | ||
576 | #define METAC_CORE_DESIGNER1 0x04831010 /* Arbitrary value */ | ||
577 | #define METAC_CORE_DESIGNER2 0x04831018 /* Arbitrary value */ | ||
578 | |||
579 | /* Configuration information covering presence/number of various features */ | ||
580 | #define METAC_CORE_CONFIG2 0x04831020 | ||
581 | #define METAC_CORECFG2_COREDBGTYPE_BITS 0x60000000 /* Core debug type */ | ||
582 | #define METAC_CORECFG2_COREDBGTYPE_S 29 | ||
583 | #define METAC_CORECFG2_DCSMALL_BIT 0x04000000 /* Data cache small */ | ||
584 | #define METAC_CORECFG2_ICSMALL_BIT 0x02000000 /* Inst cache small */ | ||
585 | #define METAC_CORECFG2_DCSZNP_BITS 0x01C00000 /* Data cache size np */ | ||
586 | #define METAC_CORECFG2_DCSZNP_S 22 | ||
587 | #define METAC_CORECFG2_ICSZNP_BITS 0x00380000 /* Inst cache size np */ | ||
588 | #define METAC_CORECFG2_ICSZNP_S 19 | ||
589 | #define METAC_CORECFG2_DCSZ_BITS 0x00070000 /* Data cache size */ | ||
590 | #define METAC_CORECFG2_DCSZ_S 16 | ||
591 | #define METAC_CORECFG2_xCSZ_4K 0 /* Allocated values */ | ||
592 | #define METAC_CORECFG2_xCSZ_8K 1 | ||
593 | #define METAC_CORECFG2_xCSZ_16K 2 | ||
594 | #define METAC_CORECFG2_xCSZ_32K 3 | ||
595 | #define METAC_CORECFG2_xCSZ_64K 4 | ||
596 | #define METAC_CORE_C2ICSZ_BITS 0x0000E000 /* Inst cache size */ | ||
597 | #define METAC_CORE_C2ICSZ_S 13 | ||
598 | #define METAC_CORE_GBLACC_BITS 0x00001800 /* Number of Global Acc */ | ||
599 | #define METAC_CORE_GBLACC_S 11 | ||
600 | #define METAC_CORE_GBLDXR_BITS 0x00000700 /* 0 -> 0, R -> 2^(R-1) */ | ||
601 | #define METAC_CORE_GBLDXR_S 8 | ||
602 | #define METAC_CORE_GBLAXR_BITS 0x000000E0 /* 0 -> 0, R -> 2^(R-1) */ | ||
603 | #define METAC_CORE_GBLAXR_S 5 | ||
604 | #define METAC_CORE_RTTRACE_BIT 0x00000010 | ||
605 | #define METAC_CORE_WATCHN_BITS 0x0000000C /* 0 -> 0, N -> 2^N */ | ||
606 | #define METAC_CORE_WATCHN_S 2 | ||
607 | #define METAC_CORE_BREAKN_BITS 0x00000003 /* 0 -> 0, N -> 2^N */ | ||
608 | #define METAC_CORE_BREAKN_S 0 | ||
609 | |||
610 | /* Configuration information covering presence/number of various features */ | ||
611 | #define METAC_CORE_CONFIG3 0x04831028 | ||
612 | #define METAC_CORECFG3_L2C_REV_ID_BITS 0x000F0000 /* Revision of L2 cache */ | ||
613 | #define METAC_CORECFG3_L2C_REV_ID_S 16 | ||
614 | #define METAC_CORECFG3_L2C_LINE_SIZE_BITS 0x00003000 /* L2 line size */ | ||
615 | #define METAC_CORECFG3_L2C_LINE_SIZE_S 12 | ||
616 | #define METAC_CORECFG3_L2C_LINE_SIZE_64B 0x0 /* 64 bytes */ | ||
617 | #define METAC_CORECFG3_L2C_NUM_WAYS_BITS 0x00000F00 /* L2 number of ways (2^n) */ | ||
618 | #define METAC_CORECFG3_L2C_NUM_WAYS_S 8 | ||
619 | #define METAC_CORECFG3_L2C_SIZE_BITS 0x000000F0 /* L2 size (2^n) */ | ||
620 | #define METAC_CORECFG3_L2C_SIZE_S 4 | ||
621 | #define METAC_CORECFG3_L2C_UNIFIED_BIT 0x00000004 /* Unified cache: */ | ||
622 | #define METAC_CORECFG3_L2C_UNIFIED_S 2 | ||
623 | #define METAC_CORECFG3_L2C_UNIFIED_UNIFIED 1 /* - Unified D/I cache */ | ||
624 | #define METAC_CORECFG3_L2C_UNIFIED_SEPARATE 0 /* - Separate D/I cache */ | ||
625 | #define METAC_CORECFG3_L2C_MODE_BIT 0x00000002 /* Cache Mode: */ | ||
626 | #define METAC_CORECFG3_L2C_MODE_S 1 | ||
627 | #define METAC_CORECFG3_L2C_MODE_WRITE_BACK 1 /* - Write back */ | ||
628 | #define METAC_CORECFG3_L2C_MODE_WRITE_THROUGH 0 /* - Write through */ | ||
629 | #define METAC_CORECFG3_L2C_HAVE_L2C_BIT 0x00000001 /* Have L2C */ | ||
630 | #define METAC_CORECFG3_L2C_HAVE_L2C_S 0 | ||
631 | |||
632 | #endif /* METAC_2_1 */ | ||
633 | |||
634 | #define SYSC_CACHE_MMU_CONFIG 0x04830028 | ||
635 | #ifdef METAC_2_1 | ||
636 | #define SYSC_CMMUCFG_DCSKEWABLE_BIT 0x00000040 | ||
637 | #define SYSC_CMMUCFG_ICSKEWABLE_BIT 0x00000020 | ||
638 | #define SYSC_CMMUCFG_DCSKEWOFF_BIT 0x00000010 /* Skew association override */ | ||
639 | #define SYSC_CMMUCFG_ICSKEWOFF_BIT 0x00000008 /* -> default 0 on if present */ | ||
640 | #define SYSC_CMMUCFG_MODE_BITS 0x00000007 /* Access to old state */ | ||
641 | #define SYSC_CMMUCFG_MODE_S 0 | ||
642 | #define SYSC_CMMUCFG_ON 0x7 | ||
643 | #define SYSC_CMMUCFG_EBYPASS 0x6 /* Enhanced by-pass mode */ | ||
644 | #define SYSC_CMMUCFG_EBYPASSIC 0x4 /* EB just inst cache */ | ||
645 | #define SYSC_CMMUCFG_EBYPASSDC 0x2 /* EB just data cache */ | ||
646 | #endif /* METAC_2_1 */ | ||
647 | /* Old definitions, keep them for now */ | ||
648 | #define SYSC_CMMUCFG_MMU_ON_BIT 0x1 | ||
649 | #define SYSC_CMMUCFG_DC_ON_BIT 0x2 | ||
650 | #define SYSC_CMMUCFG_IC_ON_BIT 0x4 | ||
651 | |||
652 | #define SYSC_JTAG_THREAD 0x04830030 | ||
653 | #define SYSC_JTAG_TX_BITS 0x00000003 /* Read only bits! */ | ||
654 | #define SYSC_JTAG_TX_S 0 | ||
655 | #define SYSC_JTAG_PRIV_BIT 0x00000004 | ||
656 | #ifdef METAC_2_1 | ||
657 | #define SYSC_JTAG_SLAVETX_BITS 0x00000018 | ||
658 | #define SYSC_JTAG_SLAVETX_S 3 | ||
659 | #endif /* METAC_2_1 */ | ||
660 | |||
661 | #define SYSC_DCACHE_FLUSH 0x04830038 | ||
662 | #define SYSC_ICACHE_FLUSH 0x04830040 | ||
663 | #define SYSC_xCACHE_FLUSH_INIT 0x1 | ||
664 | #define MMCU_DIRECTMAP0_ADDR 0x04830080 /* LINSYSDIRECT_BASE -> */ | ||
665 | #define MMCU_DIRECTMAPn_STRIDE 0x00000010 /* 4 Region settings */ | ||
666 | #define MMCU_DIRECTMAPn_S 4 | ||
667 | #define MMCU_DIRECTMAPn_ADDR_BITS 0xFF800000 | ||
668 | #define MMCU_DIRECTMAPn_ADDR_S 23 | ||
669 | #define MMCU_DIRECTMAPn_ADDR_SCALE 0x00800000 /* 8M Regions */ | ||
670 | #ifdef METAC_2_1 | ||
671 | /* | ||
672 | * These fields in the above registers provide MMCU_ENTRY_* values | ||
673 | * for each direct mapped region to enable optimisation of these areas. | ||
674 | * (LSB similar to VALID must be set for enhancements to be active) | ||
675 | */ | ||
676 | #define MMCU_DIRECTMAPn_ENHANCE_BIT 0x00000001 /* 0 = no optim */ | ||
677 | #define MMCU_DIRECTMAPn_DCCTRL_BITS 0x000000DF /* Get DC Ctrl */ | ||
678 | #define MMCU_DIRECTMAPn_DCCTRL_S 0 | ||
679 | #define MMCU_DIRECTMAPn_ICCTRL_BITS 0x0000C000 /* Get IC Ctrl */ | ||
680 | #define MMCU_DIRECTMAPn_ICCTRL_S 8 | ||
681 | #define MMCU_DIRECTMAPn_ENTLB_BIT 0x00000020 /* Cache in TLB */ | ||
682 | #define MMCU_DIRECTMAPn_ICCWIN_BITS 0x0000C000 /* Get IC Win Bits */ | ||
683 | #define MMCU_DIRECTMAPn_ICCWIN_S 14 | ||
684 | #endif /* METAC_2_1 */ | ||
685 | |||
686 | #define MMCU_DIRECTMAP1_ADDR 0x04830090 | ||
687 | #define MMCU_DIRECTMAP2_ADDR 0x048300A0 | ||
688 | #define MMCU_DIRECTMAP3_ADDR 0x048300B0 | ||
689 | |||
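The four region registers sit MMCU_DIRECTMAPn_STRIDE apart, so programming a direct-map base is a single store of the 8M-aligned address; an illustrative sketch (directmap_set is a hypothetical helper):

#include <stdint.h>

#define MMCU_DIRECTMAP0_ADDR      0x04830080
#define MMCU_DIRECTMAPn_STRIDE    0x00000010
#define MMCU_DIRECTMAPn_ADDR_BITS 0xFF800000

/* Program direct-map region n (0-3) to start at 'base' (8M aligned). */
static inline void directmap_set(unsigned int n, uint32_t base)
{
	volatile uint32_t *reg = (volatile uint32_t *)(uintptr_t)
		(MMCU_DIRECTMAP0_ADDR + n * MMCU_DIRECTMAPn_STRIDE);
	*reg = base & MMCU_DIRECTMAPn_ADDR_BITS;
}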
690 | /* | ||
691 | * These bits partition each thread's use of data cache or instruction cache | ||
692 | * resource by modifying the top 4 bits of the address within the cache | ||
693 | * storage area. | ||
694 | */ | ||
695 | #define SYSC_DCPART0 0x04830200 | ||
696 | #define SYSC_xCPARTn_STRIDE 0x00000008 | ||
697 | #define SYSC_xCPARTL_AND_BITS 0x0000000F /* Masks top 4 bits */ | ||
698 | #define SYSC_xCPARTL_AND_S 0 | ||
699 | #define SYSC_xCPARTG_AND_BITS 0x00000F00 /* Masks top 4 bits */ | ||
700 | #define SYSC_xCPARTG_AND_S 8 | ||
701 | #define SYSC_xCPARTL_OR_BITS 0x000F0000 /* Ors into top 4 bits */ | ||
702 | #define SYSC_xCPARTL_OR_S 16 | ||
703 | #define SYSC_xCPARTG_OR_BITS 0x0F000000 /* Ors into top 4 bits */ | ||
704 | #define SYSC_xCPARTG_OR_S 24 | ||
705 | #define SYSC_CWRMODE_BIT 0x80000000 /* Write cache mode bit */ | ||
706 | |||
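As an illustration of the AND/OR mechanism above: each thread's top 4 cache address bits are first ANDed with the AND field, then ORed with the OR field, so and=0x7/or=0x0 confines a thread to the lower half of the cache while and=0x7/or=0x8 selects the upper half. A hedged sketch of composing the local-partition part of the value (xcpart_local is an illustrative helper, and the global fields would be packed the same way at their own shifts):

#include <stdint.h>

#define SYSC_xCPARTL_AND_S 0
#define SYSC_xCPARTL_OR_S  16

/* Local partition: AND the top 4 address bits with 'and_mask',
 * then OR in 'or_bits', e.g. and=0x7/or=0x8 -> upper half. */
static inline uint32_t xcpart_local(uint32_t and_mask, uint32_t or_bits)
{
	return (and_mask << SYSC_xCPARTL_AND_S)
	       | (or_bits << SYSC_xCPARTL_OR_S);
}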
707 | #define SYSC_DCPART1 0x04830208 | ||
708 | #define SYSC_DCPART2 0x04830210 | ||
709 | #define SYSC_DCPART3 0x04830218 | ||
710 | #define SYSC_ICPART0 0x04830220 | ||
711 | #define SYSC_ICPART1 0x04830228 | ||
712 | #define SYSC_ICPART2 0x04830230 | ||
713 | #define SYSC_ICPART3 0x04830238 | ||
714 | |||
715 | /* | ||
716 | * META Core Memory and Cache Update registers | ||
717 | */ | ||
718 | #define SYSC_MCMDATAX 0x04830300 /* 32-bit read/write data register */ | ||
719 | #define SYSC_MCMDATAT 0x04830308 /* Read or write data triggers oper */ | ||
720 | #define SYSC_MCMGCTRL 0x04830310 /* Control register */ | ||
721 | #define SYSC_MCMGCTRL_READ_BIT 0x00000001 /* Set to issue 1st read */ | ||
722 | #define SYSC_MCMGCTRL_AINC_BIT 0x00000002 /* Set for auto-increment */ | ||
723 | #define SYSC_MCMGCTRL_ADDR_BITS 0x000FFFFC /* Address or index */ | ||
724 | #define SYSC_MCMGCTRL_ADDR_S 2 | ||
725 | #define SYSC_MCMGCTRL_ID_BITS 0x0FF00000 /* Internal memory block Id */ | ||
726 | #define SYSC_MCMGCTRL_ID_S 20 | ||
727 | #define SYSC_MCMGID_NODEV 0xFF /* No Device Selected */ | ||
728 | #define SYSC_MCMGID_DSPRAM0A 0x04 /* DSP RAM D0 block A access */ | ||
729 | #define SYSC_MCMGID_DSPRAM0B 0x05 /* DSP RAM D0 block B access */ | ||
730 | #define SYSC_MCMGID_DSPRAM1A 0x06 /* DSP RAM D1 block A access */ | ||
731 | #define SYSC_MCMGID_DSPRAM1B 0x07 /* DSP RAM D1 block B access */ | ||
732 | #define SYSC_MCMGID_DCACHEL 0x08 /* DCACHE lines (64-bytes/line) */ | ||
733 | #ifdef METAC_2_1 | ||
734 | #define SYSC_MCMGID_DCACHETLB 0x09 /* DCACHE TLB ( Read Only ) */ | ||
735 | #endif /* METAC_2_1 */ | ||
736 | #define SYSC_MCMGID_DCACHET 0x0A /* DCACHE tags (32-bits/line) */ | ||
737 | #define SYSC_MCMGID_DCACHELRU 0x0B /* DCACHE LRU (8-bits/line) */ | ||
738 | #define SYSC_MCMGID_ICACHEL 0x0C /* ICACHE lines (64-bytes/line) */ | ||
739 | #ifdef METAC_2_1 | ||
740 | #define SYSC_MCMGID_ICACHETLB 0x0D /* ICACHE TLB (Read Only ) */ | ||
741 | #endif /* METAC_2_1 */ | ||
742 | #define SYSC_MCMGID_ICACHET 0x0E /* ICACHE Tags (32-bits/line) */ | ||
743 | #define SYSC_MCMGID_ICACHELRU 0x0F /* ICACHE LRU (8-bits/line) */ | ||
744 | #define SYSC_MCMGID_COREIRAM0 0x10 /* Core code mem id 0 */ | ||
745 | #define SYSC_MCMGID_COREIRAMn 0x17 | ||
746 | #define SYSC_MCMGID_COREDRAM0 0x18 /* Core data mem id 0 */ | ||
747 | #define SYSC_MCMGID_COREDRAMn 0x1F | ||
748 | #ifdef METAC_2_1 | ||
749 | #define SYSC_MCMGID_DCACHEST 0x20 /* DCACHE ST ( Read Only ) */ | ||
750 | #define SYSC_MCMGID_ICACHEST 0x21 /* ICACHE ST ( Read Only ) */ | ||
751 | #define SYSC_MCMGID_DCACHETLBLRU 0x22 /* DCACHE TLB LRU ( Read Only )*/ | ||
752 | #define SYSC_MCMGID_ICACHETLBLRU 0x23 /* ICACHE TLB LRU( Read Only ) */ | ||
753 | #define SYSC_MCMGID_DCACHESTLRU 0x24 /* DCACHE ST LRU ( Read Only ) */ | ||
754 | #define SYSC_MCMGID_ICACHESTLRU 0x25 /* ICACHE ST LRU ( Read Only ) */ | ||
755 | #define SYSC_MCMGID_DEBUGTLB 0x26 /* DEBUG TLB ( Read Only ) */ | ||
756 | #define SYSC_MCMGID_DEBUGST 0x27 /* DEBUG ST ( Read Only ) */ | ||
757 | #define SYSC_MCMGID_L2CACHEL 0x30 /* L2 Cache Lines (64-bytes/line) */ | ||
758 | #define SYSC_MCMGID_L2CACHET 0x31 /* L2 Cache Tags (32-bits/line) */ | ||
759 | #define SYSC_MCMGID_COPROX0 0x70 /* Coprocessor port id 0 */ | ||
760 | #define SYSC_MCMGID_COPROXn 0x77 | ||
761 | #endif /* METAC_2_1 */ | ||
762 | #define SYSC_MCMGCTRL_TR31_BIT 0x80000000 /* Trigger 31 on completion */ | ||
763 | #define SYSC_MCMSTATUS 0x04830318 /* Status read only */ | ||
764 | #define SYSC_MCMSTATUS_IDLE_BIT 0x00000001 | ||
765 | |||
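A typical access through this window writes the block id, the word index and the READ bit to SYSC_MCMGCTRL, then reads SYSC_MCMDATAT, which both returns data and triggers the next access. A simplified sketch under those assumptions (mmio() and mcmg_read() are hypothetical helpers; burst reads would additionally set SYSC_MCMGCTRL_AINC_BIT):

#include <stdint.h>

#define SYSC_MCMDATAT          0x04830308
#define SYSC_MCMGCTRL          0x04830310
#define SYSC_MCMGCTRL_READ_BIT 0x00000001
#define SYSC_MCMGCTRL_ADDR_S   2
#define SYSC_MCMGCTRL_ID_S     20

static inline volatile uint32_t *mmio(uint32_t a)
{
	return (volatile uint32_t *)(uintptr_t)a;
}

/* Read one word from internal memory block 'id' at word index 'idx'. */
static uint32_t mcmg_read(uint32_t id, uint32_t idx)
{
	*mmio(SYSC_MCMGCTRL) = (id << SYSC_MCMGCTRL_ID_S)
			       | (idx << SYSC_MCMGCTRL_ADDR_S)
			       | SYSC_MCMGCTRL_READ_BIT;
	return *mmio(SYSC_MCMDATAT); /* read triggers the next access */
}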
766 | /* META System Events */ | ||
767 | #define SYSC_SYS_EVENT 0x04830400 | ||
768 | #define SYSC_SYSEVT_ATOMIC_BIT 0x00000001 | ||
769 | #define SYSC_SYSEVT_CACHEX_BIT 0x00000002 | ||
770 | #define SYSC_ATOMIC_LOCK 0x04830408 | ||
771 | #define SYSC_ATOMIC_STATE_TX_BITS 0x0000000F | ||
772 | #define SYSC_ATOMIC_STATE_TX_S 0 | ||
773 | #ifdef METAC_1_2 | ||
774 | #define SYSC_ATOMIC_STATE_DX_BITS 0x000000F0 | ||
775 | #define SYSC_ATOMIC_STATE_DX_S 4 | ||
776 | #else /* METAC_1_2 */ | ||
777 | #define SYSC_ATOMIC_SOURCE_BIT 0x00000010 | ||
778 | #endif /* !METAC_1_2 */ | ||
779 | |||
780 | |||
781 | #ifdef METAC_2_1 | ||
782 | |||
783 | /* These definitions replace the EXPAND_TIMER_DIV register defines which are to | ||
784 | * be deprecated. | ||
785 | */ | ||
786 | #define SYSC_TIMER_DIV 0x04830140 | ||
787 | #define SYSC_TIMDIV_BITS 0x000000FF | ||
788 | #define SYSC_TIMDIV_S 0 | ||
789 | |||
790 | /* META Enhanced by-pass control for local and global region */ | ||
791 | #define MMCU_LOCAL_EBCTRL 0x04830600 | ||
792 | #define MMCU_GLOBAL_EBCTRL 0x04830608 | ||
793 | #define MMCU_EBCTRL_SINGLE_BIT 0x00000020 /* TLB Uncached */ | ||
794 | /* | ||
795 | * These fields in the above registers provide MMCU_ENTRY_* values | ||
796 | * for each direct mapped region to enable optimisation of these areas. | ||
797 | */ | ||
798 | #define MMCU_EBCTRL_DCCTRL_BITS 0x000000C0 /* Get DC Ctrl */ | ||
799 | #define MMCU_EBCTRL_DCCTRL_S 0 | ||
800 | #define MMCU_EBCTRL_ICCTRL_BITS 0x0000C000 /* Get IC Ctrl */ | ||
801 | #define MMCU_EBCTRL_ICCTRL_S 8 | ||
802 | |||
803 | /* META Cached Core Mode Registers */ | ||
804 | #define MMCU_T0CCM_ICCTRL 0x04830680 /* Core cached code control */ | ||
805 | #define MMCU_TnCCM_xxCTRL_STRIDE 8 | ||
806 | #define MMCU_TnCCM_xxCTRL_STRIDE_S 3 | ||
807 | #define MMCU_T1CCM_ICCTRL 0x04830688 | ||
808 | #define MMCU_T2CCM_ICCTRL 0x04830690 | ||
809 | #define MMCU_T3CCM_ICCTRL 0x04830698 | ||
810 | #define MMCU_T0CCM_DCCTRL 0x048306C0 /* Core cached data control */ | ||
811 | #define MMCU_T1CCM_DCCTRL 0x048306C8 | ||
812 | #define MMCU_T2CCM_DCCTRL 0x048306D0 | ||
813 | #define MMCU_T3CCM_DCCTRL 0x048306D8 | ||
814 | #define MMCU_TnCCM_ENABLE_BIT 0x00000001 | ||
815 | #define MMCU_TnCCM_WIN3_BIT 0x00000002 | ||
816 | #define MMCU_TnCCM_DCWRITE_BIT 0x00000004 /* In DCCTRL only */ | ||
817 | #define MMCU_TnCCM_REGSZ_BITS 0x00000F00 | ||
818 | #define MMCU_TnCCM_REGSZ_S 8 | ||
819 | #define MMCU_TnCCM_REGSZ0_POWER 12 /* RegSz 0 -> 4K */ | ||
820 | #define MMCU_TnCCM_REGSZ_MAXBYTES 0x00080000 /* 512K max */ | ||
821 | #define MMCU_TnCCM_ADDR_BITS 0xFFFFF000 | ||
822 | #define MMCU_TnCCM_ADDR_S 12 | ||
823 | |||
824 | #endif /* METAC_2_1 */ | ||
825 | |||
826 | /* | ||
827 | * Hardware performance counter registers | ||
828 | * -------------------------------------- | ||
829 | */ | ||
830 | #ifdef METAC_2_1 | ||
831 | /* Two Performance Counter Internal Core Events Control registers */ | ||
832 | #define PERF_ICORE0 0x0480FFD0 | ||
833 | #define PERF_ICORE1 0x0480FFD8 | ||
834 | #define PERFI_CTRL_BITS 0x0000000F | ||
835 | #define PERFI_CTRL_S 0 | ||
836 | #define PERFI_CAH_DMISS 0x0 /* Dcache Misses in cache (TLB Hit) */ | ||
837 | #define PERFI_CAH_IMISS 0x1 /* Icache Misses in cache (TLB Hit) */ | ||
838 | #define PERFI_TLB_DMISS 0x2 /* Dcache Misses in per-thread TLB */ | ||
839 | #define PERFI_TLB_IMISS 0x3 /* Icache Misses in per-thread TLB */ | ||
840 | #define PERFI_TLB_DWRHITS 0x4 /* DC Write-Hits in per-thread TLB */ | ||
841 | #define PERFI_TLB_DWRMISS 0x5 /* DC Write-Miss in per-thread TLB */ | ||
842 | #define PERFI_CAH_DLFETCH 0x8 /* DC Read cache line fetch */ | ||
843 | #define PERFI_CAH_ILFETCH 0x9 /* IC Read cache line fetch */ | ||
844 | #define PERFI_CAH_DWFETCH 0xA /* DC Read cache word fetch */ | ||
845 | #define PERFI_CAH_IWFETCH 0xB /* IC Read cache word fetch */ | ||
846 | #endif /* METAC_2_1 */ | ||
847 | |||
848 | /* Two memory-mapped hardware performance counter registers */ | ||
849 | #define PERF_COUNT0 0x0480FFE0 | ||
850 | #define PERF_COUNT1 0x0480FFE8 | ||
851 | |||
852 | /* Fields in PERF_COUNTn registers */ | ||
853 | #define PERF_COUNT_BITS 0x00FFFFFF /* Event count value */ | ||
854 |||
855 | #define PERF_THREAD_BITS 0x0F000000 /* Thread mask selects threads */ | ||
856 | #define PERF_THREAD_S 24 | ||
857 |||
858 | #define PERF_CTRL_BITS 0xF0000000 /* Event filter control */ | ||
859 | #define PERF_CTRL_S 28 | ||
860 | |||
861 | #define PERFCTRL_SUPER 0 /* Superthread cycles */ | ||
862 | #define PERFCTRL_REWIND 1 /* Rewinds due to Dcache Misses */ | ||
863 | #ifdef METAC_2_1 | ||
864 | #define PERFCTRL_SUPREW 2 /* Rewinds of superthreaded cycles (no mask) */ | ||
865 | |||
866 | #define PERFCTRL_CYCLES 3 /* Counts all cycles (no mask) */ | ||
867 | |||
868 | #define PERFCTRL_PREDBC 4 /* Conditional branch predictions */ | ||
869 | #define PERFCTRL_MISPBC 5 /* Conditional branch mispredictions */ | ||
870 | #define PERFCTRL_PREDRT 6 /* Return predictions */ | ||
871 | #define PERFCTRL_MISPRT 7 /* Return mispredictions */ | ||
872 | #endif /* METAC_2_1 */ | ||
873 | |||
874 | #define PERFCTRL_DHITS 8 /* Dcache Hits */ | ||
875 | #define PERFCTRL_IHITS 9 /* Icache Hits */ | ||
876 | #define PERFCTRL_IMISS 10 /* Icache Misses in cache or TLB */ | ||
877 | #ifdef METAC_2_1 | ||
878 | #define PERFCTRL_DCSTALL 11 /* Dcache+TLB o/p delayed (per-thread) */ | ||
879 | #define PERFCTRL_ICSTALL 12 /* Icache+TLB o/p delayed (per-thread) */ | ||
880 | |||
881 | #define PERFCTRL_INT 13 /* Internal core detailed events (see next) */ | ||
882 | #define PERFCTRL_EXT 15 /* External source in core periphery */ | ||
883 | #endif /* METAC_2_1 */ | ||
884 | |||
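Combining the fields above, starting counter 0 counting Dcache hits for thread 0 means writing the event code into the top nibble, setting the thread's bit in the mask, and leaving the count field zero; a sketch (perf_count_dhits_t0 is an illustrative helper, and the per-bit thread mask interpretation is an assumption based on the comments above):

#include <stdint.h>

#define PERF_COUNT0    0x0480FFE0
#define PERF_THREAD_S  24
#define PERF_CTRL_S    28
#define PERFCTRL_DHITS 8

/* Count Dcache hits for thread 0, starting from zero. */
static inline void perf_count_dhits_t0(void)
{
	*(volatile uint32_t *)(uintptr_t)PERF_COUNT0 =
		((uint32_t)PERFCTRL_DHITS << PERF_CTRL_S)
		| (1u << PERF_THREAD_S); /* thread mask bit for thread 0 */
}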
885 | #ifdef METAC_2_1 | ||
886 | /* These definitions replace the EXPAND_PERFCHANx register defines which are to | ||
887 | * be deprecated. | ||
888 | */ | ||
889 | #define PERF_CHAN0 0x04830150 | ||
890 | #define PERF_CHAN1 0x04830158 | ||
891 | #define PERF_CHAN_BITS 0x0000000F | ||
892 | #define PERF_CHAN_S 0 | ||
893 | #define PERFCHAN_WRC_WRBURST 0x0 /* Write combiner write burst */ | ||
894 | #define PERFCHAN_WRC_WRITE 0x1 /* Write combiner write */ | ||
895 | #define PERFCHAN_WRC_RDBURST 0x2 /* Write combiner read burst */ | ||
896 | #define PERFCHAN_WRC_READ 0x3 /* Write combiner read */ | ||
897 | #define PERFCHAN_PREARB_DELAY 0x4 /* Pre-arbiter delay cycle */ | ||
898 | /* Cross-bar hold-off cycle: */ | ||
899 | #define PERFCHAN_XBAR_HOLDWRAP 0x5 /* wrapper register */ | ||
900 | #define PERFCHAN_XBAR_HOLDSBUS 0x6 /* system bus (ATP only) */ | ||
901 | #define PERFCHAN_XBAR_HOLDCREG 0x9 /* core registers */ | ||
902 | #define PERFCHAN_L2C_MISS 0x6 /* L2 Cache miss */ | ||
903 | #define PERFCHAN_L2C_HIT 0x7 /* L2 Cache hit */ | ||
904 | #define PERFCHAN_L2C_WRITEBACK 0x8 /* L2 Cache writeback */ | ||
905 | /* Admission delay cycle: */ | ||
906 | #define PERFCHAN_INPUT_CREG 0xB /* core registers */ | ||
907 | #define PERFCHAN_INPUT_INTR 0xC /* internal ram */ | ||
908 | #define PERFCHAN_INPUT_WRC 0xD /* write combiners(memory) */ | ||
909 | |||
910 | /* Should the following be removed, as they are not in the TRM anywhere? */ | ||
911 | #define PERFCHAN_XBAR_HOLDINTR 0x8 /* internal ram */ | ||
912 | #define PERFCHAN_INPUT_SBUS 0xA /* register port */ | ||
913 | /* End of remove section. */ | ||
914 | |||
915 | #define PERFCHAN_MAINARB_DELAY 0xF /* Main arbiter delay cycle */ | ||
916 | |||
917 | #endif /* METAC_2_1 */ | ||
918 | |||
919 | #ifdef METAC_2_1 | ||
920 | /* | ||
921 | * Write combiner registers | ||
922 | * ------------------------ | ||
923 | * | ||
924 | * These replace the EXPAND_T0WRCOMBINE register defines, which will be | ||
925 | * deprecated. | ||
926 | */ | ||
927 | #define WRCOMB_CONFIG0 0x04830100 | ||
928 | #define WRCOMB_LFFEn_BIT 0x00004000 /* Enable auto line full flush */ | ||
929 | #define WRCOMB_ENABLE_BIT 0x00002000 /* Enable write combiner */ | ||
930 | #define WRCOMB_TIMEOUT_ENABLE_BIT 0x00001000 /* Timeout flush enable */ | ||
931 | #define WRCOMB_TIMEOUT_COUNT_BITS 0x000003FF | ||
932 | #define WRCOMB_TIMEOUT_COUNT_S 0 | ||
933 | #define WRCOMB_CONFIG4 0x04830180 | ||
934 | #define WRCOMB_PARTALLOC_BITS 0x000000C0 | ||
935 | #define WRCOMB_PARTALLOC_S 6 | ||
936 | #define WRCOMB_PARTSIZE_BITS 0x00000030 | ||
937 | #define WRCOMB_PARTSIZE_S 4 | ||
938 | #define WRCOMB_PARTOFFSET_BITS 0x0000000F | ||
939 | #define WRCOMB_PARTOFFSET_S 0 | ||
940 | #define WRCOMB_CONFIG_STRIDE 8 | ||
941 | #endif /* METAC_2_1 */ | ||
942 | |||
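For example, enabling the write combiner with timeout flushing after 64 cycles composes the WRCOMB_CONFIG0 value as follows (an illustrative value only; the 64-cycle timeout is an arbitrary choice):

#include <stdint.h>

#define WRCOMB_ENABLE_BIT         0x00002000
#define WRCOMB_TIMEOUT_ENABLE_BIT 0x00001000
#define WRCOMB_TIMEOUT_COUNT_S    0

static const uint32_t wrcomb_cfg =
	WRCOMB_ENABLE_BIT | WRCOMB_TIMEOUT_ENABLE_BIT |
	(64u << WRCOMB_TIMEOUT_COUNT_S); /* flush after 64 cycles */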
943 | #ifdef METAC_2_1 | ||
944 | /* | ||
945 | * Thread arbiter registers | ||
946 | * ------------------------ | ||
947 | * | ||
948 | * These replace the EXPAND_T0ARBITER register defines, which will be | ||
949 | * deprecated. | ||
950 | */ | ||
951 | #define ARBITER_ARBCONFIG0 0x04830120 | ||
952 | #define ARBCFG_BPRIORITY_BIT 0x02000000 | ||
953 | #define ARBCFG_IPRIORITY_BIT 0x01000000 | ||
954 | #define ARBCFG_PAGE_BITS 0x00FF0000 | ||
955 | #define ARBCFG_PAGE_S 16 | ||
956 | #define ARBCFG_BBASE_BITS 0x0000FF00 | ||
957 | #define ARBCFG_BBASE_S 8 | ||
958 | #define ARBCFG_IBASE_BITS 0x000000FF | ||
959 | #define ARBCFG_IBASE_S 0 | ||
960 | #define ARBITER_TTECONFIG0 0x04820160 | ||
961 | #define ARBTTE_IUPPER_BITS 0xFF000000 | ||
962 | #define ARBTTE_IUPPER_S 24 | ||
963 | #define ARBTTE_ILOWER_BITS 0x00FF0000 | ||
964 | #define ARBTTE_ILOWER_S 16 | ||
965 | #define ARBTTE_BUPPER_BITS 0x0000FF00 | ||
966 | #define ARBTTE_BUPPER_S 8 | ||
967 | #define ARBTTE_BLOWER_BITS 0x000000FF | ||
968 | #define ARBTTE_BLOWER_S 0 | ||
969 | #define ARBITER_STRIDE 8 | ||
970 | #endif /* METAC_2_1 */ | ||
971 | |||
972 | /* | ||
973 | * Expansion area registers | ||
974 | * -------------------------------------- | ||
975 | */ | ||
976 | |||
977 | /* These defines are to be deprecated. See above instead. */ | ||
978 | #define EXPAND_T0WRCOMBINE 0x03000000 | ||
979 | #ifdef METAC_2_1 | ||
980 | #define EXPWRC_LFFEn_BIT 0x00004000 /* Enable auto line full flush */ | ||
981 | #endif /* METAC_2_1 */ | ||
982 | #define EXPWRC_ENABLE_BIT 0x00002000 /* Enable write combiner */ | ||
983 | #define EXPWRC_TIMEOUT_ENABLE_BIT 0x00001000 /* Timeout flush enable */ | ||
984 | #define EXPWRC_TIMEOUT_COUNT_BITS 0x000003FF | ||
985 | #define EXPWRC_TIMEOUT_COUNT_S 0 | ||
986 | #define EXPAND_TnWRCOMBINE_STRIDE 0x00000008 | ||
987 | |||
988 | /* These defines are to be deprecated. See above instead. */ | ||
989 | #define EXPAND_T0ARBITER 0x03000020 | ||
990 | #define EXPARB_BPRIORITY_BIT 0x02000000 | ||
991 | #define EXPARB_IPRIORITY_BIT 0x01000000 | ||
992 | #define EXPARB_PAGE_BITS 0x00FF0000 | ||
993 | #define EXPARB_PAGE_S 16 | ||
994 | #define EXPARB_BBASE_BITS 0x0000FF00 | ||
995 | #define EXPARB_BBASE_S 8 | ||
996 | #define EXPARB_IBASE_BITS 0x000000FF | ||
997 | #define EXPARB_IBASE_S 0 | ||
998 | #define EXPAND_TnARBITER_STRIDE 0x00000008 | ||
999 | |||
1000 | /* These definitions are to be deprecated. See above instead. */ | ||
1001 | #define EXPAND_TIMER_DIV 0x03000040 | ||
1002 | #define EXPTIM_DIV_BITS 0x000000FF | ||
1003 | #define EXPTIM_DIV_S 0 | ||
1004 | |||
1005 | /* These definitions are to be deprecated. See above instead. */ | ||
1006 | #define EXPAND_PERFCHAN0 0x03000050 | ||
1007 | #define EXPAND_PERFCHAN1 0x03000058 | ||
1008 | #define EXPPERF_CTRL_BITS 0x0000000F | ||
1009 | #define EXPPERF_CTRL_S 0 | ||
1010 | #define EXPPERF_WRC_WRBURST 0x0 /* Write combiner write burst */ | ||
1011 | #define EXPPERF_WRC_WRITE 0x1 /* Write combiner write */ | ||
1012 | #define EXPPERF_WRC_RDBURST 0x2 /* Write combiner read burst */ | ||
1013 | #define EXPPERF_WRC_READ 0x3 /* Write combiner read */ | ||
1014 | #define EXPPERF_PREARB_DELAY 0x4 /* Pre-arbiter delay cycle */ | ||
1015 | /* Cross-bar hold-off cycle: */ | ||
1016 | #define EXPPERF_XBAR_HOLDWRAP 0x5 /* wrapper register */ | ||
1017 | #define EXPPERF_XBAR_HOLDSBUS 0x6 /* system bus */ | ||
1018 | #ifdef METAC_1_2 | ||
1019 | #define EXPPERF_XBAR_HOLDLBUS 0x7 /* local bus */ | ||
1020 | #else /* METAC_1_2 */ | ||
1021 | #define EXPPERF_XBAR_HOLDINTR 0x8 /* internal ram */ | ||
1022 | #define EXPPERF_XBAR_HOLDCREG 0x9 /* core registers */ | ||
1023 | /* Admission delay cycle: */ | ||
1024 | #define EXPPERF_INPUT_SBUS 0xA /* register port */ | ||
1025 | #define EXPPERF_INPUT_CREG 0xB /* core registers */ | ||
1026 | #define EXPPERF_INPUT_INTR 0xC /* internal ram */ | ||
1027 | #define EXPPERF_INPUT_WRC 0xD /* write combiners(memory) */ | ||
1028 | #endif /* !METAC_1_2 */ | ||
1029 | #define EXPPERF_MAINARB_DELAY 0xF /* Main arbiter delay cycle */ | ||
1030 | |||
1031 | /* | ||
1032 | * Debug port registers | ||
1033 | * -------------------------------------- | ||
1034 | */ | ||
1035 | |||
1036 | /* Data Exchange Register */ | ||
1037 | #define DBGPORT_MDBGDATAX 0x0 | ||
1038 | |||
1039 | /* Data Transfer register */ | ||
1040 | #define DBGPORT_MDBGDATAT 0x4 | ||
1041 | |||
1042 | /* Control Register 0 */ | ||
1043 | #define DBGPORT_MDBGCTRL0 0x8 | ||
1044 | #define DBGPORT_MDBGCTRL0_ADDR_BITS 0xFFFFFFFC | ||
1045 | #define DBGPORT_MDBGCTRL0_ADDR_S 2 | ||
1046 | #define DBGPORT_MDBGCTRL0_AUTOINCR_BIT 0x00000002 | ||
1047 | #define DBGPORT_MDBGCTRL0_RD_BIT 0x00000001 | ||
1048 | |||
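A debug-port read of the word at 'addr' with auto-increment would program control register 0 as below; the packing follows the defines above (mdbgctrl0_read_cmd is an illustrative helper, and the surrounding handshake via MDBGCTRL1 is omitted):

#include <stdint.h>

#define DBGPORT_MDBGCTRL0_ADDR_BITS    0xFFFFFFFC
#define DBGPORT_MDBGCTRL0_AUTOINCR_BIT 0x00000002
#define DBGPORT_MDBGCTRL0_RD_BIT       0x00000001

static inline uint32_t mdbgctrl0_read_cmd(uint32_t addr)
{
	return (addr & DBGPORT_MDBGCTRL0_ADDR_BITS)
	       | DBGPORT_MDBGCTRL0_AUTOINCR_BIT
	       | DBGPORT_MDBGCTRL0_RD_BIT;
}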
1049 | /* Control Register 1 */ | ||
1050 | #define DBGPORT_MDBGCTRL1 0xC | ||
1051 | #ifdef METAC_2_1 | ||
1052 | #define DBGPORT_MDBGCTRL1_DEFERRTHREAD_BITS 0xC0000000 | ||
1053 | #define DBGPORT_MDBGCTRL1_DEFERRTHREAD_S 30 | ||
1054 | #endif /* METAC_2_1 */ | ||
1055 | #define DBGPORT_MDBGCTRL1_LOCK2_INTERLOCK_BIT 0x20000000 | ||
1056 | #define DBGPORT_MDBGCTRL1_ATOMIC_INTERLOCK_BIT 0x10000000 | ||
1057 | #define DBGPORT_MDBGCTRL1_TRIGSTATUS_BIT 0x08000000 | ||
1058 | #define DBGPORT_MDBGCTRL1_GBLPORT_IDLE_BIT 0x04000000 | ||
1059 | #define DBGPORT_MDBGCTRL1_COREMEM_IDLE_BIT 0x02000000 | ||
1060 | #define DBGPORT_MDBGCTRL1_READY_BIT 0x01000000 | ||
1061 | #ifdef METAC_2_1 | ||
1062 | #define DBGPORT_MDBGCTRL1_DEFERRID_BITS 0x00E00000 | ||
1063 | #define DBGPORT_MDBGCTRL1_DEFERRID_S 21 | ||
1064 | #define DBGPORT_MDBGCTRL1_DEFERR_BIT 0x00100000 | ||
1065 | #endif /* METAC_2_1 */ | ||
1066 | #define DBGPORT_MDBGCTRL1_WR_ACTIVE_BIT 0x00040000 | ||
1067 | #define DBGPORT_MDBGCTRL1_COND_LOCK2_BIT 0x00020000 | ||
1068 | #define DBGPORT_MDBGCTRL1_LOCK2_BIT 0x00010000 | ||
1069 | #define DBGPORT_MDBGCTRL1_DIAGNOSE_BIT 0x00008000 | ||
1070 | #define DBGPORT_MDBGCTRL1_FORCEDIAG_BIT 0x00004000 | ||
1071 | #define DBGPORT_MDBGCTRL1_MEMFAULT_BITS 0x00003000 | ||
1072 | #define DBGPORT_MDBGCTRL1_MEMFAULT_S 12 | ||
1073 | #define DBGPORT_MDBGCTRL1_TRIGGER_BIT 0x00000100 | ||
1074 | #ifdef METAC_2_1 | ||
1075 | #define DBGPORT_MDBGCTRL1_INTSPECIAL_BIT 0x00000080 | ||
1076 | #define DBGPORT_MDBGCTRL1_INTRUSIVE_BIT 0x00000040 | ||
1077 | #endif /* METAC_2_1 */ | ||
1078 | #define DBGPORT_MDBGCTRL1_THREAD_BITS 0x00000030 /* Thread mask selects threads */ | ||
1079 | #define DBGPORT_MDBGCTRL1_THREAD_S 4 | ||
1080 | #define DBGPORT_MDBGCTRL1_TRANS_SIZE_BITS 0x0000000C | ||
1081 | #define DBGPORT_MDBGCTRL1_TRANS_SIZE_S 2 | ||
1082 | #define DBGPORT_MDBGCTRL1_TRANS_SIZE_32_BIT 0x00000000 | ||
1083 | #define DBGPORT_MDBGCTRL1_TRANS_SIZE_16_BIT 0x00000004 | ||
1084 | #define DBGPORT_MDBGCTRL1_TRANS_SIZE_8_BIT 0x00000008 | ||
1085 | #define DBGPORT_MDBGCTRL1_BYTE_ROUND_BITS 0x00000003 | ||
1086 | #define DBGPORT_MDBGCTRL1_BYTE_ROUND_S 0 | ||
1087 | #define DBGPORT_MDBGCTRL1_BYTE_ROUND_8_BIT 0x00000001 | ||
1088 | #define DBGPORT_MDBGCTRL1_BYTE_ROUND_16_BIT 0x00000002 | ||
1089 | |||
1090 | |||
1091 | /* L2 Cache registers */ | ||
1092 | #define SYSC_L2C_INIT 0x048300C0 | ||
1093 | #define SYSC_L2C_INIT_INIT 1 | ||
1094 | #define SYSC_L2C_INIT_IN_PROGRESS 0 | ||
1095 | #define SYSC_L2C_INIT_COMPLETE 1 | ||
1096 | |||
1097 | #define SYSC_L2C_ENABLE 0x048300D0 | ||
1098 | #define SYSC_L2C_ENABLE_ENABLE_BIT 0x00000001 | ||
1099 | #define SYSC_L2C_ENABLE_PFENABLE_BIT 0x00000002 | ||
1100 | |||
1101 | #define SYSC_L2C_PURGE 0x048300C8 | ||
1102 | #define SYSC_L2C_PURGE_PURGE 1 | ||
1103 | #define SYSC_L2C_PURGE_IN_PROGRESS 0 | ||
1104 | #define SYSC_L2C_PURGE_COMPLETE 1 | ||
1105 | |||
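A purge is started by writing SYSC_L2C_PURGE_PURGE and has finished once the register reads back SYSC_L2C_PURGE_COMPLETE (reading 0 while in progress); a minimal busy-wait sketch, with l2c_purge() as a hypothetical helper (SYSC_L2C_INIT follows the same start/poll protocol):

#include <stdint.h>

#define SYSC_L2C_PURGE          0x048300C8
#define SYSC_L2C_PURGE_PURGE    1
#define SYSC_L2C_PURGE_COMPLETE 1

static void l2c_purge(void)
{
	volatile uint32_t *reg =
		(volatile uint32_t *)(uintptr_t)SYSC_L2C_PURGE;

	*reg = SYSC_L2C_PURGE_PURGE;
	while (*reg != SYSC_L2C_PURGE_COMPLETE)
		; /* reads 0 while the purge is in progress */
}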
1106 | #endif /* _ASM_METAG_MEM_H_ */ | ||
diff --git a/arch/metag/include/asm/metag_regs.h b/arch/metag/include/asm/metag_regs.h new file mode 100644 index 000000000000..acf4b8e6e9d1 --- /dev/null +++ b/arch/metag/include/asm/metag_regs.h | |||
@@ -0,0 +1,1184 @@ | |||
1 | /* | ||
2 | * asm/metag_regs.h | ||
3 | * | ||
4 | * Copyright (C) 2000-2007, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Various defines for Meta core (non memory-mapped) registers. | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_METAG_REGS_H_ | ||
14 | #define _ASM_METAG_REGS_H_ | ||
15 | |||
16 | /* | ||
17 | * CHIP Unit Identifiers and Valid/Global register number masks | ||
18 | * ------------------------------------------------------------ | ||
19 | */ | ||
20 | #define TXUCT_ID 0x0 /* Control unit regs */ | ||
21 | #ifdef METAC_1_2 | ||
22 | #define TXUCT_MASK 0xFF0FFFFF /* Valid regs 0..31 */ | ||
23 | #else | ||
24 | #define TXUCT_MASK 0xFF1FFFFF /* Valid regs 0..31 */ | ||
25 | #endif | ||
26 | #define TGUCT_MASK 0x00000000 /* No global regs */ | ||
27 | #define TXUD0_ID 0x1 /* Data unit regs */ | ||
28 | #define TXUD1_ID 0x2 | ||
29 | #define TXUDX_MASK 0xFFFFFFFF /* Valid regs 0..31 */ | ||
30 | #define TGUDX_MASK 0xFFFF0000 /* Global regs for base inst */ | ||
31 | #define TXUDXDSP_MASK 0x0F0FFFFF /* Valid DSP regs */ | ||
32 | #define TGUDXDSP_MASK 0x0E0E0000 /* Global DSP ACC regs */ | ||
33 | #define TXUA0_ID 0x3 /* Address unit regs */ | ||
34 | #define TXUA1_ID 0x4 | ||
35 | #define TXUAX_MASK 0x0000FFFF /* Valid regs 0-15 */ | ||
36 | #define TGUAX_MASK 0x0000FF00 /* Global regs 8-15 */ | ||
37 | #define TXUPC_ID 0x5 /* PC registers */ | ||
38 | #define TXUPC_MASK 0x00000003 /* Valid regs 0- 1 */ | ||
39 | #define TGUPC_MASK 0x00000000 /* No global regs */ | ||
40 | #define TXUPORT_ID 0x6 /* Ports are not registers */ | ||
41 | #define TXUTR_ID 0x7 | ||
42 | #define TXUTR_MASK 0x0000005F /* Valid regs 0-3,4,6 */ | ||
43 | #define TGUTR_MASK 0x00000000 /* No global regs */ | ||
44 | #ifdef METAC_2_1 | ||
45 | #define TXUTT_ID 0x8 | ||
46 | #define TXUTT_MASK 0x0000000F /* Valid regs 0-3 */ | ||
47 | #define TGUTT_MASK 0x00000010 /* Global reg 4 */ | ||
48 | #define TXUFP_ID 0x9 /* FPU regs */ | ||
49 | #define TXUFP_MASK 0x0000FFFF /* Valid regs 0-15 */ | ||
50 | #define TGUFP_MASK 0x00000000 /* No global regs */ | ||
51 | #endif /* METAC_2_1 */ | ||
52 | |||
53 | #ifdef METAC_1_2 | ||
54 | #define TXUXX_MASKS { TXUCT_MASK, TXUDX_MASK, TXUDX_MASK, TXUAX_MASK, \ | ||
55 | TXUAX_MASK, TXUPC_MASK, 0, TXUTR_MASK, \ | ||
56 | 0, 0, 0, 0, 0, 0, 0, 0 } | ||
57 | #define TGUXX_MASKS { TGUCT_MASK, TGUDX_MASK, TGUDX_MASK, TGUAX_MASK, \ | ||
58 | TGUAX_MASK, TGUPC_MASK, 0, TGUTR_MASK, \ | ||
59 | 0, 0, 0, 0, 0, 0, 0, 0 } | ||
60 | #else /* METAC_1_2 */ | ||
61 | #define TXUXX_MASKS { TXUCT_MASK, TXUDX_MASK, TXUDX_MASK, TXUAX_MASK, \ | ||
62 | TXUAX_MASK, TXUPC_MASK, 0, TXUTR_MASK, \ | ||
63 | TXUTT_MASK, TXUFP_MASK, 0, 0, \ | ||
64 | 0, 0, 0, 0 } | ||
65 | #define TGUXX_MASKS { TGUCT_MASK, TGUDX_MASK, TGUDX_MASK, TGUAX_MASK, \ | ||
66 | TGUAX_MASK, TGUPC_MASK, 0, TGUTR_MASK, \ | ||
67 | TGUTT_MASK, TGUFP_MASK, 0, 0, \ | ||
68 | 0, 0, 0, 0 } | ||
69 | #endif /* !METAC_1_2 */ | ||
70 | |||
71 | #define TXUXXDSP_MASKS { 0, TXUDXDSP_MASK, TXUDXDSP_MASK, 0, 0, 0, 0, 0, \ | ||
72 | 0, 0, 0, 0, 0, 0, 0, 0 } | ||
73 | #define TGUXXDSP_MASKS { 0, TGUDXDSP_MASK, TGUDXDSP_MASK, 0, 0, 0, 0, 0, \ | ||
74 | 0, 0, 0, 0, 0, 0, 0, 0 } | ||
75 | |||
76 | /* ------------------------------------------------------------------------- | ||
77 | ; DATA AND ADDRESS UNIT REGISTERS | ||
78 | ; -----------------------------------------------------------------------*/ | ||
79 | /* | ||
80 | Thread local D0 registers | ||
81 | */ | ||
82 | /* D0.0 ; Holds 32-bit result, can be used as scratch */ | ||
83 | #define D0Re0 D0.0 | ||
84 | /* D0.1 ; Used to pass Arg6_32 */ | ||
85 | #define D0Ar6 D0.1 | ||
86 | /* D0.2 ; Used to pass Arg4_32 */ | ||
87 | #define D0Ar4 D0.2 | ||
88 | /* D0.3 ; Used to pass Arg2_32 to a called routine (see D1.3 below) */ | ||
89 | #define D0Ar2 D0.3 | ||
90 | /* D0.4 ; Can be used as scratch; used to save A0FrP in entry sequences */ | ||
91 | #define D0FrT D0.4 | ||
92 | /* D0.5 ; C compiler assumes preservation, save with D1.5 if used */ | ||
93 | /* D0.6 ; C compiler assumes preservation, save with D1.6 if used */ | ||
94 | /* D0.7 ; C compiler assumes preservation, save with D1.7 if used */ | ||
95 | /* D0.8 ; Use of D0.8 and above is not encouraged */ | ||
96 | /* D0.9 */ | ||
97 | /* D0.10 */ | ||
98 | /* D0.11 */ | ||
99 | /* D0.12 */ | ||
100 | /* D0.13 */ | ||
101 | /* D0.14 */ | ||
102 | /* D0.15 */ | ||
103 | /* | ||
104 | Thread local D1 registers | ||
105 | */ | ||
106 | /* D1.0 ; Holds top 32-bits of 64-bit result, can be used as scratch */ | ||
107 | #define D1Re0 D1.0 | ||
108 | /* D1.1 ; Used to pass Arg5_32 */ | ||
109 | #define D1Ar5 D1.1 | ||
110 | /* D1.2 ; Used to pass Arg3_32 */ | ||
111 | #define D1Ar3 D1.2 | ||
112 | /* D1.3 ; Used to pass Arg1_32 (first 32-bit argument) to a called routine */ | ||
113 | #define D1Ar1 D1.3 | ||
114 | /* D1.4 ; Used for Return Pointer, save during entry with A0FrP (via D0.4) */ | ||
115 | #define D1RtP D1.4 | ||
116 | /* D1.5 ; C compiler assumes preservation, save if used */ | ||
117 | /* D1.6 ; C compiler assumes preservation, save if used */ | ||
118 | /* D1.7 ; C compiler assumes preservation, save if used */ | ||
119 | /* D1.8 ; Use of D1.8 and above is not encouraged */ | ||
120 | /* D1.9 */ | ||
121 | /* D1.10 */ | ||
122 | /* D1.11 */ | ||
123 | /* D1.12 */ | ||
124 | /* D1.13 */ | ||
125 | /* D1.14 */ | ||
126 | /* D1.15 */ | ||
127 | /* | ||
128 | Thread local A0 registers | ||
129 | */ | ||
130 | /* A0.0 ; Primary stack pointer */ | ||
131 | #define A0StP A0.0 | ||
132 | /* A0.1 ; Used as local frame pointer in C, save if used (via D0.4) */ | ||
133 | #define A0FrP A0.1 | ||
134 | /* A0.2 */ | ||
135 | /* A0.3 */ | ||
136 | /* A0.4 ; Use of A0.4 and above is not encouraged */ | ||
137 | /* A0.5 */ | ||
138 | /* A0.6 */ | ||
139 | /* A0.7 */ | ||
140 | /* | ||
141 | Thread local A1 registers | ||
142 | */ | ||
143 | /* A1.0 ; Global static chain pointer - do not modify */ | ||
144 | #define A1GbP A1.0 | ||
145 | /* A1.1 ; Local static chain pointer in C, can be used as scratch */ | ||
146 | #define A1LbP A1.1 | ||
147 | /* A1.2 */ | ||
148 | /* A1.3 */ | ||
149 | /* A1.4 ; Use of A1.4 and above is not encouraged */ | ||
150 | /* A1.5 */ | ||
151 | /* A1.6 */ | ||
152 | /* A1.7 */ | ||
153 | #ifdef METAC_2_1 | ||
154 | /* Renameable registers for use with Fast Interrupts */ | ||
155 | /* The interrupt stack pointer (usually a global register) */ | ||
156 | #define A0IStP A0IReg | ||
157 | /* The interrupt global pointer (usually a global register) */ | ||
158 | #define A1IGbP A1IReg | ||
159 | #endif | ||
160 | /* | ||
161 | Further registers may be globally allocated via linkage/loading tools; | ||
162 | normally they are not used. | ||
163 | */ | ||
164 | /*------------------------------------------------------------------------- | ||
165 | ; STACK STRUCTURE and CALLING CONVENTION | ||
166 | ; -----------------------------------------------------------------------*/ | ||
167 | /* | ||
168 | ; Calling convention indicates that the following is the state of the | ||
169 | ; stack frame at the start of a routine- | ||
170 | ; | ||
171 | ; Arg9_32 [A0StP+#-12] | ||
172 | ; Arg8_32 [A0StP+#- 8] | ||
173 | ; Arg7_32 [A0StP+#- 4] | ||
174 | ; A0StP-> | ||
175 | ; | ||
176 | ; Registers D1.3, D0.3, ..., to D0.1 are used to pass Arg1_32 to Arg6_32 | ||
177 | ; respectively. If a routine needs to store them on the stack in order | ||
178 | ; to make sub-calls or because of the general complexity of the routine it | ||
179 | ; is best to dump these registers immediately at the start of a routine | ||
180 | ; using a MSETL or SETL instruction- | ||
181 | ; | ||
182 | ; MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2; Only dump arguments expected | ||
183 | ;or SETL [A0StP+#8++],D0Ar2 ; Up to two 32-bit args expected | ||
184 | ; | ||
185 | ; For non-leaf routines it is always necessary to save and restore at least | ||
186 | ; the return address value D1RtP on the stack. Also by convention if the | ||
187 | ; frame is saved then a new A0FrP value must be set-up. So for non-leaf | ||
188 | ; routines at this point both these registers must be saved onto the stack | ||
189 | ; using a SETL instruction and the new A0FrP value is then set-up- | ||
190 | ; | ||
191 | ; MOV D0FrT,A0FrP | ||
192 | ; ADD A0FrP,A0StP,#0 | ||
193 | ; SETL [A0StP+#8++],D0FrT,D1RtP | ||
194 | ; | ||
195 | ; Registers D0.5, D1.5, to D1.7 are assumed to be preserved across calls so | ||
196 | ; a SETL or MSETL instruction can be used to save the current state | ||
197 | ; of these registers if they are modified by the current routine- | ||
198 | ; | ||
199 | ; MSETL [A0StP],D0.5,D0.6,D0.7 ; Only save registers modified | ||
200 | ;or SETL [A0StP+#8++],D0.5 ; Only D0.5 and/or D1.5 modified | ||
201 | ; | ||
202 | ; All of the above sequences can be combined into one maximal case- | ||
203 | ; | ||
204 | ; MOV D0FrT,A0FrP ; Save and calculate new frame pointer | ||
205 | ; ADD A0FrP,A0StP,#(ARS) | ||
206 | ; MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7 | ||
207 | ; | ||
208 | ; Having completed the above sequence the only remaining task on routine | ||
209 | ; entry is to reserve any local and outgoing argument storage space on the | ||
210 | ; stack. This instruction may be omitted if the size of this region is zero- | ||
211 | ; | ||
212 | ; ADD A0StP,A0StP,#(LCS) | ||
213 | ; | ||
214 | ; LCS is the first example use of one of a number of standard local defined | ||
215 | ; values that can be created to make assembler code more readable and | ||
216 | ; potentially more robust- | ||
217 | ; | ||
218 | ; #define ARS 0x18 ; Register arg bytes saved on stack | ||
219 | ; #define FRS 0x20 ; Frame save area size in bytes | ||
220 | ; #define LCS 0x00 ; Locals and Outgoing arg size | ||
221 | ; #define ARO (LCS+FRS) ; Stack offset to access args | ||
222 | ; | ||
223 | ; All of the above defines should be undefined (#undef) at the end of each | ||
224 | ; routine to avoid accidental use in the next routine. | ||
225 | ; | ||
226 | ; Given all of the above the following stack structure is expected during | ||
227 | ; the body of a routine if all args passed in registers are saved during | ||
228 | ; entry- | ||
229 | ; | ||
230 | ; ; 'Incoming args area' | ||
231 | ; Arg10_32 [A0StP+#-((10*4)+ARO)] Arg9_32 [A0StP+#-(( 9*4)+ARO)] | ||
232 | ; Arg8_32 [A0StP+#-(( 8*4)+ARO)] Arg7_32 [A0StP+#-(( 7*4)+ARO)] | ||
233 | ;--- Call point | ||
234 | ; D0Ar6= Arg6_32 [A0StP+#-(( 6*4)+ARO)] D1Ar5=Arg5_32 [A0StP+#-(( 5*4)+ARO)] | ||
235 | ; D0Ar4= Arg4_32 [A0StP+#-(( 4*4)+ARO)] D1Ar3=Arg3_32 [A0StP+#-(( 3*4)+ARO)] | ||
236 | ; D0Ar2= Arg2_32 [A0StP+#-(( 2*4)+ARO)] D1Ar1=Arg1_32 [A0StP+#-(( 1*4)+ARO)] | ||
237 | ; ; 'Frame area' | ||
238 | ; A0FrP-> D0FrT, D1RtP, | ||
239 | ; D0.5, D1.5, | ||
240 | ; D0.6, D1.6, | ||
241 | ; D0.7, D1.7, | ||
242 | ; ; 'Locals area' | ||
243 | ; Loc0_32 [A0StP+# (( 0*4)-LCS)], Loc1_32 [A0StP+# (( 1*4)-LCS)] | ||
244 | ; .... other locals | ||
245 | ; Locn_32 [A0StP+# (( n*4)-LCS)] | ||
246 | ; ; 'Outgoing args area' | ||
247 | ; Outm_32 [A0StP+#- ( m*4)] .... other outgoing args | ||
248 | ; Out8_32 [A0StP+#- ( 2*4)] Out7_32 [A0StP+#- ( 1*4)] | ||
249 | ; A0StP-> (Out1_32-Out6_32 in regs D1Ar1-D0Ar6) | ||
250 | ; | ||
251 | ; The exit sequence for a non-leaf routine can use the frame pointer created | ||
252 | ; in the entry sequence to optimise the recovery of the full state- | ||
253 | ; | ||
254 | ; MGETL D0FrT,D0.5,D0.6,D0.7,[A0FrP] | ||
255 | ; SUB A0StP,A0FrP,#(ARS+FRS) | ||
256 | ; MOV A0FrP,D0FrT | ||
257 | ; MOV PC,D1RtP | ||
258 | ; | ||
259 | ; Having described the most complex non-leaf case above, it is worth noting | ||
260 | ; that if a routine is a leaf and does not use any of the caller-preserved | ||
261 | ; state, the routine can be implemented as- | ||
262 | ; | ||
263 | ; ADD A0StP,A0StP,#LCS | ||
264 | ; .... body of routine | ||
265 | ; SUB A0StP,A0StP,#LCS | ||
266 | ; MOV PC,D1RtP | ||
267 | ; | ||
268 | ; The stack adjustments can also be omitted if no local storage is required. | ||
269 | ; | ||
270 | ; Another exit sequence structure is more applicable for a leaf routine | ||
271 | ; with no local frame pointer saved/generated in which the call-saved | ||
272 | ; registers need to be saved and restored- | ||
273 | ; | ||
274 | ; MSETL [A0StP],D0.5,D0.6,D0.7 ; Hence FRS is 0x18, ARS is 0x00 | ||
275 | ; ADD A0StP,A0StP,#LCS | ||
276 | ; .... body of routine | ||
277 | ; GETL D0.5,D1.5,[A0StP+#((0*8)-(FRS+LCS))] | ||
278 | ; GETL D0.6,D1.6,[A0StP+#((1*8)-(FRS+LCS))] | ||
279 | ; GETL D0.7,D1.7,[A0StP+#((2*8)-(FRS+LCS))] | ||
280 | ; SUB A0StP,A0StP,#(ARS+FRS+LCS) | ||
281 | ; MOV PC,D1RtP | ||
282 | ; | ||
283 | ; Lastly, to support profiling assembler code should use a fixed entry/exit | ||
284 | ; sequence if the trigger define _GMON_ASM is defined- | ||
285 | ; | ||
286 | ; #ifndef _GMON_ASM | ||
287 | ; ... optimised entry code | ||
288 | ; #else | ||
289 | ; ; Profiling entry case | ||
290 | ; MOV D0FrT,A0FrP ; Save and calculate new frame pointer | ||
291 | ; ADD A0FrP,A0StP,#(ARS) | ||
292 | ; MSETL [A0StP],...,D0FrT,... or SETL [A0FrP],D0FrT,D1RtP | ||
293 | ; CALLR D0FrT,_mcount_wrapper | ||
294 | ; #endif | ||
295 | ; ... body of routine | ||
296 | ; #ifndef _GMON_ASM | ||
297 | ; ... optimised exit code | ||
298 | ; #else | ||
299 | ; ; Profiling exit case | ||
300 | ; MGETL D0FrT,...,[A0FrP] or GETL D0FrT,D1RtP,[A0FrP++] | ||
301 | ; SUB A0StP,A0FrP,#(ARS+FRS) | ||
302 | ; MOV A0FrP,D0FrT | ||
303 | ; MOV PC,D1RtP | ||
304 | ; #endif | ||
305 | |||
306 | |||
307 | ; ------------------------------------------------------------------------- | ||
308 | ; CONTROL UNIT REGISTERS | ||
309 | ; ------------------------------------------------------------------------- | ||
310 | ; | ||
311 | ; See the assembler guide, hardware documentation, or the field values | ||
312 | ; defined below for some details of the use of these registers. | ||
313 | */ | ||
314 | #define TXENABLE CT.0 /* Need to define bit-field values in these */ | ||
315 | #define TXMODE CT.1 | ||
316 | #define TXSTATUS CT.2 /* DEFAULT 0x00020000 */ | ||
317 | #define TXRPT CT.3 | ||
318 | #define TXTIMER CT.4 | ||
319 | #define TXL1START CT.5 | ||
320 | #define TXL1END CT.6 | ||
321 | #define TXL1COUNT CT.7 | ||
322 | #define TXL2START CT.8 | ||
323 | #define TXL2END CT.9 | ||
324 | #define TXL2COUNT CT.10 | ||
325 | #define TXBPOBITS CT.11 | ||
326 | #define TXMRSIZE CT.12 | ||
327 | #define TXTIMERI CT.13 | ||
328 | #define TXDRCTRL CT.14 /* DEFAULT 0x0XXXF0F0 */ | ||
329 | #define TXDRSIZE CT.15 | ||
330 | #define TXCATCH0 CT.16 | ||
331 | #define TXCATCH1 CT.17 | ||
332 | #define TXCATCH2 CT.18 | ||
333 | #define TXCATCH3 CT.19 | ||
334 | |||
335 | #ifdef METAC_2_1 | ||
336 | #define TXDEFR CT.20 | ||
337 | #define TXCPRS CT.21 | ||
338 | #endif | ||
339 | |||
340 | #define TXINTERN0 CT.23 | ||
341 | #define TXAMAREG0 CT.24 | ||
342 | #define TXAMAREG1 CT.25 | ||
343 | #define TXAMAREG2 CT.26 | ||
344 | #define TXAMAREG3 CT.27 | ||
345 | #define TXDIVTIME CT.28 /* DEFAULT 0x00000001 */ | ||
346 | #define TXPRIVEXT CT.29 /* DEFAULT 0x003B0000 */ | ||
347 | #define TXTACTCYC CT.30 | ||
348 | #define TXIDLECYC CT.31 | ||
349 | |||
350 | /***************************************************************************** | ||
351 | * CONTROL UNIT REGISTER BITS | ||
352 | ****************************************************************************/ | ||
353 | /* | ||
354 | * The following registers and where appropriate the sub-fields of those | ||
355 | * registers are defined for pervasive use in controlling program flow. | ||
356 | */ | ||
357 | |||
358 | /* | ||
359 | * TXENABLE register fields - only the thread id is routinely useful | ||
360 | */ | ||
361 | #define TXENABLE_REGNUM 0 | ||
362 | #define TXENABLE_THREAD_BITS 0x00000700 | ||
363 | #define TXENABLE_THREAD_S 8 | ||
364 | #define TXENABLE_REV_STEP_BITS 0x000000F0 | ||
365 | #define TXENABLE_REV_STEP_S 4 | ||
366 | |||
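TXENABLE is a core (non memory-mapped) register, so the raw value must first be read by an assembler sequence; once it is in hand, the thread id falls out with a mask and shift, as in this sketch (txenable_thread is an illustrative helper):

#include <stdint.h>

#define TXENABLE_THREAD_BITS 0x00000700
#define TXENABLE_THREAD_S    8

static inline unsigned int txenable_thread(uint32_t txenable)
{
	return (txenable & TXENABLE_THREAD_BITS) >> TXENABLE_THREAD_S;
}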
367 | /* | ||
368 | * TXMODE register - controls extensions of the instruction set | ||
369 | */ | ||
370 | #define TXMODE_REGNUM 1 | ||
371 | #define TXMODE_DEFAULT 0 /* All fields default to zero */ | ||
372 | |||
373 | /* | ||
374 | * TXSTATUS register - contains a couple of stable bits that can be used | ||
375 | * to determine the privilege processing level and interrupt | ||
376 | * processing level of the current thread. | ||
377 | */ | ||
378 | #define TXSTATUS_REGNUM 2 | ||
379 | #define TXSTATUS_PSTAT_BIT 0x00020000 /* -> Privilege active */ | ||
380 | #define TXSTATUS_PSTAT_S 17 | ||
381 | #define TXSTATUS_ISTAT_BIT 0x00010000 /* -> In interrupt state */ | ||
382 | #define TXSTATUS_ISTAT_S 16 | ||
383 | |||
384 | /* | ||
385 | * These are all relatively boring registers, mostly full 32-bit | ||
386 | */ | ||
387 | #define TXRPT_REGNUM 3 /* Repeat counter for XFR... instructions */ | ||
388 | #define TXTIMER_REGNUM 4 /* Timer-- causes timer trigger on overflow */ | ||
389 | #define TXL1START_REGNUM 5 /* Hardware Loop 1 Start-PC/End-PC/Count */ | ||
390 | #define TXL1END_REGNUM 6 | ||
391 | #define TXL1COUNT_REGNUM 7 | ||
392 | #define TXL2START_REGNUM 8 /* Hardware Loop 2 Start-PC/End-PC/Count */ | ||
393 | #define TXL2END_REGNUM 9 | ||
394 | #define TXL2COUNT_REGNUM 10 | ||
395 | #define TXBPOBITS_REGNUM 11 /* Branch predict override bits - tune perf */ | ||
396 | #define TXTIMERI_REGNUM 13 /* Timer-- time based interrupt trigger */ | ||
397 | |||
398 | /* | ||
399 | * TXDIVTIME register is routinely read to calculate the time-base for | ||
400 | * the TXTIMER register. | ||
401 | */ | ||
402 | #define TXDIVTIME_REGNUM 28 | ||
403 | #define TXDIVTIME_DIV_BITS 0x000000FF | ||
404 | #define TXDIVTIME_DIV_S 0 | ||
405 | #define TXDIVTIME_DIV_MIN 0x00000001 /* Maximum resolution */ | ||
406 | #define TXDIVTIME_DIV_MAX 0x00000100 /* 1/1 -> 1/256 resolution */ | ||
407 | #define TXDIVTIME_BASE_HZ 1000000 /* Timers run at 1MHz @1/1 */ | ||
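A sketch of deriving the TXTIMER tick rate from a raw TXDIVTIME value; treating a DIV field of 0 as the maximum divide of 256 is an assumption for illustration, not something this header confirms:

	static unsigned long txtimer_hz(unsigned int txdivtime)
	{
		unsigned int div = (txdivtime & TXDIVTIME_DIV_BITS)
						>> TXDIVTIME_DIV_S;

		if (div == 0)			/* assumed: 0 encodes 1/256 */
			div = TXDIVTIME_DIV_MAX;

		return TXDIVTIME_BASE_HZ / div;	/* 1MHz at 1/1 */
	}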
408 | |||
409 | /* | ||
410 | * TXPRIVEXT register can be consulted to decide if write access to a | ||
411 | * part of the thread's register set is not permitted when in | ||
412 | * unprivileged mode (PSTAT == 0). | ||
413 | */ | ||
414 | #define TXPRIVEXT_REGNUM 29 | ||
415 | #define TXPRIVEXT_COPRO_BITS 0xFF000000 /* Co-processor 0-7 */ | ||
416 | #define TXPRIVEXT_COPRO_S 24 | ||
417 | #ifndef METAC_1_2 | ||
418 | #define TXPRIVEXT_TXTIMER_BIT 0x00080000 /* TXTIMER priv */ | ||
419 | #define TXPRIVEXT_TRACE_BIT 0x00040000 /* TTEXEC|TTCTRL|GTEXEC */ | ||
420 | #endif | ||
421 | #define TXPRIVEXT_TXTRIGGER_BIT 0x00020000 /* TXSTAT|TXMASK|TXPOLL */ | ||
422 | #define TXPRIVEXT_TXGBLCREG_BIT 0x00010000 /* Global common regs */ | ||
423 | #define TXPRIVEXT_CBPRIV_BIT 0x00008000 /* Mem i/f dump priv */ | ||
424 | #define TXPRIVEXT_ILOCK_BIT 0x00004000 /* LOCK inst priv */ | ||
425 | #define TXPRIVEXT_TXITACCYC_BIT 0x00002000 /* TXIDLECYC|TXTACTCYC */ | ||
426 | #define TXPRIVEXT_TXDIVTIME_BIT 0x00001000 /* TXDIVTIME priv */ | ||
427 | #define TXPRIVEXT_TXAMAREGX_BIT 0x00000800 /* TXAMAREGX priv */ | ||
428 | #define TXPRIVEXT_TXTIMERI_BIT 0x00000400 /* TXTIMERI priv */ | ||
429 | #define TXPRIVEXT_TXSTATUS_BIT 0x00000200 /* TXSTATUS priv */ | ||
430 | #define TXPRIVEXT_TXDISABLE_BIT 0x00000100 /* TXENABLE priv */ | ||
431 | #ifndef METAC_1_2 | ||
432 | #define TXPRIVEXT_MINIMON_BIT 0x00000080 /* Enable Minim features */ | ||
433 | #define TXPRIVEXT_OLDBCCON_BIT 0x00000020 /* Restore Static predictions */ | ||
434 | #define TXPRIVEXT_ALIGNREW_BIT 0x00000010 /* Align & precise checks */ | ||
435 | #endif | ||
436 | #define TXPRIVEXT_KEEPPRI_BIT 0x00000008 /* Use AMA_Priority if ISTAT=1*/ | ||
437 | #define TXPRIVEXT_TXTOGGLEI_BIT 0x00000001 /* TX.....I priv */ | ||
438 | |||
439 | /* | ||
440 | * TXTACTCYC register - counts instructions issued for this thread | ||
441 | */ | ||
442 | #define TXTACTCYC_REGNUM 30 | ||
443 | #define TXTACTCYC_COUNT_MASK 0x00FFFFFF | ||
444 | |||
445 | /* | ||
446 | * TXIDLECYC register - counts idle cycles | ||
447 | */ | ||
448 | #define TXIDLECYC_REGNUM 31 | ||
449 | #define TXIDLECYC_COUNT_MASK 0x00FFFFFF | ||
450 | |||
451 | /***************************************************************************** | ||
452 | * DSP EXTENSIONS | ||
453 | ****************************************************************************/ | ||
454 | /* | ||
455 | * The following values relate to fields and controls that only a program | ||
456 | * using the DSP extensions of the META instruction set needs to know. | ||
457 | */ | ||
458 | |||
459 | |||
460 | #ifndef METAC_1_2 | ||
461 | /* | ||
462 | * Allow co-processor hardware to replace the read pipeline data source in | ||
463 | * appropriate cases. | ||
464 | */ | ||
465 | #define TXMODE_RDCPEN_BIT 0x00800000 | ||
466 | #endif | ||
467 | |||
468 | /* | ||
469 | * Address unit addressing modes | ||
470 | */ | ||
471 | #define TXMODE_A1ADDR_BITS 0x00007000 | ||
472 | #define TXMODE_A1ADDR_S 12 | ||
473 | #define TXMODE_A0ADDR_BITS 0x00000700 | ||
474 | #define TXMODE_A0ADDR_S 8 | ||
475 | #define TXMODE_AXADDR_MODULO 3 | ||
476 | #define TXMODE_AXADDR_REVB 4 | ||
477 | #define TXMODE_AXADDR_REVW 5 | ||
478 | #define TXMODE_AXADDR_REVD 6 | ||
479 | #define TXMODE_AXADDR_REVL 7 | ||
480 | |||
481 | /* | ||
482 | * Data unit OverScale select (default 0 -> normal, 1 -> top 16 bits) | ||
483 | */ | ||
484 | #define TXMODE_DXOVERSCALE_BIT 0x00000080 | ||
485 | |||
486 | /* | ||
487 | * Data unit MX mode select (default 0 -> MX16, 1 -> MX8) | ||
488 | */ | ||
489 | #define TXMODE_M8_BIT 0x00000040 | ||
490 | |||
491 | /* | ||
492 | * Data unit accumulator saturation point (default -> 40 bit accumulator) | ||
493 | */ | ||
494 | #define TXMODE_DXACCSAT_BIT 0x00000020 /* Set for 32-bit accumulator */ | ||
495 | |||
496 | /* | ||
497 | * Data unit accumulator saturation enable (default 0 -> no saturation) | ||
498 | */ | ||
499 | #define TXMODE_DXSAT_BIT 0x00000010 | ||
500 | |||
501 | /* | ||
502 | * Data unit master rounding control (default 0 -> normal, 1 -> convergent) | ||
503 | */ | ||
504 | #define TXMODE_DXROUNDING_BIT 0x00000008 | ||
505 | |||
506 | /* | ||
507 | * Data unit product shift for fractional arithmetic (default off) | ||
508 | */ | ||
509 | #define TXMODE_DXPRODSHIFT_BIT 0x00000004 | ||
510 | |||
511 | /* | ||
512 | * Select the arithmetic mode (multiply mostly) for both data units | ||
513 | */ | ||
514 | #define TXMODE_DXARITH_BITS 0x00000003 | ||
515 | #define TXMODE_DXARITH_32 3 | ||
516 | #define TXMODE_DXARITH_32H 2 | ||
517 | #define TXMODE_DXARITH_S16 1 | ||
518 | #define TXMODE_DXARITH_16 0 | ||
519 | |||
520 | /* | ||
521 | * TXMRSIZE register value only relevant when DSP modulo addressing active | ||
522 | */ | ||
523 | #define TXMRSIZE_REGNUM 12 | ||
524 | #define TXMRSIZE_MIN 0x0002 /* 0, 1 -> normal addressing logic */ | ||
525 | #define TXMRSIZE_MAX 0xFFFF | ||
526 | |||
527 | /* | ||
528 | * TXDRCTRL register can be used to detect the actual size of the DSP RAM | ||
529 | * partitions allocated to this thread. | ||
530 | */ | ||
531 | #define TXDRCTRL_REGNUM 14 | ||
532 | #define TXDRCTRL_SINESIZE_BITS 0x0F000000 | ||
533 | #define TXDRCTRL_SINESIZE_S 24 | ||
534 | #define TXDRCTRL_RAMSZPOW_BITS 0x001F0000 /* Limit = (1<<RAMSZPOW)-1 */ | ||
535 | #define TXDRCTRL_RAMSZPOW_S 16 | ||
536 | #define TXDRCTRL_D1RSZAND_BITS 0x0000F000 /* Mask top 4 bits - D1 */ | ||
537 | #define TXDRCTRL_D1RSZAND_S 12 | ||
538 | #define TXDRCTRL_D0RSZAND_BITS 0x000000F0 /* Mask top 4 bits - D0 */ | ||
539 | #define TXDRCTRL_D0RSZAND_S 4 | ||
540 | /* Given extracted RAMSZPOW and DnRSZAND fields this returns the size */ | ||
541 | #define TXDRCTRL_DXSIZE(Pow, AndBits) \ | ||
542 | ((((~(AndBits)) & 0x0f) + 1) << ((Pow)-4)) | ||
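For example, the size of the D0 partition could be computed from a raw TXDRCTRL value as follows (a sketch; txdrctrl is assumed to have been read elsewhere):

	unsigned int pow     = (txdrctrl & TXDRCTRL_RAMSZPOW_BITS)
						>> TXDRCTRL_RAMSZPOW_S;
	unsigned int andbits = (txdrctrl & TXDRCTRL_D0RSZAND_BITS)
						>> TXDRCTRL_D0RSZAND_S;
	unsigned int d0size  = TXDRCTRL_DXSIZE(pow, andbits);	/* bytes */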
543 | |||
544 | /* | ||
545 | * TXDRSIZE register provides modulo addressing options for each DSP RAM | ||
546 | */ | ||
547 | #define TXDRSIZE_REGNUM 15 | ||
548 | #define TXDRSIZE_R1MOD_BITS 0xFFFF0000 | ||
549 | #define TXDRSIZE_R1MOD_S 16 | ||
550 | #define TXDRSIZE_R0MOD_BITS 0x0000FFFF | ||
551 | #define TXDRSIZE_R0MOD_S 0 | ||
552 | |||
553 | #define TXDRSIZE_RBRAD_SCALE_BITS 0x70000000 | ||
554 | #define TXDRSIZE_RBRAD_SCALE_S 28 | ||
555 | #define TXDRSIZE_RBMODSIZE_BITS 0x0FFF0000 | ||
556 | #define TXDRSIZE_RBMODSIZE_S 16 | ||
557 | #define TXDRSIZE_RARAD_SCALE_BITS 0x00007000 | ||
558 | #define TXDRSIZE_RARAD_SCALE_S 12 | ||
559 | #define TXDRSIZE_RAMODSIZE_BITS 0x00000FFF | ||
560 | #define TXDRSIZE_RAMODSIZE_S 0 | ||
561 | |||
562 | /***************************************************************************** | ||
563 | * DEFERRED and BUS ERROR EXTENSION | ||
564 | ****************************************************************************/ | ||
565 | |||
566 | /* | ||
567 | * TXDEFR register - Deferred exception control | ||
568 | */ | ||
569 | #define TXDEFR_REGNUM 20 | ||
570 | #define TXDEFR_DEFAULT 0 /* All fields default to zero */ | ||
571 | |||
572 | /* | ||
573 | * Bus error state is a multi-bit positive/negative event notification from | ||
574 | * the bus infrastructure. | ||
575 | */ | ||
576 | #define TXDEFR_BUS_ERR_BIT 0x80000000 /* Set if error (LSB STATE) */ | ||
577 | #define TXDEFR_BUS_ERRI_BIT 0x40000000 /* Fetch returned error */ | ||
578 | #define TXDEFR_BUS_STATE_BITS 0x3F000000 /* Bus event/state data */ | ||
579 | #define TXDEFR_BUS_STATE_S 24 | ||
580 | #define TXDEFR_BUS_TRIG_BIT 0x00800000 /* Set when bus error seen */ | ||
581 | |||
582 | /* | ||
583 | * Bus events are collected by background code in a deferred manner unless | ||
584 | * selected to raise an extended interrupt HALT trigger when they occur. | ||
585 | */ | ||
586 | #define TXDEFR_BUS_ICTRL_BIT 0x00000080 /* Enable interrupt trigger */ | ||
587 | |||
588 | /* | ||
589 | * CHIP Automatic Mips Allocation control registers | ||
590 | * ------------------------------------------------ | ||
591 | */ | ||
592 | |||
593 | /* CT Bank AMA Registers */ | ||
594 | #define TXAMAREG0_REGNUM 24 | ||
595 | #ifdef METAC_1_2 | ||
596 | #define TXAMAREG0_CTRL_BITS 0x07000000 | ||
597 | #else /* METAC_1_2 */ | ||
598 | #define TXAMAREG0_RCOFF_BIT 0x08000000 | ||
599 | #define TXAMAREG0_DLINEHLT_BIT 0x04000000 | ||
600 | #define TXAMAREG0_DLINEDIS_BIT 0x02000000 | ||
601 | #define TXAMAREG0_CYCSTRICT_BIT 0x01000000 | ||
602 | #define TXAMAREG0_CTRL_BITS (TXAMAREG0_RCOFF_BIT | \ | ||
603 | TXAMAREG0_DLINEHLT_BIT | \ | ||
604 | TXAMAREG0_DLINEDIS_BIT | \ | ||
605 | TXAMAREG0_CYCSTRICT_BIT) | ||
606 | #endif /* !METAC_1_2 */ | ||
607 | #define TXAMAREG0_CTRL_S 24 | ||
608 | #define TXAMAREG0_MDM_BIT 0x00400000 | ||
609 | #define TXAMAREG0_MPF_BIT 0x00200000 | ||
610 | #define TXAMAREG0_MPE_BIT 0x00100000 | ||
611 | #define TXAMAREG0_MASK_BITS (TXAMAREG0_MDM_BIT | \ | ||
612 | TXAMAREG0_MPF_BIT | \ | ||
613 | TXAMAREG0_MPE_BIT) | ||
614 | #define TXAMAREG0_MASK_S 20 | ||
615 | #define TXAMAREG0_SDM_BIT 0x00040000 | ||
616 | #define TXAMAREG0_SPF_BIT 0x00020000 | ||
617 | #define TXAMAREG0_SPE_BIT 0x00010000 | ||
618 | #define TXAMAREG0_STATUS_BITS (TXAMAREG0_SDM_BIT | \ | ||
619 | TXAMAREG0_SPF_BIT | \ | ||
620 | TXAMAREG0_SPE_BIT) | ||
621 | #define TXAMAREG0_STATUS_S 16 | ||
622 | #define TXAMAREG0_PRIORITY_BITS 0x0000FF00 | ||
623 | #define TXAMAREG0_PRIORITY_S 8 | ||
624 | #define TXAMAREG0_BVALUE_BITS 0x000000FF | ||
625 | #define TXAMAREG0_BVALUE_S 0 | ||
626 | |||
627 | #define TXAMAREG1_REGNUM 25 | ||
628 | #define TXAMAREG1_DELAYC_BITS 0x07FFFFFF | ||
629 | #define TXAMAREG1_DELAYC_S 0 | ||
630 | |||
631 | #define TXAMAREG2_REGNUM 26 | ||
632 | #ifdef METAC_1_2 | ||
633 | #define TXAMAREG2_DLINEC_BITS 0x00FFFFFF | ||
634 | #define TXAMAREG2_DLINEC_S 0 | ||
635 | #else /* METAC_1_2 */ | ||
636 | #define TXAMAREG2_IRQPRIORITY_BIT 0xFF000000 | ||
637 | #define TXAMAREG2_IRQPRIORITY_S 24 | ||
638 | #define TXAMAREG2_DLINEC_BITS 0x00FFFFF0 | ||
639 | #define TXAMAREG2_DLINEC_S 4 | ||
640 | #endif /* !METAC_1_2 */ | ||
641 | |||
642 | #define TXAMAREG3_REGNUM 27 | ||
643 | #define TXAMAREG2_AMABLOCK_BIT 0x00080000 | ||
644 | #define TXAMAREG2_AMAC_BITS 0x0000FFFF | ||
645 | #define TXAMAREG2_AMAC_S 0 | ||
646 | |||
647 | /***************************************************************************** | ||
648 | * FPU EXTENSIONS | ||
649 | ****************************************************************************/ | ||
650 | /* | ||
651 | * The following registers only exist in FPU enabled cores. | ||
652 | */ | ||
653 | |||
654 | /* | ||
655 | * TXMODE register - FPU rounding mode control/status fields | ||
656 | */ | ||
657 | #define TXMODE_FPURMODE_BITS 0x00030000 | ||
658 | #define TXMODE_FPURMODE_S 16 | ||
659 | #define TXMODE_FPURMODEWRITE_BIT 0x00040000 /* Set to change FPURMODE */ | ||
660 | |||
661 | /* | ||
662 | * TXDEFR register - FPU exception handling/state is a significant source | ||
663 | * of deferrable errors. Run-time S/W can move handling to interrupt level | ||
664 | * using the DEFR instruction to collect state. | ||
665 | */ | ||
666 | #define TXDEFR_FPE_FE_BITS 0x003F0000 /* Set by FPU_FE events */ | ||
667 | #define TXDEFR_FPE_FE_S 16 | ||
668 | |||
669 | #define TXDEFR_FPE_INEXACT_FE_BIT 0x010000 | ||
670 | #define TXDEFR_FPE_UNDERFLOW_FE_BIT 0x020000 | ||
671 | #define TXDEFR_FPE_OVERFLOW_FE_BIT 0x040000 | ||
672 | #define TXDEFR_FPE_DIVBYZERO_FE_BIT 0x080000 | ||
673 | #define TXDEFR_FPE_INVALID_FE_BIT 0x100000 | ||
674 | #define TXDEFR_FPE_DENORMAL_FE_BIT 0x200000 | ||
675 | |||
676 | #define TXDEFR_FPE_ICTRL_BITS 0x0000003F /* Route to interrupts */ | ||
677 | #define TXDEFR_FPE_ICTRL_S 0 | ||
678 | |||
679 | #define TXDEFR_FPE_INEXACT_ICTRL_BIT 0x01 | ||
680 | #define TXDEFR_FPE_UNDERFLOW_ICTRL_BIT 0x02 | ||
681 | #define TXDEFR_FPE_OVERFLOW_ICTRL_BIT 0x04 | ||
682 | #define TXDEFR_FPE_DIVBYZERO_ICTRL_BIT 0x08 | ||
683 | #define TXDEFR_FPE_INVALID_ICTRL_BIT 0x10 | ||
684 | #define TXDEFR_FPE_DENORMAL_ICTRL_BIT 0x20 | ||
685 | |||
686 | /* | ||
687 | * DETAILED FPU RELATED VALUES | ||
688 | * --------------------------- | ||
689 | */ | ||
690 | |||
691 | /* | ||
692 | * Rounding mode field in TXMODE can hold a number of logical values | ||
693 | */ | ||
694 | #define METAG_FPURMODE_TONEAREST 0x0 /* Default */ | ||
695 | #define METAG_FPURMODE_TOWARDZERO 0x1 | ||
696 | #define METAG_FPURMODE_UPWARD 0x2 | ||
697 | #define METAG_FPURMODE_DOWNWARD 0x3 | ||
698 | |||
699 | /* | ||
700 | * In order to set the TXMODE register field that controls the rounding mode | ||
701 | * an extra bit must be set in the value written versus that read in order | ||
702 | * to gate writes to the rounding mode field. This allows other non-FPU code | ||
703 | * to modify TXMODE without knowledge of the FPU unit's presence and without | ||
704 | * influencing the FPU rounding mode. This macro adds the required bit so new | ||
705 | * rounding modes are accepted. | ||
706 | */ | ||
707 | #define TXMODE_FPURMODE_SET(FPURMode) \ | ||
708 | (TXMODE_FPURMODEWRITE_BIT + ((FPURMode)<<TXMODE_FPURMODE_S)) | ||
709 | |||
710 | /* | ||
711 | * To successfully restore TXMODE to zero at the end of the function the | ||
712 | * following value (rather than zero) must be used. | ||
713 | */ | ||
714 | #define TXMODE_FPURMODE_RESET (TXMODE_FPURMODEWRITE_BIT) | ||
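A sketch of the intended call sequence, with write_txmode() standing in for whatever register accessor the surrounding code provides:

	write_txmode(TXMODE_FPURMODE_SET(METAG_FPURMODE_TOWARDZERO));
	/* ... FPU work in round-towards-zero mode ... */
	write_txmode(TXMODE_FPURMODE_RESET);	/* default rounding restored */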
715 | |||
716 | /* | ||
717 | * In TXSTATUS a special bit exists to indicate if FPU H/W has been accessed | ||
718 | * since it was last reset. | ||
719 | */ | ||
720 | #define TXSTATUS_FPACTIVE_BIT 0x01000000 | ||
721 | |||
722 | /* | ||
723 | * Exception state (see TXDEFR_FPU_FE_*) and enabling (for interrupt | ||
724 | * level processing (see TXDEFR_FPU_ICTRL_*) are controlled by similar | ||
725 | * bit mask locations within each field. | ||
726 | */ | ||
727 | #define METAG_FPU_FE_INEXACT 0x01 | ||
728 | #define METAG_FPU_FE_UNDERFLOW 0x02 | ||
729 | #define METAG_FPU_FE_OVERFLOW 0x04 | ||
730 | #define METAG_FPU_FE_DIVBYZERO 0x08 | ||
731 | #define METAG_FPU_FE_INVALID 0x10 | ||
732 | #define METAG_FPU_FE_DENORMAL 0x20 | ||
733 | #define METAG_FPU_FE_ALL_EXCEPT (METAG_FPU_FE_INEXACT | \ | ||
734 | METAG_FPU_FE_UNDERFLOW | \ | ||
735 | METAG_FPU_FE_OVERFLOW | \ | ||
736 | METAG_FPU_FE_DIVBYZERO | \ | ||
737 | METAG_FPU_FE_INVALID | \ | ||
738 | METAG_FPU_FE_DENORMAL) | ||
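Because the two fields line up bit-for-bit, a single METAG_FPU_FE_* mask can drive both; a sketch, assuming hypothetical read_txdefr()/write_txdefr() accessors:

	static void fpu_route_to_interrupts(unsigned int fe_mask)
	{
		unsigned int txdefr = read_txdefr();

		txdefr &= ~(fe_mask << TXDEFR_FPE_FE_S);	/* clear pending state */
		txdefr |= fe_mask << TXDEFR_FPE_ICTRL_S;	/* route to interrupts */
		write_txdefr(txdefr);
	}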
739 | |||
740 | /***************************************************************************** | ||
741 | * THREAD CONTROL, ERROR, OR INTERRUPT STATE EXTENSIONS | ||
742 | ****************************************************************************/ | ||
743 | /* | ||
744 | * The following values are only relevant to code that externally controls | ||
745 | * threads, handles errors/interrupts, and/or sets up interrupt/error handlers | ||
746 | * for subsequent use. | ||
747 | */ | ||
748 | |||
749 | /* | ||
750 | * TXENABLE register fields - only ENABLE_BIT is potentially read/write | ||
751 | */ | ||
752 | #define TXENABLE_MAJOR_REV_BITS 0xFF000000 | ||
753 | #define TXENABLE_MAJOR_REV_S 24 | ||
754 | #define TXENABLE_MINOR_REV_BITS 0x00FF0000 | ||
755 | #define TXENABLE_MINOR_REV_S 16 | ||
756 | #define TXENABLE_CLASS_BITS 0x0000F000 | ||
757 | #define TXENABLE_CLASS_S 12 | ||
758 | #define TXENABLE_CLASS_DSP 0x0 /* -> DSP Thread */ | ||
759 | #define TXENABLE_CLASS_LDSP 0x8 /* -> DSP LITE Thread */ | ||
760 | #define TXENABLE_CLASS_GP 0xC /* -> General Purpose Thread */ | ||
761 | #define TXENABLE_CLASSALT_LFPU 0x2 /* Set to indicate LITE FPU */ | ||
762 | #define TXENABLE_CLASSALT_FPUR8 0x1 /* Set to indicate 8xFPU regs */ | ||
763 | #define TXENABLE_MTXARCH_BIT 0x00000800 | ||
764 | #define TXENABLE_STEP_REV_BITS 0x000000F0 | ||
765 | #define TXENABLE_STEP_REV_S 4 | ||
766 | #define TXENABLE_STOPPED_BIT 0x00000004 /* TXOFF due to ENABLE->0 */ | ||
767 | #define TXENABLE_OFF_BIT 0x00000002 /* Thread is in off state */ | ||
768 | #define TXENABLE_ENABLE_BIT 0x00000001 /* Set if running */ | ||
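A sketch decoding the read-only identification fields from a raw TXENABLE value:

	unsigned int major  = (txenable & TXENABLE_MAJOR_REV_BITS)
						>> TXENABLE_MAJOR_REV_S;
	unsigned int minor  = (txenable & TXENABLE_MINOR_REV_BITS)
						>> TXENABLE_MINOR_REV_S;
	unsigned int tclass = (txenable & TXENABLE_CLASS_BITS)
						>> TXENABLE_CLASS_S;
	int gp_thread = (tclass == TXENABLE_CLASS_GP);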
769 | |||
770 | /* | ||
771 | * TXSTATUS register - used by external/internal interrupt/error handler | ||
772 | */ | ||
773 | #define TXSTATUS_CB1MARKER_BIT 0x00800000 /* -> int level mem state */ | ||
774 | #define TXSTATUS_CBMARKER_BIT 0x00400000 /* -> mem i/f state dumped */ | ||
775 | #define TXSTATUS_MEM_FAULT_BITS 0x00300000 | ||
776 | #define TXSTATUS_MEM_FAULT_S 20 | ||
777 | #define TXSTATUS_MEMFAULT_NONE 0x0 /* -> No memory fault */ | ||
778 | #define TXSTATUS_MEMFAULT_GEN 0x1 /* -> General fault */ | ||
779 | #define TXSTATUS_MEMFAULT_PF 0x2 /* -> Page fault */ | ||
780 | #define TXSTATUS_MEMFAULT_RO 0x3 /* -> Read only fault */ | ||
781 | #define TXSTATUS_MAJOR_HALT_BITS 0x000C0000 | ||
782 | #define TXSTATUS_MAJOR_HALT_S 18 | ||
783 | #define TXSTATUS_MAJHALT_TRAP 0x0 /* -> SWITCH inst used */ | ||
784 | #define TXSTATUS_MAJHALT_INST 0x1 /* -> Unknown inst or fetch */ | ||
785 | #define TXSTATUS_MAJHALT_PRIV 0x2 /* -> Internal privilege */ | ||
786 | #define TXSTATUS_MAJHALT_MEM 0x3 /* -> Memory i/f fault */ | ||
787 | #define TXSTATUS_L_STEP_BITS 0x00000800 /* -> Progress of L oper */ | ||
788 | #define TXSTATUS_LSM_STEP_BITS 0x00000700 /* -> Progress of L/S mult */ | ||
789 | #define TXSTATUS_LSM_STEP_S 8 | ||
790 | #define TXSTATUS_FLAG_BITS 0x0000001F /* -> All the flags */ | ||
791 | #define TXSTATUS_SCC_BIT 0x00000010 /* -> Split-16 flags ... */ | ||
792 | #define TXSTATUS_SCF_LZ_BIT 0x00000008 /* -> Split-16 Low Z flag */ | ||
793 | #define TXSTATUS_SCF_HZ_BIT 0x00000004 /* -> Split-16 High Z flag */ | ||
794 | #define TXSTATUS_SCF_HC_BIT 0x00000002 /* -> Split-16 High C flag */ | ||
795 | #define TXSTATUS_SCF_LC_BIT 0x00000001 /* -> Split-16 Low C flag */ | ||
796 | #define TXSTATUS_CF_Z_BIT 0x00000008 /* -> Condition Z flag */ | ||
797 | #define TXSTATUS_CF_N_BIT 0x00000004 /* -> Condition N flag */ | ||
798 | #define TXSTATUS_CF_O_BIT 0x00000002 /* -> Condition O flag */ | ||
799 | #define TXSTATUS_CF_C_BIT 0x00000001 /* -> Condition C flag */ | ||
800 | |||
801 | /* | ||
802 | * TXCATCH0-3 register contents may store information on a memory operation | ||
803 | * that has failed if the bit TXSTATUS_CBMARKER_BIT is set. | ||
804 | */ | ||
805 | #define TXCATCH0_REGNUM 16 | ||
806 | #define TXCATCH1_REGNUM 17 | ||
807 | #define TXCATCH1_ADDR_BITS 0xFFFFFFFF /* TXCATCH1 is Addr 0-31 */ | ||
808 | #define TXCATCH1_ADDR_S 0 | ||
809 | #define TXCATCH2_REGNUM 18 | ||
810 | #define TXCATCH2_DATA0_BITS 0xFFFFFFFF /* TXCATCH2 is Data 0-31 */ | ||
811 | #define TXCATCH2_DATA0_S 0 | ||
812 | #define TXCATCH3_REGNUM 19 | ||
813 | #define TXCATCH3_DATA1_BITS 0xFFFFFFFF /* TXCATCH3 is Data 32-63 */ | ||
814 | #define TXCATCH3_DATA1_S 0 | ||
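A sketch of pulling the essentials out of a dumped memory operation, assuming the catch registers have already been read into txcatch0 and txcatch1:

	unsigned int fault = (txcatch0 & TXCATCH0_FAULT_BITS)
						>> TXCATCH0_FAULT_S; /* TXSTATUS_MEMFAULT_* */
	int was_read = !!(txcatch0 & TXCATCH0_READ_BIT);
	unsigned long addr = txcatch1;		/* address bits 0-31 */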
815 | |||
816 | /* | ||
817 | * Detailed catch state information | ||
818 | * -------------------------------- | ||
819 | */ | ||
820 | |||
821 | /* Contents of TXCATCH0 register */ | ||
822 | #define TXCATCH0_LDRXX_BITS 0xF8000000 /* Load destination reg 0-31 */ | ||
823 | #define TXCATCH0_LDRXX_S 27 | ||
824 | #define TXCATCH0_LDDST_BITS 0x07FF0000 /* Load destination bits */ | ||
825 | #define TXCATCH0_LDDST_S 16 | ||
826 | #define TXCATCH0_LDDST_D1DSP 0x400 /* One bit set if it's a LOAD */ | ||
827 | #define TXCATCH0_LDDST_D0DSP 0x200 | ||
828 | #define TXCATCH0_LDDST_TMPLT 0x100 | ||
829 | #define TXCATCH0_LDDST_TR 0x080 | ||
830 | #ifdef METAC_2_1 | ||
831 | #define TXCATCH0_LDDST_FPU 0x040 | ||
832 | #endif | ||
833 | #define TXCATCH0_LDDST_PC 0x020 | ||
834 | #define TXCATCH0_LDDST_A1 0x010 | ||
835 | #define TXCATCH0_LDDST_A0 0x008 | ||
836 | #define TXCATCH0_LDDST_D1 0x004 | ||
837 | #define TXCATCH0_LDDST_D0 0x002 | ||
838 | #define TXCATCH0_LDDST_CT 0x001 | ||
839 | #ifdef METAC_2_1 | ||
840 | #define TXCATCH0_WATCHSTOP_BIT 0x00004000 /* Set if Data Watch set fault */ | ||
841 | #endif | ||
842 | #define TXCATCH0_WATCHS_BIT 0x00004000 /* Set if Data Watch set fault */ | ||
843 | #define TXCATCH0_WATCH1_BIT 0x00002000 /* Set if Data Watch 1 matches */ | ||
844 | #define TXCATCH0_WATCH0_BIT 0x00001000 /* Set if Data Watch 0 matches */ | ||
845 | #define TXCATCH0_FAULT_BITS 0x00000C00 /* See TXSTATUS_MEMFAULT_* */ | ||
846 | #define TXCATCH0_FAULT_S 10 | ||
847 | #define TXCATCH0_PRIV_BIT 0x00000200 /* Privilege of transaction */ | ||
848 | #define TXCATCH0_READ_BIT 0x00000100 /* Set for Read or Load cases */ | ||
849 | |||
850 | #ifdef METAC_2_1 | ||
851 | /* LNKGET Marker bit in TXCATCH0 */ | ||
852 | #define TXCATCH0_LNKGET_MARKER_BIT 0x00000008 | ||
853 | #define TXCATCH0_PREPROC_BIT 0x00000004 | ||
854 | #endif | ||
855 | |||
856 | /* Loads are indicated by one of the LDDST bits being set */ | ||
857 | #define TXCATCH0_LDM16_BIT 0x00000004 /* Load M16 flag */ | ||
858 | #define TXCATCH0_LDL2L1_BITS 0x00000003 /* Load data size L2,L1 */ | ||
859 | #define TXCATCH0_LDL2L1_S 0 | ||
860 | |||
861 | /* Reads are indicated by the READ bit being set without LDDST bits */ | ||
862 | #define TXCATCH0_RAXX_BITS 0x0000001F /* RAXX issue port for read */ | ||
863 | #define TXCATCH0_RAXX_S 0 | ||
864 | |||
865 | /* Write operations are all that remain if READ bit is not set */ | ||
866 | #define TXCATCH0_WMASK_BITS 0x000000FF /* Write byte lane mask */ | ||
867 | #define TXCATCH0_WMASK_S 0 | ||
868 | |||
869 | #ifdef METAC_2_1 | ||
870 | |||
871 | /* When a FPU exception is signalled then FPUSPEC == FPUSPEC_TAG */ | ||
872 | #define TXCATCH0_FPURDREG_BITS 0xF8000000 | ||
873 | #define TXCATCH0_FPURDREG_S 27 | ||
874 | #define TXCATCH0_FPUR1REG_BITS 0x07C00000 | ||
875 | #define TXCATCH0_FPUR1REG_S 22 | ||
876 | #define TXCATCH0_FPUSPEC_BITS 0x000F0000 | ||
877 | #define TXCATCH0_FPUSPEC_S 16 | ||
878 | #define TXCATCH0_FPUSPEC_TAG 0xF | ||
879 | #define TXCATCH0_FPUINSTA_BIT 0x00001000 | ||
880 | #define TXCATCH0_FPUINSTQ_BIT 0x00000800 | ||
881 | #define TXCATCH0_FPUINSTZ_BIT 0x00000400 | ||
882 | #define TXCATCH0_FPUINSTN_BIT 0x00000200 | ||
883 | #define TXCATCH0_FPUINSTO3O_BIT 0x00000100 | ||
884 | #define TXCATCH0_FPUWIDTH_BITS 0x000000C0 | ||
885 | #define TXCATCH0_FPUWIDTH_S 6 | ||
886 | #define TXCATCH0_FPUWIDTH_FLOAT 0 | ||
887 | #define TXCATCH0_FPUWIDTH_DOUBLE 1 | ||
888 | #define TXCATCH0_FPUWIDTH_PAIRED 2 | ||
889 | #define TXCATCH0_FPUOPENC_BITS 0x0000003F | ||
890 | #define TXCATCH0_FPUOPENC_S 0 | ||
891 | #define TXCATCH0_FPUOPENC_ADD 0 /* rop1=Rs1, rop3=Rs2 */ | ||
892 | #define TXCATCH0_FPUOPENC_SUB 1 /* rop1=Rs1, rop3=Rs2 */ | ||
893 | #define TXCATCH0_FPUOPENC_MUL 2 /* rop1=Rs1, rop2=Rs2 */ | ||
894 | #define TXCATCH0_FPUOPENC_ATOI 3 /* rop3=Rs */ | ||
895 | #define TXCATCH0_FPUOPENC_ATOX 4 /* rop3=Rs, uses #Imm */ | ||
896 | #define TXCATCH0_FPUOPENC_ITOA 5 /* rop3=Rs */ | ||
897 | #define TXCATCH0_FPUOPENC_XTOA 6 /* rop3=Rs, uses #Imm */ | ||
898 | #define TXCATCH0_FPUOPENC_ATOH 7 /* rop2=Rs */ | ||
899 | #define TXCATCH0_FPUOPENC_HTOA 8 /* rop2=Rs */ | ||
900 | #define TXCATCH0_FPUOPENC_DTOF 9 /* rop3=Rs */ | ||
901 | #define TXCATCH0_FPUOPENC_FTOD 10 /* rop3=Rs */ | ||
902 | #define TXCATCH0_FPUOPENC_DTOL 11 /* rop3=Rs */ | ||
903 | #define TXCATCH0_FPUOPENC_LTOD 12 /* rop3=Rs */ | ||
904 | #define TXCATCH0_FPUOPENC_DTOXL 13 /* rop3=Rs, uses #imm */ | ||
905 | #define TXCATCH0_FPUOPENC_XLTOD 14 /* rop3=Rs, uses #imm */ | ||
906 | #define TXCATCH0_FPUOPENC_CMP 15 /* rop1=Rs1, rop2=Rs2 */ | ||
907 | #define TXCATCH0_FPUOPENC_MIN 16 /* rop1=Rs1, rop2=Rs2 */ | ||
908 | #define TXCATCH0_FPUOPENC_MAX 17 /* rop1=Rs1, rop2=Rs2 */ | ||
909 | #define TXCATCH0_FPUOPENC_ADDRE 18 /* rop1=Rs1, rop3=Rs2 */ | ||
910 | #define TXCATCH0_FPUOPENC_SUBRE 19 /* rop1=Rs1, rop3=Rs2 */ | ||
911 | #define TXCATCH0_FPUOPENC_MULRE 20 /* rop1=Rs1, rop2=Rs2 */ | ||
912 | #define TXCATCH0_FPUOPENC_MXA 21 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/ | ||
913 | #define TXCATCH0_FPUOPENC_MXAS 22 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/ | ||
914 | #define TXCATCH0_FPUOPENC_MAR 23 /* rop1=Rs1, rop2=Rs2 */ | ||
915 | #define TXCATCH0_FPUOPENC_MARS 24 /* rop1=Rs1, rop2=Rs2 */ | ||
916 | #define TXCATCH0_FPUOPENC_MUZ 25 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/ | ||
917 | #define TXCATCH0_FPUOPENC_MUZS 26 /* rop1=Rs1, rop2=Rs2, rop3=Rs3*/ | ||
918 | #define TXCATCH0_FPUOPENC_RCP 27 /* rop2=Rs */ | ||
919 | #define TXCATCH0_FPUOPENC_RSQ 28 /* rop2=Rs */ | ||
920 | |||
921 | /* For floating point exceptions TXCATCH1 is used to carry extra data */ | ||
922 | #define TXCATCH1_FPUR2REG_BITS 0xF8000000 | ||
923 | #define TXCATCH1_FPUR2REG_S 27 | ||
924 | #define TXCATCH1_FPUR3REG_BITS 0x07C00000 /* Undefined if O3O set */ | ||
925 | #define TXCATCH1_FPUR3REG_S 22 | ||
926 | #define TXCATCH1_FPUIMM16_BITS 0x0000FFFF | ||
927 | #define TXCATCH1_FPUIMM16_S 0 | ||
928 | |||
929 | #endif /* METAC_2_1 */ | ||
930 | |||
931 | /* | ||
932 | * TXDIVTIME register used to hold the partial base address of memory i/f | ||
933 | * state dump area. Now deprecated. | ||
934 | */ | ||
935 | #define TXDIVTIME_CBBASE_MASK 0x03FFFE00 | ||
936 | #define TXDIVTIME_CBBASE_LINBASE 0x80000000 | ||
937 | #define TXDIVTIME_CBBASE_LINBOFF 0x00000000 /* BGnd state */ | ||
938 | #define TXDIVTIME_CBBASE_LINIOFF 0x00000100 /* Int state */ | ||
939 | |||
940 | /* | ||
941 | * TXDIVTIME register used to indicate if the read pipeline was dirty when a | ||
942 | * thread was interrupted, halted, or generated an exception. It is invalid | ||
943 | * to attempt to issue a further pipeline read address while the read | ||
944 | * pipeline is in the dirty state. | ||
945 | */ | ||
946 | #define TXDIVTIME_RPDIRTY_BIT 0x80000000 | ||
947 | |||
948 | /* | ||
949 | * Further bits in the TXDIVTIME register allow interrupt handling code to | ||
950 | * short-cut the discovery of the most significant bit last read from TXSTATI. | ||
951 | * | ||
952 | * This is the bit number of the trigger line that a low level interrupt | ||
953 | * handler should acknowledge; it can also serve as the index of a | ||
954 | * corresponding handler function. | ||
955 | */ | ||
956 | #define TXDIVTIME_IRQENC_BITS 0x0F000000 | ||
957 | #define TXDIVTIME_IRQENC_S 24 | ||
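A sketch of the short-cut, assuming txdivtime holds a fresh read of the register:

	unsigned int irq = (txdivtime & TXDIVTIME_IRQENC_BITS)
						>> TXDIVTIME_IRQENC_S;
	/* acknowledge trigger 'irq', then use it to index a handler table */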
958 | |||
959 | /* | ||
960 | * If TXDIVTIME_RPVALID_BIT is set the read pipeline contained significant | ||
961 | * information when the thread was interrupted|halted|exceptioned. Each slot | ||
962 | * containing data is indicated by a one bit in the corresponding | ||
963 | * TXDIVTIME_RPMASK_BITS bit (the least significant bit relates to the first | ||
964 | * location in the read pipeline - most likely to have the 1 state). Empty | ||
965 | * slots contain zeroes, with no interlock applied on reads while RPDIRTY is | ||
966 | * set; RPMASK itself is read-only state. | ||
967 | */ | ||
968 | #define TXDIVTIME_RPMASK_BITS 0x003F0000 /* -> Full (1) Empty (0) */ | ||
969 | #define TXDIVTIME_RPMASK_S 16 | ||
970 | |||
971 | /* | ||
972 | * TXPRIVEXT register can be used to single step thread execution and | ||
973 | * enforce synchronous memory i/f address checking for debugging purposes. | ||
974 | */ | ||
975 | #define TXPRIVEXT_TXSTEP_BIT 0x00000004 | ||
976 | #define TXPRIVEXT_MEMCHECK_BIT 0x00000002 | ||
977 | |||
978 | /* | ||
979 | * TXINTERNx registers hold internal state information for H/W debugging only | ||
980 | */ | ||
981 | #define TXINTERN0_REGNUM 23 | ||
982 | #define TXINTERN0_LOCK2_BITS 0xF0000000 | ||
983 | #define TXINTERN0_LOCK2_S 28 | ||
984 | #define TXINTERN0_LOCK1_BITS 0x0F000000 | ||
985 | #define TXINTERN0_LOCK1_S 24 | ||
986 | #define TXINTERN0_TIFDF_BITS 0x0000F000 | ||
987 | #define TXINTERN0_TIFDF_S 12 | ||
988 | #define TXINTERN0_TIFIB_BITS 0x00000F00 | ||
989 | #define TXINTERN0_TIFIB_S 8 | ||
990 | #define TXINTERN0_TIFAF_BITS 0x000000F0 | ||
991 | #define TXINTERN0_TIFAF_S 4 | ||
992 | #define TXINTERN0_MSTATE_BITS 0x0000000F | ||
993 | #define TXINTERN0_MSTATE_S 0 | ||
994 | |||
995 | /* | ||
996 | * TXSTAT, TXMASK, TXPOLL, TXSTATI, TXMASKI, TXPOLLI registers from trigger | ||
997 | * bank all have similar contents (upper kick count bits not in MASK regs) | ||
998 | */ | ||
999 | #define TXSTAT_REGNUM 0 | ||
1000 | #define TXSTAT_TIMER_BIT 0x00000001 | ||
1001 | #define TXSTAT_TIMER_S 0 | ||
1002 | #define TXSTAT_KICK_BIT 0x00000002 | ||
1003 | #define TXSTAT_KICK_S 1 | ||
1004 | #define TXSTAT_DEFER_BIT 0x00000008 | ||
1005 | #define TXSTAT_DEFER_S 3 | ||
1006 | #define TXSTAT_EXTTRIG_BITS 0x0000FFF0 | ||
1007 | #define TXSTAT_EXTTRIG_S 4 | ||
1008 | #define TXSTAT_FPE_BITS 0x003F0000 | ||
1009 | #define TXSTAT_FPE_S 16 | ||
1010 | #define TXSTAT_FPE_DENORMAL_BIT 0x00200000 | ||
1011 | #define TXSTAT_FPE_DENORMAL_S 21 | ||
1012 | #define TXSTAT_FPE_INVALID_BIT 0x00100000 | ||
1013 | #define TXSTAT_FPE_INVALID_S 20 | ||
1014 | #define TXSTAT_FPE_DIVBYZERO_BIT 0x00080000 | ||
1015 | #define TXSTAT_FPE_DIVBYZERO_S 19 | ||
1016 | #define TXSTAT_FPE_OVERFLOW_BIT 0x00040000 | ||
1017 | #define TXSTAT_FPE_OVERFLOW_S 18 | ||
1018 | #define TXSTAT_FPE_UNDERFLOW_BIT 0x00020000 | ||
1019 | #define TXSTAT_FPE_UNDERFLOW_S 17 | ||
1020 | #define TXSTAT_FPE_INEXACT_BIT 0x00010000 | ||
1021 | #define TXSTAT_FPE_INEXACT_S 16 | ||
1022 | #define TXSTAT_BUSERR_BIT 0x00800000 /* Set if bus error/ack state */ | ||
1023 | #define TXSTAT_BUSERR_S 23 | ||
1024 | #define TXSTAT_BUSSTATE_BITS 0xFF000000 /* Read only */ | ||
1025 | #define TXSTAT_BUSSTATE_S 24 | ||
1026 | #define TXSTAT_KICKCNT_BITS 0xFFFF0000 | ||
1027 | #define TXSTAT_KICKCNT_S 16 | ||
1028 | #define TXMASK_REGNUM 1 | ||
1029 | #define TXSTATI_REGNUM 2 | ||
1030 | #define TXSTATI_BGNDHALT_BIT 0x00000004 | ||
1031 | #define TXMASKI_REGNUM 3 | ||
1032 | #define TXPOLL_REGNUM 4 | ||
1033 | #define TXPOLLI_REGNUM 6 | ||
1034 | |||
1035 | /* | ||
1036 | * TXDRCTRL register can be used to partition the DSP RAM space available to | ||
1037 | * this thread at startup. This is achieved by offsetting the region allocated | ||
1038 | * to each thread. | ||
1039 | */ | ||
1040 | #define TXDRCTRL_D1PARTOR_BITS 0x00000F00 /* OR's into top 4 bits */ | ||
1041 | #define TXDRCTRL_D1PARTOR_S 8 | ||
1042 | #define TXDRCTRL_D0PARTOR_BITS 0x0000000F /* OR's into top 4 bits */ | ||
1043 | #define TXDRCTRL_D0PARTOR_S 0 | ||
1044 | /* Given extracted Pow and Or fields this is the thread's base within DSP RAM */ | ||
1045 | #define TXDRCTRL_DXBASE(Pow, Or) ((Or)<<((Pow)-4)) | ||
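Combined with the size calculation earlier, a thread's D0 window could be sketched as (txdrctrl again assumed read elsewhere):

	unsigned int pow    = (txdrctrl & TXDRCTRL_RAMSZPOW_BITS)
						>> TXDRCTRL_RAMSZPOW_S;
	unsigned int orbits = (txdrctrl & TXDRCTRL_D0PARTOR_BITS)
						>> TXDRCTRL_D0PARTOR_S;
	unsigned int d0base = TXDRCTRL_DXBASE(pow, orbits);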
1046 | |||
1047 | /***************************************************************************** | ||
1048 | * RUN TIME TRACE CONTROL REGISTERS | ||
1049 | ****************************************************************************/ | ||
1050 | /* | ||
1051 | * The following values are only relevant to code that implements run-time | ||
1052 | * trace features within the META Core | ||
1053 | */ | ||
1054 | #define TTEXEC TT.0 | ||
1055 | #define TTCTRL TT.1 | ||
1056 | #define TTMARK TT.2 | ||
1057 | #define TTREC TT.3 | ||
1058 | #define GTEXEC TT.4 | ||
1059 | |||
1060 | #define TTEXEC_REGNUM 0 | ||
1061 | #define TTEXEC_EXTTRIGAND_BITS 0x7F000000 | ||
1062 | #define TTEXEC_EXTTRIGAND_S 24 | ||
1063 | #define TTEXEC_EXTTRIGEN_BIT 0x00008000 | ||
1064 | #define TTEXEC_EXTTRIGMATCH_BITS 0x00007F00 | ||
1065 | #define TTEXEC_EXTTRIGMATCH_S 8 | ||
1066 | #define TTEXEC_TCMODE_BITS 0x00000003 | ||
1067 | #define TTEXEC_TCMODE_S 0 | ||
1068 | |||
1069 | #define TTCTRL_REGNUM 1 | ||
1070 | #define TTCTRL_TRACETT_BITS 0x00008000 | ||
1071 | #define TTCTRL_TRACETT_S 15 | ||
1072 | #define TTCTRL_TRACEALL_BITS 0x00002000 | ||
1073 | #define TTCTRL_TRACEALL_S 13 | ||
1074 | #ifdef METAC_2_1 | ||
1075 | #define TTCTRL_TRACEALLTAG_BITS 0x00000400 | ||
1076 | #define TTCTRL_TRACEALLTAG_S 10 | ||
1077 | #endif /* METAC_2_1 */ | ||
1078 | #define TTCTRL_TRACETAG_BITS 0x00000200 | ||
1079 | #define TTCTRL_TRACETAG_S 9 | ||
1080 | #define TTCTRL_TRACETTPC_BITS 0x00000080 | ||
1081 | #define TTCTRL_TRACETTPC_S 7 | ||
1082 | #define TTCTRL_TRACEMPC_BITS 0x00000020 | ||
1083 | #define TTCTRL_TRACEMPC_S 5 | ||
1084 | #define TTCTRL_TRACEEN_BITS 0x00000008 | ||
1085 | #define TTCTRL_TRACEEN_S 3 | ||
1086 | #define TTCTRL_TRACEEN1_BITS 0x00000004 | ||
1087 | #define TTCTRL_TRACEEN1_S 2 | ||
1088 | #define TTCTRL_TRACEPC_BITS 0x00000002 | ||
1089 | #define TTCTRL_TRACEPC_S 1 | ||
1090 | |||
1091 | #ifdef METAC_2_1 | ||
1092 | #define TTMARK_REGNUM 2 | ||
1093 | #define TTMARK_BITS 0xFFFFFFFF | ||
1094 | #define TTMARK_S 0x0 | ||
1095 | |||
1096 | #define TTREC_REGNUM 3 | ||
1097 | #define TTREC_BITS 0xFFFFFFFFFFFFFFFF | ||
1098 | #define TTREC_S 0x0 | ||
1099 | #endif /* METAC_2_1 */ | ||
1100 | |||
1101 | #define GTEXEC_REGNUM 4 | ||
1102 | #define GTEXEC_DCRUN_BITS 0x80000000 | ||
1103 | #define GTEXEC_DCRUN_S 31 | ||
1104 | #define GTEXEC_ICMODE_BITS 0x0C000000 | ||
1105 | #define GTEXEC_ICMODE_S 26 | ||
1106 | #define GTEXEC_TCMODE_BITS 0x03000000 | ||
1107 | #define GTEXEC_TCMODE_S 24 | ||
1108 | #define GTEXEC_PERF1CMODE_BITS 0x00040000 | ||
1109 | #define GTEXEC_PERF1CMODE_S 18 | ||
1110 | #define GTEXEC_PERF0CMODE_BITS 0x00010000 | ||
1111 | #define GTEXEC_PERF0CMODE_S 16 | ||
1112 | #define GTEXEC_REFMSEL_BITS 0x0000F000 | ||
1113 | #define GTEXEC_REFMSEL_S 12 | ||
1114 | #define GTEXEC_METRICTH_BITS 0x000003FF | ||
1115 | #define GTEXEC_METRICTH_S 0 | ||
1116 | |||
1117 | #ifdef METAC_2_1 | ||
1118 | /* | ||
1119 | * Clock Control registers | ||
1120 | * ----------------------- | ||
1121 | */ | ||
1122 | #define TXCLKCTRL_REGNUM 22 | ||
1123 | |||
1124 | /* | ||
1125 | * The default setting has clocks always on (DEFON); turning all clocks off | ||
1126 | * (OFF) can only be done from external devices; enabling automatic clock | ||
1127 | * gating (AUTO) allows clocks to stop as units fall idle. | ||
1128 | */ | ||
1129 | #define TXCLKCTRL_ALL_OFF 0x02222222 | ||
1130 | #define TXCLKCTRL_ALL_DEFON 0x01111111 | ||
1131 | #define TXCLKCTRL_ALL_AUTO 0x02222222 | ||
1132 | |||
1133 | /* | ||
1134 | * Individual fields control caches, floating point and main data/addr units | ||
1135 | */ | ||
1136 | #define TXCLKCTRL_CLOCKIC_BITS 0x03000000 | ||
1137 | #define TXCLKCTRL_CLOCKIC_S 24 | ||
1138 | #define TXCLKCTRL_CLOCKDC_BITS 0x00300000 | ||
1139 | #define TXCLKCTRL_CLOCKDC_S 20 | ||
1140 | #define TXCLKCTRL_CLOCKFP_BITS 0x00030000 | ||
1141 | #define TXCLKCTRL_CLOCKFP_S 16 | ||
1142 | #define TXCLKCTRL_CLOCKD1_BITS 0x00003000 | ||
1143 | #define TXCLKCTRL_CLOCKD1_S 12 | ||
1144 | #define TXCLKCTRL_CLOCKD0_BITS 0x00000300 | ||
1145 | #define TXCLKCTRL_CLOCKD0_S 8 | ||
1146 | #define TXCLKCTRL_CLOCKA1_BITS 0x00000030 | ||
1147 | #define TXCLKCTRL_CLOCKA1_S 4 | ||
1148 | #define TXCLKCTRL_CLOCKA0_BITS 0x00000003 | ||
1149 | #define TXCLKCTRL_CLOCKA0_S 0 | ||
1150 | |||
1151 | /* | ||
1152 | * Individual settings for each field are common | ||
1153 | */ | ||
1154 | #define TXCLKCTRL_CLOCKxx_OFF 0 | ||
1155 | #define TXCLKCTRL_CLOCKxx_DEFON 1 | ||
1156 | #define TXCLKCTRL_CLOCKxx_AUTO 2 | ||
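A sketch composing a TXCLKCTRL value that auto-gates both caches while leaving every other unit always on:

	unsigned int clk = TXCLKCTRL_ALL_DEFON;

	clk &= ~(TXCLKCTRL_CLOCKIC_BITS | TXCLKCTRL_CLOCKDC_BITS);
	clk |= (TXCLKCTRL_CLOCKxx_AUTO << TXCLKCTRL_CLOCKIC_S) |
	       (TXCLKCTRL_CLOCKxx_AUTO << TXCLKCTRL_CLOCKDC_S);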
1157 | |||
1158 | #endif /* METAC_2_1 */ | ||
1159 | |||
1160 | #ifdef METAC_2_1 | ||
1161 | /* | ||
1162 | * Fast interrupt new bits | ||
1163 | * ------------------------------------ | ||
1164 | */ | ||
1165 | #define TXSTATUS_IPTOGGLE_BIT 0x80000000 /* Prev PToggle of TXPRIVEXT */ | ||
1166 | #define TXSTATUS_ISTATE_BIT 0x40000000 /* IState bit */ | ||
1167 | #define TXSTATUS_IWAIT_BIT 0x20000000 /* wait indefinitely in decision step*/ | ||
1168 | #define TXSTATUS_IEXCEPT_BIT 0x10000000 /* Indicates an exception occurred */ | ||
1169 | #define TXSTATUS_IRPCOUNT_BITS 0x0E000000 /* Number of 'dirty' data entries */ | ||
1170 | #define TXSTATUS_IRPCOUNT_S 25 | ||
1171 | #define TXSTATUS_IRQSTAT_BITS 0x0000F000 /* IRQEnc bits, trigger or interrupts */ | ||
1172 | #define TXSTATUS_IRQSTAT_S 12 | ||
1173 | #define TXSTATUS_LNKSETOK_BIT 0x00000020 /* LNKSetOK bit, successful LNKSET */ | ||
1174 | |||
1175 | /* New fields in TXDE for fast interrupt system */ | ||
1176 | #define TXDIVTIME_IACTIVE_BIT 0x00008000 /* Enable new interrupt system */ | ||
1177 | #define TXDIVTIME_INONEST_BIT 0x00004000 /* Gate nested interrupt */ | ||
1178 | #define TXDIVTIME_IREGIDXGATE_BIT 0x00002000 /* gate for the IRegIdx field */ | ||
1179 | #define TXDIVTIME_IREGIDX_BITS 0x00001E00 /* Index of A0.0/1 replaces */ | ||
1180 | #define TXDIVTIME_IREGIDX_S 9 | ||
1181 | #define TXDIVTIME_NOST_BIT 0x00000100 /* disable superthreading bit */ | ||
1182 | #endif | ||
1183 | |||
1184 | #endif /* _ASM_METAG_REGS_H_ */ | ||
diff --git a/arch/metag/include/asm/mman.h b/arch/metag/include/asm/mman.h new file mode 100644 index 000000000000..17999dba9275 --- /dev/null +++ b/arch/metag/include/asm/mman.h | |||
@@ -0,0 +1,11 @@ | |||
1 | #ifndef __METAG_MMAN_H__ | ||
2 | #define __METAG_MMAN_H__ | ||
3 | |||
4 | #include <uapi/asm/mman.h> | ||
5 | |||
6 | #ifndef __ASSEMBLY__ | ||
7 | #define arch_mmap_check metag_mmap_check | ||
8 | int metag_mmap_check(unsigned long addr, unsigned long len, | ||
9 | unsigned long flags); | ||
10 | #endif | ||
11 | #endif /* __METAG_MMAN_H__ */ | ||
diff --git a/arch/metag/include/asm/mmu.h b/arch/metag/include/asm/mmu.h new file mode 100644 index 000000000000..9c321147c0b4 --- /dev/null +++ b/arch/metag/include/asm/mmu.h | |||
@@ -0,0 +1,77 @@ | |||
1 | #ifndef __MMU_H | ||
2 | #define __MMU_H | ||
3 | |||
4 | #ifdef CONFIG_METAG_USER_TCM | ||
5 | #include <linux/list.h> | ||
6 | #endif | ||
7 | |||
8 | #ifdef CONFIG_HUGETLB_PAGE | ||
9 | #include <asm/page.h> | ||
10 | #endif | ||
11 | |||
12 | typedef struct { | ||
13 | /* Software pgd base pointer used for Meta 1.x MMU. */ | ||
14 | unsigned long pgd_base; | ||
15 | #ifdef CONFIG_METAG_USER_TCM | ||
16 | struct list_head tcm; | ||
17 | #endif | ||
18 | #ifdef CONFIG_HUGETLB_PAGE | ||
19 | #if HPAGE_SHIFT < HUGEPT_SHIFT | ||
20 | /* last partially filled huge page table address */ | ||
21 | unsigned long part_huge; | ||
22 | #endif | ||
23 | #endif | ||
24 | } mm_context_t; | ||
25 | |||
26 | /* Given a virtual address, return the pte for the top level 4meg entry | ||
27 | * that maps that address. | ||
28 | * Returns 0 (an empty pte) if that range is not mapped. | ||
29 | */ | ||
30 | unsigned long mmu_read_first_level_page(unsigned long vaddr); | ||
31 | |||
32 | /* Given a linear (virtual) address, return the second level 4k pte | ||
33 | * that maps that address. Returns 0 if the address is not mapped. | ||
34 | */ | ||
35 | unsigned long mmu_read_second_level_page(unsigned long vaddr); | ||
36 | |||
37 | /* Get the virtual base address of the MMU */ | ||
38 | unsigned long mmu_get_base(void); | ||
39 | |||
40 | /* Initialize the MMU. */ | ||
41 | void mmu_init(unsigned long mem_end); | ||
42 | |||
43 | #ifdef CONFIG_METAG_META21_MMU | ||
44 | /* | ||
45 | * For cpu "cpu" calculate and return the address of the | ||
46 | * MMCU_TnLOCAL_TABLE_PHYS0 if running in local-space or | ||
47 | * MMCU_TnGLOBAL_TABLE_PHYS0 if running in global-space. | ||
48 | */ | ||
49 | static inline unsigned long mmu_phys0_addr(unsigned int cpu) | ||
50 | { | ||
51 | unsigned long phys0; | ||
52 | |||
53 | phys0 = (MMCU_T0LOCAL_TABLE_PHYS0 + | ||
54 | (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) + | ||
55 | (MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET)); | ||
56 | |||
57 | return phys0; | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * For cpu "cpu" calculate and return the address of the | ||
62 | * MMCU_TnLOCAL_TABLE_PHYS1 if running in local-space or | ||
63 | * MMCU_TnGLOBAL_TABLE_PHYS1 if running in global-space. | ||
64 | */ | ||
65 | static inline unsigned long mmu_phys1_addr(unsigned int cpu) | ||
66 | { | ||
67 | unsigned long phys1; | ||
68 | |||
69 | phys1 = (MMCU_T0LOCAL_TABLE_PHYS1 + | ||
70 | (MMCU_TnX_TABLE_PHYSX_STRIDE * cpu)) + | ||
71 | (MMCU_TXG_TABLE_PHYSX_OFFSET * is_global_space(PAGE_OFFSET)); | ||
72 | |||
73 | return phys1; | ||
74 | } | ||
75 | #endif /* CONFIG_METAG_META21_MMU */ | ||
76 | |||
77 | #endif | ||
diff --git a/arch/metag/include/asm/mmu_context.h b/arch/metag/include/asm/mmu_context.h new file mode 100644 index 000000000000..ae2a71b5e0be --- /dev/null +++ b/arch/metag/include/asm/mmu_context.h | |||
@@ -0,0 +1,113 @@ | |||
1 | #ifndef __METAG_MMU_CONTEXT_H | ||
2 | #define __METAG_MMU_CONTEXT_H | ||
3 | |||
4 | #include <asm-generic/mm_hooks.h> | ||
5 | |||
6 | #include <asm/page.h> | ||
7 | #include <asm/mmu.h> | ||
8 | #include <asm/tlbflush.h> | ||
9 | #include <asm/cacheflush.h> | ||
10 | |||
11 | #include <linux/io.h> | ||
12 | |||
13 | static inline void enter_lazy_tlb(struct mm_struct *mm, | ||
14 | struct task_struct *tsk) | ||
15 | { | ||
16 | } | ||
17 | |||
18 | static inline int init_new_context(struct task_struct *tsk, | ||
19 | struct mm_struct *mm) | ||
20 | { | ||
21 | #ifndef CONFIG_METAG_META21_MMU | ||
22 | /* We use context to store a pointer to the page holding the | ||
23 | * pgd of a process while it is running. While a process is not | ||
24 | * running the pgd and context fields should be equal. | ||
25 | */ | ||
26 | mm->context.pgd_base = (unsigned long) mm->pgd; | ||
27 | #endif | ||
28 | #ifdef CONFIG_METAG_USER_TCM | ||
29 | INIT_LIST_HEAD(&mm->context.tcm); | ||
30 | #endif | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | #ifdef CONFIG_METAG_USER_TCM | ||
35 | |||
36 | #include <linux/slab.h> | ||
37 | #include <asm/tcm.h> | ||
38 | |||
39 | static inline void destroy_context(struct mm_struct *mm) | ||
40 | { | ||
41 | struct tcm_allocation *pos, *n; | ||
42 | |||
43 | list_for_each_entry_safe(pos, n, &mm->context.tcm, list) { | ||
44 | tcm_free(pos->tag, pos->addr, pos->size); | ||
45 | list_del(&pos->list); | ||
46 | kfree(pos); | ||
47 | } | ||
48 | } | ||
49 | #else | ||
50 | #define destroy_context(mm) do { } while (0) | ||
51 | #endif | ||
52 | |||
53 | #ifdef CONFIG_METAG_META21_MMU | ||
54 | static inline void load_pgd(pgd_t *pgd, int thread) | ||
55 | { | ||
56 | unsigned long phys0 = mmu_phys0_addr(thread); | ||
57 | unsigned long phys1 = mmu_phys1_addr(thread); | ||
58 | |||
59 | /* | ||
60 | * 0x900 2Gb address space | ||
61 | * The permission bits apply to the MMU table region, which gives a 2MB | ||
62 | * window into physical memory. We especially don't want userland to be | ||
63 | * able to access this. | ||
64 | */ | ||
65 | metag_out32(0x900 | _PAGE_CACHEABLE | _PAGE_PRIV | _PAGE_WRITE | | ||
66 | _PAGE_PRESENT, phys0); | ||
67 | /* Set new MMU base address */ | ||
68 | metag_out32(__pa(pgd) & MMCU_TBLPHYS1_ADDR_BITS, phys1); | ||
69 | } | ||
70 | #endif | ||
71 | |||
72 | static inline void switch_mmu(struct mm_struct *prev, struct mm_struct *next) | ||
73 | { | ||
74 | #ifdef CONFIG_METAG_META21_MMU | ||
75 | load_pgd(next->pgd, hard_processor_id()); | ||
76 | #else | ||
77 | unsigned int i; | ||
78 | |||
79 | /* prev->context == prev->pgd in the case where we are initially | ||
80 | switching from the init task to the first process. */ | ||
81 | if (prev->context.pgd_base != (unsigned long) prev->pgd) { | ||
82 | for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++) | ||
83 | ((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i]; | ||
84 | } else | ||
85 | prev->pgd = (pgd_t *)mmu_get_base(); | ||
86 | |||
87 | next->pgd = prev->pgd; | ||
88 | prev->pgd = (pgd_t *) prev->context.pgd_base; | ||
89 | |||
90 | for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++) | ||
91 | next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i]; | ||
92 | |||
93 | flush_cache_all(); | ||
94 | #endif | ||
95 | flush_tlb_all(); | ||
96 | } | ||
97 | |||
98 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
99 | struct task_struct *tsk) | ||
100 | { | ||
101 | if (prev != next) | ||
102 | switch_mmu(prev, next); | ||
103 | } | ||
104 | |||
105 | static inline void activate_mm(struct mm_struct *prev_mm, | ||
106 | struct mm_struct *next_mm) | ||
107 | { | ||
108 | switch_mmu(prev_mm, next_mm); | ||
109 | } | ||
110 | |||
111 | #define deactivate_mm(tsk, mm) do { } while (0) | ||
112 | |||
113 | #endif | ||
diff --git a/arch/metag/include/asm/mmzone.h b/arch/metag/include/asm/mmzone.h new file mode 100644 index 000000000000..9c88a9c65f59 --- /dev/null +++ b/arch/metag/include/asm/mmzone.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef __ASM_METAG_MMZONE_H | ||
2 | #define __ASM_METAG_MMZONE_H | ||
3 | |||
4 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
5 | #include <linux/numa.h> | ||
6 | |||
7 | extern struct pglist_data *node_data[]; | ||
8 | #define NODE_DATA(nid) (node_data[nid]) | ||
9 | |||
10 | static inline int pfn_to_nid(unsigned long pfn) | ||
11 | { | ||
12 | int nid; | ||
13 | |||
14 | for (nid = 0; nid < MAX_NUMNODES; nid++) | ||
15 | if (pfn >= node_start_pfn(nid) && pfn <= node_end_pfn(nid)) | ||
16 | break; | ||
17 | |||
18 | return nid; | ||
19 | } | ||
20 | |||
21 | static inline struct pglist_data *pfn_to_pgdat(unsigned long pfn) | ||
22 | { | ||
23 | return NODE_DATA(pfn_to_nid(pfn)); | ||
24 | } | ||
25 | |||
26 | /* arch/metag/mm/numa.c */ | ||
27 | void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end); | ||
28 | #else | ||
29 | static inline void | ||
30 | setup_bootmem_node(int nid, unsigned long start, unsigned long end) | ||
31 | { | ||
32 | } | ||
33 | #endif /* CONFIG_NEED_MULTIPLE_NODES */ | ||
34 | |||
35 | #ifdef CONFIG_NUMA | ||
36 | /* SoC specific mem init */ | ||
37 | void __init soc_mem_setup(void); | ||
38 | #else | ||
39 | static inline void __init soc_mem_setup(void) {} | ||
40 | #endif | ||
41 | |||
42 | #endif /* __ASM_METAG_MMZONE_H */ | ||
diff --git a/arch/metag/include/asm/module.h b/arch/metag/include/asm/module.h new file mode 100644 index 000000000000..e47e60941b2b --- /dev/null +++ b/arch/metag/include/asm/module.h | |||
@@ -0,0 +1,37 @@ | |||
1 | #ifndef _ASM_METAG_MODULE_H | ||
2 | #define _ASM_METAG_MODULE_H | ||
3 | |||
4 | #include <asm-generic/module.h> | ||
5 | |||
6 | struct metag_plt_entry { | ||
7 | /* Indirect jump instruction sequence. */ | ||
8 | unsigned long tramp[2]; | ||
9 | }; | ||
10 | |||
11 | struct mod_arch_specific { | ||
12 | /* Indices of PLT sections within module. */ | ||
13 | unsigned int core_plt_section, init_plt_section; | ||
14 | }; | ||
15 | |||
16 | #if defined CONFIG_METAG_META12 | ||
17 | #define MODULE_PROC_FAMILY "META 1.2 " | ||
18 | #elif defined CONFIG_METAG_META21 | ||
19 | #define MODULE_PROC_FAMILY "META 2.1 " | ||
20 | #else | ||
21 | #define MODULE_PROC_FAMILY "" | ||
22 | #endif | ||
23 | |||
24 | #ifdef CONFIG_4KSTACKS | ||
25 | #define MODULE_STACKSIZE "4KSTACKS " | ||
26 | #else | ||
27 | #define MODULE_STACKSIZE "" | ||
28 | #endif | ||
29 | |||
30 | #define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE | ||
31 | |||
32 | #ifdef MODULE | ||
33 | asm(".section .plt,\"ax\",@progbits; .balign 8; .previous"); | ||
34 | asm(".section .init.plt,\"ax\",@progbits; .balign 8; .previous"); | ||
35 | #endif | ||
36 | |||
37 | #endif /* _ASM_METAG_MODULE_H */ | ||
diff --git a/arch/metag/include/asm/page.h b/arch/metag/include/asm/page.h new file mode 100644 index 000000000000..1e8e281b8bb7 --- /dev/null +++ b/arch/metag/include/asm/page.h | |||
@@ -0,0 +1,128 @@ | |||
1 | #ifndef _METAG_PAGE_H | ||
2 | #define _METAG_PAGE_H | ||
3 | |||
4 | #include <linux/const.h> | ||
5 | |||
6 | #include <asm/metag_mem.h> | ||
7 | |||
8 | /* PAGE_SHIFT determines the page size */ | ||
9 | #if defined(CONFIG_PAGE_SIZE_4K) | ||
10 | #define PAGE_SHIFT 12 | ||
11 | #elif defined(CONFIG_PAGE_SIZE_8K) | ||
12 | #define PAGE_SHIFT 13 | ||
13 | #elif defined(CONFIG_PAGE_SIZE_16K) | ||
14 | #define PAGE_SHIFT 14 | ||
15 | #endif | ||
16 | |||
17 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) | ||
18 | #define PAGE_MASK (~(PAGE_SIZE-1)) | ||
19 | |||
20 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_8K) | ||
21 | # define HPAGE_SHIFT 13 | ||
22 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K) | ||
23 | # define HPAGE_SHIFT 14 | ||
24 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K) | ||
25 | # define HPAGE_SHIFT 15 | ||
26 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | ||
27 | # define HPAGE_SHIFT 16 | ||
28 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K) | ||
29 | # define HPAGE_SHIFT 17 | ||
30 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) | ||
31 | # define HPAGE_SHIFT 18 | ||
32 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) | ||
33 | # define HPAGE_SHIFT 19 | ||
34 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M) | ||
35 | # define HPAGE_SHIFT 20 | ||
36 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M) | ||
37 | # define HPAGE_SHIFT 21 | ||
38 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M) | ||
39 | # define HPAGE_SHIFT 22 | ||
40 | #endif | ||
41 | |||
42 | #ifdef CONFIG_HUGETLB_PAGE | ||
43 | # define HPAGE_SIZE (1UL << HPAGE_SHIFT) | ||
44 | # define HPAGE_MASK (~(HPAGE_SIZE-1)) | ||
45 | # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT) | ||
46 | /* | ||
47 | * We define our own hugetlb_get_unmapped_area so we don't corrupt 2nd level | ||
48 | * page tables with normal pages in them. | ||
49 | */ | ||
50 | # define HUGEPT_SHIFT (22) | ||
51 | # define HUGEPT_ALIGN (1 << HUGEPT_SHIFT) | ||
52 | # define HUGEPT_MASK (HUGEPT_ALIGN - 1) | ||
53 | # define ALIGN_HUGEPT(x) ALIGN(x, HUGEPT_ALIGN) | ||
54 | # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
55 | #endif | ||
56 | |||
57 | #ifndef __ASSEMBLY__ | ||
58 | |||
59 | /* On the Meta, we would like to know if the address (heap) we have is | ||
60 | * in local or global space. | ||
61 | */ | ||
62 | #define is_global_space(addr) ((addr) > 0x7fffffff) | ||
63 | #define is_local_space(addr) (!is_global_space(addr)) | ||
64 | |||
65 | extern void clear_page(void *to); | ||
66 | extern void copy_page(void *to, void *from); | ||
67 | |||
68 | #define clear_user_page(page, vaddr, pg) clear_page(page) | ||
69 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | ||
70 | |||
71 | /* | ||
72 | * These are used to make use of C type-checking.. | ||
73 | */ | ||
74 | typedef struct { unsigned long pte; } pte_t; | ||
75 | typedef struct { unsigned long pgd; } pgd_t; | ||
76 | typedef struct { unsigned long pgprot; } pgprot_t; | ||
77 | typedef struct page *pgtable_t; | ||
78 | |||
79 | #define pte_val(x) ((x).pte) | ||
80 | #define pgd_val(x) ((x).pgd) | ||
81 | #define pgprot_val(x) ((x).pgprot) | ||
82 | |||
83 | #define __pte(x) ((pte_t) { (x) }) | ||
84 | #define __pgd(x) ((pgd_t) { (x) }) | ||
85 | #define __pgprot(x) ((pgprot_t) { (x) }) | ||
86 | |||
87 | /* The kernel must now ALWAYS live at either 0xC0000000 or 0x40000000 - that | ||
88 | * being either global or local space. | ||
89 | */ | ||
90 | #define PAGE_OFFSET (CONFIG_PAGE_OFFSET) | ||
91 | |||
92 | #if PAGE_OFFSET >= LINGLOBAL_BASE | ||
93 | #define META_MEMORY_BASE LINGLOBAL_BASE | ||
94 | #define META_MEMORY_LIMIT LINGLOBAL_LIMIT | ||
95 | #else | ||
96 | #define META_MEMORY_BASE LINLOCAL_BASE | ||
97 | #define META_MEMORY_LIMIT LINLOCAL_LIMIT | ||
98 | #endif | ||
99 | |||
100 | /* Offset between physical and virtual mapping of kernel memory. */ | ||
101 | extern unsigned int meta_memoffset; | ||
102 | |||
103 | #define __pa(x) ((unsigned long)(((unsigned long)(x)) - meta_memoffset)) | ||
104 | #define __va(x) ((void *)((unsigned long)(((unsigned long)(x)) + meta_memoffset))) | ||
105 | |||
106 | extern unsigned long pfn_base; | ||
107 | #define ARCH_PFN_OFFSET (pfn_base) | ||
108 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | ||
109 | #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) | ||
110 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | ||
111 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) | ||
112 | #ifdef CONFIG_FLATMEM | ||
113 | extern unsigned long max_pfn; | ||
114 | extern unsigned long min_low_pfn; | ||
115 | #define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_pfn) | ||
116 | #endif | ||
117 | |||
118 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | ||
119 | |||
120 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ | ||
121 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
122 | |||
123 | #include <asm-generic/memory_model.h> | ||
124 | #include <asm-generic/getorder.h> | ||
125 | |||
126 | #endif /* __ASSEMBLY__ */ | ||
127 | |||
128 | #endif /* _METAG_PAGE_H */ | ||
diff --git a/arch/metag/include/asm/perf_event.h b/arch/metag/include/asm/perf_event.h new file mode 100644 index 000000000000..105bbff0149f --- /dev/null +++ b/arch/metag/include/asm/perf_event.h | |||
@@ -0,0 +1,4 @@ | |||
1 | #ifndef __ASM_METAG_PERF_EVENT_H | ||
2 | #define __ASM_METAG_PERF_EVENT_H | ||
3 | |||
4 | #endif /* __ASM_METAG_PERF_EVENT_H */ | ||
diff --git a/arch/metag/include/asm/pgalloc.h b/arch/metag/include/asm/pgalloc.h new file mode 100644 index 000000000000..275d9285141c --- /dev/null +++ b/arch/metag/include/asm/pgalloc.h | |||
@@ -0,0 +1,79 @@ | |||
1 | #ifndef _METAG_PGALLOC_H | ||
2 | #define _METAG_PGALLOC_H | ||
3 | |||
4 | #include <linux/threads.h> | ||
5 | #include <linux/mm.h> | ||
6 | |||
7 | #define pmd_populate_kernel(mm, pmd, pte) \ | ||
8 | set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte))) | ||
9 | |||
10 | #define pmd_populate(mm, pmd, pte) \ | ||
11 | set_pmd(pmd, __pmd(_PAGE_TABLE | page_to_phys(pte))) | ||
12 | |||
13 | #define pmd_pgtable(pmd) pmd_page(pmd) | ||
14 | |||
15 | /* | ||
16 | * Allocate and free page tables. | ||
17 | */ | ||
18 | #ifdef CONFIG_METAG_META21_MMU | ||
19 | static inline void pgd_ctor(pgd_t *pgd) | ||
20 | { | ||
21 | memcpy(pgd + USER_PTRS_PER_PGD, | ||
22 | swapper_pg_dir + USER_PTRS_PER_PGD, | ||
23 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | ||
24 | } | ||
25 | #else | ||
26 | #define pgd_ctor(x) do { } while (0) | ||
27 | #endif | ||
28 | |||
29 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | ||
30 | { | ||
31 | pgd_t *pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL); | ||
32 | if (pgd) | ||
33 | pgd_ctor(pgd); | ||
34 | return pgd; | ||
35 | } | ||
36 | |||
37 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
38 | { | ||
39 | free_page((unsigned long)pgd); | ||
40 | } | ||
41 | |||
42 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | ||
43 | unsigned long address) | ||
44 | { | ||
45 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | | ||
46 | __GFP_ZERO); | ||
47 | return pte; | ||
48 | } | ||
49 | |||
50 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, | ||
51 | unsigned long address) | ||
52 | { | ||
53 | struct page *pte; | ||
54 | pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0); | ||
55 | if (pte) | ||
56 | pgtable_page_ctor(pte); | ||
57 | return pte; | ||
58 | } | ||
59 | |||
60 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | ||
61 | { | ||
62 | free_page((unsigned long)pte); | ||
63 | } | ||
64 | |||
65 | static inline void pte_free(struct mm_struct *mm, pgtable_t pte) | ||
66 | { | ||
67 | pgtable_page_dtor(pte); | ||
68 | __free_page(pte); | ||
69 | } | ||
70 | |||
71 | #define __pte_free_tlb(tlb, pte, addr) \ | ||
72 | do { \ | ||
73 | pgtable_page_dtor(pte); \ | ||
74 | tlb_remove_page((tlb), (pte)); \ | ||
75 | } while (0) | ||
76 | |||
77 | #define check_pgt_cache() do { } while (0) | ||
78 | |||
79 | #endif | ||
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h new file mode 100644 index 000000000000..1cd13d595198 --- /dev/null +++ b/arch/metag/include/asm/pgtable.h | |||
@@ -0,0 +1,370 @@ | |||
1 | /* | ||
2 | * Macros and functions to manipulate Meta page tables. | ||
3 | */ | ||
4 | |||
5 | #ifndef _METAG_PGTABLE_H | ||
6 | #define _METAG_PGTABLE_H | ||
7 | |||
8 | #include <asm-generic/pgtable-nopmd.h> | ||
9 | |||
10 | /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ | ||
11 | #if PAGE_OFFSET >= LINGLOBAL_BASE | ||
12 | #define CONSISTENT_START 0xF7000000 | ||
13 | #define CONSISTENT_END 0xF73FFFFF | ||
14 | #define VMALLOC_START 0xF8000000 | ||
15 | #define VMALLOC_END 0xFFFEFFFF | ||
16 | #else | ||
17 | #define CONSISTENT_START 0x77000000 | ||
18 | #define CONSISTENT_END 0x773FFFFF | ||
19 | #define VMALLOC_START 0x78000000 | ||
20 | #define VMALLOC_END 0x7FFFFFFF | ||
21 | #endif | ||
22 | |||
23 | /* | ||
24 | * Definitions for MMU descriptors | ||
25 | * | ||
26 | * These are the hardware bits in the MMCU pte entries. | ||
27 | * Derived from the Meta toolkit headers. | ||
28 | */ | ||
29 | #define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT | ||
30 | #define _PAGE_WRITE MMCU_ENTRY_WR_BIT | ||
31 | #define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT | ||
32 | /* Write combine bit - this can cause writes to occur out of order */ | ||
33 | #define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT | ||
34 | /* Sys coherent bit - this bit is never used by Linux */ | ||
35 | #define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT | ||
36 | #define _PAGE_ALWAYS_ZERO_1 0x020 | ||
37 | #define _PAGE_CACHE_CTRL0 0x040 | ||
38 | #define _PAGE_CACHE_CTRL1 0x080 | ||
39 | #define _PAGE_ALWAYS_ZERO_2 0x100 | ||
40 | #define _PAGE_ALWAYS_ZERO_3 0x200 | ||
41 | #define _PAGE_ALWAYS_ZERO_4 0x400 | ||
42 | #define _PAGE_ALWAYS_ZERO_5 0x800 | ||
43 | |||
44 | /* These are software bits that we stuff into the unused gaps in the | ||
45 | * hardware pte entries. Note: these DO get stored in the actual | ||
46 | * hardware, but the hardware just does not use them. | ||
47 | */ | ||
48 | #define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1 | ||
49 | #define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2 | ||
50 | #define _PAGE_FILE _PAGE_ALWAYS_ZERO_3 | ||
51 | |||
52 | /* Pages owned and protected by the kernel. */ | ||
53 | #define _PAGE_KERNEL _PAGE_PRIV | ||
54 | |||
55 | /* No caching of this page */ | ||
56 | #define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S) | ||
57 | /* Burst caching - good for data streaming */ | ||
58 | #define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S) | ||
59 | /* One cache way per thread */ | ||
60 | #define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S) | ||
61 | /* Full-on caching */ | ||
62 | #define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S) | ||
63 | |||
64 | #define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE) | ||
65 | |||
66 | /* which bits are used for cache control ... */ | ||
67 | #define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \ | ||
68 | _PAGE_WR_COMBINE) | ||
69 | |||
70 | /* This is a mask of the bits that pte_modify is allowed to change. */ | ||
71 | #define _PAGE_CHG_MASK (PAGE_MASK) | ||
72 | |||
73 | #define _PAGE_SZ_SHIFT 1 | ||
74 | #define _PAGE_SZ_4K (0x0) | ||
75 | #define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT) | ||
76 | #define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT) | ||
77 | #define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT) | ||
78 | #define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT) | ||
79 | #define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT) | ||
80 | #define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT) | ||
81 | #define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT) | ||
82 | #define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT) | ||
83 | #define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT) | ||
84 | #define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT) | ||
85 | #define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT) | ||
86 | |||
87 | #if defined(CONFIG_PAGE_SIZE_4K) | ||
88 | #define _PAGE_SZ (_PAGE_SZ_4K) | ||
89 | #elif defined(CONFIG_PAGE_SIZE_8K) | ||
90 | #define _PAGE_SZ (_PAGE_SZ_8K) | ||
91 | #elif defined(CONFIG_PAGE_SIZE_16K) | ||
92 | #define _PAGE_SZ (_PAGE_SZ_16K) | ||
93 | #endif | ||
94 | #define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT) | ||
95 | |||
96 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_8K) | ||
97 | # define _PAGE_SZHUGE (_PAGE_SZ_8K) | ||
98 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K) | ||
99 | # define _PAGE_SZHUGE (_PAGE_SZ_16K) | ||
100 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K) | ||
101 | # define _PAGE_SZHUGE (_PAGE_SZ_32K) | ||
102 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | ||
103 | # define _PAGE_SZHUGE (_PAGE_SZ_64K) | ||
104 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K) | ||
105 | # define _PAGE_SZHUGE (_PAGE_SZ_128K) | ||
106 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) | ||
107 | # define _PAGE_SZHUGE (_PAGE_SZ_256K) | ||
108 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) | ||
109 | # define _PAGE_SZHUGE (_PAGE_SZ_512K) | ||
110 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M) | ||
111 | # define _PAGE_SZHUGE (_PAGE_SZ_1M) | ||
112 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M) | ||
113 | # define _PAGE_SZHUGE (_PAGE_SZ_2M) | ||
114 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M) | ||
115 | # define _PAGE_SZHUGE (_PAGE_SZ_4M) | ||
116 | #endif | ||
117 | |||
118 | /* | ||
119 | * The Linux memory management assumes a three-level page table setup. On | ||
120 | * Meta, we use that, but "fold" the mid level into the top-level page | ||
121 | * table. | ||
122 | */ | ||
123 | |||
124 | /* PGDIR_SHIFT determines the size of the area a second-level page table can | ||
125 | * map. This is always 4MB. | ||
126 | */ | ||
127 | |||
128 | #define PGDIR_SHIFT 22 | ||
129 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
130 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
131 | |||
132 | /* | ||
133 | * Entries per page directory level: we use a two-level setup, so | ||
134 | * we don't really have any PMD directory physically. First level tables | ||
135 | * always map 2Gb (local or global) at a granularity of 4MB, second-level | ||
136 | * tables map 4MB with a granularity between 4MB and 4kB (between 1 and | ||
137 | * 1024 entries). | ||
138 | */ | ||
139 | #define PTRS_PER_PTE (PGDIR_SIZE/PAGE_SIZE) | ||
140 | #define HPTRS_PER_PTE (PGDIR_SIZE/HPAGE_SIZE) | ||
141 | #define PTRS_PER_PGD 512 | ||
142 | |||
143 | #define USER_PTRS_PER_PGD 256 | ||
144 | #define FIRST_USER_ADDRESS META_MEMORY_BASE | ||
145 | #define FIRST_USER_PGD_NR pgd_index(FIRST_USER_ADDRESS) | ||
146 | |||
147 | #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
148 | _PAGE_CACHEABLE) | ||
149 | |||
150 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \ | ||
151 | _PAGE_ACCESSED | _PAGE_CACHEABLE) | ||
152 | #define PAGE_SHARED_C PAGE_SHARED | ||
153 | #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
154 | _PAGE_CACHEABLE) | ||
155 | #define PAGE_COPY_C PAGE_COPY | ||
156 | |||
157 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
158 | _PAGE_CACHEABLE) | ||
159 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \ | ||
160 | _PAGE_ACCESSED | _PAGE_WRITE | \ | ||
161 | _PAGE_CACHEABLE | _PAGE_KERNEL) | ||
162 | |||
163 | #define __P000 PAGE_NONE | ||
164 | #define __P001 PAGE_READONLY | ||
165 | #define __P010 PAGE_COPY | ||
166 | #define __P011 PAGE_COPY | ||
167 | #define __P100 PAGE_READONLY | ||
168 | #define __P101 PAGE_READONLY | ||
169 | #define __P110 PAGE_COPY_C | ||
170 | #define __P111 PAGE_COPY_C | ||
171 | |||
172 | #define __S000 PAGE_NONE | ||
173 | #define __S001 PAGE_READONLY | ||
174 | #define __S010 PAGE_SHARED | ||
175 | #define __S011 PAGE_SHARED | ||
176 | #define __S100 PAGE_READONLY | ||
177 | #define __S101 PAGE_READONLY | ||
178 | #define __S110 PAGE_SHARED_C | ||
179 | #define __S111 PAGE_SHARED_C | ||
180 | |||
181 | #ifndef __ASSEMBLY__ | ||
182 | |||
183 | #include <asm/page.h> | ||
184 | |||
185 | /* zero page used for uninitialized stuff */ | ||
186 | extern unsigned long empty_zero_page; | ||
187 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | ||
188 | |||
189 | /* Certain architectures need to do special things when PTEs | ||
190 | * within a page table are directly modified. Thus, the following | ||
191 | * hook is made available. | ||
192 | */ | ||
193 | #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) | ||
194 | #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) | ||
195 | |||
196 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) | ||
197 | |||
198 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) | ||
199 | |||
200 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
201 | |||
202 | #define pte_none(x) (!pte_val(x)) | ||
203 | #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) | ||
204 | #define pte_clear(mm, addr, xp) do { pte_val(*(xp)) = 0; } while (0) | ||
205 | |||
206 | #define pmd_none(x) (!pmd_val(x)) | ||
207 | #define pmd_bad(x) ((pmd_val(x) & ~(PAGE_MASK | _PAGE_SZ_MASK)) \ | ||
208 | != (_PAGE_TABLE & ~_PAGE_SZ_MASK)) | ||
209 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | ||
210 | #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) | ||
211 | |||
212 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | ||
213 | |||
214 | /* | ||
215 | * The following only work if pte_present() is true. | ||
217 | * Undefined behaviour if not. | ||
217 | */ | ||
218 | |||
219 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } | ||
220 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | ||
221 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | ||
222 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | ||
223 | static inline int pte_special(pte_t pte) { return 0; } | ||
224 | |||
225 | static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; } | ||
226 | static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } | ||
227 | static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } | ||
228 | static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; } | ||
229 | static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } | ||
230 | static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } | ||
231 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } | ||
232 | static inline pte_t pte_mkhuge(pte_t pte) { return pte; } | ||
233 | |||
234 | /* | ||
235 | * Macros to make a page protection uncacheable or write-combining. | ||
236 | */ | ||
237 | #define pgprot_writecombine(prot) \ | ||
238 | __pgprot(pgprot_val(prot) & ~(_PAGE_CACHE_CTRL1 | _PAGE_CACHE_CTRL0)) | ||
239 | |||
240 | #define pgprot_noncached(prot) \ | ||
241 | __pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE) | ||
242 | |||
243 | |||
244 | /* | ||
245 | * Conversion functions: convert a page and protection to a page entry, | ||
246 | * and a page entry and page directory to the page they refer to. | ||
247 | */ | ||
248 | |||
249 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | ||
250 | |||
251 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
252 | { | ||
253 | pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); | ||
254 | return pte; | ||
255 | } | ||
256 | |||
257 | static inline unsigned long pmd_page_vaddr(pmd_t pmd) | ||
258 | { | ||
259 | unsigned long paddr = pmd_val(pmd) & PAGE_MASK; | ||
260 | if (!paddr) | ||
261 | return 0; | ||
262 | return (unsigned long)__va(paddr); | ||
263 | } | ||
264 | |||
265 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) | ||
266 | #define pmd_page_shift(pmd) (12 + ((pmd_val(pmd) & _PAGE_SZ_MASK) \ | ||
267 | >> _PAGE_SZ_SHIFT)) | ||
268 | #define pmd_num_ptrs(pmd) (PGDIR_SIZE >> pmd_page_shift(pmd)) | ||
269 | |||
270 | /* | ||
271 | * Each pgd is only 2k, mapping 2Gb (local or global). If we're in global | ||
272 | * space, drop the top bit before indexing the pgd. | ||
273 | */ | ||
274 | #if PAGE_OFFSET >= LINGLOBAL_BASE | ||
275 | #define pgd_index(address) ((((address) & ~0x80000000) >> PGDIR_SHIFT) \ | ||
276 | & (PTRS_PER_PGD-1)) | ||
277 | #else | ||
278 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | ||
279 | #endif | ||
280 | |||
281 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | ||
282 | |||
283 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | ||
284 | |||
285 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | ||
286 | |||
287 | /* Find an entry in the second-level page table. */ | ||
288 | #if !defined(CONFIG_HUGETLB_PAGE) | ||
289 | /* all pages are of size (1 << PAGE_SHIFT), so no need to read 1st level pt */ | ||
290 | # define pte_index(pmd, address) \ | ||
291 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
292 | #else | ||
293 | /* some pages are huge, so read 1st level pt to find out */ | ||
294 | # define pte_index(pmd, address) \ | ||
295 | (((address) >> pmd_page_shift(pmd)) & (pmd_num_ptrs(pmd) - 1)) | ||
296 | #endif | ||
297 | #define pte_offset_kernel(dir, address) \ | ||
298 | ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(*(dir), address)) | ||
299 | #define pte_offset_map(dir, address) pte_offset_kernel(dir, address) | ||
300 | #define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address) | ||
301 | |||
302 | #define pte_unmap(pte) do { } while (0) | ||
303 | #define pte_unmap_nested(pte) do { } while (0) | ||
304 | |||
305 | #define pte_ERROR(e) \ | ||
306 | pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) | ||
307 | #define pgd_ERROR(e) \ | ||
308 | pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | ||
309 | |||
310 | /* | ||
311 | * Meta doesn't have any external MMU info: the kernel page | ||
312 | * tables contain all the necessary information. | ||
313 | */ | ||
314 | static inline void update_mmu_cache(struct vm_area_struct *vma, | ||
315 | unsigned long address, pte_t *pte) | ||
316 | { | ||
317 | } | ||
318 | |||
319 | /* | ||
320 | * Encode and decode a swap entry (must be !pte_none(e) && !pte_present(e)) | ||
321 | * Since PAGE_PRESENT is bit 1, we can use the bits above that. | ||
322 | */ | ||
323 | #define __swp_type(x) (((x).val >> 1) & 0xff) | ||
324 | #define __swp_offset(x) ((x).val >> 10) | ||
325 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | \ | ||
326 | ((offset) << 10) }) | ||
327 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | ||
328 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | ||
329 | |||
330 | #define PTE_FILE_MAX_BITS 22 | ||
331 | #define pte_to_pgoff(x) (pte_val(x) >> 10) | ||
332 | #define pgoff_to_pte(x) __pte(((x) << 10) | _PAGE_FILE) | ||
333 | |||
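As a standalone round-trip check of the swap encoding above, assuming a 32-bit-style pte value with the present bit at value 1: the type occupies bits 1..8 and the offset sits from bit 10 upwards, so decoding recovers both while the present bit stays clear:

    #include <stdio.h>

    int main(void)
    {
        unsigned long type = 0x5, offset = 0x1234;
        unsigned long val = (type << 1) | (offset << 10);  /* __swp_entry() */

        printf("type=%#lx offset=%#lx present=%lu\n",
               (val >> 1) & 0xff,   /* __swp_type(): 0x5 */
               val >> 10,           /* __swp_offset(): 0x1234 */
               val & 1);            /* must stay 0 for a swap pte */
        return 0;
    }
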
334 | #define kern_addr_valid(addr) (1) | ||
335 | |||
336 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | ||
337 | remap_pfn_range(vma, vaddr, pfn, size, prot) | ||
338 | |||
339 | /* | ||
340 | * No page table caches to initialise | ||
341 | */ | ||
342 | #define pgtable_cache_init() do { } while (0) | ||
343 | |||
344 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
345 | void paging_init(unsigned long mem_end); | ||
346 | |||
347 | #ifdef CONFIG_METAG_META12 | ||
348 | /* This is a workaround for an issue in Meta 1 cores. These cores cache | ||
349 | * invalid entries in the TLB, so we always need to flush whenever we add | ||
350 | * a new pte. Unfortunately we can only flush the whole TLB, not shoot down | ||
351 | * single entries, so this is sub-optimal. This implementation ensures that | ||
352 | * we will get a flush at the second attempt, so we may still get repeated | ||
353 | * faults, we just don't overflow the kernel stack handling them. | ||
354 | */ | ||
355 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | ||
356 | #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ | ||
357 | ({ \ | ||
358 | int __changed = !pte_same(*(__ptep), __entry); \ | ||
359 | if (__changed) { \ | ||
360 | set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \ | ||
361 | } \ | ||
362 | flush_tlb_page(__vma, __address); \ | ||
363 | __changed; \ | ||
364 | }) | ||
365 | #endif | ||
366 | |||
367 | #include <asm-generic/pgtable.h> | ||
368 | |||
369 | #endif /* __ASSEMBLY__ */ | ||
370 | #endif /* _METAG_PGTABLE_H */ | ||
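Putting the walk macros of this header together, a small userspace check of how a virtual address splits into pgd and pte indices, assuming 4K pages (constants copied locally so the sketch runs on its own; the global-space top-bit masking is omitted):

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PGDIR_SHIFT  22
    #define PTRS_PER_PGD 512
    #define PTRS_PER_PTE (1UL << (PGDIR_SHIFT - PAGE_SHIFT))   /* 1024 */

    int main(void)
    {
        unsigned long addr = 0x40123456;    /* arbitrary local address */
        unsigned long pgd = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
        unsigned long pte = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        unsigned long off = addr & ((1UL << PAGE_SHIFT) - 1);

        /* each pgd slot covers 4MB; each pte slot covers one 4K page */
        printf("pgd_index=%lu pte_index=%lu offset=%#lx\n", pgd, pte, off);
        return 0;   /* prints pgd_index=256 pte_index=291 offset=0x456 */
    }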
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h new file mode 100644 index 000000000000..9b029a7911c3 --- /dev/null +++ b/arch/metag/include/asm/processor.h | |||
@@ -0,0 +1,202 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005,2006,2007,2008 Imagination Technologies | ||
3 | */ | ||
4 | |||
5 | #ifndef __ASM_METAG_PROCESSOR_H | ||
6 | #define __ASM_METAG_PROCESSOR_H | ||
7 | |||
8 | #include <linux/atomic.h> | ||
9 | |||
10 | #include <asm/page.h> | ||
11 | #include <asm/ptrace.h> | ||
12 | #include <asm/metag_regs.h> | ||
13 | |||
14 | /* | ||
15 | * Default implementation of macro that returns current | ||
16 | * instruction pointer ("program counter"). | ||
17 | */ | ||
18 | #define current_text_addr() ({ __label__ _l; _l: &&_l; }) | ||
19 | |||
20 | /* The task stops where the kernel starts */ | ||
21 | #define TASK_SIZE PAGE_OFFSET | ||
22 | /* Add an extra page of padding at the top of the stack for the guard page. */ | ||
23 | #define STACK_TOP (TASK_SIZE - PAGE_SIZE) | ||
24 | #define STACK_TOP_MAX STACK_TOP | ||
25 | |||
26 | /* This decides where the kernel will search for a free chunk of vm | ||
27 | * space during mmap's. | ||
28 | */ | ||
29 | #define TASK_UNMAPPED_BASE META_MEMORY_BASE | ||
30 | |||
31 | typedef struct { | ||
32 | unsigned long seg; | ||
33 | } mm_segment_t; | ||
34 | |||
35 | #ifdef CONFIG_METAG_FPU | ||
36 | struct meta_fpu_context { | ||
37 | TBICTXEXTFPU fpstate; | ||
38 | union { | ||
39 | struct { | ||
40 | TBICTXEXTBB4 fx8_15; | ||
41 | TBICTXEXTFPACC fpacc; | ||
42 | } fx8_15; | ||
43 | struct { | ||
44 | TBICTXEXTFPACC fpacc; | ||
45 | TBICTXEXTBB4 unused; | ||
46 | } nofx8_15; | ||
47 | } extfpstate; | ||
48 | bool needs_restore; | ||
49 | }; | ||
50 | #else | ||
51 | struct meta_fpu_context {}; | ||
52 | #endif | ||
53 | |||
54 | #ifdef CONFIG_METAG_DSP | ||
55 | struct meta_ext_context { | ||
56 | struct { | ||
57 | TBIEXTCTX ctx; | ||
58 | TBICTXEXTBB8 bb8; | ||
59 | TBIDUAL ax[TBICTXEXTAXX_BYTES / sizeof(TBIDUAL)]; | ||
60 | TBICTXEXTHL2 hl2; | ||
61 | TBICTXEXTTDPR ext; | ||
62 | TBICTXEXTRP6 rp; | ||
63 | } regs; | ||
64 | |||
65 | /* DSPRAM A and B save areas. */ | ||
66 | void *ram[2]; | ||
67 | |||
68 | /* ECH encoded size of DSPRAM save areas. */ | ||
69 | unsigned int ram_sz[2]; | ||
70 | }; | ||
71 | #else | ||
72 | struct meta_ext_context {}; | ||
73 | #endif | ||
74 | |||
75 | struct thread_struct { | ||
76 | PTBICTX kernel_context; | ||
77 | /* A copy of the user process Sig.SaveMask. */ | ||
78 | unsigned int user_flags; | ||
79 | struct meta_fpu_context *fpu_context; | ||
80 | void __user *tls_ptr; | ||
81 | unsigned short int_depth; | ||
82 | unsigned short txdefr_failure; | ||
83 | struct meta_ext_context *dsp_context; | ||
84 | }; | ||
85 | |||
86 | #define INIT_THREAD { \ | ||
87 | NULL, /* kernel_context */ \ | ||
88 | 0, /* user_flags */ \ | ||
89 | NULL, /* fpu_context */ \ | ||
90 | NULL, /* tls_ptr */ \ | ||
91 | 1, /* int_depth - we start in kernel */ \ | ||
92 | 0, /* txdefr_failure */ \ | ||
93 | NULL, /* dsp_context */ \ | ||
94 | } | ||
95 | |||
96 | /* This must be a #define, as it references 'current', which is not | ||
97 | * visible yet. | ||
98 | * | ||
99 | * Stack layout is as below. | ||
100 | |||
101 | argc argument counter (integer) | ||
102 | argv[0] program name (pointer) | ||
103 | argv[1...N] program args (pointers) | ||
104 | argv[argc-1] end of args (integer) | ||
105 | NULL | ||
106 | env[0...N] environment variables (pointers) | ||
107 | NULL | ||
108 | |||
109 | */ | ||
110 | #define start_thread(regs, pc, usp) do { \ | ||
111 | unsigned int *argc = (unsigned int *) bprm->exec; \ | ||
112 | set_fs(USER_DS); \ | ||
113 | current->thread.int_depth = 1; \ | ||
114 | /* Force this process down to user land */ \ | ||
115 | regs->ctx.SaveMask = TBICTX_PRIV_BIT; \ | ||
116 | regs->ctx.CurrPC = pc; \ | ||
117 | regs->ctx.AX[0].U0 = usp; \ | ||
118 | regs->ctx.DX[3].U1 = *((int *)argc); /* argc */ \ | ||
119 | regs->ctx.DX[3].U0 = (int)((int *)argc + 1); /* argv */ \ | ||
120 | regs->ctx.DX[2].U1 = (int)((int *)argc + \ | ||
121 | regs->ctx.DX[3].U1 + 2); /* envp */ \ | ||
122 | regs->ctx.DX[2].U0 = 0; /* rtld_fini */ \ | ||
123 | } while (0) | ||
124 | |||
125 | /* Forward declaration, a strange C thing */ | ||
126 | struct task_struct; | ||
127 | |||
128 | /* Free all resources held by a thread. */ | ||
129 | static inline void release_thread(struct task_struct *dead_task) | ||
130 | { | ||
131 | } | ||
132 | |||
133 | #define copy_segments(tsk, mm) do { } while (0) | ||
134 | #define release_segments(mm) do { } while (0) | ||
135 | |||
136 | extern void exit_thread(void); | ||
137 | |||
138 | /* | ||
139 | * Return saved PC of a blocked thread. | ||
140 | */ | ||
141 | #define thread_saved_pc(tsk) \ | ||
142 | ((unsigned long)(tsk)->thread.kernel_context->CurrPC) | ||
143 | #define thread_saved_sp(tsk) \ | ||
144 | ((unsigned long)(tsk)->thread.kernel_context->AX[0].U0) | ||
145 | #define thread_saved_fp(tsk) \ | ||
146 | ((unsigned long)(tsk)->thread.kernel_context->AX[1].U0) | ||
147 | |||
148 | unsigned long get_wchan(struct task_struct *p); | ||
149 | |||
150 | #define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC) | ||
151 | #define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0) | ||
152 | |||
153 | #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) | ||
154 | |||
155 | #define cpu_relax() barrier() | ||
156 | |||
157 | extern void setup_priv(void); | ||
158 | |||
159 | static inline unsigned int hard_processor_id(void) | ||
160 | { | ||
161 | unsigned int id; | ||
162 | |||
163 | asm volatile ("MOV %0, TXENABLE\n" | ||
164 | "AND %0, %0, %1\n" | ||
165 | "LSR %0, %0, %2\n" | ||
166 | : "=&d" (id) | ||
167 | : "I" (TXENABLE_THREAD_BITS), | ||
168 | "K" (TXENABLE_THREAD_S) | ||
169 | ); | ||
170 | |||
171 | return id; | ||
172 | } | ||
173 | |||
174 | #define OP3_EXIT 0 | ||
175 | |||
176 | #define HALT_OK 0 | ||
177 | #define HALT_PANIC -1 | ||
178 | |||
179 | /* | ||
180 | * Halt (stop) the hardware thread. This instruction sequence is the | ||
181 | * standard way to cause a Meta hardware thread to exit. The exit code | ||
182 | * is pushed onto the stack, where it is interpreted by the debug adapter. | ||
183 | */ | ||
184 | static inline void hard_processor_halt(int exit_code) | ||
185 | { | ||
186 | asm volatile ("MOV D1Ar1, %0\n" | ||
187 | "MOV D0Ar6, %1\n" | ||
188 | "MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2\n" | ||
189 | "1:\n" | ||
190 | "SWITCH #0xC30006\n" | ||
191 | "B 1b\n" | ||
192 | : : "r" (exit_code), "K" (OP3_EXIT)); | ||
193 | } | ||
194 | |||
195 | /* Set these hooks to call SoC specific code to restart/halt/power off. */ | ||
196 | extern void (*soc_restart)(char *cmd); | ||
197 | extern void (*soc_halt)(void); | ||
198 | |||
199 | extern void show_trace(struct task_struct *tsk, unsigned long *sp, | ||
200 | struct pt_regs *regs); | ||
201 | |||
202 | #endif | ||
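The pointer arithmetic in start_thread() above decodes the initial stack layout from the comment: argv begins one word after argc, and envp begins argc + 2 words after it (skipping the argv slots and their NULL terminator). A hypothetical userspace mock-up of that layout, assuming pointers fit in an unsigned long:

    #include <stdio.h>

    int main(void)
    {
        /* argc=2, argv={"prog","arg",NULL}, envp={"PATH=/bin",NULL} */
        unsigned long stack[] = {
            2,
            (unsigned long)"prog", (unsigned long)"arg", 0,
            (unsigned long)"PATH=/bin", 0,
        };
        unsigned long *argc = stack;
        char **argv = (char **)(argc + 1);           /* DX[3].U0 */
        char **envp = (char **)(argc + *argc + 2);   /* DX[2].U1 */

        printf("argc=%lu argv[0]=%s envp[0]=%s\n", *argc, argv[0], envp[0]);
        return 0;
    }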
diff --git a/arch/metag/include/asm/prom.h b/arch/metag/include/asm/prom.h new file mode 100644 index 000000000000..d2aa35d2228e --- /dev/null +++ b/arch/metag/include/asm/prom.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * arch/metag/include/asm/prom.h | ||
3 | * | ||
4 | * Copyright (C) 2012 Imagination Technologies Ltd. | ||
5 | * | ||
6 | * Based on ARM version: | ||
7 | * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | */ | ||
14 | #ifndef __ASM_METAG_PROM_H | ||
15 | #define __ASM_METAG_PROM_H | ||
16 | |||
17 | #include <asm/setup.h> | ||
18 | #define HAVE_ARCH_DEVTREE_FIXUPS | ||
19 | |||
20 | extern struct machine_desc *setup_machine_fdt(void *dt); | ||
21 | extern void copy_fdt(void); | ||
22 | |||
23 | #endif /* __ASM_METAG_PROM_H */ | ||
diff --git a/arch/metag/include/asm/ptrace.h b/arch/metag/include/asm/ptrace.h new file mode 100644 index 000000000000..fcabc18daf25 --- /dev/null +++ b/arch/metag/include/asm/ptrace.h | |||
@@ -0,0 +1,60 @@ | |||
1 | #ifndef _METAG_PTRACE_H | ||
2 | #define _METAG_PTRACE_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <uapi/asm/ptrace.h> | ||
6 | #include <asm/tbx.h> | ||
7 | |||
8 | #ifndef __ASSEMBLY__ | ||
9 | |||
10 | /* this struct defines the way the registers are stored on the | ||
11 | stack during a system call. */ | ||
12 | |||
13 | struct pt_regs { | ||
14 | TBICTX ctx; | ||
15 | TBICTXEXTCB0 extcb0[5]; | ||
16 | }; | ||
17 | |||
18 | #define user_mode(regs) (((regs)->ctx.SaveMask & TBICTX_PRIV_BIT) > 0) | ||
19 | |||
20 | #define instruction_pointer(regs) ((unsigned long)(regs)->ctx.CurrPC) | ||
21 | #define profile_pc(regs) instruction_pointer(regs) | ||
22 | |||
23 | #define task_pt_regs(task) \ | ||
24 | ((struct pt_regs *)(task_stack_page(task) + \ | ||
25 | sizeof(struct thread_info))) | ||
26 | |||
27 | #define current_pt_regs() \ | ||
28 | ((struct pt_regs *)((char *)current_thread_info() + \ | ||
29 | sizeof(struct thread_info))) | ||
30 | |||
31 | int syscall_trace_enter(struct pt_regs *regs); | ||
32 | void syscall_trace_leave(struct pt_regs *regs); | ||
33 | |||
34 | /* copy a struct user_gp_regs out to user */ | ||
35 | int metag_gp_regs_copyout(const struct pt_regs *regs, | ||
36 | unsigned int pos, unsigned int count, | ||
37 | void *kbuf, void __user *ubuf); | ||
38 | /* copy a struct user_gp_regs in from user */ | ||
39 | int metag_gp_regs_copyin(struct pt_regs *regs, | ||
40 | unsigned int pos, unsigned int count, | ||
41 | const void *kbuf, const void __user *ubuf); | ||
42 | /* copy a struct user_cb_regs out to user */ | ||
43 | int metag_cb_regs_copyout(const struct pt_regs *regs, | ||
44 | unsigned int pos, unsigned int count, | ||
45 | void *kbuf, void __user *ubuf); | ||
46 | /* copy a struct user_cb_regs in from user */ | ||
47 | int metag_cb_regs_copyin(struct pt_regs *regs, | ||
48 | unsigned int pos, unsigned int count, | ||
49 | const void *kbuf, const void __user *ubuf); | ||
50 | /* copy a struct user_rp_state out to user */ | ||
51 | int metag_rp_state_copyout(const struct pt_regs *regs, | ||
52 | unsigned int pos, unsigned int count, | ||
53 | void *kbuf, void __user *ubuf); | ||
54 | /* copy a struct user_rp_state in from user */ | ||
55 | int metag_rp_state_copyin(struct pt_regs *regs, | ||
56 | unsigned int pos, unsigned int count, | ||
57 | const void *kbuf, const void __user *ubuf); | ||
58 | |||
59 | #endif /* __ASSEMBLY__ */ | ||
60 | #endif /* _METAG_PTRACE_H */ | ||
diff --git a/arch/metag/include/asm/setup.h b/arch/metag/include/asm/setup.h new file mode 100644 index 000000000000..e13083b15dd0 --- /dev/null +++ b/arch/metag/include/asm/setup.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _ASM_METAG_SETUP_H | ||
2 | #define _ASM_METAG_SETUP_H | ||
3 | |||
4 | #include <uapi/asm/setup.h> | ||
5 | |||
6 | void per_cpu_trap_init(unsigned long); | ||
7 | extern void __init dump_machine_table(void); | ||
8 | #endif /* _ASM_METAG_SETUP_H */ | ||
diff --git a/arch/metag/include/asm/smp.h b/arch/metag/include/asm/smp.h new file mode 100644 index 000000000000..e0373f81a117 --- /dev/null +++ b/arch/metag/include/asm/smp.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef __ASM_SMP_H | ||
2 | #define __ASM_SMP_H | ||
3 | |||
4 | #include <linux/cpumask.h> | ||
5 | |||
6 | #define raw_smp_processor_id() (current_thread_info()->cpu) | ||
7 | |||
8 | enum ipi_msg_type { | ||
9 | IPI_CALL_FUNC, | ||
10 | IPI_CALL_FUNC_SINGLE, | ||
11 | IPI_RESCHEDULE, | ||
12 | }; | ||
13 | |||
14 | extern void arch_send_call_function_single_ipi(int cpu); | ||
15 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | ||
16 | #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask | ||
17 | |||
18 | asmlinkage void secondary_start_kernel(void); | ||
19 | |||
20 | extern void secondary_startup(void); | ||
21 | |||
22 | #ifdef CONFIG_HOTPLUG_CPU | ||
23 | extern void __cpu_die(unsigned int cpu); | ||
24 | extern int __cpu_disable(void); | ||
25 | extern void cpu_die(void); | ||
26 | #endif | ||
27 | |||
28 | extern void smp_init_cpus(void); | ||
29 | #endif /* __ASM_SMP_H */ | ||
diff --git a/arch/metag/include/asm/sparsemem.h b/arch/metag/include/asm/sparsemem.h new file mode 100644 index 000000000000..03fe255d697a --- /dev/null +++ b/arch/metag/include/asm/sparsemem.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_METAG_SPARSEMEM_H | ||
2 | #define __ASM_METAG_SPARSEMEM_H | ||
3 | |||
4 | /* | ||
5 | * SECTION_SIZE_BITS 2^N: how big each section will be | ||
6 | * MAX_PHYSADDR_BITS 2^N: how much physical address space we have | ||
7 | * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space | ||
8 | */ | ||
9 | #define SECTION_SIZE_BITS 26 | ||
10 | #define MAX_PHYSADDR_BITS 32 | ||
11 | #define MAX_PHYSMEM_BITS 32 | ||
12 | |||
13 | #endif /* __ASM_METAG_SPARSEMEM_H */ | ||
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h new file mode 100644 index 000000000000..86a7cf3d1386 --- /dev/null +++ b/arch/metag/include/asm/spinlock.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef __ASM_SPINLOCK_H | ||
2 | #define __ASM_SPINLOCK_H | ||
3 | |||
4 | #ifdef CONFIG_METAG_ATOMICITY_LOCK1 | ||
5 | #include <asm/spinlock_lock1.h> | ||
6 | #else | ||
7 | #include <asm/spinlock_lnkget.h> | ||
8 | #endif | ||
9 | |||
10 | #define arch_spin_unlock_wait(lock) \ | ||
11 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) | ||
12 | |||
13 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
14 | |||
15 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
16 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
17 | |||
18 | #define arch_spin_relax(lock) cpu_relax() | ||
19 | #define arch_read_relax(lock) cpu_relax() | ||
20 | #define arch_write_relax(lock) cpu_relax() | ||
21 | |||
22 | #endif /* __ASM_SPINLOCK_H */ | ||
diff --git a/arch/metag/include/asm/spinlock_lnkget.h b/arch/metag/include/asm/spinlock_lnkget.h new file mode 100644 index 000000000000..ad8436feed8d --- /dev/null +++ b/arch/metag/include/asm/spinlock_lnkget.h | |||
@@ -0,0 +1,249 @@ | |||
1 | #ifndef __ASM_SPINLOCK_LNKGET_H | ||
2 | #define __ASM_SPINLOCK_LNKGET_H | ||
3 | |||
4 | /* | ||
5 | * None of these asm statements clobber memory, as LNKSET writes around | ||
6 | * the cache, so the memory it modifies cannot safely be read by any means | ||
7 | * other than these accessors. | ||
8 | */ | ||
9 | |||
10 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) | ||
11 | { | ||
12 | int ret; | ||
13 | |||
14 | asm volatile ("LNKGETD %0, [%1]\n" | ||
15 | "TST %0, #1\n" | ||
16 | "MOV %0, #1\n" | ||
17 | "XORZ %0, %0, %0\n" | ||
18 | : "=&d" (ret) | ||
19 | : "da" (&lock->lock) | ||
20 | : "cc"); | ||
21 | return ret; | ||
22 | } | ||
23 | |||
24 | static inline void arch_spin_lock(arch_spinlock_t *lock) | ||
25 | { | ||
26 | int tmp; | ||
27 | |||
28 | asm volatile ("1: LNKGETD %0,[%1]\n" | ||
29 | " TST %0, #1\n" | ||
30 | " ADD %0, %0, #1\n" | ||
31 | " LNKSETDZ [%1], %0\n" | ||
32 | " BNZ 1b\n" | ||
33 | " DEFR %0, TXSTAT\n" | ||
34 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
35 | " CMPT %0, #HI(0x02000000)\n" | ||
36 | " BNZ 1b\n" | ||
37 | : "=&d" (tmp) | ||
38 | : "da" (&lock->lock) | ||
39 | : "cc"); | ||
40 | |||
41 | smp_mb(); | ||
42 | } | ||
43 | |||
44 | /* Returns 0 if failed to acquire lock */ | ||
45 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | ||
46 | { | ||
47 | int tmp; | ||
48 | |||
49 | asm volatile (" LNKGETD %0,[%1]\n" | ||
50 | " TST %0, #1\n" | ||
51 | " ADD %0, %0, #1\n" | ||
52 | " LNKSETDZ [%1], %0\n" | ||
53 | " BNZ 1f\n" | ||
54 | " DEFR %0, TXSTAT\n" | ||
55 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
56 | " CMPT %0, #HI(0x02000000)\n" | ||
57 | " MOV %0, #1\n" | ||
58 | "1: XORNZ %0, %0, %0\n" | ||
59 | : "=&d" (tmp) | ||
60 | : "da" (&lock->lock) | ||
61 | : "cc"); | ||
62 | |||
63 | smp_mb(); | ||
64 | |||
65 | return tmp; | ||
66 | } | ||
67 | |||
68 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | ||
69 | { | ||
70 | smp_mb(); | ||
71 | |||
72 | asm volatile (" SETD [%0], %1\n" | ||
73 | : | ||
74 | : "da" (&lock->lock), "da" (0) | ||
75 | : "memory"); | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * RWLOCKS | ||
80 | * | ||
81 | * | ||
82 | * Write locks are easy - we just set bit 31. When unlocking, we can | ||
83 | * just write zero since the lock is exclusively held. | ||
84 | */ | ||
85 | |||
86 | static inline void arch_write_lock(arch_rwlock_t *rw) | ||
87 | { | ||
88 | int tmp; | ||
89 | |||
90 | asm volatile ("1: LNKGETD %0,[%1]\n" | ||
91 | " CMP %0, #0\n" | ||
92 | " ADD %0, %0, %2\n" | ||
93 | " LNKSETDZ [%1], %0\n" | ||
94 | " BNZ 1b\n" | ||
95 | " DEFR %0, TXSTAT\n" | ||
96 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
97 | " CMPT %0, #HI(0x02000000)\n" | ||
98 | " BNZ 1b\n" | ||
99 | : "=&d" (tmp) | ||
100 | : "da" (&rw->lock), "bd" (0x80000000) | ||
101 | : "cc"); | ||
102 | |||
103 | smp_mb(); | ||
104 | } | ||
105 | |||
106 | static inline int arch_write_trylock(arch_rwlock_t *rw) | ||
107 | { | ||
108 | int tmp; | ||
109 | |||
110 | asm volatile (" LNKGETD %0,[%1]\n" | ||
111 | " CMP %0, #0\n" | ||
112 | " ADD %0, %0, %2\n" | ||
113 | " LNKSETDZ [%1], %0\n" | ||
114 | " BNZ 1f\n" | ||
115 | " DEFR %0, TXSTAT\n" | ||
116 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
117 | " CMPT %0, #HI(0x02000000)\n" | ||
118 | " MOV %0,#1\n" | ||
119 | "1: XORNZ %0, %0, %0\n" | ||
120 | : "=&d" (tmp) | ||
121 | : "da" (&rw->lock), "bd" (0x80000000) | ||
122 | : "cc"); | ||
123 | |||
124 | smp_mb(); | ||
125 | |||
126 | return tmp; | ||
127 | } | ||
128 | |||
129 | static inline void arch_write_unlock(arch_rwlock_t *rw) | ||
130 | { | ||
131 | smp_mb(); | ||
132 | |||
133 | asm volatile (" SETD [%0], %1\n" | ||
134 | : | ||
135 | : "da" (&rw->lock), "da" (0) | ||
136 | : "memory"); | ||
137 | } | ||
138 | |||
139 | /* write_can_lock - would write_trylock() succeed? */ | ||
140 | static inline int arch_write_can_lock(arch_rwlock_t *rw) | ||
141 | { | ||
142 | int ret; | ||
143 | |||
144 | asm volatile ("LNKGETD %0, [%1]\n" | ||
145 | "CMP %0, #0\n" | ||
146 | "MOV %0, #1\n" | ||
147 | "XORNZ %0, %0, %0\n" | ||
148 | : "=&d" (ret) | ||
149 | : "da" (&rw->lock) | ||
150 | : "cc"); | ||
151 | return ret; | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * Read locks are a bit more hairy: | ||
156 | * - Exclusively load the lock value. | ||
157 | * - Increment it. | ||
158 | * - Store new lock value if positive, and we still own this location. | ||
159 | * If the value is negative, we've already failed. | ||
160 | * - If we failed to store the value, we want a negative result. | ||
161 | * - If we failed, try again. | ||
162 | * Unlocking is similarly hairy. We may have multiple read locks | ||
163 | * currently active. However, we know we won't have any write | ||
164 | * locks. | ||
165 | */ | ||
166 | static inline void arch_read_lock(arch_rwlock_t *rw) | ||
167 | { | ||
168 | int tmp; | ||
169 | |||
170 | asm volatile ("1: LNKGETD %0,[%1]\n" | ||
171 | " ADDS %0, %0, #1\n" | ||
172 | " LNKSETDPL [%1], %0\n" | ||
173 | " BMI 1b\n" | ||
174 | " DEFR %0, TXSTAT\n" | ||
175 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
176 | " CMPT %0, #HI(0x02000000)\n" | ||
177 | " BNZ 1b\n" | ||
178 | : "=&d" (tmp) | ||
179 | : "da" (&rw->lock) | ||
180 | : "cc"); | ||
181 | |||
182 | smp_mb(); | ||
183 | } | ||
184 | |||
185 | static inline void arch_read_unlock(arch_rwlock_t *rw) | ||
186 | { | ||
187 | int tmp; | ||
188 | |||
189 | smp_mb(); | ||
190 | |||
191 | asm volatile ("1: LNKGETD %0,[%1]\n" | ||
192 | " SUB %0, %0, #1\n" | ||
193 | " LNKSETD [%1], %0\n" | ||
194 | " DEFR %0, TXSTAT\n" | ||
195 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
196 | " CMPT %0, #HI(0x02000000)\n" | ||
197 | " BNZ 1b\n" | ||
198 | : "=&d" (tmp) | ||
199 | : "da" (&rw->lock) | ||
200 | : "cc", "memory"); | ||
201 | } | ||
202 | |||
203 | static inline int arch_read_trylock(arch_rwlock_t *rw) | ||
204 | { | ||
205 | int tmp; | ||
206 | |||
207 | asm volatile (" LNKGETD %0,[%1]\n" | ||
208 | " ADDS %0, %0, #1\n" | ||
209 | " LNKSETDPL [%1], %0\n" | ||
210 | " BMI 1f\n" | ||
211 | " DEFR %0, TXSTAT\n" | ||
212 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
213 | " CMPT %0, #HI(0x02000000)\n" | ||
214 | " MOV %0,#1\n" | ||
215 | " BZ 2f\n" | ||
216 | "1: MOV %0,#0\n" | ||
217 | "2:\n" | ||
218 | : "=&d" (tmp) | ||
219 | : "da" (&rw->lock) | ||
220 | : "cc"); | ||
221 | |||
222 | smp_mb(); | ||
223 | |||
224 | return tmp; | ||
225 | } | ||
226 | |||
227 | /* read_can_lock - would read_trylock() succeed? */ | ||
228 | static inline int arch_read_can_lock(arch_rwlock_t *rw) | ||
229 | { | ||
230 | int tmp; | ||
231 | |||
232 | asm volatile ("LNKGETD %0, [%1]\n" | ||
233 | "CMP %0, %2\n" | ||
234 | "MOV %0, #1\n" | ||
235 | "XORZ %0, %0, %0\n" | ||
236 | : "=&d" (tmp) | ||
237 | : "da" (&rw->lock), "bd" (0x80000000) | ||
238 | : "cc"); | ||
239 | return tmp; | ||
240 | } | ||
241 | |||
242 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
243 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
244 | |||
245 | #define arch_spin_relax(lock) cpu_relax() | ||
246 | #define arch_read_relax(lock) cpu_relax() | ||
247 | #define arch_write_relax(lock) cpu_relax() | ||
248 | |||
249 | #endif /* __ASM_SPINLOCK_LNKGET_H */ | ||
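LNKGET/LNKSET above is Meta's load-linked/store-conditional pair, so every loop in this file is the classic LL/SC retry pattern: load, test, conditionally store, then confirm via TXSTAT that the store won. A portable analogue of the read-lock fast path using C11 atomics, purely illustrative (compare-exchange stands in for LNKSETDPL plus the TXSTAT check):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint rwlock;  /* bit 31 = writer, low bits = reader count */

    static void model_read_lock(atomic_uint *lock)
    {
        unsigned int old;

        for (;;) {
            old = atomic_load_explicit(lock, memory_order_relaxed); /* LNKGETD */
            if (old & 0x80000000u)      /* writer holds bit 31: retry (BMI) */
                continue;
            /* LNKSETDPL + TXSTAT: store only if we still own the location */
            if (atomic_compare_exchange_weak_explicit(lock, &old, old + 1,
                                                      memory_order_acquire,
                                                      memory_order_relaxed))
                return;
        }
    }

    static void model_read_unlock(atomic_uint *lock)
    {
        atomic_fetch_sub_explicit(lock, 1, memory_order_release);
    }

    int main(void)
    {
        model_read_lock(&rwlock);
        printf("readers=%u\n", atomic_load(&rwlock));   /* 1 */
        model_read_unlock(&rwlock);
        printf("readers=%u\n", atomic_load(&rwlock));   /* 0 */
        return 0;
    }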
diff --git a/arch/metag/include/asm/spinlock_lock1.h b/arch/metag/include/asm/spinlock_lock1.h new file mode 100644 index 000000000000..c630444cffe9 --- /dev/null +++ b/arch/metag/include/asm/spinlock_lock1.h | |||
@@ -0,0 +1,184 @@ | |||
1 | #ifndef __ASM_SPINLOCK_LOCK1_H | ||
2 | #define __ASM_SPINLOCK_LOCK1_H | ||
3 | |||
4 | #include <asm/bug.h> | ||
5 | #include <asm/global_lock.h> | ||
6 | |||
7 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) | ||
8 | { | ||
9 | int ret; | ||
10 | |||
11 | barrier(); | ||
12 | ret = lock->lock; | ||
13 | WARN_ON(ret != 0 && ret != 1); | ||
14 | return ret; | ||
15 | } | ||
16 | |||
17 | static inline void arch_spin_lock(arch_spinlock_t *lock) | ||
18 | { | ||
19 | unsigned int we_won = 0; | ||
20 | unsigned long flags; | ||
21 | |||
22 | again: | ||
23 | __global_lock1(flags); | ||
24 | if (lock->lock == 0) { | ||
25 | fence(); | ||
26 | lock->lock = 1; | ||
27 | we_won = 1; | ||
28 | } | ||
29 | __global_unlock1(flags); | ||
30 | if (we_won == 0) | ||
31 | goto again; | ||
32 | WARN_ON(lock->lock != 1); | ||
33 | } | ||
34 | |||
35 | /* Returns 0 if failed to acquire lock */ | ||
36 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | ||
37 | { | ||
38 | unsigned long flags; | ||
39 | unsigned int ret; | ||
40 | |||
41 | __global_lock1(flags); | ||
42 | ret = lock->lock; | ||
43 | if (ret == 0) { | ||
44 | fence(); | ||
45 | lock->lock = 1; | ||
46 | } | ||
47 | __global_unlock1(flags); | ||
48 | return (ret == 0); | ||
49 | } | ||
50 | |||
51 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | ||
52 | { | ||
53 | barrier(); | ||
54 | WARN_ON(!lock->lock); | ||
55 | lock->lock = 0; | ||
56 | } | ||
57 | |||
58 | /* | ||
59 | * RWLOCKS | ||
60 | * | ||
61 | * | ||
62 | * Write locks are easy - we just set bit 31. When unlocking, we can | ||
63 | * just write zero since the lock is exclusively held. | ||
64 | */ | ||
65 | |||
66 | static inline void arch_write_lock(arch_rwlock_t *rw) | ||
67 | { | ||
68 | unsigned long flags; | ||
69 | unsigned int we_won = 0; | ||
70 | |||
71 | again: | ||
72 | __global_lock1(flags); | ||
73 | if (rw->lock == 0) { | ||
74 | fence(); | ||
75 | rw->lock = 0x80000000; | ||
76 | we_won = 1; | ||
77 | } | ||
78 | __global_unlock1(flags); | ||
79 | if (we_won == 0) | ||
80 | goto again; | ||
81 | WARN_ON(rw->lock != 0x80000000); | ||
82 | } | ||
83 | |||
84 | static inline int arch_write_trylock(arch_rwlock_t *rw) | ||
85 | { | ||
86 | unsigned long flags; | ||
87 | unsigned int ret; | ||
88 | |||
89 | __global_lock1(flags); | ||
90 | ret = rw->lock; | ||
91 | if (ret == 0) { | ||
92 | fence(); | ||
93 | rw->lock = 0x80000000; | ||
94 | } | ||
95 | __global_unlock1(flags); | ||
96 | |||
97 | return (ret == 0); | ||
98 | } | ||
99 | |||
100 | static inline void arch_write_unlock(arch_rwlock_t *rw) | ||
101 | { | ||
102 | barrier(); | ||
103 | WARN_ON(rw->lock != 0x80000000); | ||
104 | rw->lock = 0; | ||
105 | } | ||
106 | |||
107 | /* write_can_lock - would write_trylock() succeed? */ | ||
108 | static inline int arch_write_can_lock(arch_rwlock_t *rw) | ||
109 | { | ||
110 | unsigned int ret; | ||
111 | |||
112 | barrier(); | ||
113 | ret = rw->lock; | ||
114 | return (ret == 0); | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Read locks are a bit more hairy. Under the global lock: | ||
119 | * - Load the lock value. | ||
120 | * - If a writer holds it (bit 31 set) we have already failed, | ||
121 | * so drop the global lock and start again. | ||
122 | * - Otherwise store the incremented reader count and we are done. | ||
123 | * (arch_read_trylock() reports the failure instead of retrying.) | ||
124 | * | ||
125 | * Unlocking is similarly hairy. We may have multiple read locks | ||
126 | * currently active. However, we know we won't have any write | ||
127 | * locks. | ||
128 | */ | ||
129 | static inline void arch_read_lock(arch_rwlock_t *rw) | ||
130 | { | ||
131 | unsigned long flags; | ||
132 | unsigned int we_won = 0, ret; | ||
133 | |||
134 | again: | ||
135 | __global_lock1(flags); | ||
136 | ret = rw->lock; | ||
137 | if (ret < 0x80000000) { | ||
138 | fence(); | ||
139 | rw->lock = ret + 1; | ||
140 | we_won = 1; | ||
141 | } | ||
142 | __global_unlock1(flags); | ||
143 | if (!we_won) | ||
144 | goto again; | ||
145 | } | ||
146 | |||
147 | static inline void arch_read_unlock(arch_rwlock_t *rw) | ||
148 | { | ||
149 | unsigned long flags; | ||
150 | unsigned int ret; | ||
151 | |||
152 | __global_lock1(flags); | ||
153 | fence(); | ||
154 | ret = rw->lock--; | ||
155 | __global_unlock1(flags); | ||
156 | WARN_ON(ret == 0); | ||
157 | } | ||
158 | |||
159 | static inline int arch_read_trylock(arch_rwlock_t *rw) | ||
160 | { | ||
161 | unsigned long flags; | ||
162 | unsigned int ret; | ||
163 | |||
164 | __global_lock1(flags); | ||
165 | ret = rw->lock; | ||
166 | if (ret < 0x80000000) { | ||
167 | fence(); | ||
168 | rw->lock = ret + 1; | ||
169 | } | ||
170 | __global_unlock1(flags); | ||
171 | return (ret < 0x80000000); | ||
172 | } | ||
173 | |||
174 | /* read_can_lock - would read_trylock() succeed? */ | ||
175 | static inline int arch_read_can_lock(arch_rwlock_t *rw) | ||
176 | { | ||
177 | unsigned int ret; | ||
178 | |||
179 | barrier(); | ||
180 | ret = rw->lock; | ||
181 | return (ret < 0x80000000); | ||
182 | } | ||
183 | |||
184 | #endif /* __ASM_SPINLOCK_LOCK1_H */ | ||
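In this LOCK1 variant every atomic update funnels through a single hardware global lock (__global_lock1/__global_unlock1) rather than LL/SC. A userspace model of arch_spin_trylock() with a pthread mutex standing in for that hardware lock (model_* names are illustrative; build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t global_lock1 = PTHREAD_MUTEX_INITIALIZER;
    static volatile unsigned int lock_word;    /* arch_spinlock_t ->lock */

    static int model_spin_trylock(void)
    {
        unsigned int ret;

        pthread_mutex_lock(&global_lock1);     /* __global_lock1(flags) */
        ret = lock_word;
        if (ret == 0)
            lock_word = 1;                     /* acquired */
        pthread_mutex_unlock(&global_lock1);   /* __global_unlock1(flags) */
        return ret == 0;
    }

    int main(void)
    {
        printf("first try:  %d\n", model_spin_trylock());   /* 1: acquired */
        printf("second try: %d\n", model_spin_trylock());   /* 0: held */
        return 0;
    }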
diff --git a/arch/metag/include/asm/spinlock_types.h b/arch/metag/include/asm/spinlock_types.h new file mode 100644 index 000000000000..b76391405fea --- /dev/null +++ b/arch/metag/include/asm/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _ASM_METAG_SPINLOCK_TYPES_H | ||
2 | #define _ASM_METAG_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } arch_spinlock_t; | ||
11 | |||
12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } arch_rwlock_t; | ||
17 | |||
18 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif /* _ASM_METAG_SPINLOCK_TYPES_H */ | ||
diff --git a/arch/metag/include/asm/stacktrace.h b/arch/metag/include/asm/stacktrace.h new file mode 100644 index 000000000000..2830a0fe7ac9 --- /dev/null +++ b/arch/metag/include/asm/stacktrace.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_STACKTRACE_H | ||
2 | #define __ASM_STACKTRACE_H | ||
3 | |||
4 | struct stackframe { | ||
5 | unsigned long fp; | ||
6 | unsigned long sp; | ||
7 | unsigned long lr; | ||
8 | unsigned long pc; | ||
9 | }; | ||
10 | |||
11 | struct metag_frame { | ||
12 | unsigned long fp; | ||
13 | unsigned long lr; | ||
14 | }; | ||
15 | |||
16 | extern int unwind_frame(struct stackframe *frame); | ||
17 | extern void walk_stackframe(struct stackframe *frame, | ||
18 | int (*fn)(struct stackframe *, void *), void *data); | ||
19 | |||
20 | #endif /* __ASM_STACKTRACE_H */ | ||
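A sketch of how unwind_frame() and walk_stackframe() are meant to compose: seed a stackframe, then let the walker invoke a callback per frame until the unwinder or the callback stops it. The unwinder here is faked so the example is self-contained (the real one follows the saved fp/lr pairs of struct metag_frame); all model_* names are hypothetical:

    #include <stdio.h>

    struct stackframe {
        unsigned long fp, sp, lr, pc;
    };

    /* fake unwinder: pretend each older frame sits 0x20 bytes down */
    static int model_unwind_frame(struct stackframe *frame)
    {
        if (!frame->fp)
            return -1;          /* bottom of stack */
        frame->pc = frame->lr;
        frame->fp -= 0x20;
        frame->lr -= 0x100;
        return 0;
    }

    static void model_walk_stackframe(struct stackframe *frame,
                                      int (*fn)(struct stackframe *, void *),
                                      void *data)
    {
        for (;;) {
            if (fn(frame, data))        /* callback may stop the walk */
                break;
            if (model_unwind_frame(frame))
                break;
        }
    }

    static int print_entry(struct stackframe *frame, void *data)
    {
        (void)data;
        printf("pc=%#lx\n", frame->pc);
        return 0;
    }

    int main(void)
    {
        struct stackframe frame = { .fp = 0x60, .lr = 0x500, .pc = 0x600 };
        model_walk_stackframe(&frame, print_entry, NULL);
        return 0;
    }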
diff --git a/arch/metag/include/asm/string.h b/arch/metag/include/asm/string.h new file mode 100644 index 000000000000..53e3806eee04 --- /dev/null +++ b/arch/metag/include/asm/string.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef _METAG_STRING_H_ | ||
2 | #define _METAG_STRING_H_ | ||
3 | |||
4 | #define __HAVE_ARCH_MEMSET | ||
5 | extern void *memset(void *__s, int __c, size_t __count); | ||
6 | |||
7 | #define __HAVE_ARCH_MEMCPY | ||
8 | void *memcpy(void *__to, __const__ void *__from, size_t __n); | ||
9 | |||
10 | #define __HAVE_ARCH_MEMMOVE | ||
11 | extern void *memmove(void *__dest, __const__ void *__src, size_t __n); | ||
12 | |||
13 | #endif /* _METAG_STRING_H_ */ | ||
diff --git a/arch/metag/include/asm/switch.h b/arch/metag/include/asm/switch.h new file mode 100644 index 000000000000..1fd6a587c844 --- /dev/null +++ b/arch/metag/include/asm/switch.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_METAG_SWITCH_H | ||
11 | #define _ASM_METAG_SWITCH_H | ||
12 | |||
13 | /* metag SWITCH codes */ | ||
14 | #define __METAG_SW_PERM_BREAK 0x400002 /* compiled in breakpoint */ | ||
15 | #define __METAG_SW_SYS_LEGACY 0x440000 /* legacy system calls */ | ||
16 | #define __METAG_SW_SYS 0x440001 /* system calls */ | ||
17 | |||
18 | /* metag SWITCH instruction encoding */ | ||
19 | #define __METAG_SW_ENCODING(TYPE) (0xaf000000 | (__METAG_SW_##TYPE)) | ||
20 | |||
21 | #endif /* _ASM_METAG_SWITCH_H */ | ||
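A quick standalone check of the encoding macro: ORing the 0xaf000000 SWITCH opcode with an interface code gives the full instruction word, which is exactly what syscall_get_nr() in asm/syscall.h compares against the word fetched at the PC:

    #include <stdio.h>

    #define __METAG_SW_PERM_BREAK 0x400002
    #define __METAG_SW_SYS        0x440001
    #define __METAG_SW_ENCODING(TYPE) (0xaf000000 | (__METAG_SW_##TYPE))

    int main(void)
    {
        printf("SWITCH #SYS        = %#x\n", __METAG_SW_ENCODING(SYS));
        printf("SWITCH #PERM_BREAK = %#x\n", __METAG_SW_ENCODING(PERM_BREAK));
        return 0;   /* 0xaf440001 and 0xaf400002 respectively */
    }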
diff --git a/arch/metag/include/asm/syscall.h b/arch/metag/include/asm/syscall.h new file mode 100644 index 000000000000..24fc97939f77 --- /dev/null +++ b/arch/metag/include/asm/syscall.h | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * Access to user system call parameters and results | ||
3 | * | ||
4 | * Copyright (C) 2008 Imagination Technologies Ltd. | ||
5 | * | ||
6 | * This copyrighted material is made available to anyone wishing to use, | ||
7 | * modify, copy, or redistribute it subject to the terms and conditions | ||
8 | * of the GNU General Public License v.2. | ||
9 | * | ||
10 | * See asm-generic/syscall.h for descriptions of what we must do here. | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_METAG_SYSCALL_H | ||
14 | #define _ASM_METAG_SYSCALL_H | ||
15 | |||
16 | #include <linux/sched.h> | ||
17 | #include <linux/err.h> | ||
18 | #include <linux/uaccess.h> | ||
19 | |||
20 | #include <asm/switch.h> | ||
21 | |||
22 | static inline long syscall_get_nr(struct task_struct *task, | ||
23 | struct pt_regs *regs) | ||
24 | { | ||
25 | unsigned long insn; | ||
26 | |||
27 | /* | ||
28 | * FIXME there's no way to find out how we got here other than to | ||
29 | * examine the memory at the PC to see if it is a syscall | ||
30 | * SWITCH instruction. | ||
31 | */ | ||
32 | if (get_user(insn, (unsigned long *)(regs->ctx.CurrPC - 4))) | ||
33 | return -1; | ||
34 | |||
35 | if (insn == __METAG_SW_ENCODING(SYS)) | ||
36 | return regs->ctx.DX[0].U1; | ||
37 | else | ||
38 | return -1L; | ||
39 | } | ||
40 | |||
41 | static inline void syscall_rollback(struct task_struct *task, | ||
42 | struct pt_regs *regs) | ||
43 | { | ||
44 | /* do nothing */ | ||
45 | } | ||
46 | |||
47 | static inline long syscall_get_error(struct task_struct *task, | ||
48 | struct pt_regs *regs) | ||
49 | { | ||
50 | unsigned long error = regs->ctx.DX[0].U0; | ||
51 | return IS_ERR_VALUE(error) ? error : 0; | ||
52 | } | ||
53 | |||
54 | static inline long syscall_get_return_value(struct task_struct *task, | ||
55 | struct pt_regs *regs) | ||
56 | { | ||
57 | return regs->ctx.DX[0].U0; | ||
58 | } | ||
59 | |||
60 | static inline void syscall_set_return_value(struct task_struct *task, | ||
61 | struct pt_regs *regs, | ||
62 | int error, long val) | ||
63 | { | ||
64 | regs->ctx.DX[0].U0 = (long) error ?: val; | ||
65 | } | ||
66 | |||
67 | static inline void syscall_get_arguments(struct task_struct *task, | ||
68 | struct pt_regs *regs, | ||
69 | unsigned int i, unsigned int n, | ||
70 | unsigned long *args) | ||
71 | { | ||
72 | unsigned int reg, j; | ||
73 | BUG_ON(i + n > 6); | ||
74 | |||
75 | for (j = i, reg = 6 - i; j < (i + n); j++, reg--) { | ||
76 | if (reg % 2) | ||
77 | args[j] = regs->ctx.DX[(reg + 1) / 2].U0; | ||
78 | else | ||
79 | args[j] = regs->ctx.DX[reg / 2].U1; | ||
80 | } | ||
81 | } | ||
82 | |||
83 | static inline void syscall_set_arguments(struct task_struct *task, | ||
84 | struct pt_regs *regs, | ||
85 | unsigned int i, unsigned int n, | ||
86 | const unsigned long *args) | ||
87 | { | ||
88 | unsigned int reg, j; | ||
89 | BUG_ON(i + n > 6); | ||
90 | |||
91 | for (j = i, reg = 6 - i; j < (i + n); j++, reg--) { /* j as in get_arguments(); looping on i itself would never end */ | ||
92 | if (reg % 2) | ||
93 | regs->ctx.DX[(reg + 1) / 2].U0 = args[j]; | ||
94 | else | ||
95 | regs->ctx.DX[reg / 2].U1 = args[j]; | ||
96 | } | ||
97 | } | ||
98 | |||
99 | #define NR_syscalls __NR_syscalls | ||
100 | |||
101 | /* generic syscall table */ | ||
102 | extern const void *sys_call_table[]; | ||
103 | |||
104 | #endif /* _ASM_METAG_SYSCALL_H */ | ||
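The reg = 6 - i countdown in the argument accessors above maps syscall arguments onto alternating 32-bit halves of the DX register pairs, consistent with start_thread() placing argc in DX[3].U1 and argv in DX[3].U0. A standalone check of just that index arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < 6; i++) {
            unsigned int reg = 6 - i;
            if (reg % 2)
                printf("arg%u -> DX[%u].U0\n", i, (reg + 1) / 2);
            else
                printf("arg%u -> DX[%u].U1\n", i, reg / 2);
        }
        return 0;   /* arg0..arg5 -> DX[3].U1, DX[3].U0, DX[2].U1, ... */
    }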
diff --git a/arch/metag/include/asm/syscalls.h b/arch/metag/include/asm/syscalls.h new file mode 100644 index 000000000000..a02b95556522 --- /dev/null +++ b/arch/metag/include/asm/syscalls.h | |||
@@ -0,0 +1,39 @@ | |||
1 | #ifndef _ASM_METAG_SYSCALLS_H | ||
2 | #define _ASM_METAG_SYSCALLS_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/linkage.h> | ||
6 | #include <linux/types.h> | ||
7 | #include <linux/signal.h> | ||
8 | |||
9 | /* kernel/signal.c */ | ||
10 | #define sys_rt_sigreturn sys_rt_sigreturn | ||
11 | asmlinkage long sys_rt_sigreturn(void); | ||
12 | |||
13 | #include <asm-generic/syscalls.h> | ||
14 | |||
15 | /* kernel/sys_metag.c */ | ||
16 | asmlinkage int sys_metag_setglobalbit(char __user *, int); | ||
17 | asmlinkage void sys_metag_set_fpu_flags(unsigned int); | ||
18 | asmlinkage int sys_metag_set_tls(void __user *); | ||
19 | asmlinkage void *sys_metag_get_tls(void); | ||
20 | |||
21 | asmlinkage long sys_truncate64_metag(const char __user *, unsigned long, | ||
22 | unsigned long); | ||
23 | asmlinkage long sys_ftruncate64_metag(unsigned int, unsigned long, | ||
24 | unsigned long); | ||
25 | asmlinkage long sys_fadvise64_64_metag(int, unsigned long, unsigned long, | ||
26 | unsigned long, unsigned long, int); | ||
27 | asmlinkage long sys_readahead_metag(int, unsigned long, unsigned long, size_t); | ||
28 | asmlinkage ssize_t sys_pread64_metag(unsigned long, char __user *, size_t, | ||
29 | unsigned long, unsigned long); | ||
30 | asmlinkage ssize_t sys_pwrite64_metag(unsigned long, char __user *, size_t, | ||
31 | unsigned long, unsigned long); | ||
32 | asmlinkage long sys_sync_file_range_metag(int, unsigned long, unsigned long, | ||
33 | unsigned long, unsigned long, | ||
34 | unsigned int); | ||
35 | |||
36 | int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, | ||
37 | int syscall); | ||
38 | |||
39 | #endif /* _ASM_METAG_SYSCALLS_H */ | ||
diff --git a/arch/metag/include/asm/tbx.h b/arch/metag/include/asm/tbx.h new file mode 100644 index 000000000000..287b36ff8ad1 --- /dev/null +++ b/arch/metag/include/asm/tbx.h | |||
@@ -0,0 +1,1425 @@ | |||
1 | /* | ||
2 | * asm/tbx.h | ||
3 | * | ||
4 | * Copyright (C) 2000-2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Thread binary interface header | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_METAG_TBX_H_ | ||
14 | #define _ASM_METAG_TBX_H_ | ||
15 | |||
16 | /* for CACHEW_* values */ | ||
17 | #include <asm/metag_isa.h> | ||
18 | /* for LINSYSEVENT_* addresses */ | ||
19 | #include <asm/metag_mem.h> | ||
20 | |||
21 | #ifdef TBI_1_4 | ||
22 | #ifndef TBI_MUTEXES_1_4 | ||
23 | #define TBI_MUTEXES_1_4 | ||
24 | #endif | ||
25 | #ifndef TBI_SEMAPHORES_1_4 | ||
26 | #define TBI_SEMAPHORES_1_4 | ||
27 | #endif | ||
28 | #ifndef TBI_ASYNC_SWITCH_1_4 | ||
29 | #define TBI_ASYNC_SWITCH_1_4 | ||
30 | #endif | ||
31 | #ifndef TBI_FASTINT_1_4 | ||
32 | #define TBI_FASTINT_1_4 | ||
33 | #endif | ||
34 | #endif | ||
35 | |||
36 | |||
37 | /* Id values in the TBI system describe a segment using an arbitrary | ||
38 | integer value and flags in the bottom 8 bits; the SIGPOLL value is | ||
39 | used in cases where control over blocking or polling behaviour is | ||
40 | needed. */ | ||
41 | #define TBID_SIGPOLL_BIT 0x02 /* Set bit in an Id value to poll vs block */ | ||
42 | /* Extended segment identifiers use strings in the string table */ | ||
43 | #define TBID_IS_SEGSTR( Id ) (((Id) & (TBID_SEGTYPE_BITS>>1)) == 0) | ||
44 | |||
45 | /* Segment identifiers contain the following related bit-fields */ | ||
46 | #define TBID_SEGTYPE_BITS 0x0F /* One of the predefined segment types */ | ||
47 | #define TBID_SEGTYPE_S 0 | ||
48 | #define TBID_SEGSCOPE_BITS 0x30 /* Indicates the scope of the segment */ | ||
49 | #define TBID_SEGSCOPE_S 4 | ||
50 | #define TBID_SEGGADDR_BITS 0xC0 /* Indicates access possible via pGAddr */ | ||
51 | #define TBID_SEGGADDR_S 6 | ||
52 | |||
53 | /* Segments of memory can only really contain a few types of data */ | ||
54 | #define TBID_SEGTYPE_TEXT 0x02 /* Code segment */ | ||
55 | #define TBID_SEGTYPE_DATA 0x04 /* Data segment */ | ||
56 | #define TBID_SEGTYPE_STACK 0x06 /* Stack segment */ | ||
57 | #define TBID_SEGTYPE_HEAP 0x0A /* Heap segment */ | ||
58 | #define TBID_SEGTYPE_ROOT 0x0C /* Root block segments */ | ||
59 | #define TBID_SEGTYPE_STRING 0x0E /* String table segment */ | ||
60 | |||
61 | /* Segments have one of three possible scopes */ | ||
62 | #define TBID_SEGSCOPE_INIT 0 /* Temporary area for initialisation phase */ | ||
63 | #define TBID_SEGSCOPE_LOCAL 1 /* Private to this thread */ | ||
64 | #define TBID_SEGSCOPE_GLOBAL 2 /* Shared globally throughout the system */ | ||
65 | #define TBID_SEGSCOPE_SHARED 3 /* Limited sharing between local/global */ | ||
66 | |||
67 | /* For a segment specifier, a further field in two of the remaining bits | ||
68 | indicates the usefulness of the pGAddr field in the segment | ||
69 | descriptor. */ | ||
70 | #define TBID_SEGGADDR_NULL 0 /* pGAddr is NULL -> SEGSCOPE_(LOCAL|INIT) */ | ||
71 | #define TBID_SEGGADDR_READ 1 /* Only read via pGAddr */ | ||
72 | #define TBID_SEGGADDR_WRITE 2 /* Full access via pGAddr */ | ||
73 | #define TBID_SEGGADDR_EXEC 3 /* Only execute via pGAddr */ | ||
74 | |||
75 | /* The following values are common to both segment and signal Id values and | ||
76 | live in the top 8 bits of the Id values. */ | ||
77 | |||
78 | /* The ISTAT bit indicates if segments are related to interrupt vs | ||
79 | background level interfaces; a thread can still handle all triggers at | ||
80 | either level, but can also split these up if it wants to. */ | ||
81 | #define TBID_ISTAT_BIT 0x01000000 | ||
82 | #define TBID_ISTAT_S 24 | ||
83 | |||
84 | /* Privilege needed to access a segment is indicated by the next bit. | ||
85 | |||
86 | This bit is set to mirror the current privilege level when starting a | ||
87 | search for a segment - setting it yourself toggles the automatically | ||
88 | generated state, which is only useful to emulate unprivileged behaviour | ||
89 | or access unprivileged areas of memory while at privileged level. */ | ||
90 | #define TBID_PSTAT_BIT 0x02000000 | ||
91 | #define TBID_PSTAT_S 25 | ||
92 | |||
93 | /* The top six bits of a signal/segment specifier identifies a thread within | ||
94 | the system. This represents a segment's owner. */ | ||
95 | #define TBID_THREAD_BITS 0xFC000000 | ||
96 | #define TBID_THREAD_S 26 | ||
97 | |||
98 | /* Special thread id values */ | ||
99 | #define TBID_THREAD_NULL (-32) /* Never matches any thread/segment id used */ | ||
100 | #define TBID_THREAD_GLOBAL (-31) /* Things global to all threads */ | ||
101 | #define TBID_THREAD_HOST ( -1) /* Host interface */ | ||
102 | #define TBID_THREAD_EXTIO (TBID_THREAD_HOST) /* Host based ExtIO i/f */ | ||
103 | |||
104 | /* Virtual Ids are used for external thread interface structures or the | ||
105 | above special Ids */ | ||
106 | #define TBID_IS_VIRTTHREAD( Id ) ((Id) < 0) | ||
107 | |||
108 | /* Real Ids are used for actual hardware threads that are local */ | ||
109 | #define TBID_IS_REALTHREAD( Id ) ((Id) >= 0) | ||
110 | |||
111 | /* Generate a segment Id given Thread, Scope, and Type */ | ||
112 | #define TBID_SEG( Thread, Scope, Type ) (\ | ||
113 | ((Thread)<<TBID_THREAD_S) + ((Scope)<<TBID_SEGSCOPE_S) + (Type)) | ||
114 | |||
115 | /* Generate a signal Id given Thread and SigNum */ | ||
116 | #define TBID_SIG( Thread, SigNum ) (\ | ||
117 | ((Thread)<<TBID_THREAD_S) + ((SigNum)<<TBID_SIGNUM_S) + TBID_SIGNAL_BIT) | ||
118 | |||
119 | /* Generate an Id that solely represents a thread - useful for cache ops */ | ||
120 | #define TBID_THD( Thread ) ((Thread)<<TBID_THREAD_S) | ||
121 | #define TBID_THD_NULL ((TBID_THREAD_NULL) <<TBID_THREAD_S) | ||
122 | #define TBID_THD_GLOBAL ((TBID_THREAD_GLOBAL)<<TBID_THREAD_S) | ||
123 | |||
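For illustration, the composition macros above combine the fields like this (a minimal sketch; the TBID_*_S shift constants are defined earlier in this header):

    /* Sketch: segment Id for hardware thread 2's local data segment */
    int SegId = TBID_SEG(2, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_DATA);

    /* Composed with a real hardware thread, so TBID_IS_REALTHREAD(SegId)
       holds; TBID_THD_GLOBAL would satisfy TBID_IS_VIRTTHREAD instead. */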
124 | /* Common exception handler (see TBID_SIGNUM_XXF below) receives hardware | ||
125 | generated fault codes TBIXXF_SIGNUM_xxF in its SigNum parameter */ | ||
126 | #define TBIXXF_SIGNUM_IIF 0x01 /* General instruction fault */ | ||
127 | #define TBIXXF_SIGNUM_PGF 0x02 /* Privilege general fault */ | ||
128 | #define TBIXXF_SIGNUM_DHF 0x03 /* Data access watchpoint HIT */ | ||
129 | #define TBIXXF_SIGNUM_IGF 0x05 /* Code fetch general read failure */ | ||
130 | #define TBIXXF_SIGNUM_DGF 0x07 /* Data access general read/write fault */ | ||
131 | #define TBIXXF_SIGNUM_IPF 0x09 /* Code fetch page fault */ | ||
132 | #define TBIXXF_SIGNUM_DPF 0x0B /* Data access page fault */ | ||
133 | #define TBIXXF_SIGNUM_IHF 0x0D /* Instruction breakpoint HIT */ | ||
134 | #define TBIXXF_SIGNUM_DWF 0x0F /* Data access read-only fault */ | ||
135 | |||
136 | /* Hardware signals communicate events between processing levels within a | ||
137 | single thread. All the _xxF cases are exceptions and are routed via a | ||
138 | common exception handler, _SWx are software trap events and kicks (including | ||
139 | __TBISignal generated kicks), and finally _TRx are hardware triggers */ | ||
140 | #define TBID_SIGNUM_SW0 0x00 /* SWITCH GROUP 0 - Per thread user */ | ||
141 | #define TBID_SIGNUM_SW1 0x01 /* SWITCH GROUP 1 - Per thread system */ | ||
142 | #define TBID_SIGNUM_SW2 0x02 /* SWITCH GROUP 2 - Internal global request */ | ||
143 | #define TBID_SIGNUM_SW3 0x03 /* SWITCH GROUP 3 - External global request */ | ||
144 | #ifdef TBI_1_4 | ||
145 | #define TBID_SIGNUM_FPE 0x04 /* Deferred exception - Any IEEE 754 exception */ | ||
146 | #define TBID_SIGNUM_FPD 0x05 /* Deferred exception - Denormal exception */ | ||
147 | /* Reserved 0x6 for a reserved deferred exception */ | ||
148 | #define TBID_SIGNUM_BUS 0x07 /* Deferred exception - Bus Error */ | ||
149 | /* Reserved 0x08-0x09 */ | ||
150 | #else | ||
151 | /* Reserved 0x04-0x09 */ | ||
152 | #endif | ||
153 | #define TBID_SIGNUM_SWS 0x0A /* KICK received with SigMask != 0 */ | ||
154 | #define TBID_SIGNUM_SWK 0x0B /* KICK received with SigMask == 0 */ | ||
155 | /* Reserved 0x0C-0x0F */ | ||
156 | #define TBID_SIGNUM_TRT 0x10 /* Timer trigger */ | ||
157 | #define TBID_SIGNUM_LWK 0x11 /* Low level kick (handler provided by TBI) */ | ||
158 | #define TBID_SIGNUM_XXF 0x12 /* Fault handler - receives ALL _xxF sigs */ | ||
159 | #ifdef TBI_1_4 | ||
160 | #define TBID_SIGNUM_DFR 0x13 /* Deferred Exception handler */ | ||
161 | #else | ||
162 | #define TBID_SIGNUM_FPE 0x13 /* FPE Exception handler */ | ||
163 | #endif | ||
164 | /* External trigger one group 0x14 to 0x17 - per thread */ | ||
165 | #define TBID_SIGNUM_TR1(Thread) (0x14+(Thread)) | ||
166 | #define TBID_SIGNUM_T10 0x14 | ||
167 | #define TBID_SIGNUM_T11 0x15 | ||
168 | #define TBID_SIGNUM_T12 0x16 | ||
169 | #define TBID_SIGNUM_T13 0x17 | ||
170 | /* External trigger two group 0x18 to 0x1b - per thread */ | ||
171 | #define TBID_SIGNUM_TR2(Thread) (0x18+(Thread)) | ||
172 | #define TBID_SIGNUM_T20 0x18 | ||
173 | #define TBID_SIGNUM_T21 0x19 | ||
174 | #define TBID_SIGNUM_T22 0x1A | ||
175 | #define TBID_SIGNUM_T23 0x1B | ||
176 | #define TBID_SIGNUM_TR3 0x1C /* External trigger N-4 (global) */ | ||
177 | #define TBID_SIGNUM_TR4 0x1D /* External trigger N-3 (global) */ | ||
178 | #define TBID_SIGNUM_TR5 0x1E /* External trigger N-2 (global) */ | ||
179 | #define TBID_SIGNUM_TR6 0x1F /* External trigger N-1 (global) */ | ||
180 | #define TBID_SIGNUM_MAX 0x1F | ||
181 | |||
182 | /* Return the trigger register (TXMASK[I]/TXSTAT[I]) bits related to | ||
183 | each hardware signal; sometimes this is a many-to-one relationship. */ | ||
184 | #define TBI_TRIG_BIT(SigNum) (\ | ||
185 | ((SigNum) >= TBID_SIGNUM_TRT) ? 1<<((SigNum)-TBID_SIGNUM_TRT) :\ | ||
186 | ( ((SigNum) == TBID_SIGNUM_SWS) || \ | ||
187 | ((SigNum) == TBID_SIGNUM_SWK) ) ? \ | ||
188 | TXSTAT_KICK_BIT : TXSTATI_BGNDHALT_BIT ) | ||
189 | |||
190 | /* Return the hardware trigger vector number for entries in the | ||
191 | HWVEC0EXT table that will generate the required internal trigger. */ | ||
192 | #define TBI_TRIG_VEC(SigNum) (\ | ||
193 | ((SigNum) >= TBID_SIGNUM_T10) ? ((SigNum)-TBID_SIGNUM_TRT) : -1) | ||
194 | |||
195 | /* Default trigger masks for each thread at background/interrupt level */ | ||
196 | #define TBI_TRIGS_INIT( Thread ) (\ | ||
197 | TXSTAT_KICK_BIT + TBI_TRIG_BIT(TBID_SIGNUM_TR1(Thread)) ) | ||
198 | #define TBI_INTS_INIT( Thread ) (\ | ||
199 | TXSTAT_KICK_BIT + TXSTATI_BGNDHALT_BIT \ | ||
200 | + TBI_TRIG_BIT(TBID_SIGNUM_TR2(Thread)) ) | ||
201 | |||
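As a worked example of the mapping above (a sketch; TXSTAT_KICK_BIT and TXSTATI_BGNDHALT_BIT come from machine.h):

    /* TBI_TRIG_BIT(TBID_SIGNUM_TRT) == 1<<0  (first trigger register bit) */
    /* TBI_TRIG_BIT(TBID_SIGNUM_T10) == 1<<4  (0x14 - 0x10)                */
    /* TBI_TRIG_BIT(TBID_SIGNUM_SWK) == TXSTAT_KICK_BIT (many-to-one case) */
    /* TBI_TRIG_VEC(TBID_SIGNUM_T10) == 4; below T10 the result is -1      */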
202 | #ifndef __ASSEMBLY__ | ||
203 | /* A spin-lock location is a zero-initialised location in memory */ | ||
204 | typedef volatile int TBISPIN, *PTBISPIN; | ||
205 | |||
206 | /* A kick location is a hardware location you can write to | ||
207 | * in order to cause a kick | ||
208 | */ | ||
209 | typedef volatile int *PTBIKICK; | ||
210 | |||
211 | #if defined(METAC_1_0) || defined(METAC_1_1) | ||
212 | /* Macro to perform a kick */ | ||
213 | #define TBI_KICK( pKick ) do { pKick[0] = 1; } while (0) | ||
214 | #else | ||
215 | /* #define METAG_LIN_VALUES before including machine.h if required */ | ||
216 | #ifdef LINSYSEVENT_WR_COMBINE_FLUSH | ||
217 | /* Macro to perform a kick - write combiners must be flushed */ | ||
218 | #define TBI_KICK( pKick ) do {\ | ||
219 | volatile int *pFlush = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH; \ | ||
220 | pFlush[0] = 0; \ | ||
221 | pKick[0] = 1; } while (0) | ||
222 | #endif | ||
223 | #endif /* if defined(METAC_1_0) || defined(METAC_1_1) */ | ||
224 | #endif /* ifndef __ASSEMBLY__ */ | ||
225 | |||
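In C code a kick can then be delivered as in this sketch (assuming pTBI points at the thread's TBI root block, defined later in this header, which publishes the kick address):

    PTBIKICK pKick = pTBI->pKick;   /* kick address for S/W events */
    TBI_KICK(pKick);                /* raise a KICK on that thread */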
226 | #ifndef __ASSEMBLY__ | ||
227 | /* 64-bit dual unit state value */ | ||
228 | typedef struct _tbidual_tag_ { | ||
229 | /* 32-bit value from a pair of registers in data or address units */ | ||
230 | int U0, U1; | ||
231 | } TBIDUAL, *PTBIDUAL; | ||
232 | #endif /* ifndef __ASSEMBLY__ */ | ||
233 | |||
234 | /* Byte offsets of fields within TBIDUAL */ | ||
235 | #define TBIDUAL_U0 (0) | ||
236 | #define TBIDUAL_U1 (4) | ||
237 | |||
238 | #define TBIDUAL_BYTES (8) | ||
239 | |||
240 | #define TBICTX_CRIT_BIT 0x0001 /* ASync state saved in TBICTX */ | ||
241 | #define TBICTX_SOFT_BIT 0x0002 /* Sync state saved in TBICTX (other bits 0) */ | ||
242 | #ifdef TBI_FASTINT_1_4 | ||
243 | #define TBICTX_FINT_BIT 0x0004 /* Using Fast Interrupts */ | ||
244 | #endif | ||
245 | #define TBICTX_FPAC_BIT 0x0010 /* FPU state in TBICTX, FPU active on entry */ | ||
246 | #define TBICTX_XMCC_BIT 0x0020 /* Bit to identify a MECC task */ | ||
247 | #define TBICTX_CBUF_BIT 0x0040 /* Hardware catch buffer flag from TXSTATUS */ | ||
248 | #define TBICTX_CBRP_BIT 0x0080 /* Read pipeline dirty from TXDIVTIME */ | ||
249 | #define TBICTX_XDX8_BIT 0x0100 /* Saved DX.8 to DX.15 too */ | ||
250 | #define TBICTX_XAXX_BIT 0x0200 /* Saved remaining AX registers to AX.7 */ | ||
251 | #define TBICTX_XHL2_BIT 0x0400 /* Saved hardware loop registers too */ | ||
252 | #define TBICTX_XTDP_BIT 0x0800 /* Saved DSP registers too */ | ||
253 | #define TBICTX_XEXT_BIT 0x1000 /* Set if TBICTX.Ext.Ctx contains extended | ||
254 | state save area, otherwise TBICTX.Ext.AX2 | ||
255 | just holds normal A0.2 and A1.2 states */ | ||
256 | #define TBICTX_WAIT_BIT 0x2000 /* Causes wait for trigger - sticky toggle */ | ||
257 | #define TBICTX_XCBF_BIT 0x4000 /* Catch buffer or RD extracted into TBICTX */ | ||
258 | #define TBICTX_PRIV_BIT 0x8000 /* Set if system uses 'privileged' model */ | ||
259 | |||
260 | #ifdef METAC_1_0 | ||
261 | #define TBICTX_XAX3_BIT 0x0200 /* Saved AX.5 to AX.7 for XAXX */ | ||
262 | #define TBICTX_AX_REGS 5 /* Ax.0 to Ax.4 are core GP regs on CHORUS */ | ||
263 | #else | ||
264 | #define TBICTX_XAX4_BIT 0x0200 /* Saved AX.4 to AX.7 for XAXX */ | ||
265 | #define TBICTX_AX_REGS 4 /* Default is Ax.0 to Ax.3 */ | ||
266 | #endif | ||
267 | |||
268 | #ifdef TBI_1_4 | ||
269 | #define TBICTX_CFGFPU_FX16_BIT 0x00010000 /* Save FX.8 to FX.15 too */ | ||
270 | |||
271 | /* The METAC_CORE_ID_CONFIG field indicates omitted DSP resources */ | ||
272 | #define METAC_COREID_CFGXCTX_MASK( Value ) (\ | ||
273 | ( (((Value & METAC_COREID_CFGDSP_BITS)>> \ | ||
274 | METAC_COREID_CFGDSP_S ) == METAC_COREID_CFGDSP_MIN) ? \ | ||
275 | ~(TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+ \ | ||
276 | TBICTX_XAXX_BIT+TBICTX_XDX8_BIT ) : ~0U ) ) | ||
277 | #endif | ||
278 | |||
279 | /* Extended context state provides a standardised method for registering the | ||
280 | arguments required by __TBICtxSave to save the additional register states | ||
281 | currently in use by non general purpose code. The state of the __TBIExtCtx | ||
282 | variable in the static space of the thread forms an extension of the base | ||
283 | context of the thread. | ||
284 | |||
285 | If ( __TBIExtCtx.Ctx.SaveMask == 0 ) then pExt is assumed to be NULL and | ||
286 | the empty state of __TBIExtCtx is represented by the fact that | ||
287 | TBICTX.SaveMask does not have the bit TBICTX_XEXT_BIT set. | ||
288 | |||
289 | If ( __TBIExtCtx.Ctx.SaveMask != 0 ) then pExt should point at a suitably | ||
290 | sized extended context save area (usually at the end of the stack space | ||
291 | allocated by the current routine). This space should allow for the | ||
292 | displaced state of A0.2 and A1.2 to be saved along with the other extended | ||
293 | states indicated via __TBIExtCtx.Ctx.SaveMask. */ | ||
294 | #ifndef __ASSEMBLY__ | ||
295 | typedef union _tbiextctx_tag_ { | ||
296 | long long Val; | ||
297 | TBIDUAL AX2; | ||
298 | struct _tbiextctxext_tag { | ||
299 | #ifdef TBI_1_4 | ||
300 | short DspramSizes; /* DSPRAM sizes. Encoding varies between | ||
301 | TBICtxAlloc and the ECH scheme. */ | ||
302 | #else | ||
303 | short Reserved0; | ||
304 | #endif | ||
305 | short SaveMask; /* Flag bits for state saved */ | ||
306 | PTBIDUAL pExt; /* AX[2] state saved first plus Xxxx state */ | ||
307 | |||
308 | } Ctx; | ||
309 | |||
310 | } TBIEXTCTX, *PTBIEXTCTX; | ||
311 | |||
312 | /* Automatic registration of extended context save for __TBINestInts */ | ||
313 | extern TBIEXTCTX __TBIExtCtx; | ||
314 | #endif /* ifndef __ASSEMBLY__ */ | ||
315 | |||
316 | /* Byte offsets of fields within TBIEXTCTX */ | ||
317 | #define TBIEXTCTX_AX2 (0) | ||
318 | #define TBIEXTCTX_Ctx (0) | ||
319 | #define TBIEXTCTX_Ctx_SaveMask (TBIEXTCTX_Ctx + 2) | ||
320 | #define TBIEXTCTX_Ctx_pExt (TBIEXTCTX_Ctx + 2 + 2) | ||
321 | |||
322 | /* Extended context data size calculation constants */ | ||
323 | #define TBICTXEXT_BYTES (8) | ||
324 | #define TBICTXEXTBB8_BYTES (8*8) | ||
325 | #define TBICTXEXTAX3_BYTES (3*8) | ||
326 | #define TBICTXEXTAX4_BYTES (4*8) | ||
327 | #ifdef METAC_1_0 | ||
328 | #define TBICTXEXTAXX_BYTES TBICTXEXTAX3_BYTES | ||
329 | #else | ||
330 | #define TBICTXEXTAXX_BYTES TBICTXEXTAX4_BYTES | ||
331 | #endif | ||
332 | #define TBICTXEXTHL2_BYTES (3*8) | ||
333 | #define TBICTXEXTTDR_BYTES (27*8) | ||
334 | #define TBICTXEXTTDP_BYTES TBICTXEXTTDR_BYTES | ||
335 | |||
336 | #ifdef TBI_1_4 | ||
337 | #define TBICTXEXTFX8_BYTES (4*8) | ||
338 | #define TBICTXEXTFPAC_BYTES (1*4 + 2*2 + 4*8) | ||
339 | #define TBICTXEXTFACF_BYTES (3*8) | ||
340 | #endif | ||
341 | |||
342 | /* Maximum flag bits to be set via the TBICTX_EXTSET macro */ | ||
343 | #define TBICTXEXT_MAXBITS (TBICTX_XEXT_BIT| \ | ||
344 | TBICTX_XDX8_BIT|TBICTX_XAXX_BIT|\ | ||
345 | TBICTX_XHL2_BIT|TBICTX_XTDP_BIT ) | ||
346 | |||
347 | /* Maximum size of the extended context save area for current variant */ | ||
348 | #define TBICTXEXT_MAXBYTES (TBICTXEXT_BYTES+TBICTXEXTBB8_BYTES+\ | ||
349 | TBICTXEXTAXX_BYTES+TBICTXEXTHL2_BYTES+\ | ||
350 | TBICTXEXTTDP_BYTES ) | ||
351 | |||
352 | #ifdef TBI_FASTINT_1_4 | ||
353 | /* Maximum flag bits to be set via the TBICTX_EXTSET macro */ | ||
354 | #define TBICTX2EXT_MAXBITS (TBICTX_XDX8_BIT|TBICTX_XAXX_BIT|\ | ||
355 | TBICTX_XHL2_BIT|TBICTX_XTDP_BIT ) | ||
356 | |||
357 | /* Maximum size of the extended context save area for current variant */ | ||
358 | #define TBICTX2EXT_MAXBYTES (TBICTXEXTBB8_BYTES+TBICTXEXTAXX_BYTES\ | ||
359 | +TBICTXEXTHL2_BYTES+TBICTXEXTTDP_BYTES ) | ||
360 | #endif | ||
361 | |||
362 | /* Specify extended resources being used by current routine, code must be | ||
363 | assembler generated to utilise extended resources- | ||
364 | |||
365 | MOV D0xxx,A0StP ; Perform alloca - routine should | ||
366 | ADD A0StP,A0StP,#SaveSize ; setup/use A0FrP to access locals | ||
367 | MOVT D1xxx,#SaveMask ; TBICTX_XEXT_BIT MUST be set | ||
368 | SETL [A1GbP+#OG(___TBIExtCtx)],D0xxx,D1xxx | ||
369 | |||
370 | NB: OG(___TBIExtCtx) is a special case supported for SETL/GETL operations | ||
371 | on 64-bit sized structures only; other accesses must be based on use | ||
372 | of OGA(___TBIExtCtx). | ||
373 | |||
374 | At exit of routine- | ||
375 | |||
376 | MOV D0xxx,#0 ; Clear extended context save state | ||
377 | MOV D1xxx,#0 | ||
378 | SETL [A1GbP+#OG(___TBIExtCtx)],D0xxx,D1xxx | ||
379 | SUB A0StP,A0StP,#SaveSize ; If original A0StP required | ||
380 | |||
381 | NB: Both the setting and clearing of the whole __TBIExtCtx MUST be done | ||
382 | atomically in one 64-bit write operation. | ||
383 | |||
384 | For simple interrupt handling only via __TBINestInts there should be no | ||
385 | impact of the __TBIExtCtx system. If pre-emptive scheduling is being | ||
386 | performed however (assuming __TBINestInts has already been called earlier | ||
387 | on) then the following logic will correctly call __TBICtxSave if required | ||
388 | and clear out the currently selected background task- | ||
389 | |||
390 | if ( __TBIExtCtx.Ctx.SaveMask & TBICTX_XEXT_BIT ) | ||
391 | { | ||
392 | / * Store extended states in pCtx * / | ||
393 | State.Sig.SaveMask |= __TBIExtCtx.Ctx.SaveMask; | ||
394 | |||
395 | (void) __TBICtxSave( State, (void *) __TBIExtCtx.Ctx.pExt ); | ||
396 | __TBIExtCtx.Val = 0; | ||
397 | } | ||
398 | |||
399 | and when restoring task states call __TBICtxRestore- | ||
400 | |||
401 | / * Restore state from pCtx * / | ||
402 | State.Sig.pCtx = pCtx; | ||
403 | State.Sig.SaveMask = pCtx->SaveMask; | ||
404 | |||
405 | if ( State.Sig.SaveMask & TBICTX_XEXT_BIT ) | ||
406 | { | ||
407 | / * Restore extended states from pCtx * / | ||
408 | __TBIExtCtx.Val = pCtx->Ext.Val; | ||
409 | |||
410 | (void) __TBICtxRestore( State, (void *) __TBIExtCtx.Ctx.pExt ); | ||
411 | } | ||
412 | |||
413 | */ | ||
414 | |||
415 | /* Critical thread state save area */ | ||
416 | #ifndef __ASSEMBLY__ | ||
417 | typedef struct _tbictx_tag_ { | ||
418 | /* TXSTATUS_FLAG_BITS and TXSTATUS_LSM_STEP_BITS from TXSTATUS */ | ||
419 | short Flags; | ||
420 | /* Mask indicates any extended context state saved; 0 -> Never run */ | ||
421 | short SaveMask; | ||
422 | /* Saved PC value */ | ||
423 | int CurrPC; | ||
424 | /* Saved critical register states */ | ||
425 | TBIDUAL DX[8]; | ||
426 | /* Background control register states - for cores without catch buffer | ||
427 | base in DIVTIME the TXSTATUS bits RPVALID and RPMASK are stored with | ||
428 | the real state TXDIVTIME in CurrDIVTIME */ | ||
429 | int CurrRPT, CurrBPOBITS, CurrMODE, CurrDIVTIME; | ||
430 | /* Saved AX register states */ | ||
431 | TBIDUAL AX[2]; | ||
432 | TBIEXTCTX Ext; | ||
433 | TBIDUAL AX3[TBICTX_AX_REGS-3]; | ||
434 | |||
435 | /* Any CBUF state to be restored by a handler return must be stored here. | ||
436 | Other extended state can be stored anywhere - see __TBICtxSave and | ||
437 | __TBICtxRestore. */ | ||
438 | |||
439 | } TBICTX, *PTBICTX; | ||
440 | |||
441 | #ifdef TBI_FASTINT_1_4 | ||
442 | typedef struct _tbictx2_tag_ { | ||
443 | TBIDUAL AX[2]; /* AU.0, AU.1 */ | ||
444 | TBIDUAL DX[2]; /* DU.0, DU.4 */ | ||
445 | int CurrMODE; | ||
446 | int CurrRPT; | ||
447 | int CurrSTATUS; | ||
448 | void *CurrPC; /* PC in PC address space */ | ||
449 | } TBICTX2, *PTBICTX2; | ||
450 | /* TBICTX2 is followed by: | ||
451 | * TBICTXEXTCB0 if TXSTATUS.CBMarker | ||
452 | * TBIDUAL * TXSTATUS.IRPCount if TXSTATUS.IRPCount > 0 | ||
453 | * TBICTXGP if using __TBIStdRootIntHandler or __TBIStdCtxSwitchRootIntHandler | ||
454 | */ | ||
455 | |||
456 | typedef struct _tbictxgp_tag_ { | ||
457 | short DspramSizes; | ||
458 | short SaveMask; | ||
459 | void *pExt; | ||
460 | TBIDUAL DX[6]; /* DU.1-DU.3, DU.5-DU.7 */ | ||
461 | TBIDUAL AX[2]; /* AU.2-AU.3 */ | ||
462 | } TBICTXGP, *PTBICTXGP; | ||
463 | |||
464 | #define TBICTXGP_DspramSizes (0) | ||
465 | #define TBICTXGP_SaveMask (TBICTXGP_DspramSizes + 2) | ||
466 | #define TBICTXGP_MAX_BYTES (2 + 2 + 4 + 8*(6+2)) | ||
467 | |||
468 | #endif | ||
469 | #endif /* ifndef __ASSEMBLY__ */ | ||
470 | |||
471 | /* Byte offsets of fields within TBICTX */ | ||
472 | #define TBICTX_Flags (0) | ||
473 | #define TBICTX_SaveMask (2) | ||
474 | #define TBICTX_CurrPC (4) | ||
475 | #define TBICTX_DX (2 + 2 + 4) | ||
476 | #define TBICTX_CurrRPT (2 + 2 + 4 + 8 * 8) | ||
477 | #define TBICTX_CurrMODE (2 + 2 + 4 + 8 * 8 + 4 + 4) | ||
478 | #define TBICTX_AX (2 + 2 + 4 + 8 * 8 + 4 + 4 + 4 + 4) | ||
479 | #define TBICTX_Ext (2 + 2 + 4 + 8 * 8 + 4 + 4 + 4 + 4 + 2 * 8) | ||
480 | #define TBICTX_Ext_AX2 (TBICTX_Ext + TBIEXTCTX_AX2) | ||
481 | #define TBICTX_Ext_AX2_U0 (TBICTX_Ext + TBIEXTCTX_AX2 + TBIDUAL_U0) | ||
482 | #define TBICTX_Ext_AX2_U1 (TBICTX_Ext + TBIEXTCTX_AX2 + TBIDUAL_U1) | ||
483 | #define TBICTX_Ext_Ctx_pExt (TBICTX_Ext + TBIEXTCTX_Ctx_pExt) | ||
484 | #define TBICTX_Ext_Ctx_SaveMask (TBICTX_Ext + TBIEXTCTX_Ctx_SaveMask) | ||
485 | |||
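These offsets mirror the TBICTX layout above and could be sanity-checked from C, for example with C11 _Static_assert (a sketch, not part of the original API):

    #include <stddef.h>
    _Static_assert(offsetof(TBICTX, CurrPC) == TBICTX_CurrPC,
                   "TBICTX_CurrPC offset mismatch");
    _Static_assert(offsetof(TBICTX, Ext) == TBICTX_Ext,
                   "TBICTX_Ext offset mismatch");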
486 | #ifdef TBI_FASTINT_1_4 | ||
487 | #define TBICTX2_BYTES (8 * 2 + 8 * 2 + 4 + 4 + 4 + 4) | ||
488 | #define TBICTXEXTCB0_BYTES (4 + 4 + 8) | ||
489 | |||
490 | #define TBICTX2_CRIT_MAX_BYTES (TBICTX2_BYTES + TBICTXEXTCB0_BYTES + 6 * TBIDUAL_BYTES) | ||
491 | #define TBI_SWITCH_NEXT_PC(PC, EXTRA) ((PC) + (((EXTRA) & 1) ? 8 : 4)) | ||
492 | #endif | ||
493 | |||
494 | #ifndef __ASSEMBLY__ | ||
495 | /* Extended thread state save areas - catch buffer state element */ | ||
496 | typedef struct _tbictxextcb0_tag_ { | ||
497 | /* Flags data and address value - see METAC_CATCH_VALUES in machine.h */ | ||
498 | unsigned long CBFlags, CBAddr; | ||
499 | /* 64-bit data */ | ||
500 | TBIDUAL CBData; | ||
501 | |||
502 | } TBICTXEXTCB0, *PTBICTXEXTCB0; | ||
503 | |||
504 | /* Read pipeline state saved on later cores after single catch buffer slot */ | ||
505 | typedef struct _tbictxextrp6_tag_ { | ||
506 | /* RPMask is TXSTATUS_RPMASK_BITS only, reserved is undefined */ | ||
507 | unsigned long RPMask, Reserved0; | ||
508 | TBIDUAL CBData[6]; | ||
509 | |||
510 | } TBICTXEXTRP6, *PTBICTXEXTRP6; | ||
511 | |||
512 | /* Extended thread state save areas - 8 DU register pairs */ | ||
513 | typedef struct _tbictxextbb8_tag_ { | ||
514 | /* Remaining Data unit registers in 64-bit pairs */ | ||
515 | TBIDUAL UX[8]; | ||
516 | |||
517 | } TBICTXEXTBB8, *PTBICTXEXTBB8; | ||
518 | |||
519 | /* Extended thread state save areas - 3 AU register pairs */ | ||
520 | typedef struct _tbictxextbb3_tag_ { | ||
521 | /* Remaining Address unit registers in 64-bit pairs */ | ||
522 | TBIDUAL UX[3]; | ||
523 | |||
524 | } TBICTXEXTBB3, *PTBICTXEXTBB3; | ||
525 | |||
526 | /* Extended thread state save areas - 4 AU register pairs or 4 FX pairs */ | ||
527 | typedef struct _tbictxextbb4_tag_ { | ||
528 | /* Remaining Address unit or FPU registers in 64-bit pairs */ | ||
529 | TBIDUAL UX[4]; | ||
530 | |||
531 | } TBICTXEXTBB4, *PTBICTXEXTBB4; | ||
532 | |||
533 | /* Extended thread state save areas - Hardware loop states (max 2) */ | ||
534 | typedef struct _tbictxexthl2_tag_ { | ||
535 | /* Hardware looping register states */ | ||
536 | TBIDUAL Start, End, Count; | ||
537 | |||
538 | } TBICTXEXTHL2, *PTBICTXEXTHL2; | ||
539 | |||
540 | /* Extended thread state save areas - DSP register states */ | ||
541 | typedef struct _tbictxexttdp_tag_ { | ||
542 | /* DSP 32-bit accumulator register state (Bits 31:0 of ACX.0) */ | ||
543 | TBIDUAL Acc32[1]; | ||
544 | /* DSP > 32-bit accumulator bits 63:32 of ACX.0 (zero-extended) */ | ||
545 | TBIDUAL Acc64[1]; | ||
546 | /* Twiddle register state, and three phase increment states */ | ||
547 | TBIDUAL PReg[4]; | ||
548 | /* Modulo region size, padded to 64-bits */ | ||
549 | int CurrMRSIZE, Reserved0; | ||
550 | |||
551 | } TBICTXEXTTDP, *PTBICTXEXTTDP; | ||
552 | |||
553 | /* Extended thread state save areas - DSP register states including DSP RAM */ | ||
554 | typedef struct _tbictxexttdpr_tag_ { | ||
555 | /* DSP 32-bit accumulator register state (Bits 31:0 of ACX.0) */ | ||
556 | TBIDUAL Acc32[1]; | ||
557 | /* DSP 40-bit accumulator register state (Bits 39:8 of ACX.0) */ | ||
558 | TBIDUAL Acc40[1]; | ||
559 | /* DSP RAM Pointers */ | ||
560 | TBIDUAL RP0[2], WP0[2], RP1[2], WP1[2]; | ||
561 | /* DSP RAM Increments */ | ||
562 | TBIDUAL RPI0[2], WPI0[2], RPI1[2], WPI1[2]; | ||
563 | /* Template registers */ | ||
564 | unsigned long Tmplt[16]; | ||
565 | /* Modulo address region size and DSP RAM module region sizes */ | ||
566 | int CurrMRSIZE, CurrDRSIZE; | ||
567 | |||
568 | } TBICTXEXTTDPR, *PTBICTXEXTTDPR; | ||
569 | |||
570 | #ifdef TBI_1_4 | ||
571 | /* The METAC_ID_CORE register state is a marker for the FPU | ||
572 | state that is then stored after this core header structure. */ | ||
573 | #define TBICTXEXTFPU_CONFIG_MASK ( (METAC_COREID_NOFPACC_BIT+ \ | ||
574 | METAC_COREID_CFGFPU_BITS ) << \ | ||
575 | METAC_COREID_CONFIG_BITS ) | ||
576 | |||
577 | /* Recorded FPU exception state from TXDEFR in DefrFpu */ | ||
578 | #define TBICTXEXTFPU_DEFRFPU_MASK (TXDEFR_FPU_FE_BITS) | ||
579 | |||
580 | /* Extended thread state save areas - FPU register states */ | ||
581 | typedef struct _tbictxextfpu_tag_ { | ||
582 | /* Stored METAC_CORE_ID CONFIG */ | ||
583 | int CfgFpu; | ||
584 | /* Stored deferred TXDEFR bits related to FPU | ||
585 | * | ||
586 | * This is encoded as follows in order to fit into 16-bits: | ||
587 | * DefrFPU:15 - 14 <= 0 | ||
588 | * :13 - 8 <= TXDEFR:21-16 | ||
589 | * : 7 - 6 <= 0 | ||
590 | * : 5 - 0 <= TXDEFR:5-0 | ||
591 | */ | ||
592 | short DefrFpu; | ||
593 | |||
594 | /* TXMODE bits related to FPU */ | ||
595 | short ModeFpu; | ||
596 | |||
597 | /* FPU Even/Odd register states */ | ||
598 | TBIDUAL FX[4]; | ||
599 | |||
600 | /* if CfgFpu & TBICTX_CFGFPU_FX16_BIT -> 1 then TBICTXEXTBB4 holds FX.8-15 */ | ||
601 | /* if CfgFpu & TBICTX_CFGFPU_NOACF_BIT -> 0 then TBICTXEXTFPACC holds state */ | ||
602 | } TBICTXEXTFPU, *PTBICTXEXTFPU; | ||
603 | |||
604 | /* Extended thread state save areas - FPU accumulator state */ | ||
605 | typedef struct _tbictxextfpacc_tag_ { | ||
606 | /* FPU accumulator register state - three 64-bit parts */ | ||
607 | TBIDUAL FAcc32[3]; | ||
608 | |||
609 | } TBICTXEXTFPACC, *PTBICTXEXTFPACC; | ||
610 | #endif | ||
611 | |||
612 | /* Prototype TBI structure */ | ||
613 | struct _tbi_tag_ ; | ||
614 | |||
615 | /* A 64-bit return value used commonly in the TBI APIs */ | ||
616 | typedef union _tbires_tag_ { | ||
617 | /* Save and load this value to get/set the whole result quickly */ | ||
618 | long long Val; | ||
619 | |||
620 | /* Parameter of a fnSigs or __TBICtx* call */ | ||
621 | struct _tbires_sig_tag_ { | ||
622 | /* TXMASK[I] bits zeroed up to and including current trigger level */ | ||
623 | unsigned short TrigMask; | ||
624 | /* Control bits for handlers - see PTBIAPIFN documentation below */ | ||
625 | unsigned short SaveMask; | ||
626 | /* Pointer to the base register context save area of the thread */ | ||
627 | PTBICTX pCtx; | ||
628 | } Sig; | ||
629 | |||
630 | /* Result of TBIThrdPrivId call */ | ||
631 | struct _tbires_thrdprivid_tag_ { | ||
632 | /* Basic thread identifier; just TBID_THREAD_BITS */ | ||
633 | int Id; | ||
634 | /* Non-thread number bits; TBID_ISTAT_BIT+TBID_PSTAT_BIT */ | ||
635 | int Priv; | ||
636 | } Thrd; | ||
637 | |||
638 | /* Parameter and Result of a __TBISwitch call */ | ||
639 | struct _tbires_switch_tag_ { | ||
640 | /* Parameter passed across context switch */ | ||
641 | void *pPara; | ||
642 | /* Thread context of other Thread including restore flags */ | ||
643 | PTBICTX pCtx; | ||
644 | } Switch; | ||
645 | |||
646 | /* For extended S/W events only */ | ||
647 | struct _tbires_ccb_tag_ { | ||
648 | void *pCCB; | ||
649 | int COff; | ||
650 | } CCB; | ||
651 | |||
652 | struct _tbires_tlb_tag_ { | ||
653 | int Leaf; /* TLB Leaf data */ | ||
654 | int Flags; /* TLB Flags */ | ||
655 | } Tlb; | ||
656 | |||
657 | #ifdef TBI_FASTINT_1_4 | ||
658 | struct _tbires_intr_tag_ { | ||
659 | short TrigMask; | ||
660 | short SaveMask; | ||
661 | PTBICTX2 pCtx; | ||
662 | } Intr; | ||
663 | #endif | ||
664 | |||
665 | } TBIRES, *PTBIRES; | ||
666 | #endif /* ifndef __ASSEMBLY__ */ | ||
667 | |||
668 | #ifndef __ASSEMBLY__ | ||
669 | /* Prototype for all signal handler functions, called via ___TBISyncTrigger or | ||
670 | ___TBIASyncTrigger. | ||
671 | |||
672 | State.Sig.TrigMask will indicate the bits set within TXMASKI at | ||
673 | the time of the handler call that have all been cleared to prevent | ||
674 | nested interrupts occurring immediately. | ||
675 | |||
676 | State.Sig.SaveMask is a bit-mask which will be set to Zero when a trigger | ||
677 | occurs at background level and TBICTX_CRIT_BIT and optionally | ||
678 | TBICTX_CBUF_BIT when a trigger occurs at interrupt level. | ||
679 | |||
680 | TBICTX_CBUF_BIT reflects the state of TXSTATUS_CBMARKER_BIT for | ||
681 | the interrupted background thread. | ||
682 | |||
683 | State.Sig.pCtx will point at a TBICTX structure generated to hold the | ||
684 | critical state of the interrupted thread at interrupt level and | ||
685 | should be set to NULL when called at background level. | ||
686 | |||
687 | Triggers will indicate the status of TXSTAT or TXSTATI sampled by the | ||
688 | code that called the handler. | ||
689 | |||
690 | InstOrSWSId is defined firstly as 'Inst' if the SigNum is TBID_SIGNUM_SWx | ||
691 | and holds the actual SWITCH instruction detected; secondly, if SigNum | ||
692 | is TBID_SIGNUM_SWS, 'SWSId' is defined to hold the Id of the | ||
693 | software signal detected; in other cases the value of this | ||
694 | parameter is undefined. | ||
695 | |||
696 | pTBI points at the PTBI structure related to the thread and processing | ||
697 | level involved. | ||
698 | |||
699 | TBIRES return value at both processing levels is similar in terms of any | ||
700 | changes that the handler makes. By default the State argument value | ||
701 | passed in should be returned. | ||
702 | |||
703 | Sig.TrigMask value is bits to OR back into TXMASKI when the handler | ||
704 | completes to enable currently disabled interrupts. | ||
705 | |||
706 | Sig.SaveMask value is ignored. | ||
707 | |||
708 | Sig.pCtx is ignored. | ||
709 | |||
710 | */ | ||
711 | typedef TBIRES (*PTBIAPIFN)( TBIRES State, int SigNum, | ||
712 | int Triggers, int InstOrSWSId, | ||
713 | volatile struct _tbi_tag_ *pTBI ); | ||
714 | #endif /* ifndef __ASSEMBLY__ */ | ||
715 | |||
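A conforming handler therefore has the following shape (a sketch with hypothetical device-service code; by default the State value passed in is returned):

    static TBIRES MyHandler(TBIRES State, int SigNum, int Triggers,
                            int InstOrSWSId,
                            volatile struct _tbi_tag_ *pTBI)
    {
        /* ... service the hardware behind this trigger ... */
        return State;    /* default: hand back the State given */
    }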
716 | #ifndef __ASSEMBLY__ | ||
717 | /* The global memory map is described by a list of segment descriptors */ | ||
718 | typedef volatile struct _tbiseg_tag_ { | ||
719 | volatile struct _tbiseg_tag_ *pLink; | ||
720 | int Id; /* Id of the segment */ | ||
721 | TBISPIN Lock; /* Spin-lock for struct (normally 0) */ | ||
722 | unsigned int Bytes; /* Size of region in bytes */ | ||
723 | void *pGAddr; /* Base addr of region in global space */ | ||
724 | void *pLAddr; /* Base addr of region in local space */ | ||
725 | int Data[2]; /* Segment specific data (may be extended) */ | ||
726 | |||
727 | } TBISEG, *PTBISEG; | ||
728 | #endif /* ifndef __ASSEMBLY__ */ | ||
729 | |||
730 | /* Offsets of fields in TBISEG structure */ | ||
731 | #define TBISEG_pLink ( 0) | ||
732 | #define TBISEG_Id ( 4) | ||
733 | #define TBISEG_Lock ( 8) | ||
734 | #define TBISEG_Bytes (12) | ||
735 | #define TBISEG_pGAddr (16) | ||
736 | #define TBISEG_pLAddr (20) | ||
737 | #define TBISEG_Data (24) | ||
738 | |||
739 | #ifndef __ASSEMBLY__ | ||
740 | typedef volatile struct _tbi_tag_ { | ||
741 | int SigMask; /* Bits set to represent S/W events */ | ||
742 | PTBIKICK pKick; /* Kick addr for S/W events */ | ||
743 | void *pCCB; /* Extended S/W events */ | ||
744 | PTBISEG pSeg; /* Related segment structure */ | ||
745 | PTBIAPIFN fnSigs[TBID_SIGNUM_MAX+1];/* Signal handler API table */ | ||
746 | } *PTBI, TBI; | ||
747 | #endif /* ifndef __ASSEMBLY__ */ | ||
748 | |||
749 | /* Byte offsets of fields within TBI */ | ||
750 | #define TBI_SigMask (0) | ||
751 | #define TBI_pKick (4) | ||
752 | #define TBI_pCCB (8) | ||
753 | #define TBI_pSeg (12) | ||
754 | #define TBI_fnSigs (16) | ||
755 | |||
756 | #ifdef TBI_1_4 | ||
757 | #ifndef __ASSEMBLY__ | ||
758 | /* This handler should be used for TBID_SIGNUM_DFR */ | ||
759 | extern TBIRES __TBIHandleDFR ( TBIRES State, int SigNum, | ||
760 | int Triggers, int InstOrSWSId, | ||
761 | volatile struct _tbi_tag_ *pTBI ); | ||
762 | #endif | ||
763 | #endif | ||
764 | |||
765 | /* String table entry - special values */ | ||
766 | #define METAG_TBI_STRS (0x5300) /* Tag : If entry is valid */ | ||
767 | #define METAG_TBI_STRE (0x4500) /* Tag : If entry is end of table */ | ||
768 | #define METAG_TBI_STRG (0x4700) /* Tag : If entry is a gap */ | ||
769 | #define METAG_TBI_STRX (0x5A00) /* TransLen : If no translation present */ | ||
770 | |||
771 | #ifndef __ASSEMBLY__ | ||
772 | typedef volatile struct _tbistr_tag_ { | ||
773 | short Bytes; /* Length of entry in Bytes */ | ||
774 | short Tag; /* Normally METAG_TBI_STRS(0x5300) */ | ||
775 | short Len; /* Length of the string entry (incl null) */ | ||
776 | short TransLen; /* Normally METAG_TBI_STRX(0x5A00) */ | ||
777 | char String[8]; /* Zero terminated (may be bigger) */ | ||
778 | |||
779 | } TBISTR, *PTBISTR; | ||
780 | #endif /* ifndef __ASSEMBLY__ */ | ||
781 | |||
782 | /* Cache size information - available as fields of Data[1] of global heap | ||
783 | segment */ | ||
784 | #define METAG_TBI_ICACHE_SIZE_S 0 /* see comments below */ | ||
785 | #define METAG_TBI_ICACHE_SIZE_BITS 0x0000000F | ||
786 | #define METAG_TBI_ICACHE_FILL_S 4 | ||
787 | #define METAG_TBI_ICACHE_FILL_BITS 0x000000F0 | ||
788 | #define METAG_TBI_DCACHE_SIZE_S 8 | ||
789 | #define METAG_TBI_DCACHE_SIZE_BITS 0x00000F00 | ||
790 | #define METAG_TBI_DCACHE_FILL_S 12 | ||
791 | #define METAG_TBI_DCACHE_FILL_BITS 0x0000F000 | ||
792 | |||
793 | /* METAG_TBI_xCACHE_SIZE | ||
794 | Describes the physical cache size rounded up to the next power of 2 | ||
795 | relative to a 16K (2^14) cache. These sizes are encoded as a signed addend | ||
796 | to this base power of 2, for example | ||
797 | 4K -> 2^12 -> -2 (i.e. 12-14) | ||
798 | 8K -> 2^13 -> -1 | ||
799 | 16K -> 2^14 -> 0 | ||
800 | 32K -> 2^15 -> +1 | ||
801 | 64K -> 2^16 -> +2 | ||
802 | 128K -> 2^17 -> +3 | ||
803 | |||
804 | METAG_TBI_xCACHE_FILL | ||
805 | Describes the physical cache size within the power of 2 area given by | ||
806 | the value above. For example a 10K cache may be represented as having | ||
807 | nearest size 16K with a fill of 10 sixteenths. This is encoded as the | ||
808 | number of unused 1/16ths, for example | ||
809 | 0000 -> 0 -> 16/16 | ||
810 | 0001 -> 1 -> 15/16 | ||
811 | 0010 -> 2 -> 14/16 | ||
812 | ... | ||
813 | 1111 -> 15 -> 1/16 | ||
814 | */ | ||
815 | |||
816 | #define METAG_TBI_CACHE_SIZE_BASE_LOG2 14 | ||
817 | |||
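Decoding these fields in C might look like the following sketch (the 4-bit SIZE field is a signed addend and is sign-extended here):

    static unsigned int icache_bytes(int CacheData)
    {
        int Add  = (CacheData & METAG_TBI_ICACHE_SIZE_BITS)
                                       >> METAG_TBI_ICACHE_SIZE_S;
        int Gaps = (CacheData & METAG_TBI_ICACHE_FILL_BITS)
                                       >> METAG_TBI_ICACHE_FILL_S;
        unsigned int Pow2;

        if (Add >= 8)
            Add -= 16;                      /* sign-extend 4-bit field */
        Pow2 = 1u << (METAG_TBI_CACHE_SIZE_BASE_LOG2 + Add);
        return Pow2 - (Pow2 >> 4) * Gaps;   /* drop unused 1/16ths */
    }

So a 10K cache (nearest size 16K, 6 unused sixteenths) decodes back to 10240 bytes.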
818 | /* Each declaration made by this macro generates a TBISTR entry */ | ||
819 | #ifndef __ASSEMBLY__ | ||
820 | #define TBISTR_DECL( Name, Str ) \ | ||
821 | __attribute__ ((__section__ (".tbistr") )) const char Name[] = #Str | ||
822 | #endif | ||
823 | |||
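For example, a declaration such as the following (hypothetical name and value) places a zero-terminated string in the .tbistr section for later lookup via __TBIFindStr:

    TBISTR_DECL(MyBuildTag, build-tag-value);
    /* expands to: const char MyBuildTag[] = "build-tag-value"; */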
824 | /* META timer values - see below for Timer support routines */ | ||
825 | #define TBI_TIMERWAIT_MIN (-16) /* Minimum 'recommended' period */ | ||
826 | #define TBI_TIMERWAIT_MAX (-0x7FFFFFFF) /* Maximum 'recommended' period */ | ||
827 | |||
828 | #ifndef __ASSEMBLY__ | ||
829 | /* These macros allow direct access from C to any register known to the | ||
830 | assembler or defined in machine.h. Example candidates are TXTACTCYC, | ||
831 | TXIDLECYC, and TXPRIVEXT. Note that where higher level macros and routines | ||
832 | like the timer and trigger handling features below exist, they should be | ||
833 | used in preference to this direct low-level access mechanism. */ | ||
834 | #define TBI_GETREG( Reg ) __extension__ ({\ | ||
835 | int __GRValue; \ | ||
836 | __asm__ volatile ("MOV\t%0," #Reg "\t/* (*TBI_GETREG OK) */" : \ | ||
837 | "=r" (__GRValue) ); \ | ||
838 | __GRValue; }) | ||
839 | |||
840 | #define TBI_SETREG( Reg, Value ) do {\ | ||
841 | int __SRValue = Value; \ | ||
842 | __asm__ volatile ("MOV\t" #Reg ",%0\t/* (*TBI_SETREG OK) */" : \ | ||
843 | : "r" (__SRValue) ); } while (0) | ||
844 | |||
845 | #define TBI_SWAPREG( Reg, Value ) do {\ | ||
846 | int __XRValue = (Value); \ | ||
847 | __asm__ volatile ("SWAP\t" #Reg ",%0\t/* (*TBI_SWAPREG OK) */" : \ | ||
848 | "=r" (__XRValue) : "0" (__XRValue) ); \ | ||
849 | Value = __XRValue; } while (0) | ||
850 | |||
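For instance, a thread's cycle counters can be sampled directly (a sketch; the register names must be known to the assembler as noted above):

    int Active = TBI_GETREG(TXTACTCYC);   /* active cycle count */
    int Idle   = TBI_GETREG(TXIDLECYC);   /* idle cycle count   */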
851 | /* Obtain and/or release global critical section lock given that interrupts | ||
852 | are already disabled and/or should remain disabled. */ | ||
853 | #define TBI_NOINTSCRITON do {\ | ||
854 | __asm__ volatile ("LOCK1\t\t/* (*TBI_NOINTSCRITON OK) */");} while (0) | ||
855 | #define TBI_NOINTSCRITOFF do {\ | ||
856 | __asm__ volatile ("LOCK0\t\t/* (*TBI_NOINTSCRITOFF OK) */");} while (0) | ||
857 | /* Optimised in-lining versions of the above macros */ | ||
858 | |||
859 | #define TBI_LOCK( TrigState ) do {\ | ||
860 | int __TRValue; \ | ||
861 | int __ALOCKHI = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \ | ||
862 | __asm__ volatile ("MOV %0,#0\t\t/* (*TBI_LOCK ... */\n\t" \ | ||
863 | "SWAP\t%0,TXMASKI\t/* ... */\n\t" \ | ||
864 | "LOCK2\t\t/* ... */\n\t" \ | ||
865 | "SETD\t[%1+#0x40],D1RtP /* ... OK) */" : \ | ||
866 | "=r&" (__TRValue) : "u" (__ALOCKHI) ); \ | ||
867 | TrigState = __TRValue; } while (0) | ||
868 | #define TBI_CRITON( TrigState ) do {\ | ||
869 | int __TRValue; \ | ||
870 | __asm__ volatile ("MOV %0,#0\t\t/* (*TBI_CRITON ... */\n\t" \ | ||
871 | "SWAP\t%0,TXMASKI\t/* ... */\n\t" \ | ||
872 | "LOCK1\t\t/* ... OK) */" : \ | ||
873 | "=r" (__TRValue) ); \ | ||
874 | TrigState = __TRValue; } while (0) | ||
875 | |||
876 | #define TBI_INTSX( TrigState ) do {\ | ||
877 | int __TRValue = TrigState; \ | ||
878 | __asm__ volatile ("SWAP\t%0,TXMASKI\t/* (*TBI_INTSX OK) */" : \ | ||
879 | "=r" (__TRValue) : "0" (__TRValue) ); \ | ||
880 | TrigState = __TRValue; } while (0) | ||
881 | |||
882 | #define TBI_UNLOCK( TrigState ) do {\ | ||
883 | int __TRValue = TrigState; \ | ||
884 | int __ALOCKHI = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \ | ||
885 | __asm__ volatile ("SETD\t[%1+#0x00],D1RtP\t/* (*TBI_UNLOCK ... */\n\t" \ | ||
886 | "LOCK0\t\t/* ... */\n\t" \ | ||
887 | "MOV\tTXMASKI,%0\t/* ... OK) */" : \ | ||
888 | : "r" (__TRValue), "u" (__ALOCKHI) ); } while (0) | ||
889 | |||
890 | #define TBI_CRITOFF( TrigState ) do {\ | ||
891 | int __TRValue = TrigState; \ | ||
892 | __asm__ volatile ("LOCK0\t\t/* (*TBI_CRITOFF ... */\n\t" \ | ||
893 | "MOV\tTXMASKI,%0\t/* ... OK) */" : \ | ||
894 | : "r" (__TRValue) ); } while (0) | ||
895 | |||
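These macros pair around a critical region as in this minimal sketch:

    int Trigs;
    TBI_LOCK(Trigs);     /* mask interrupts and take the global lock */
    /* ... touch state shared with other local threads ... */
    TBI_UNLOCK(Trigs);   /* drop the lock and restore TXMASKI        */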
896 | #define TBI_TRIGSX( SrcDst ) do { TBI_SWAPREG( TXMASK, SrcDst );} while (0) | ||
897 | |||
898 | /* Composite macros to perform logic ops on INTS or TRIGS masks */ | ||
899 | #define TBI_INTSOR( Bits ) do {\ | ||
900 | int __TT = 0; TBI_INTSX(__TT); \ | ||
901 | __TT |= (Bits); TBI_INTSX(__TT); } while (0) | ||
902 | |||
903 | #define TBI_INTSAND( Bits ) do {\ | ||
904 | int __TT = 0; TBI_INTSX(__TT); \ | ||
905 | __TT &= (Bits); TBI_INTSX(__TT); } while (0) | ||
906 | |||
907 | #ifdef TBI_1_4 | ||
908 | #define TBI_DEFRICTRLSOR( Bits ) do {\ | ||
909 | int __TT = TBI_GETREG( CT.20 ); \ | ||
910 | __TT |= (Bits); TBI_SETREG( CT.20, __TT); } while (0) | ||
911 | |||
912 | #define TBI_DEFRICTRLSAND( Bits ) do {\ | ||
913 | int __TT = TBI_GETREG( TXDEFR ); \ | ||
914 | __TT &= (Bits); TBI_SETREG( CT.20, __TT); } while (0) | ||
915 | #endif | ||
916 | |||
917 | #define TBI_TRIGSOR( Bits ) do {\ | ||
918 | int __TT = TBI_GETREG( TXMASK ); \ | ||
919 | __TT |= (Bits); TBI_SETREG( TXMASK, __TT); } while (0) | ||
920 | |||
921 | #define TBI_TRIGSAND( Bits ) do {\ | ||
922 | int __TT = TBI_GETREG( TXMASK ); \ | ||
923 | __TT &= (Bits); TBI_SETREG( TXMASK, __TT); } while (0) | ||
924 | |||
925 | /* Macros to disable and re-enable interrupts using TBI_INTSX, deliberate | ||
926 | traps and exceptions can still be handled within the critical section. */ | ||
927 | #define TBI_STOPINTS( Value ) do {\ | ||
928 | int __TT = TBI_GETREG( TXMASKI ); \ | ||
929 | __TT &= TXSTATI_BGNDHALT_BIT; TBI_INTSX( __TT ); \ | ||
930 | Value = __TT; } while (0) | ||
931 | #define TBI_RESTINTS( Value ) do {\ | ||
932 | int __TT = Value; TBI_INTSX( __TT ); } while (0) | ||
933 | |||
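A short region that must not be interrupted can therefore be written as (sketch):

    int OldInts;
    TBI_STOPINTS(OldInts);   /* mask interrupts, keeping BGNDHALT */
    /* ... brief non-interruptible work ... */
    TBI_RESTINTS(OldInts);   /* restore the previous TXMASKI      */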
934 | /* Return pointer to segment list at current privilege level */ | ||
935 | PTBISEG __TBISegList( void ); | ||
936 | |||
937 | /* Search the segment list for a match given Id, pStart can be NULL */ | ||
938 | PTBISEG __TBIFindSeg( PTBISEG pStart, int Id ); | ||
939 | |||
940 | /* Prepare a new segment structure using space from within another */ | ||
941 | PTBISEG __TBINewSeg( PTBISEG pFromSeg, int Id, unsigned int Bytes ); | ||
942 | |||
943 | /* Prepare a new segment using any global or local heap segments available */ | ||
944 | PTBISEG __TBIMakeNewSeg( int Id, unsigned int Bytes ); | ||
945 | |||
946 | /* Insert a new segment into the segment list so __TBIFindSeg can locate it */ | ||
947 | void __TBIAddSeg( PTBISEG pSeg ); | ||
948 | #define __TBIADDSEG_DEF /* Some versions failed to define this */ | ||
949 | |||
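As an illustration, the global heap segment could be located as follows (a sketch; the owner/scope/type combination shown is an assumption based on the Id macros earlier in this header):

    PTBISEG pHeap = __TBIFindSeg(NULL,
            TBID_SEG(TBID_THREAD_GLOBAL, TBID_SEGSCOPE_GLOBAL,
                     TBID_SEGTYPE_HEAP));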
950 | /* Return Id of current thread; TBID_ISTAT_BIT+TBID_THREAD_BITS */ | ||
951 | int __TBIThreadId( void ); | ||
952 | |||
953 | /* Return TBIRES.Thrd data for current thread */ | ||
954 | TBIRES __TBIThrdPrivId( void ); | ||
955 | |||
956 | /* Return pointer to current thread's TBI root block. | ||
957 | Id implies whether Int or Background root block is required */ | ||
958 | PTBI __TBI( int Id ); | ||
959 | |||
960 | /* Try to set Mask bit using the spin-lock protocol, return 0 if fails and | ||
961 | new state if succeeds */ | ||
962 | int __TBIPoll( PTBISPIN pLock, int Mask ); | ||
963 | |||
964 | /* Set Mask bits via the spin-lock protocol in *pLock, return new state */ | ||
965 | int __TBISpin( PTBISPIN pLock, int Mask ); | ||
966 | |||
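For example (a minimal sketch showing only the acquisition side these primitives define):

    static TBISPIN MyLock;            /* zero-initialised lock word  */

    if (__TBIPoll(&MyLock, 1)) {
        /* bit 0 acquired without blocking */
    }
    (void) __TBISpin(&MyLock, 1);     /* blocks until bit 0 acquired */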
967 | /* Default handler set up for all TBI.fnSigs entries during initialisation */ | ||
968 | TBIRES __TBIUnExpXXX( TBIRES State, int SigNum, | ||
969 | int Triggers, int Inst, PTBI pTBI ); | ||
970 | |||
971 | /* Call this routine to service triggers at background processing level. The | ||
972 | TBID_POLL_BIT of the Id parameter value will be used to indicate that the | ||
973 | routine should return if no triggers need to be serviced initially. If this | ||
974 | bit is not set the routine will block until one trigger handler is serviced | ||
975 | and then behave like the poll case servicing any remaining triggers | ||
976 | actually outstanding before returning. Normally the State parameter should | ||
977 | be simply initialised to zero and the result should be ignored; other | ||
978 | values/options are for internal use only. */ | ||
979 | TBIRES __TBISyncTrigger( TBIRES State, int Id ); | ||
980 | |||
981 | /* Call this routine to enable processing of triggers by signal handlers at | ||
982 | interrupt level. The State parameter value passed is returned by this | ||
983 | routine. The State.Sig.TrigMask field also specifies the initial | ||
984 | state of the interrupt mask register TXMASKI to be setup by the call. | ||
985 | The other parts of the State parameter are ignored unless the PRIV bit is | ||
986 | set in the SaveMask field. In this case the State.Sig.pCtx field specifies | ||
987 | the base of the stack to which the interrupt system should switch | ||
988 | as it saves the state of the previously executing code. In this case the | ||
989 | thread will be unprivileged as it continues execution at the return | ||
990 | point of this routine and its future state will effectively never be | ||
991 | trusted to be valid. */ | ||
992 | TBIRES __TBIASyncTrigger( TBIRES State ); | ||
993 | |||
994 | /* Call this to swap soft threads executing at the background processing level. | ||
995 | The TBIRES returned to the new thread will be the same as the NextThread | ||
996 | value specified to the call. The NextThread.Switch.pCtx value specifies | ||
997 | which thread context to restore and the NextThread.Switch.Para value can | ||
998 | hold an arbitrary expression to be passed between the threads. The saved | ||
999 | state of the previous thread will be stored in a TBICTX descriptor created | ||
1000 | on its stack and the address of this will be stored into the *rpSaveCtx | ||
1001 | location specified. */ | ||
1002 | TBIRES __TBISwitch( TBIRES NextThread, PTBICTX *rpSaveCtx ); | ||
1003 | |||
1004 | /* Call this to initialise a stack frame ready for further use, up to four | ||
1005 | 32-bit arguments may be specified after the fixed args to be passed via | ||
1006 | the new stack pStack to the routine specified via fnMain. If the | ||
1007 | main-line routine ever returns the thread will operate as if main itself | ||
1008 | had returned and terminate with the return code given. */ | ||
1009 | typedef int (*PTBIMAINFN)( TBIRES Arg /*, <= 4 additional 32-bit args */ ); | ||
1010 | PTBICTX __TBISwitchInit( void *pStack, PTBIMAINFN fnMain, ... ); | ||
1011 | |||
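Putting the two calls together, a soft thread can be created and entered as in this sketch (MainFn, the stack size, and the argument passed are illustrative assumptions):

    static int Stack[1024];               /* new thread's stack space */
    PTBICTX pNewCtx, pSavedCtx;
    TBIRES Next;

    pNewCtx = __TBISwitchInit(Stack, MainFn);  /* MainFn: a PTBIMAINFN */
    Next.Switch.pCtx  = pNewCtx;
    Next.Switch.pPara = NULL;
    (void) __TBISwitch(Next, &pSavedCtx); /* resumes here on switch-back */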
1012 | /* Call this to resume a thread from a saved synchronous TBICTX state. | ||
1013 | The TBIRES returned to the new thread will be the same as the NextThread | ||
1014 | value specified to the call. The NextThread.Switch.pCtx value specifies | ||
1015 | which thread context to restore and the NextThread.Switch.Para value can | ||
1016 | hold an arbitrary expression to be passed between the threads. The context | ||
1017 | of the calling thread is lost and this routine never returns to the | ||
1018 | caller. The TrigsMask value supplied is ored into TXMASKI to enable | ||
1019 | interrupts after the context of the new thread is established. */ | ||
1020 | void __TBISyncResume( TBIRES NextThread, int TrigsMask ); | ||
1021 | |||
1022 | /* Call these routines to save and restore the extended states of | ||
1023 | scheduled tasks. */ | ||
1024 | void *__TBICtxSave( TBIRES State, void *pExt ); | ||
1025 | void *__TBICtxRestore( TBIRES State, void *pExt ); | ||
1026 | |||
1027 | #ifdef TBI_1_4 | ||
1028 | #ifdef TBI_FASTINT_1_4 | ||
1029 | /* Call these routines to copy the GP state to a separate buffer | ||
1030 | * Only necessary for context switching. | ||
1031 | */ | ||
1032 | PTBICTXGP __TBICtx2SaveCrit( PTBICTX2 pCurrentCtx, PTBICTX2 pSaveCtx ); | ||
1033 | void *__TBICtx2SaveGP( PTBICTXGP pCurrentCtxGP, PTBICTXGP pSaveCtxGP ); | ||
1034 | |||
1035 | /* Call these routines to save and restore the extended states of | ||
1036 | scheduled tasks. */ | ||
1037 | void *__TBICtx2Save( PTBICTXGP pCtxGP, short SaveMask, void *pExt ); | ||
1038 | void *__TBICtx2Restore( PTBICTX2 pCtx, short SaveMask, void *pExt ); | ||
1039 | #endif | ||
1040 | |||
1041 | /* If FPAC flag is set then significant FPU context exists. Call these routines | ||
1042 | to save and restore it */ | ||
1043 | void *__TBICtxFPUSave( TBIRES State, void *pExt ); | ||
1044 | void *__TBICtxFPURestore( TBIRES State, void *pExt ); | ||
1045 | |||
1046 | #ifdef TBI_FASTINT_1_4 | ||
1047 | extern void *__TBICtx2FPUSave (PTBICTXGP, short, void*); | ||
1048 | extern void *__TBICtx2FPURestore (PTBICTXGP, short, void*); | ||
1049 | #endif | ||
1050 | #endif | ||
1051 | |||
1052 | #ifdef TBI_1_4 | ||
1053 | /* Call these routines to save and restore DSPRAM. */ | ||
1054 | void *__TBIDspramSaveA (short DspramSizes, void *pExt); | ||
1055 | void *__TBIDspramSaveB (short DspramSizes, void *pExt); | ||
1056 | void *__TBIDspramRestoreA (short DspramSizes, void *pExt); | ||
1057 | void *__TBIDspramRestoreB (short DspramSizes, void *pExt); | ||
1058 | #endif | ||
1059 | |||
1060 | /* This routine should be used at the entrypoint of interrupt handlers to | ||
1061 | re-enable higher priority interrupts and/or save state from the previously | ||
1062 | executing background code. State is a TBIRES.Sig parameter with NoNestMask | ||
1063 | indicating the triggers (if any) that should remain disabled and SaveMask | ||
1064 | CBUF bit indicating if the hardware catch buffer is dirty. Optionally | ||
1065 | any number of extended state bits X??? including XCBF can be specified to | ||
1066 | force a nested state save call to __TBICtxSave before the current routine | ||
1067 | continues. (In the latter case __TBICtxRestore should be called to restore | ||
1068 | any extended states before the background thread of execution is resumed) | ||
1069 | |||
1070 | By default (no X??? bits specified in SaveMask) this routine performs a | ||
1071 | sub-call to __TBICtxSave with the pExt and State parameters specified IF | ||
1072 | some triggers could be serviced while the current interrupt handler | ||
1073 | executes and the hardware catch buffer is actually dirty. In this case | ||
1074 | this routine provides the XCBF bit in State.Sig.SaveMask to force the | ||
1075 | __TBICtxSave to extract the current catch state. | ||
1076 | |||
1077 | The NoNestMask parameter should normally indicate that the same or lower | ||
1078 | triggers than those provoking the current handler call should not be | ||
1079 | serviced in nested calls, zero may be specified if all possible interrupts | ||
1080 | are to be allowed. | ||
1081 | |||
1082 | The TBIRES.Sig value returned will be similar to the State parameter | ||
1083 | specified with the XCBF bit ORed into its SaveMask if a context save was | ||
1084 | required and fewer bits set in its TrigMask corresponding to the same/lower | ||
1085 | priority interrupt triggers still not enabled. */ | ||
1086 | TBIRES __TBINestInts( TBIRES State, void *pExt, int NoNestMask ); | ||
1087 | |||
1088 | /* This routine causes the TBICTX structure specified in State.Sig.pCtx to | ||
1089 | be restored. This implies that execution will not return to the caller. | ||
1090 | The State.Sig.TrigMask field will be restored during the context switch | ||
1091 | such that any immediately occurring interrupts occur in the context of the | ||
1092 | newly specified task. The State.Sig.SaveMask parameter is ignored. */ | ||
1093 | void __TBIASyncResume( TBIRES State ); | ||
1094 | |||
1095 | /* Call this routine to enable fastest possible processing of one or more | ||
1096 | interrupt triggers via a unified signal handler. The handler concerned | ||
1097 | must simply return after servicing the related hardware. | ||
1098 | The State.Sig.TrigMask parameter indicates the interrupt triggers to be | ||
1099 | enabled and the Thin.Thin.fnHandler specifies the routine to call and | ||
1100 | the whole Thin parameter value will be passed to this routine unaltered as | ||
1101 | its first parameter. */ | ||
1102 | void __TBIASyncThin( TBIRES State, TBIRES Thin ); | ||
1103 | |||
1104 | /* Do this before performing your own direct spin-lock access - use TBI_LOCK */ | ||
1105 | int __TBILock( void ); | ||
1106 | |||
1107 | /* Do this after performing your own direct spin-lock access - use TBI_UNLOCK */ | ||
1108 | void __TBIUnlock( int TrigState ); | ||
1109 | |||
1110 | /* Obtain and release global critical section lock - only stops execution | ||
1111 | of interrupts on this thread and similar critical section code on other | ||
1112 | local threads - use TBI_CRITON or TBI_CRITOFF */ | ||
1113 | int __TBICritOn( void ); | ||
1114 | void __TBICritOff( int TrigState ); | ||
1115 | |||
1116 | /* Change INTS (TXMASKI) - return old state - use TBI_INTSX */ | ||
1117 | int __TBIIntsX( int NewMask ); | ||
1118 | |||
1119 | /* Change TRIGS (TXMASK) - return old state - use TBI_TRIGSX */ | ||
1120 | int __TBITrigsX( int NewMask ); | ||
1121 | |||
1122 | /* This function initialises a timer for first use, only the TBID_ISTAT_BIT | ||
1123 | of the Id parameter is used to indicate which timer is to be modified. The | ||
1124 | Wait value should either be zero to disable the timer concerned or be in | ||
1125 | the recommended TBI_TIMERWAIT_* range to specify the delay required before | ||
1126 | the first timer trigger occurs. | ||
1127 | |||
1128 | The TBID_ISTAT_BIT of the Id parameter similarly affects all other timer | ||
1129 | support functions (see below). */ | ||
1130 | void __TBITimerCtrl( int Id, int Wait ); | ||
1131 | |||
1132 | /* This routine returns a 64-bit time stamp value that is initialised to zero | ||
1133 | via a __TBITimerCtrl timer enabling call. */ | ||
1134 | long long __TBITimeStamp( int Id ); | ||
1135 | |||
1136 | /* To manage a periodic timer each period elapsed should be subtracted from | ||
1137 | the current timer value to attempt to set up the next timer trigger. The | ||
1138 | Wait parameter should be a value in the recommended TBI_TIMERWAIT_* range. | ||
1139 | The return value is the new aggregate value that the timer was updated to; | ||
1140 | if this is less than zero then a timer trigger is guaranteed to be | ||
1141 | generated after the number of ticks implied; if a positive result is | ||
1142 | returned either iterative or step-wise corrective action must be taken to | ||
1143 | resynchronise the timer and hence provoke a future timer trigger. */ | ||
1144 | int __TBITimerAdd( int Id, int Wait ); | ||
1145 | |||
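A periodic tick built on these calls might therefore look like this sketch (the period is a hypothetical value within the recommended range):

    #define TICK_WAIT (-10000)                 /* hypothetical period */

    void TickStart(void) { __TBITimerCtrl(0, TICK_WAIT); }

    void TickElapsed(void)                     /* from timer handler  */
    {
        int Left = __TBITimerAdd(0, TICK_WAIT);
        while (Left >= 0)                      /* behind: resynchronise */
            Left = __TBITimerAdd(0, TICK_WAIT);
    }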
1146 | /* String table search function, pStart is first entry to check or NULL, | ||
1147 | pStr is string data to search for and MatchLen is either length of string | ||
1148 | to compare for an exact match or negative length to compare for partial | ||
1149 | match. */ | ||
1150 | const TBISTR *__TBIFindStr( const TBISTR *pStart, | ||
1151 | const char *pStr, int MatchLen ); | ||
1152 | |||
1153 | /* String table translate function, pStr is text to translate and Len is | ||
1154 | its length. The value returned may not be a string pointer if the | ||
1155 | translation value is really some other type, 64-bit alignment of the return | ||
1156 | pointer is guaranteed so almost any type including a structure could be | ||
1157 | located with this routine. */ | ||
1158 | const void *__TBITransStr( const char *pStr, int Len ); | ||
1159 | |||
1160 | |||
1161 | |||
1162 | /* Arbitrary physical memory access windows, use different Channels to avoid | ||
1163 | conflict/thrashing within a single piece of code. */ | ||
1164 | void *__TBIPhysAccess( int Channel, int PhysAddr, int Bytes ); | ||
1165 | void __TBIPhysRelease( int Channel, void *pLinAddr ); | ||
1166 | |||
1167 | #ifdef METAC_1_0 | ||
1168 | /* Data cache function nullified because data cache is off */ | ||
1169 | #define TBIDCACHE_FLUSH( pAddr ) | ||
1170 | #define TBIDCACHE_PRELOAD( Type, pAddr ) ((Type) (pAddr)) | ||
1171 | #define TBIDCACHE_REFRESH( Type, pAddr ) ((Type) (pAddr)) | ||
1172 | #endif | ||
1173 | #ifdef METAC_1_1 | ||
1174 | /* To flush a single cache line from the data cache using a linear address */ | ||
1175 | #define TBIDCACHE_FLUSH( pAddr ) ((volatile char *) \ | ||
1176 | (((unsigned int) (pAddr))>>LINSYSLFLUSH_S))[0] = 0 | ||
1177 | |||
1178 | extern void * __builtin_dcache_preload (void *); | ||
1179 | |||
1180 | /* Try to ensure that the data at the address concerned is in the cache */ | ||
1181 | #define TBIDCACHE_PRELOAD( Type, Addr ) \ | ||
1182 | ((Type) __builtin_dcache_preload ((void *)(Addr))) | ||
1183 | |||
1184 | extern void * __builtin_dcache_refresh (void *); | ||
1185 | |||
1186 | /* Flush any old version of data from address and re-load a new copy */ | ||
1187 | #define TBIDCACHE_REFRESH( Type, Addr ) __extension__ ({ \ | ||
1188 | Type __addr = (Type)(Addr); \ | ||
1189 | (void)__builtin_dcache_refresh ((void *)(((unsigned int)(__addr))>>6)); \ | ||
1190 | __addr; }) | ||
1191 | |||
1192 | #endif | ||
1193 | #ifndef METAC_1_0 | ||
1194 | #ifndef METAC_1_1 | ||
1195 | /* Support for DCACHE builtin */ | ||
1196 | extern void __builtin_dcache_flush (void *); | ||
1197 | |||
1198 | /* To flush a single cache line from the data cache using a linear address */ | ||
1199 | #define TBIDCACHE_FLUSH( Addr ) \ | ||
1200 | __builtin_dcache_flush ((void *)(Addr)) | ||
1201 | |||
1202 | extern void * __builtin_dcache_preload (void *); | ||
1203 | |||
1204 | /* Try to ensure that the data at the address concerned is in the cache */ | ||
1205 | #define TBIDCACHE_PRELOAD( Type, Addr ) \ | ||
1206 | ((Type) __builtin_dcache_preload ((void *)(Addr))) | ||
1207 | |||
1208 | extern void * __builtin_dcache_refresh (void *); | ||
1209 | |||
1210 | /* Flush any old version of data from address and re-load a new copy */ | ||
1211 | #define TBIDCACHE_REFRESH( Type, Addr ) \ | ||
1212 | ((Type) __builtin_dcache_refresh ((void *)(Addr))) | ||
1213 | |||
1214 | #endif | ||
1215 | #endif | ||
1216 | |||
1217 | /* Flush the MMCU cache */ | ||
1218 | #define TBIMCACHE_FLUSH() { ((volatile int *) LINSYSCFLUSH_MMCU)[0] = 0; } | ||
1219 | |||
1220 | #ifdef METAC_2_1 | ||
1221 | /* Obtain the MMU table entry for the specified address */ | ||
1222 | #define TBIMTABLE_LEAFDATA(ADDR) TBIXCACHE_RD((int)(ADDR) & (-1<<6)) | ||
1223 | |||
1224 | #ifndef __ASSEMBLY__ | ||
1225 | /* Obtain the full MMU table entry for the specified address */ | ||
1226 | #define TBIMTABLE_DATA(ADDR) __extension__ ({ TBIRES __p; \ | ||
1227 | __p.Val = TBIXCACHE_RL((int)(ADDR) & (-1<<6)); \ | ||
1228 | __p; }) | ||
1229 | #endif | ||
1230 | #endif | ||
1231 | |||
1232 | /* Combine a physical base address, and a linear address | ||
1233 | * Internal use only | ||
1234 | */ | ||
1235 | #define _TBIMTABLE_LIN2PHYS(PHYS, LIN, LMASK) (void*)(((int)(PHYS)&0xFFFFF000)\ | ||
1236 | +((int)(LIN)&(LMASK))) | ||
1237 | |||
1238 | /* Convert a linear to a physical address */ | ||
1239 | #define TBIMTABLE_LIN2PHYS(LEAFDATA, ADDR) \ | ||
1240 | (((LEAFDATA) & CRLINPHY0_VAL_BIT) \ | ||
1241 | ? _TBIMTABLE_LIN2PHYS(LEAFDATA, ADDR, 0x00000FFF) \ | ||
1242 | : 0) | ||
1243 | |||
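On METAC_2_1 cores the two macros combine to translate a linear address (a sketch; pLinAddr is a hypothetical pointer, and 0 results when the entry is not valid):

    int Leaf = TBIMTABLE_LEAFDATA(pLinAddr);          /* MMU table entry */
    void *pPhys = TBIMTABLE_LIN2PHYS(Leaf, pLinAddr); /* 0 if invalid    */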
1244 | /* Debug support - using external debugger or host */ | ||
1245 | void __TBIDumpSegListEntries( void ); | ||
1246 | void __TBILogF( const char *pFmt, ... ); | ||
1247 | void __TBIAssert( const char *pFile, int LineNum, const char *pExp ); | ||
1248 | void __TBICont( const char *pMsg, ... ); /* TBIAssert -> 'wait for continue' */ | ||
1249 | |||
1250 | /* Array of signal name data for debug messages */ | ||
1251 | extern const char __TBISigNames[]; | ||
1252 | #endif /* ifndef __ASSEMBLY__ */ | ||
1253 | |||
1254 | |||
1255 | |||
1256 | /* Scale of sub-strings in the __TBISigNames string list */ | ||
1257 | #define TBI_SIGNAME_SCALE 4 | ||
1258 | #define TBI_SIGNAME_SCALE_S 2 | ||
1259 | |||
1260 | #define TBI_1_3 | ||
1261 | |||
1262 | #ifdef TBI_1_3 | ||
1263 | |||
1264 | #ifndef __ASSEMBLY__ | ||
1265 | #define TBIXCACHE_RD(ADDR) __extension__ ({\ | ||
1266 | void * __Addr = (void *)(ADDR); \ | ||
1267 | int __Data; \ | ||
1268 | __asm__ volatile ( "CACHERD\t%0,[%1+#0]" : \ | ||
1269 | "=r" (__Data) : "r" (__Addr) ); \ | ||
1270 | __Data; }) | ||
1271 | |||
1272 | #define TBIXCACHE_RL(ADDR) __extension__ ({\ | ||
1273 | void * __Addr = (void *)(ADDR); \ | ||
1274 | long long __Data; \ | ||
1275 | __asm__ volatile ( "CACHERL\t%0,%t0,[%1+#0]" : \ | ||
1276 | "=d" (__Data) : "r" (__Addr) ); \ | ||
1277 | __Data; }) | ||
1278 | |||
1279 | #define TBIXCACHE_WD(ADDR, DATA) do {\ | ||
1280 | void * __Addr = (void *)(ADDR); \ | ||
1281 | int __Data = DATA; \ | ||
1282 | __asm__ volatile ( "CACHEWD\t[%0+#0],%1" : \ | ||
1283 | : "r" (__Addr), "r" (__Data) ); } while(0) | ||
1284 | |||
1285 | #define TBIXCACHE_WL(ADDR, DATA) do {\ | ||
1286 | void * __Addr = (void *)(ADDR); \ | ||
1287 | long long __Data = DATA; \ | ||
1288 | __asm__ volatile ( "CACHEWL\t[%0+#0],%1,%t1" : \ | ||
1289 | : "r" (__Addr), "r" (__Data) ); } while(0) | ||
1290 | |||
1291 | #ifdef TBI_4_0 | ||
1292 | |||
1293 | #define TBICACHE_FLUSH_L1D_L2(ADDR) \ | ||
1294 | TBIXCACHE_WD(ADDR, CACHEW_FLUSH_L1D_L2) | ||
1295 | #define TBICACHE_WRITEBACK_L1D_L2(ADDR) \ | ||
1296 | TBIXCACHE_WD(ADDR, CACHEW_WRITEBACK_L1D_L2) | ||
1297 | #define TBICACHE_INVALIDATE_L1D(ADDR) \ | ||
1298 | TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1D) | ||
1299 | #define TBICACHE_INVALIDATE_L1D_L2(ADDR) \ | ||
1300 | TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1D_L2) | ||
1301 | #define TBICACHE_INVALIDATE_L1DTLB(ADDR) \ | ||
1302 | TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1DTLB) | ||
1303 | #define TBICACHE_INVALIDATE_L1I(ADDR) \ | ||
1304 | TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1I) | ||
1305 | #define TBICACHE_INVALIDATE_L1ITLB(ADDR) \ | ||
1306 | TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1ITLB) | ||
1307 | |||
1308 | #endif /* TBI_4_0 */ | ||
1309 | #endif /* ifndef __ASSEMBLY__ */ | ||
1310 | |||
1311 | /* | ||
1312 | * Calculate linear PC value from real PC and Minim mode control, the LSB of | ||
1313 | * the result returned indicates if address compression has occurred. | ||
1314 | */ | ||
1315 | #ifndef __ASSEMBLY__ | ||
1316 | #define METAG_LINPC( PCVal ) (\ | ||
1317 | ( (TBI_GETREG(TXPRIVEXT) & TXPRIVEXT_MINIMON_BIT) != 0 ) ? ( \ | ||
1318 | ( ((PCVal) & 0x00900000) == 0x00900000 ) ? \ | ||
1319 | (((PCVal) & 0xFFE00000) + (((PCVal) & 0x001FFFFC)>>1) + 1) : \ | ||
1320 | ( ((PCVal) & 0x00800000) == 0x00000000 ) ? \ | ||
1321 | (((PCVal) & 0xFF800000) + (((PCVal) & 0x007FFFFC)>>1) + 1) : \ | ||
1322 | (PCVal) ) \ | ||
1323 | : (PCVal) ) | ||
1324 | #define METAG_LINPC_X2BIT 0x00000001 /* Make (Size>>1) if compressed */ | ||
1325 | |||
1326 | /* Convert an arbitrary Linear address into a valid Minim PC or return 0 */ | ||
1327 | #define METAG_PCMINIM( LinVal ) (\ | ||
1328 | (((LinVal) & 0x00980000) == 0x00880000) ? \ | ||
1329 | (((LinVal) & 0xFFE00000) + (((LinVal) & 0x000FFFFE)<<1)) : \ | ||
1330 | (((LinVal) & 0x00C00000) == 0x00000000) ? \ | ||
1331 | (((LinVal) & 0xFF800000) + (((LinVal) & 0x003FFFFE)<<1)) : 0 ) | ||
1332 | |||
1333 | /* Reverse a METAG_LINPC conversion step to return the original PCVal */ | ||
1334 | #define METAG_PCLIN( LinVal ) ( 0xFFFFFFFC & (\ | ||
1335 | ( (LinVal & METAG_LINPC_X2BIT) != 0 ) ? METAG_PCMINIM( LinVal ) : \ | ||
1336 | (LinVal) )) | ||
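A hedged round-trip sketch of these three macros (pc_val is an arbitrary raw PC; the function is illustrative and assumes TBI_GETREG is usable in this context):

    /* Normalise a raw PC to linear form, note whether Minim address
     * compression occurred (the LSB of the linear value), then recover
     * the original word-aligned PC with METAG_PCLIN.
     */
    static unsigned int linpc_roundtrip(unsigned int pc_val)
    {
            unsigned int lin = METAG_LINPC(pc_val);
            int compressed = (lin & METAG_LINPC_X2BIT) != 0;

            /* when 'compressed' is set, code sizes halve (Size >> 1) */
            (void)compressed;

            return METAG_PCLIN(lin);   /* recovers pc_val & 0xFFFFFFFC */
    }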
1337 | |||
1338 | /* | ||
1339 | * Flush the MMCU Table cache privately for each thread. On cores that do not | ||
1340 | * support per-thread flushing it will flush all threads' mapping data. | ||
1341 | */ | ||
1342 | #define TBIMCACHE_TFLUSH(Thread) do {\ | ||
1343 | ((volatile int *)( LINSYSCFLUSH_TxMMCU_BASE + \ | ||
1344 | (LINSYSCFLUSH_TxMMCU_STRIDE*(Thread)) ))[0] = 0; \ | ||
1345 | } while(0) | ||
1346 | |||
1347 | /* | ||
1348 | * To flush a single linear-matched cache line from the code cache. In | ||
1349 | * cases where Minim is possible, the METAG_LINPC operation must be used | ||
1350 | * to pre-process the address being flushed. | ||
1351 | */ | ||
1352 | #define TBIICACHE_FLUSH( pAddr ) TBIXCACHE_WD (pAddr, CACHEW_ICACHE_BIT) | ||
1353 | |||
1354 | /* To flush a single linear-matched mapping from code/data MMU table cache */ | ||
1355 | #define TBIMCACHE_AFLUSH( pAddr, SegType ) \ | ||
1356 | TBIXCACHE_WD(pAddr, CACHEW_TLBFLUSH_BIT + ( \ | ||
1357 | ((SegType) == TBID_SEGTYPE_TEXT) ? CACHEW_ICACHE_BIT : 0 )) | ||
1358 | |||
1359 | /* | ||
1360 | * To flush translation data corresponding to a range of addresses without | ||
1361 | * using TBIMCACHE_TFLUSH to flush all of this thread's translation data. It | ||
1362 | * is necessary to know what stride (>= 4K) must be used to flush a specific | ||
1363 | * region. | ||
1364 | * | ||
1365 | * For example, direct mapped regions use the maximum page size (512K), which | ||
1366 | * may mean that only one flush is needed to cover the sub-set of the direct | ||
1367 | * mapped area used since it was set up. | ||
1368 | * | ||
1369 | * The function returns the stride on which flushes should be performed. | ||
1370 | * | ||
1371 | * If 0 is returned then the region is not subject to MMU caching, if -1 is | ||
1372 | * returned then this indicates that only TBIMCACHE_TFLUSH can be used to | ||
1373 | * flush the region concerned rather than TBIMCACHE_AFLUSH which this | ||
1374 | * function is designed to support. | ||
1375 | */ | ||
1376 | int __TBIMMUCacheStride( const void *pStart, int Bytes ); | ||
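A sketch of the flush loop this stride is meant to drive (the thread number passed to TBIMCACHE_TFLUSH is illustrative; __TBIMMUCacheFlush below wraps equivalent logic):

    static void flush_range_sketch(const void *pStart, int Bytes, int SegType)
    {
            int stride = __TBIMMUCacheStride(pStart, Bytes);
            const char *p = (const char *)pStart;
            const char *end = p + Bytes;

            if (stride == 0)
                    return;                 /* region not MMU cached */
            if (stride == -1) {
                    TBIMCACHE_TFLUSH(0);    /* whole-thread flush only */
                    return;
            }
            for (; p < end; p += stride)
                    TBIMCACHE_AFLUSH(p, SegType);
    }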
1377 | |||
1378 | /* | ||
1379 | * This function will use the above lower level functions to achieve an MMU | ||
1380 | * table data flush in as optimal a fashion as possible. On a system that | ||
1381 | * supports linear address based caching this function will also call the | ||
1382 | * code or data cache flush functions to maintain address/data coherency. | ||
1383 | * | ||
1384 | * SegType should be TBID_SEGTYPE_TEXT if the address range is for code or | ||
1385 | * any other value such as TBID_SEGTYPE_DATA for data. If an area is | ||
1386 | * used in both ways then call this function twice; once for each. | ||
1387 | */ | ||
1388 | void __TBIMMUCacheFlush( const void *pStart, int Bytes, int SegType ); | ||
1389 | |||
1390 | /* | ||
1391 | * Cached Core mode setup and flush functions allow one code and one data | ||
1392 | * region of the corresponding global or local cache partition size to be | ||
1393 | * locked into the corresponding cache memory. This prevents normal LRU | ||
1394 | * logic discarding the code or data and avoids write-thru bandwidth in | ||
1395 | * data areas. Code mappings are selected by specifying TBID_SEGTYPE_TEXT | ||
1396 | * for SegType, otherwise data mappings are created. | ||
1397 | * | ||
1398 | * Mode supplied should always contain the VALID bit and WINx selection data. | ||
1399 | * Data areas will be mapped read-only if the WRITE bit is not added. | ||
1400 | * | ||
1401 | * The address returned by the Opt function will either be the same as that | ||
1402 | * passed in (if optimisation cannot be supported) or the base of the new core | ||
1403 | * cached region in linear address space. The returned address must be passed | ||
1404 | * into the End function to remove the mapping when required. If a non-core | ||
1405 | * cached memory address is passed in, the End function has no effect. | ||
1406 | * Note that the region accessed MUST be flushed from the appropriate cache | ||
1407 | * before the End function is called to deliver correct operation. | ||
1408 | */ | ||
1409 | void *__TBICoreCacheOpt( const void *pStart, int Bytes, int SegType, int Mode ); | ||
1410 | void __TBICoreCacheEnd( const void *pOpt, int Bytes, int SegType ); | ||
1411 | |||
1412 | /* | ||
1413 | * Optimise physical access channel and flush side effects before releasing | ||
1414 | * the channel. If pStart is NULL the whole region must be flushed and this is | ||
1415 | * done automatically by the channel release function if optimisation is | ||
1416 | * enabled. Flushing the specific region that may have been accessed before | ||
1417 | * release should optimise this process. On physically cached systems we do | ||
1418 | * not flush the code/data caches; only the MMU table data needs flushing. | ||
1419 | */ | ||
1420 | void __TBIPhysOptim( int Channel, int IMode, int DMode ); | ||
1421 | void __TBIPhysFlush( int Channel, const void *pStart, int Bytes ); | ||
1422 | #endif | ||
1423 | #endif /* ifdef TBI_1_3 */ | ||
1424 | |||
1425 | #endif /* _ASM_METAG_TBX_H_ */ | ||
diff --git a/arch/metag/include/asm/tcm.h b/arch/metag/include/asm/tcm.h new file mode 100644 index 000000000000..7711c317b1d2 --- /dev/null +++ b/arch/metag/include/asm/tcm.h | |||
@@ -0,0 +1,30 @@ | |||
1 | #ifndef __ASM_TCM_H__ | ||
2 | #define __ASM_TCM_H__ | ||
3 | |||
4 | #include <linux/ioport.h> | ||
5 | #include <linux/list.h> | ||
6 | |||
7 | struct tcm_allocation { | ||
8 | struct list_head list; | ||
9 | unsigned int tag; | ||
10 | unsigned long addr; | ||
11 | unsigned long size; | ||
12 | }; | ||
13 | |||
14 | /* | ||
15 | * TCM memory region descriptor. | ||
16 | */ | ||
17 | struct tcm_region { | ||
18 | unsigned int tag; | ||
19 | struct resource res; | ||
20 | }; | ||
21 | |||
22 | #define TCM_INVALID_TAG 0xffffffff | ||
23 | |||
24 | unsigned long tcm_alloc(unsigned int tag, size_t len); | ||
25 | void tcm_free(unsigned int tag, unsigned long addr, size_t len); | ||
26 | unsigned int tcm_lookup_tag(unsigned long p); | ||
27 | |||
28 | int tcm_add_region(struct tcm_region *reg); | ||
29 | |||
30 | #endif | ||
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h new file mode 100644 index 000000000000..0ecd34d8b5f6 --- /dev/null +++ b/arch/metag/include/asm/thread_info.h | |||
@@ -0,0 +1,155 @@ | |||
1 | /* thread_info.h: Meta low-level thread information | ||
2 | * | ||
3 | * Copyright (C) 2002 David Howells (dhowells@redhat.com) | ||
4 | * - Incorporating suggestions made by Linus Torvalds and Dave Miller | ||
5 | * | ||
6 | * Meta port by Imagination Technologies | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_THREAD_INFO_H | ||
10 | #define _ASM_THREAD_INFO_H | ||
11 | |||
12 | #include <linux/compiler.h> | ||
13 | #include <asm/page.h> | ||
14 | |||
15 | #ifndef __ASSEMBLY__ | ||
16 | #include <asm/processor.h> | ||
17 | #endif | ||
18 | |||
19 | /* | ||
20 | * low level task data that entry.S needs immediate access to | ||
21 | * - this struct should fit entirely inside of one cache line | ||
22 | * - this struct shares the supervisor stack pages | ||
23 | * - if the contents of this structure are changed, the assembly constants must | ||
24 | * also be changed | ||
25 | */ | ||
26 | #ifndef __ASSEMBLY__ | ||
27 | |||
28 | /* This must be 8-byte aligned so we can ensure stack alignment. */ | ||
29 | struct thread_info { | ||
30 | struct task_struct *task; /* main task structure */ | ||
31 | struct exec_domain *exec_domain; /* execution domain */ | ||
32 | unsigned long flags; /* low level flags */ | ||
33 | unsigned long status; /* thread-synchronous flags */ | ||
34 | u32 cpu; /* current CPU */ | ||
35 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | ||
36 | |||
37 | mm_segment_t addr_limit; /* thread address space */ | ||
38 | struct restart_block restart_block; | ||
39 | |||
40 | u8 supervisor_stack[0]; | ||
41 | }; | ||
42 | |||
43 | #else /* !__ASSEMBLY__ */ | ||
44 | |||
45 | #include <generated/asm-offsets.h> | ||
46 | |||
47 | #endif | ||
48 | |||
49 | #define PREEMPT_ACTIVE 0x10000000 | ||
50 | |||
51 | #ifdef CONFIG_4KSTACKS | ||
52 | #define THREAD_SHIFT 12 | ||
53 | #else | ||
54 | #define THREAD_SHIFT 13 | ||
55 | #endif | ||
56 | |||
57 | #if THREAD_SHIFT >= PAGE_SHIFT | ||
58 | #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) | ||
59 | #else | ||
60 | #define THREAD_SIZE_ORDER 0 | ||
61 | #endif | ||
62 | |||
63 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) | ||
64 | |||
65 | #define STACK_WARN (THREAD_SIZE/8) | ||
66 | /* | ||
67 | * macros/functions for gaining access to the thread information structure | ||
68 | */ | ||
69 | #ifndef __ASSEMBLY__ | ||
70 | |||
71 | #define INIT_THREAD_INFO(tsk) \ | ||
72 | { \ | ||
73 | .task = &tsk, \ | ||
74 | .exec_domain = &default_exec_domain, \ | ||
75 | .flags = 0, \ | ||
76 | .cpu = 0, \ | ||
77 | .preempt_count = INIT_PREEMPT_COUNT, \ | ||
78 | .addr_limit = KERNEL_DS, \ | ||
79 | .restart_block = { \ | ||
80 | .fn = do_no_restart_syscall, \ | ||
81 | }, \ | ||
82 | } | ||
83 | |||
84 | #define init_thread_info (init_thread_union.thread_info) | ||
85 | #define init_stack (init_thread_union.stack) | ||
86 | |||
87 | /* how to get the current stack pointer from C */ | ||
88 | register unsigned long current_stack_pointer asm("A0StP") __used; | ||
89 | |||
90 | /* how to get the thread information struct from C */ | ||
91 | static inline struct thread_info *current_thread_info(void) | ||
92 | { | ||
93 | return (struct thread_info *)(current_stack_pointer & | ||
94 | ~(THREAD_SIZE - 1)); | ||
95 | } | ||
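For instance (illustrative values): with THREAD_SIZE = 8 KiB, an A0StP value of 0x40001abc masks down to 0x40000000, the base of the stack area where this struct thread_info sits.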
96 | |||
97 | #define __HAVE_ARCH_KSTACK_END | ||
98 | static inline int kstack_end(void *addr) | ||
99 | { | ||
100 | return addr == (void *) (((unsigned long) addr & ~(THREAD_SIZE - 1)) | ||
101 | + sizeof(struct thread_info)); | ||
102 | } | ||
103 | |||
104 | #endif | ||
105 | |||
106 | /* | ||
107 | * thread information flags | ||
108 | * - these are process state flags that various assembly files may need to | ||
109 | * access | ||
110 | * - pending work-to-be-done flags are in LSW | ||
111 | * - other flags in MSW | ||
112 | */ | ||
113 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | ||
114 | #define TIF_SIGPENDING 1 /* signal pending */ | ||
115 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | ||
116 | #define TIF_SINGLESTEP 3 /* restore singlestep on return to user | ||
117 | mode */ | ||
118 | #define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ | ||
119 | #define TIF_SECCOMP 5 /* secure computing */ | ||
120 | #define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */ | ||
121 | #define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ | ||
122 | #define TIF_POLLING_NRFLAG 8 /* true if poll_idle() is polling | ||
123 | TIF_NEED_RESCHED */ | ||
124 | #define TIF_MEMDIE 9 /* is terminating due to OOM killer */ | ||
125 | #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint instrumentation */ | ||
126 | |||
127 | |||
128 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | ||
129 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | ||
130 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | ||
131 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | ||
132 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | ||
133 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | ||
134 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | ||
135 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | ||
136 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) | ||
137 | |||
138 | /* work to do in syscall trace */ | ||
139 | #define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ | ||
140 | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | ||
141 | _TIF_SYSCALL_TRACEPOINT) | ||
142 | |||
143 | /* work to do on any return to u-space */ | ||
144 | #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ | ||
145 | _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ | ||
146 | _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \ | ||
147 | _TIF_NOTIFY_RESUME) | ||
148 | |||
149 | /* work to do on interrupt/exception return */ | ||
150 | #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ | ||
151 | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) | ||
152 | |||
153 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | ||
154 | |||
155 | #endif /* _ASM_THREAD_INFO_H */ | ||
diff --git a/arch/metag/include/asm/tlb.h b/arch/metag/include/asm/tlb.h new file mode 100644 index 000000000000..048282f1cc1f --- /dev/null +++ b/arch/metag/include/asm/tlb.h | |||
@@ -0,0 +1,36 @@ | |||
1 | #ifndef __ASM_METAG_TLB_H | ||
2 | #define __ASM_METAG_TLB_H | ||
3 | |||
4 | #include <asm/cacheflush.h> | ||
5 | #include <asm/page.h> | ||
6 | |||
7 | /* Note, read http://lkml.org/lkml/2004/1/15/6 */ | ||
8 | |||
9 | #ifdef CONFIG_METAG_META12 | ||
10 | |||
11 | #define tlb_start_vma(tlb, vma) \ | ||
12 | do { \ | ||
13 | if (!tlb->fullmm) \ | ||
14 | flush_cache_range(vma, vma->vm_start, vma->vm_end); \ | ||
15 | } while (0) | ||
16 | |||
17 | #define tlb_end_vma(tlb, vma) \ | ||
18 | do { \ | ||
19 | if (!tlb->fullmm) \ | ||
20 | flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ | ||
21 | } while (0) | ||
22 | |||
23 | |||
24 | #else | ||
25 | |||
26 | #define tlb_start_vma(tlb, vma) do { } while (0) | ||
27 | #define tlb_end_vma(tlb, vma) do { } while (0) | ||
28 | |||
29 | #endif | ||
30 | |||
31 | #define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0) | ||
32 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) | ||
33 | |||
34 | #include <asm-generic/tlb.h> | ||
35 | |||
36 | #endif | ||
diff --git a/arch/metag/include/asm/tlbflush.h b/arch/metag/include/asm/tlbflush.h new file mode 100644 index 000000000000..566acf918a64 --- /dev/null +++ b/arch/metag/include/asm/tlbflush.h | |||
@@ -0,0 +1,77 @@ | |||
1 | #ifndef __ASM_METAG_TLBFLUSH_H | ||
2 | #define __ASM_METAG_TLBFLUSH_H | ||
3 | |||
4 | #include <linux/io.h> | ||
5 | #include <linux/sched.h> | ||
6 | #include <asm/metag_mem.h> | ||
7 | #include <asm/pgalloc.h> | ||
8 | |||
9 | /* | ||
10 | * TLB flushing: | ||
11 | * | ||
12 | * - flush_tlb() flushes the current mm struct TLBs | ||
13 | * - flush_tlb_all() flushes all processes' TLBs | ||
14 | * - flush_tlb_mm(mm) flushes the specified mm context TLBs | ||
15 | * - flush_tlb_page(vma, vmaddr) flushes one page | ||
16 | * - flush_tlb_range(mm, start, end) flushes a range of pages | ||
17 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | ||
18 | * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables | ||
19 | * | ||
20 | * FIXME: Meta 2 can flush single TLB entries. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #if defined(CONFIG_METAG_META21) && !defined(CONFIG_SMP) | ||
25 | static inline void __flush_tlb(void) | ||
26 | { | ||
27 | /* flush TLB entries for just the current hardware thread */ | ||
28 | int thread = hard_processor_id(); | ||
29 | metag_out32(0, (LINSYSCFLUSH_TxMMCU_BASE + | ||
30 | LINSYSCFLUSH_TxMMCU_STRIDE * thread)); | ||
31 | } | ||
32 | #else | ||
33 | static inline void __flush_tlb(void) | ||
34 | { | ||
35 | /* flush TLB entries for all hardware threads */ | ||
36 | metag_out32(0, LINSYSCFLUSH_MMCU); | ||
37 | } | ||
38 | #endif /* defined(CONFIG_METAG_META21) && !defined(CONFIG_SMP) */ | ||
39 | |||
40 | #define flush_tlb() __flush_tlb() | ||
41 | |||
42 | #define flush_tlb_all() __flush_tlb() | ||
43 | |||
44 | #define local_flush_tlb_all() __flush_tlb() | ||
45 | |||
46 | static inline void flush_tlb_mm(struct mm_struct *mm) | ||
47 | { | ||
48 | if (mm == current->active_mm) | ||
49 | __flush_tlb(); | ||
50 | } | ||
51 | |||
52 | static inline void flush_tlb_page(struct vm_area_struct *vma, | ||
53 | unsigned long addr) | ||
54 | { | ||
55 | flush_tlb_mm(vma->vm_mm); | ||
56 | } | ||
57 | |||
58 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
59 | unsigned long start, unsigned long end) | ||
60 | { | ||
61 | flush_tlb_mm(vma->vm_mm); | ||
62 | } | ||
63 | |||
64 | static inline void flush_tlb_pgtables(struct mm_struct *mm, | ||
65 | unsigned long start, unsigned long end) | ||
66 | { | ||
67 | flush_tlb_mm(mm); | ||
68 | } | ||
69 | |||
70 | static inline void flush_tlb_kernel_range(unsigned long start, | ||
71 | unsigned long end) | ||
72 | { | ||
73 | flush_tlb_all(); | ||
74 | } | ||
75 | |||
76 | #endif /* __ASM_METAG_TLBFLUSH_H */ | ||
77 | |||
diff --git a/arch/metag/include/asm/topology.h b/arch/metag/include/asm/topology.h new file mode 100644 index 000000000000..23f5118f58db --- /dev/null +++ b/arch/metag/include/asm/topology.h | |||
@@ -0,0 +1,53 @@ | |||
1 | #ifndef _ASM_METAG_TOPOLOGY_H | ||
2 | #define _ASM_METAG_TOPOLOGY_H | ||
3 | |||
4 | #ifdef CONFIG_NUMA | ||
5 | |||
6 | /* sched_domains SD_NODE_INIT for Meta machines */ | ||
7 | #define SD_NODE_INIT (struct sched_domain) { \ | ||
8 | .parent = NULL, \ | ||
9 | .child = NULL, \ | ||
10 | .groups = NULL, \ | ||
11 | .min_interval = 8, \ | ||
12 | .max_interval = 32, \ | ||
13 | .busy_factor = 32, \ | ||
14 | .imbalance_pct = 125, \ | ||
15 | .cache_nice_tries = 2, \ | ||
16 | .busy_idx = 3, \ | ||
17 | .idle_idx = 2, \ | ||
18 | .newidle_idx = 0, \ | ||
19 | .wake_idx = 0, \ | ||
20 | .forkexec_idx = 0, \ | ||
21 | .flags = SD_LOAD_BALANCE \ | ||
22 | | SD_BALANCE_FORK \ | ||
23 | | SD_BALANCE_EXEC \ | ||
24 | | SD_BALANCE_NEWIDLE \ | ||
25 | | SD_SERIALIZE, \ | ||
26 | .last_balance = jiffies, \ | ||
27 | .balance_interval = 1, \ | ||
28 | .nr_balance_failed = 0, \ | ||
29 | } | ||
30 | |||
31 | #define cpu_to_node(cpu) ((void)(cpu), 0) | ||
32 | #define parent_node(node) ((void)(node), 0) | ||
33 | |||
34 | #define cpumask_of_node(node) ((void)node, cpu_online_mask) | ||
35 | |||
36 | #define pcibus_to_node(bus) ((void)(bus), -1) | ||
37 | #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ | ||
38 | cpu_all_mask : \ | ||
39 | cpumask_of_node(pcibus_to_node(bus))) | ||
40 | |||
41 | #endif | ||
42 | |||
43 | #define mc_capable() (1) | ||
44 | |||
45 | const struct cpumask *cpu_coregroup_mask(unsigned int cpu); | ||
46 | |||
47 | extern cpumask_t cpu_core_map[NR_CPUS]; | ||
48 | |||
49 | #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) | ||
50 | |||
51 | #include <asm-generic/topology.h> | ||
52 | |||
53 | #endif /* _ASM_METAG_TOPOLOGY_H */ | ||
diff --git a/arch/metag/include/asm/traps.h b/arch/metag/include/asm/traps.h new file mode 100644 index 000000000000..ac808740bd84 --- /dev/null +++ b/arch/metag/include/asm/traps.h | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005,2008 Imagination Technologies | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file COPYING in the main directory of this archive | ||
6 | * for more details. | ||
7 | */ | ||
8 | |||
9 | #ifndef _METAG_TBIVECTORS_H | ||
10 | #define _METAG_TBIVECTORS_H | ||
11 | |||
12 | #ifndef __ASSEMBLY__ | ||
13 | |||
14 | #include <asm/tbx.h> | ||
15 | |||
16 | typedef TBIRES (*kick_irq_func_t)(TBIRES, int, int, int, PTBI, int *); | ||
17 | |||
18 | extern TBIRES kick_handler(TBIRES, int, int, int, PTBI); | ||
19 | struct kick_irq_handler { | ||
20 | struct list_head list; | ||
21 | kick_irq_func_t func; | ||
22 | }; | ||
23 | |||
24 | extern void kick_register_func(struct kick_irq_handler *); | ||
25 | extern void kick_unregister_func(struct kick_irq_handler *); | ||
26 | |||
27 | extern void head_end(TBIRES, unsigned long); | ||
28 | extern void restart_critical_section(TBIRES State); | ||
29 | extern TBIRES tail_end_sys(TBIRES, int, int *); | ||
30 | static inline TBIRES tail_end(TBIRES state) | ||
31 | { | ||
32 | return tail_end_sys(state, -1, NULL); | ||
33 | } | ||
34 | |||
35 | DECLARE_PER_CPU(PTBI, pTBI); | ||
36 | extern PTBI pTBI_get(unsigned int); | ||
37 | |||
38 | extern int ret_from_fork(TBIRES arg); | ||
39 | |||
40 | extern int do_page_fault(struct pt_regs *regs, unsigned long address, | ||
41 | unsigned int write_access, unsigned int trapno); | ||
42 | |||
43 | extern TBIRES __TBIUnExpXXX(TBIRES State, int SigNum, int Triggers, int Inst, | ||
44 | PTBI pTBI); | ||
45 | |||
46 | #endif | ||
47 | |||
48 | #endif /* _METAG_TBIVECTORS_H */ | ||
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h new file mode 100644 index 000000000000..0748b0a97986 --- /dev/null +++ b/arch/metag/include/asm/uaccess.h | |||
@@ -0,0 +1,241 @@ | |||
1 | #ifndef __METAG_UACCESS_H | ||
2 | #define __METAG_UACCESS_H | ||
3 | |||
4 | /* | ||
5 | * User space memory access functions | ||
6 | */ | ||
7 | #include <linux/sched.h> | ||
8 | |||
9 | #define VERIFY_READ 0 | ||
10 | #define VERIFY_WRITE 1 | ||
11 | |||
12 | /* | ||
13 | * The fs value determines whether argument validity checking should be | ||
14 | * performed or not. If get_fs() == USER_DS, checking is performed; with | ||
15 | * get_fs() == KERNEL_DS, checking is bypassed. | ||
16 | * | ||
17 | * For historical reasons, these macros are grossly misnamed. | ||
18 | */ | ||
19 | |||
20 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) | ||
21 | |||
22 | #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) | ||
23 | #define USER_DS MAKE_MM_SEG(PAGE_OFFSET) | ||
24 | |||
25 | #define get_ds() (KERNEL_DS) | ||
26 | #define get_fs() (current_thread_info()->addr_limit) | ||
27 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | ||
28 | |||
29 | #define segment_eq(a, b) ((a).seg == (b).seg) | ||
30 | |||
31 | #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) | ||
32 | /* | ||
33 | * Explicitly allow NULL pointers here. Parts of the kernel such | ||
34 | * as readv/writev use access_ok to validate pointers, but want | ||
35 | * to allow NULL pointers for various reasons. NULL pointers are | ||
36 | * safe to allow through because the first page is not mappable on | ||
37 | * Meta. | ||
38 | * | ||
39 | * We also wish to avoid letting user code access the system area | ||
40 | * and the kernel half of the address space. | ||
41 | */ | ||
42 | #define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \ | ||
43 | ((addr) > PAGE_OFFSET && \ | ||
44 | (addr) < LINCORE_BASE)) | ||
45 | |||
46 | static inline int __access_ok(unsigned long addr, unsigned long size) | ||
47 | { | ||
48 | return __kernel_ok || !__user_bad(addr, size); | ||
49 | } | ||
50 | |||
51 | #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \ | ||
52 | (unsigned long)(size)) | ||
53 | |||
54 | static inline int verify_area(int type, const void *addr, unsigned long size) | ||
55 | { | ||
56 | return access_ok(type, addr, size) ? 0 : -EFAULT; | ||
57 | } | ||
58 | |||
59 | /* | ||
60 | * The exception table consists of pairs of addresses: the first is the | ||
61 | * address of an instruction that is allowed to fault, and the second is | ||
62 | * the address at which the program should continue. No registers are | ||
63 | * modified, so it is entirely up to the continuation code to figure out | ||
64 | * what to do. | ||
65 | * | ||
66 | * All the routines below use bits of fixup code that are out of line | ||
67 | * with the main instruction path. This means when everything is well, | ||
68 | * we don't even have to jump over them. Further, they do not intrude | ||
69 | * on our cache or tlb entries. | ||
70 | */ | ||
71 | struct exception_table_entry { | ||
72 | unsigned long insn, fixup; | ||
73 | }; | ||
74 | |||
75 | extern int fixup_exception(struct pt_regs *regs); | ||
76 | |||
77 | /* | ||
78 | * These are the main single-value transfer routines. They automatically | ||
79 | * use the right size if we just have the right pointer type. | ||
80 | */ | ||
81 | |||
82 | #define put_user(x, ptr) \ | ||
83 | __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) | ||
84 | #define __put_user(x, ptr) \ | ||
85 | __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) | ||
86 | |||
87 | extern void __put_user_bad(void); | ||
88 | |||
89 | #define __put_user_nocheck(x, ptr, size) \ | ||
90 | ({ \ | ||
91 | long __pu_err; \ | ||
92 | __put_user_size((x), (ptr), (size), __pu_err); \ | ||
93 | __pu_err; \ | ||
94 | }) | ||
95 | |||
96 | #define __put_user_check(x, ptr, size) \ | ||
97 | ({ \ | ||
98 | long __pu_err = -EFAULT; \ | ||
99 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ | ||
100 | if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ | ||
101 | __put_user_size((x), __pu_addr, (size), __pu_err); \ | ||
102 | __pu_err; \ | ||
103 | }) | ||
104 | |||
105 | extern long __put_user_asm_b(unsigned int x, void __user *addr); | ||
106 | extern long __put_user_asm_w(unsigned int x, void __user *addr); | ||
107 | extern long __put_user_asm_d(unsigned int x, void __user *addr); | ||
108 | extern long __put_user_asm_l(unsigned long long x, void __user *addr); | ||
109 | |||
110 | #define __put_user_size(x, ptr, size, retval) \ | ||
111 | do { \ | ||
112 | retval = 0; \ | ||
113 | switch (size) { \ | ||
114 | case 1: \ | ||
115 | retval = __put_user_asm_b((unsigned int)x, ptr); break; \ | ||
116 | case 2: \ | ||
117 | retval = __put_user_asm_w((unsigned int)x, ptr); break; \ | ||
118 | case 4: \ | ||
119 | retval = __put_user_asm_d((unsigned int)x, ptr); break; \ | ||
120 | case 8: \ | ||
121 | retval = __put_user_asm_l((unsigned long long)x, ptr); break; \ | ||
122 | default: \ | ||
123 | __put_user_bad(); \ | ||
124 | } \ | ||
125 | } while (0) | ||
126 | |||
127 | #define get_user(x, ptr) \ | ||
128 | __get_user_check((x), (ptr), sizeof(*(ptr))) | ||
129 | #define __get_user(x, ptr) \ | ||
130 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) | ||
131 | |||
132 | extern long __get_user_bad(void); | ||
133 | |||
134 | #define __get_user_nocheck(x, ptr, size) \ | ||
135 | ({ \ | ||
136 | long __gu_err, __gu_val; \ | ||
137 | __get_user_size(__gu_val, (ptr), (size), __gu_err); \ | ||
138 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
139 | __gu_err; \ | ||
140 | }) | ||
141 | |||
142 | #define __get_user_check(x, ptr, size) \ | ||
143 | ({ \ | ||
144 | long __gu_err = -EFAULT, __gu_val = 0; \ | ||
145 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | ||
146 | if (access_ok(VERIFY_READ, __gu_addr, size)) \ | ||
147 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ | ||
148 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
149 | __gu_err; \ | ||
150 | }) | ||
151 | |||
152 | extern unsigned char __get_user_asm_b(const void __user *addr, long *err); | ||
153 | extern unsigned short __get_user_asm_w(const void __user *addr, long *err); | ||
154 | extern unsigned int __get_user_asm_d(const void __user *addr, long *err); | ||
155 | |||
156 | #define __get_user_size(x, ptr, size, retval) \ | ||
157 | do { \ | ||
158 | retval = 0; \ | ||
159 | switch (size) { \ | ||
160 | case 1: \ | ||
161 | x = __get_user_asm_b(ptr, &retval); break; \ | ||
162 | case 2: \ | ||
163 | x = __get_user_asm_w(ptr, &retval); break; \ | ||
164 | case 4: \ | ||
165 | x = __get_user_asm_d(ptr, &retval); break; \ | ||
166 | default: \ | ||
167 | (x) = __get_user_bad(); \ | ||
168 | } \ | ||
169 | } while (0) | ||
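A minimal sketch of the checked accessors in use (the function and pointer are hypothetical; both macros return 0 on success or -EFAULT, with access_ok() performed by the checked variants):

    /* Read a word from user memory, then write back an incremented
     * value.
     */
    static long increment_user_word(int __user *uptr)
    {
            int val;

            if (get_user(val, uptr))
                    return -EFAULT;
            return put_user(val + 1, uptr);
    }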
170 | |||
171 | /* | ||
172 | * Copy a null terminated string from userspace. | ||
173 | * | ||
174 | * Must return: | ||
175 | * -EFAULT for an exception | ||
176 | * count if we hit the buffer limit | ||
177 | * bytes copied if we hit a null byte | ||
178 | * (without the null byte) | ||
179 | */ | ||
180 | |||
181 | extern long __must_check __strncpy_from_user(char *dst, const char __user *src, | ||
182 | long count); | ||
183 | |||
184 | #define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count) | ||
185 | |||
186 | /* | ||
187 | * Return the size of a string (including the ending 0) | ||
188 | * | ||
189 | * Return 0 on exception, a value greater than N if too long | ||
190 | */ | ||
191 | extern long __must_check strnlen_user(const char __user *src, long count); | ||
192 | |||
193 | #define strlen_user(str) strnlen_user(str, 32767) | ||
194 | |||
195 | extern unsigned long __must_check __copy_user_zeroing(void *to, | ||
196 | const void __user *from, | ||
197 | unsigned long n); | ||
198 | |||
199 | static inline unsigned long | ||
200 | copy_from_user(void *to, const void __user *from, unsigned long n) | ||
201 | { | ||
202 | if (access_ok(VERIFY_READ, from, n)) | ||
203 | return __copy_user_zeroing(to, from, n); | ||
204 | return n; | ||
205 | } | ||
206 | |||
207 | #define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n) | ||
208 | #define __copy_from_user_inatomic __copy_from_user | ||
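The usual calling pattern, since copy_from_user() returns the number of bytes left uncopied rather than an error code (sketch; the function name is illustrative):

    static int fetch_user_buf(void *dst, const void __user *src,
                              unsigned long n)
    {
            if (copy_from_user(dst, src, n))
                    return -EFAULT;   /* some bytes were not copied */
            return 0;
    }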
209 | |||
210 | extern unsigned long __must_check __copy_user(void __user *to, | ||
211 | const void *from, | ||
212 | unsigned long n); | ||
213 | |||
214 | static inline unsigned long copy_to_user(void __user *to, const void *from, | ||
215 | unsigned long n) | ||
216 | { | ||
217 | if (access_ok(VERIFY_WRITE, to, n)) | ||
218 | return __copy_user(to, from, n); | ||
219 | return n; | ||
220 | } | ||
221 | |||
222 | #define __copy_to_user(to, from, n) __copy_user(to, from, n) | ||
223 | #define __copy_to_user_inatomic __copy_to_user | ||
224 | |||
225 | /* | ||
226 | * Zero Userspace | ||
227 | */ | ||
228 | |||
229 | extern unsigned long __must_check __do_clear_user(void __user *to, | ||
230 | unsigned long n); | ||
231 | |||
232 | static inline unsigned long clear_user(void __user *to, unsigned long n) | ||
233 | { | ||
234 | if (access_ok(VERIFY_WRITE, to, n)) | ||
235 | return __do_clear_user(to, n); | ||
236 | return n; | ||
237 | } | ||
238 | |||
239 | #define __clear_user(to, n) __do_clear_user(to, n) | ||
240 | |||
241 | #endif /* _METAG_UACCESS_H */ | ||
diff --git a/arch/metag/include/asm/unistd.h b/arch/metag/include/asm/unistd.h new file mode 100644 index 000000000000..32955a18fb32 --- /dev/null +++ b/arch/metag/include/asm/unistd.h | |||
@@ -0,0 +1,12 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <uapi/asm/unistd.h> | ||
11 | |||
12 | #define __ARCH_WANT_SYS_CLONE | ||
diff --git a/arch/metag/include/asm/user_gateway.h b/arch/metag/include/asm/user_gateway.h new file mode 100644 index 000000000000..e404c09e3b74 --- /dev/null +++ b/arch/metag/include/asm/user_gateway.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Imagination Technologies | ||
3 | */ | ||
4 | |||
5 | #ifndef __ASM_METAG_USER_GATEWAY_H | ||
6 | #define __ASM_METAG_USER_GATEWAY_H | ||
7 | |||
8 | #include <asm/page.h> | ||
9 | |||
10 | /* Page of kernel code accessible to userspace. */ | ||
11 | #define USER_GATEWAY_PAGE 0x6ffff000 | ||
12 | /* Offset of TLS pointer array in gateway page. */ | ||
13 | #define USER_GATEWAY_TLS 0x100 | ||
14 | |||
15 | #ifndef __ASSEMBLY__ | ||
16 | |||
17 | extern char __user_gateway_start; | ||
18 | extern char __user_gateway_end; | ||
19 | |||
20 | /* Kernel mapping of the gateway page. */ | ||
21 | extern void *gateway_page; | ||
22 | |||
23 | static inline void set_gateway_tls(void __user *tls_ptr) | ||
24 | { | ||
25 | void **gateway_tls = (void **)(gateway_page + USER_GATEWAY_TLS + | ||
26 | hard_processor_id() * 4); | ||
27 | |||
28 | *gateway_tls = (__force void *)tls_ptr; | ||
29 | #ifdef CONFIG_METAG_META12 | ||
30 | /* Avoid cache aliases on virtually tagged cache. */ | ||
31 | __builtin_dcache_flush((void *)USER_GATEWAY_PAGE + USER_GATEWAY_TLS + | ||
32 | hard_processor_id() * sizeof(void *)); | ||
33 | #endif | ||
34 | } | ||
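A hedged sketch of the corresponding userspace read (the hardware thread number is assumed to be known; obtaining it is outside the scope of this header):

    /* Fetch this hardware thread's TLS pointer from the slot array in
     * the gateway page. Mirrors the layout written by set_gateway_tls().
     */
    static inline void *read_gateway_tls(int hwthread)
    {
            void **slot = (void **)(USER_GATEWAY_PAGE + USER_GATEWAY_TLS
                                    + hwthread * sizeof(void *));

            return *slot;
    }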
35 | |||
36 | extern int __kuser_get_tls(void); | ||
37 | extern char *__kuser_get_tls_end[]; | ||
38 | |||
39 | extern int __kuser_cmpxchg(int, int, unsigned long *); | ||
40 | extern char *__kuser_cmpxchg_end[]; | ||
41 | |||
42 | #endif | ||
43 | |||
44 | #endif | ||
diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild new file mode 100644 index 000000000000..876c71f866de --- /dev/null +++ b/arch/metag/include/uapi/asm/Kbuild | |||
@@ -0,0 +1,13 @@ | |||
1 | # UAPI Header export list | ||
2 | include include/uapi/asm-generic/Kbuild.asm | ||
3 | |||
4 | header-y += byteorder.h | ||
5 | header-y += ptrace.h | ||
6 | header-y += resource.h | ||
7 | header-y += sigcontext.h | ||
8 | header-y += siginfo.h | ||
9 | header-y += swab.h | ||
10 | header-y += unistd.h | ||
11 | |||
12 | generic-y += mman.h | ||
13 | generic-y += setup.h | ||
diff --git a/arch/metag/include/uapi/asm/byteorder.h b/arch/metag/include/uapi/asm/byteorder.h new file mode 100644 index 000000000000..9558416d578b --- /dev/null +++ b/arch/metag/include/uapi/asm/byteorder.h | |||
@@ -0,0 +1 @@ | |||
#include <linux/byteorder/little_endian.h> | |||
diff --git a/arch/metag/include/uapi/asm/ptrace.h b/arch/metag/include/uapi/asm/ptrace.h new file mode 100644 index 000000000000..45d97809d33e --- /dev/null +++ b/arch/metag/include/uapi/asm/ptrace.h | |||
@@ -0,0 +1,113 @@ | |||
1 | #ifndef _UAPI_METAG_PTRACE_H | ||
2 | #define _UAPI_METAG_PTRACE_H | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | |||
6 | /* | ||
7 | * These are the layouts of the regsets returned by the GETREGSET ptrace call | ||
8 | */ | ||
9 | |||
10 | /* user_gp_regs::status */ | ||
11 | |||
12 | /* CBMarker bit (indicates catch state / catch replay) */ | ||
13 | #define USER_GP_REGS_STATUS_CATCH_BIT (1 << 22) | ||
14 | #define USER_GP_REGS_STATUS_CATCH_S 22 | ||
15 | /* LSM_STEP field (load/store multiple step) */ | ||
16 | #define USER_GP_REGS_STATUS_LSM_STEP_BITS (0x7 << 8) | ||
17 | #define USER_GP_REGS_STATUS_LSM_STEP_S 8 | ||
18 | /* SCC bit (indicates split 16x16 condition flags) */ | ||
19 | #define USER_GP_REGS_STATUS_SCC_BIT (1 << 4) | ||
20 | #define USER_GP_REGS_STATUS_SCC_S 4 | ||
21 | |||
22 | /* normal condition flags */ | ||
23 | /* CF_Z bit (Zero flag) */ | ||
24 | #define USER_GP_REGS_STATUS_CF_Z_BIT (1 << 3) | ||
25 | #define USER_GP_REGS_STATUS_CF_Z_S 3 | ||
26 | /* CF_N bit (Negative flag) */ | ||
27 | #define USER_GP_REGS_STATUS_CF_N_BIT (1 << 2) | ||
28 | #define USER_GP_REGS_STATUS_CF_N_S 2 | ||
29 | /* CF_V bit (oVerflow flag) */ | ||
30 | #define USER_GP_REGS_STATUS_CF_V_BIT (1 << 1) | ||
31 | #define USER_GP_REGS_STATUS_CF_V_S 1 | ||
32 | /* CF_C bit (Carry flag) */ | ||
33 | #define USER_GP_REGS_STATUS_CF_C_BIT (1 << 0) | ||
34 | #define USER_GP_REGS_STATUS_CF_C_S 0 | ||
35 | |||
36 | /* split 16x16 condition flags */ | ||
37 | /* SCF_LZ bit (Low Zero flag) */ | ||
38 | #define USER_GP_REGS_STATUS_SCF_LZ_BIT (1 << 3) | ||
39 | #define USER_GP_REGS_STATUS_SCF_LZ_S 3 | ||
40 | /* SCF_HZ bit (High Zero flag) */ | ||
41 | #define USER_GP_REGS_STATUS_SCF_HZ_BIT (1 << 2) | ||
42 | #define USER_GP_REGS_STATUS_SCF_HZ_S 2 | ||
43 | /* SCF_HC bit (High Carry flag) */ | ||
44 | #define USER_GP_REGS_STATUS_SCF_HC_BIT (1 << 1) | ||
45 | #define USER_GP_REGS_STATUS_SCF_HC_S 1 | ||
46 | /* SCF_LC bit (Low Carry flag) */ | ||
47 | #define USER_GP_REGS_STATUS_SCF_LC_BIT (1 << 0) | ||
48 | #define USER_GP_REGS_STATUS_SCF_LC_S 0 | ||
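Illustrative helpers for decoding a status word obtained via PTRACE_GETREGSET (a sketch only, not part of the ABI header):

    static int status_catch_replay(unsigned long status)
    {
            /* non-zero if catch state is pending replay */
            return (status & USER_GP_REGS_STATUS_CATCH_BIT) != 0;
    }

    static unsigned int status_lsm_step(unsigned long status)
    {
            /* current load/store multiple step */
            return (status & USER_GP_REGS_STATUS_LSM_STEP_BITS)
                            >> USER_GP_REGS_STATUS_LSM_STEP_S;
    }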
49 | |||
50 | /** | ||
51 | * struct user_gp_regs - User general purpose registers | ||
52 | * @dx: GP data unit regs (dx[reg][unit] = D{unit:0-1}.{reg:0-7}) | ||
53 | * @ax: GP address unit regs (ax[reg][unit] = A{unit:0-1}.{reg:0-3}) | ||
54 | * @pc: PC register | ||
55 | * @status: TXSTATUS register (condition flags, LSM_STEP etc) | ||
56 | * @rpt: TXRPT registers (branch repeat counter) | ||
57 | * @bpobits: TXBPOBITS register ("branch prediction other" bits) | ||
58 | * @mode: TXMODE register | ||
59 | * @_pad1: Reserved padding to make sizeof obviously 64-bit aligned | ||
60 | * | ||
61 | * This is the user-visible general purpose register state structure. | ||
62 | * | ||
63 | * It can be accessed through PTRACE_GETREGSET with NT_PRSTATUS. | ||
64 | * | ||
65 | * It is also used in the signal context. | ||
66 | */ | ||
67 | struct user_gp_regs { | ||
68 | unsigned long dx[8][2]; | ||
69 | unsigned long ax[4][2]; | ||
70 | unsigned long pc; | ||
71 | unsigned long status; | ||
72 | unsigned long rpt; | ||
73 | unsigned long bpobits; | ||
74 | unsigned long mode; | ||
75 | unsigned long _pad1; | ||
76 | }; | ||
77 | |||
78 | /** | ||
79 | * struct user_cb_regs - User catch buffer registers | ||
80 | * @flags: TXCATCH0 register (fault flags) | ||
81 | * @addr: TXCATCH1 register (fault address) | ||
82 | * @data: TXCATCH2 and TXCATCH3 registers (low and high data word) | ||
83 | * | ||
84 | * This is the user-visible catch buffer register state structure containing | ||
85 | * information about a failed memory access, and allowing the access to be | ||
86 | * modified and replayed. | ||
87 | * | ||
88 | * It can be accessed through PTRACE_GETREGSET with NT_METAG_CBUF. | ||
89 | */ | ||
90 | struct user_cb_regs { | ||
91 | unsigned long flags; | ||
92 | unsigned long addr; | ||
93 | unsigned long long data; | ||
94 | }; | ||
95 | |||
96 | /** | ||
97 | * struct user_rp_state - User read pipeline state | ||
98 | * @entries: Read pipeline entries | ||
99 | * @mask: Mask of valid pipeline entries (RPMask from TXDIVTIME register) | ||
100 | * | ||
101 | * This is the user-visible read pipeline state structure containing the entries | ||
102 | * currently in the read pipeline and the mask of valid entries. | ||
103 | * | ||
104 | * It can be accessed through PTRACE_GETREGSET with NT_METAG_RPIPE. | ||
105 | */ | ||
106 | struct user_rp_state { | ||
107 | unsigned long long entries[6]; | ||
108 | unsigned long mask; | ||
109 | }; | ||
110 | |||
111 | #endif /* __ASSEMBLY__ */ | ||
112 | |||
113 | #endif /* _UAPI_METAG_PTRACE_H */ | ||
diff --git a/arch/metag/include/uapi/asm/resource.h b/arch/metag/include/uapi/asm/resource.h new file mode 100644 index 000000000000..526d23cc3054 --- /dev/null +++ b/arch/metag/include/uapi/asm/resource.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef _UAPI_METAG_RESOURCE_H | ||
2 | #define _UAPI_METAG_RESOURCE_H | ||
3 | |||
4 | #define _STK_LIM_MAX (1 << 28) | ||
5 | #include <asm-generic/resource.h> | ||
6 | |||
7 | #endif /* _UAPI_METAG_RESOURCE_H */ | ||
diff --git a/arch/metag/include/uapi/asm/sigcontext.h b/arch/metag/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000000..ef79a910c1c4 --- /dev/null +++ b/arch/metag/include/uapi/asm/sigcontext.h | |||
@@ -0,0 +1,31 @@ | |||
1 | #ifndef _ASM_METAG_SIGCONTEXT_H | ||
2 | #define _ASM_METAG_SIGCONTEXT_H | ||
3 | |||
4 | #include <asm/ptrace.h> | ||
5 | |||
6 | /* | ||
7 | * In a sigcontext structure we need to store the active state of the | ||
8 | * user process so that it does not get trashed when we call the signal | ||
9 | * handler. That's not really the same as a user context that we are | ||
10 | * going to store on syscall etc. | ||
11 | */ | ||
12 | struct sigcontext { | ||
13 | struct user_gp_regs regs; /* needs to be first */ | ||
14 | |||
15 | /* | ||
16 | * Catch registers describing a memory fault. | ||
17 | * If USER_GP_REGS_STATUS_CATCH_BIT is set in regs.status then catch | ||
18 | * buffers have been saved and will be replayed on sigreturn. | ||
19 | * Clear that bit to discard the catch state instead of replaying it. | ||
20 | */ | ||
21 | struct user_cb_regs cb; | ||
22 | |||
23 | /* | ||
24 | * Read pipeline state. This will get restored on sigreturn. | ||
25 | */ | ||
26 | struct user_rp_state rp; | ||
27 | |||
28 | unsigned long oldmask; | ||
29 | }; | ||
30 | |||
31 | #endif | ||
diff --git a/arch/metag/include/uapi/asm/siginfo.h b/arch/metag/include/uapi/asm/siginfo.h new file mode 100644 index 000000000000..b2e0c8b62aef --- /dev/null +++ b/arch/metag/include/uapi/asm/siginfo.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _METAG_SIGINFO_H | ||
2 | #define _METAG_SIGINFO_H | ||
3 | |||
4 | #define __ARCH_SI_TRAPNO | ||
5 | |||
6 | #include <asm-generic/siginfo.h> | ||
7 | |||
8 | #endif | ||
diff --git a/arch/metag/include/uapi/asm/swab.h b/arch/metag/include/uapi/asm/swab.h new file mode 100644 index 000000000000..1076b3a6387a --- /dev/null +++ b/arch/metag/include/uapi/asm/swab.h | |||
@@ -0,0 +1,26 @@ | |||
1 | #ifndef __ASM_METAG_SWAB_H | ||
2 | #define __ASM_METAG_SWAB_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <asm-generic/swab.h> | ||
7 | |||
8 | static inline __attribute_const__ __u16 __arch_swab16(__u16 x) | ||
9 | { | ||
10 | return __builtin_metag_bswaps(x); | ||
11 | } | ||
12 | #define __arch_swab16 __arch_swab16 | ||
13 | |||
14 | static inline __attribute_const__ __u32 __arch_swab32(__u32 x) | ||
15 | { | ||
16 | return __builtin_metag_bswap(x); | ||
17 | } | ||
18 | #define __arch_swab32 __arch_swab32 | ||
19 | |||
20 | static inline __attribute_const__ __u64 __arch_swab64(__u64 x) | ||
21 | { | ||
22 | return __builtin_metag_bswapll(x); | ||
23 | } | ||
24 | #define __arch_swab64 __arch_swab64 | ||
25 | |||
26 | #endif /* __ASM_METAG_SWAB_H */ | ||
diff --git a/arch/metag/include/uapi/asm/unistd.h b/arch/metag/include/uapi/asm/unistd.h new file mode 100644 index 000000000000..b80b8e899d22 --- /dev/null +++ b/arch/metag/include/uapi/asm/unistd.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | /* Use the standard ABI for syscalls. */ | ||
11 | #include <asm-generic/unistd.h> | ||
12 | |||
13 | /* metag-specific syscalls. */ | ||
14 | #define __NR_metag_setglobalbit (__NR_arch_specific_syscall + 1) | ||
15 | __SYSCALL(__NR_metag_setglobalbit, sys_metag_setglobalbit) | ||
16 | #define __NR_metag_set_fpu_flags (__NR_arch_specific_syscall + 2) | ||
17 | __SYSCALL(__NR_metag_set_fpu_flags, sys_metag_set_fpu_flags) | ||
18 | #define __NR_metag_set_tls (__NR_arch_specific_syscall + 3) | ||
19 | __SYSCALL(__NR_metag_set_tls, sys_metag_set_tls) | ||
20 | #define __NR_metag_get_tls (__NR_arch_specific_syscall + 4) | ||
21 | __SYSCALL(__NR_metag_get_tls, sys_metag_get_tls) | ||
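From userspace these would normally be reached through the generic syscall(2) wrapper; a hypothetical example (not part of this header):

    #include <unistd.h>
    #include <sys/syscall.h>

    /* Fetch the current TLS pointer via the metag-specific syscall. */
    static void *metag_get_tls_example(void)
    {
            return (void *)syscall(__NR_metag_get_tls);
    }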
diff --git a/arch/metag/kernel/.gitignore b/arch/metag/kernel/.gitignore new file mode 100644 index 000000000000..c5f676c3c224 --- /dev/null +++ b/arch/metag/kernel/.gitignore | |||
@@ -0,0 +1 @@ | |||
vmlinux.lds | |||
diff --git a/arch/metag/kernel/Makefile b/arch/metag/kernel/Makefile new file mode 100644 index 000000000000..d7675f4a5df8 --- /dev/null +++ b/arch/metag/kernel/Makefile | |||
@@ -0,0 +1,39 @@ | |||
1 | # | ||
2 | # Makefile for the Linux/Meta kernel. | ||
3 | # | ||
4 | |||
5 | extra-y += head.o | ||
6 | extra-y += vmlinux.lds | ||
7 | |||
8 | obj-y += cachepart.o | ||
9 | obj-y += clock.o | ||
10 | obj-y += core_reg.o | ||
11 | obj-y += devtree.o | ||
12 | obj-y += dma.o | ||
13 | obj-y += irq.o | ||
14 | obj-y += kick.o | ||
15 | obj-y += machines.o | ||
16 | obj-y += process.o | ||
17 | obj-y += ptrace.o | ||
18 | obj-y += setup.o | ||
19 | obj-y += signal.o | ||
20 | obj-y += stacktrace.o | ||
21 | obj-y += sys_metag.o | ||
22 | obj-y += tbiunexp.o | ||
23 | obj-y += time.o | ||
24 | obj-y += topology.o | ||
25 | obj-y += traps.o | ||
26 | obj-y += user_gateway.o | ||
27 | |||
28 | obj-$(CONFIG_PERF_EVENTS) += perf/ | ||
29 | |||
30 | obj-$(CONFIG_METAG_COREMEM) += coremem.o | ||
31 | obj-$(CONFIG_METAG_DA) += da.o | ||
32 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | ||
33 | obj-$(CONFIG_FUNCTION_TRACER) += ftrace_stub.o | ||
34 | obj-$(CONFIG_MODULES) += metag_ksyms.o | ||
35 | obj-$(CONFIG_MODULES) += module.o | ||
36 | obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o | ||
37 | obj-$(CONFIG_SMP) += smp.o | ||
38 | obj-$(CONFIG_METAG_SUSPEND_MEM) += suspend.o | ||
39 | obj-$(CONFIG_METAG_USER_TCM) += tcm.o | ||
diff --git a/arch/metag/kernel/asm-offsets.c b/arch/metag/kernel/asm-offsets.c new file mode 100644 index 000000000000..bfc9205f9647 --- /dev/null +++ b/arch/metag/kernel/asm-offsets.c | |||
@@ -0,0 +1,14 @@ | |||
1 | /* | ||
2 | * This program is used to generate definitions needed by | ||
3 | * assembly language modules. | ||
4 | * | ||
5 | */ | ||
6 | |||
7 | #include <linux/kbuild.h> | ||
8 | #include <linux/thread_info.h> | ||
9 | |||
10 | int main(void) | ||
11 | { | ||
12 | DEFINE(THREAD_INFO_SIZE, sizeof(struct thread_info)); | ||
13 | return 0; | ||
14 | } | ||
diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c new file mode 100644 index 000000000000..3a589dfb966b --- /dev/null +++ b/arch/metag/kernel/cachepart.c | |||
@@ -0,0 +1,124 @@ | |||
1 | /* | ||
2 | * Meta cache partition manipulation. | ||
3 | * | ||
4 | * Copyright 2010 Imagination Technologies Ltd. | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/io.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <asm/processor.h> | ||
11 | #include <asm/cachepart.h> | ||
12 | #include <asm/metag_isa.h> | ||
13 | #include <asm/metag_mem.h> | ||
14 | |||
15 | #define SYSC_DCPART(n) (SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n)) | ||
16 | #define SYSC_ICPART(n) (SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n)) | ||
17 | |||
18 | #define CACHE_ASSOCIATIVITY 4 /* 4 way set-associative */ | ||
19 | #define ICACHE 0 | ||
20 | #define DCACHE 1 | ||
21 | |||
22 | /* The CORE_CONFIG2 register is not available on Meta 1 */ | ||
23 | #ifdef CONFIG_METAG_META21 | ||
24 | unsigned int get_dcache_size(void) | ||
25 | { | ||
26 | unsigned int config2 = metag_in32(METAC_CORE_CONFIG2); | ||
27 | return 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS) | ||
28 | >> METAC_CORECFG2_DCSZ_S); | ||
29 | } | ||
30 | |||
31 | unsigned int get_icache_size(void) | ||
32 | { | ||
33 | unsigned int config2 = metag_in32(METAC_CORE_CONFIG2); | ||
34 | return 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS) | ||
35 | >> METAC_CORE_C2ICSZ_S); | ||
36 | } | ||
37 | |||
38 | unsigned int get_global_dcache_size(void) | ||
39 | { | ||
40 | unsigned int cpart = metag_in32(SYSC_DCPART(hard_processor_id())); | ||
41 | unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS; | ||
42 | return (get_dcache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4; | ||
43 | } | ||
44 | |||
45 | unsigned int get_global_icache_size(void) | ||
46 | { | ||
47 | unsigned int cpart = metag_in32(SYSC_ICPART(hard_processor_id())); | ||
48 | unsigned int temp = cpart & SYSC_xCPARTG_AND_BITS; | ||
49 | return (get_icache_size() * ((temp >> SYSC_xCPARTG_AND_S) + 1)) >> 4; | ||
50 | } | ||
51 | |||
52 | static int get_thread_cache_size(unsigned int cache, int thread_id) | ||
53 | { | ||
54 | unsigned int cache_size; | ||
55 | unsigned int t_cache_part; | ||
56 | unsigned int enabled; | ||
57 | unsigned int offset = 0; | ||
58 | enabled = (cache == DCACHE ? metag_in32(MMCU_DCACHE_CTRL_ADDR) & 0x1 : | ||
59 | metag_in32(MMCU_ICACHE_CTRL_ADDR) & 0x1); | ||
60 | if (!enabled) | ||
61 | return 0; | ||
62 | #if PAGE_OFFSET >= LINGLOBAL_BASE | ||
63 | /* Checking for global cache */ | ||
64 | cache_size = (cache == DCACHE ? get_global_dcache_size() : | ||
65 | get_global_icache_size()); | ||
66 | offset = 8; | ||
67 | #else | ||
68 | cache_size = (cache == DCACHE ? get_dcache_size() : | ||
69 | get_icache_size()); | ||
70 | #endif | ||
71 | t_cache_part = (cache == DCACHE ? | ||
72 | (metag_in32(SYSC_DCPART(thread_id)) >> offset) & 0xF : | ||
73 | (metag_in32(SYSC_ICPART(thread_id)) >> offset) & 0xF); | ||
74 | switch (t_cache_part) { | ||
75 | case 0xF: | ||
76 | return cache_size; | ||
77 | case 0x7: | ||
78 | return cache_size / 2; | ||
79 | case 0x3: | ||
80 | return cache_size / 4; | ||
81 | case 0x1: | ||
82 | return cache_size / 8; | ||
83 | case 0: | ||
84 | return cache_size / 16; | ||
85 | } | ||
86 | return -1; | ||
87 | } | ||
88 | |||
89 | void check_for_cache_aliasing(int thread_id) | ||
90 | { | ||
91 | int thread_cache_size; | ||
92 | unsigned int cache_type; | ||
93 | for (cache_type = ICACHE; cache_type <= DCACHE; cache_type++) { | ||
94 | thread_cache_size = | ||
95 | get_thread_cache_size(cache_type, thread_id); | ||
96 | if (thread_cache_size < 0) | ||
97 | pr_emerg("Can't read %s cache size\n", | ||
98 | cache_type ? "DCACHE" : "ICACHE"); | ||
99 | else if (thread_cache_size == 0) | ||
100 | /* Cache is off. No need to check for aliasing */ | ||
101 | continue; | ||
102 | if (thread_cache_size / CACHE_ASSOCIATIVITY > PAGE_SIZE) { | ||
103 | pr_emerg("Cache aliasing detected in %s on Thread %d\n", | ||
104 | cache_type ? "DCACHE" : "ICACHE", thread_id); | ||
105 | pr_warn("Total %s size: %u bytes\n", | ||
106 | cache_type ? "DCACHE" : "ICACHE", | ||
107 | cache_type ? get_dcache_size() | ||
108 | : get_icache_size()); | ||
109 | pr_warn("Thread %s size: %d bytes\n", | ||
110 | cache_type ? "DCACHE" : "ICACHE", | ||
111 | thread_cache_size); | ||
112 | pr_warn("Page Size: %lu bytes\n", PAGE_SIZE); | ||
113 | } | ||
114 | } | ||
115 | } | ||
116 | |||
117 | #else | ||
118 | |||
119 | void check_for_cache_aliasing(int thread_id) | ||
120 | { | ||
121 | return; | ||
122 | } | ||
123 | |||
124 | #endif | ||
diff --git a/arch/metag/kernel/clock.c b/arch/metag/kernel/clock.c new file mode 100644 index 000000000000..defc84056f18 --- /dev/null +++ b/arch/metag/kernel/clock.c | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * arch/metag/kernel/clock.c | ||
3 | * | ||
4 | * Copyright (C) 2012 Imagination Technologies Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/delay.h> | ||
12 | #include <linux/io.h> | ||
13 | |||
14 | #include <asm/param.h> | ||
15 | #include <asm/clock.h> | ||
16 | |||
17 | struct meta_clock_desc _meta_clock; | ||
18 | |||
19 | /* Default machine get_core_freq callback. */ | ||
20 | static unsigned long get_core_freq_default(void) | ||
21 | { | ||
22 | #ifdef CONFIG_METAG_META21 | ||
23 | /* | ||
24 | * Meta 2 cores divide down the core clock for the Meta timers, so we | ||
25 | * can estimate the core clock from the divider. | ||
26 | */ | ||
27 | return (metag_in32(EXPAND_TIMER_DIV) + 1) * 1000000; | ||
28 | #else | ||
29 | /* | ||
30 | * On Meta 1 we don't know the core clock, but assuming the Meta timer | ||
31 | * is correct it can be estimated based on loops_per_jiffy. | ||
32 | */ | ||
33 | return (loops_per_jiffy * HZ * 5) >> 1; | ||
34 | #endif | ||
35 | } | ||
36 | |||
37 | /** | ||
38 | * setup_meta_clocks() - Set up the Meta clock. | ||
39 | * @desc: Clock descriptor usually provided by machine description | ||
40 | * | ||
41 | * Ensures all callbacks are valid. | ||
42 | */ | ||
43 | void __init setup_meta_clocks(struct meta_clock_desc *desc) | ||
44 | { | ||
45 | /* copy callbacks */ | ||
46 | if (desc) | ||
47 | _meta_clock = *desc; | ||
48 | |||
49 | /* set fallback functions */ | ||
50 | if (!_meta_clock.get_core_freq) | ||
51 | _meta_clock.get_core_freq = get_core_freq_default; | ||
52 | } | ||
53 | |||
diff --git a/arch/metag/kernel/core_reg.c b/arch/metag/kernel/core_reg.c new file mode 100644 index 000000000000..671cce8c34f2 --- /dev/null +++ b/arch/metag/kernel/core_reg.c | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * Support for reading and writing Meta core internal registers. | ||
3 | * | ||
4 | * Copyright (C) 2011 Imagination Technologies Ltd. | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/delay.h> | ||
9 | #include <linux/export.h> | ||
10 | |||
11 | #include <asm/core_reg.h> | ||
12 | #include <asm/global_lock.h> | ||
13 | #include <asm/hwthread.h> | ||
14 | #include <asm/io.h> | ||
15 | #include <asm/metag_mem.h> | ||
16 | #include <asm/metag_regs.h> | ||
17 | |||
18 | #define UNIT_BIT_MASK TXUXXRXRQ_UXX_BITS | ||
19 | #define REG_BIT_MASK TXUXXRXRQ_RX_BITS | ||
20 | #define THREAD_BIT_MASK TXUXXRXRQ_TX_BITS | ||
21 | |||
22 | #define UNIT_SHIFTS TXUXXRXRQ_UXX_S | ||
23 | #define REG_SHIFTS TXUXXRXRQ_RX_S | ||
24 | #define THREAD_SHIFTS TXUXXRXRQ_TX_S | ||
25 | |||
26 | #define UNIT_VAL(x) (((x) << UNIT_SHIFTS) & UNIT_BIT_MASK) | ||
27 | #define REG_VAL(x) (((x) << REG_SHIFTS) & REG_BIT_MASK) | ||
28 | #define THREAD_VAL(x) (((x) << THREAD_SHIFTS) & THREAD_BIT_MASK) | ||
29 | |||
30 | /* | ||
31 | * core_reg_write() - modify the content of a register in a core unit. | ||
32 | * @unit: The unit to be modified. | ||
33 | * @reg: Register number within the unit. | ||
34 | * @thread: The thread we want to access. | ||
35 | * @val: The new value to write. | ||
36 | * | ||
37 | * Check asm/metag_regs.h for a list/defines of supported units (e.g. TXUPC_ID, | ||
38 | * TXUTR_ID) and regnums within the units (e.g. TXMASKI_REGNUM, | ||
39 | * TXPOLLI_REGNUM). | ||
40 | */ | ||
41 | void core_reg_write(int unit, int reg, int thread, unsigned int val) | ||
42 | { | ||
43 | unsigned long flags; | ||
44 | |||
45 | /* TXUCT_ID has its own memory mapped registers */ | ||
46 | if (unit == TXUCT_ID) { | ||
47 | void __iomem *cu_reg = __CU_addr(thread, reg); | ||
48 | metag_out32(val, cu_reg); | ||
49 | return; | ||
50 | } | ||
51 | |||
52 | __global_lock2(flags); | ||
53 | |||
54 | /* wait for ready */ | ||
55 | while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT)) | ||
56 | udelay(10); | ||
57 | |||
58 | /* set the value to write */ | ||
59 | metag_out32(val, TXUXXRXDT); | ||
60 | |||
61 | /* set the register to write */ | ||
62 | val = UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread); | ||
63 | metag_out32(val, TXUXXRXRQ); | ||
64 | |||
65 | /* wait for finish */ | ||
66 | while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT)) | ||
67 | udelay(10); | ||
68 | |||
69 | __global_unlock2(flags); | ||
70 | } | ||
71 | EXPORT_SYMBOL(core_reg_write); | ||
72 | |||
73 | /* | ||
74 | * core_reg_read() - read the content of a register in a core unit. | ||
75 | * @unit: The unit to be read. | ||
76 | * @reg: Register number within the unit. | ||
77 | * @thread: The thread we want to access. | ||
78 | * | ||
79 | * Check asm/metag_regs.h for a list/defines of supported units (e.g. TXUPC_ID, | ||
80 | * TXUTR_ID) and regnums within the units (e.g. TXMASKI_REGNUM, | ||
81 | * TXPOLLI_REGNUM). | ||
82 | */ | ||
83 | unsigned int core_reg_read(int unit, int reg, int thread) | ||
84 | { | ||
85 | unsigned long flags; | ||
86 | unsigned int val; | ||
87 | |||
88 | /* TXUCT_ID has its own memory mapped registers */ | ||
89 | if (unit == TXUCT_ID) { | ||
90 | void __iomem *cu_reg = __CU_addr(thread, reg); | ||
91 | val = metag_in32(cu_reg); | ||
92 | return val; | ||
93 | } | ||
94 | |||
95 | __global_lock2(flags); | ||
96 | |||
97 | /* wait for ready */ | ||
98 | while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT)) | ||
99 | udelay(10); | ||
100 | |||
101 | /* set the register to read */ | ||
102 | val = (UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread) | | ||
103 | TXUXXRXRQ_RDnWR_BIT); | ||
104 | metag_out32(val, TXUXXRXRQ); | ||
105 | |||
106 | /* wait for finish */ | ||
107 | while (!(metag_in32(TXUXXRXRQ) & TXUXXRXRQ_DREADY_BIT)) | ||
108 | udelay(10); | ||
109 | |||
110 | /* read the register value */ | ||
111 | val = metag_in32(TXUXXRXDT); | ||
112 | |||
113 | __global_unlock2(flags); | ||
114 | |||
115 | return val; | ||
116 | } | ||
117 | EXPORT_SYMBOL(core_reg_read); | ||
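
A minimal usage sketch of the two helpers above (not part of the patch): TXUCT_ID and TXMASKI_REGNUM are the unit/register defines the comments refer to, and the mask manipulation itself is purely illustrative.

    #include <asm/core_reg.h>
    #include <asm/metag_regs.h>

    /* Read-modify-write a per-thread core register via the helpers above. */
    static void example_mask_update(int thread)
    {
            unsigned int mask = core_reg_read(TXUCT_ID, TXMASKI_REGNUM, thread);

            /* Clearing bit 0 is illustrative only. */
            core_reg_write(TXUCT_ID, TXMASKI_REGNUM, thread, mask & ~1u);
    }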
diff --git a/arch/metag/kernel/da.c b/arch/metag/kernel/da.c new file mode 100644 index 000000000000..52aabb658fde --- /dev/null +++ b/arch/metag/kernel/da.c | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Meta DA JTAG debugger control. | ||
3 | * | ||
4 | * Copyright 2012 Imagination Technologies Ltd. | ||
5 | */ | ||
6 | |||
7 | |||
8 | #include <linux/io.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <asm/da.h> | ||
11 | #include <asm/metag_mem.h> | ||
12 | |||
13 | bool _metag_da_present; | ||
14 | |||
15 | int __init metag_da_probe(void) | ||
16 | { | ||
17 | _metag_da_present = (metag_in32(T0VECINT_BHALT) == 1); | ||
18 | if (_metag_da_present) | ||
19 | pr_info("DA present\n"); | ||
20 | else | ||
21 | pr_info("DA not present\n"); | ||
22 | return 0; | ||
23 | } | ||
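
A hedged sketch of consuming the flag set by the probe above; the caller is assumed to include asm/da.h for the declaration, and the message is illustrative.

    #include <linux/kernel.h>
    #include <asm/da.h>

    static void example_da_check(void)
    {
            /* _metag_da_present is set once by metag_da_probe() at boot. */
            if (_metag_da_present)
                    pr_info("JTAG debug adapter attached\n");
    }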
diff --git a/arch/metag/kernel/devtree.c b/arch/metag/kernel/devtree.c new file mode 100644 index 000000000000..7cd02529636e --- /dev/null +++ b/arch/metag/kernel/devtree.c | |||
@@ -0,0 +1,114 @@ | |||
1 | /* | ||
2 | * linux/arch/metag/kernel/devtree.c | ||
3 | * | ||
4 | * Copyright (C) 2012 Imagination Technologies Ltd. | ||
5 | * | ||
6 | * Based on ARM version: | ||
7 | * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/init.h> | ||
15 | #include <linux/export.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/bootmem.h> | ||
18 | #include <linux/memblock.h> | ||
19 | #include <linux/of.h> | ||
20 | #include <linux/of_fdt.h> | ||
21 | |||
22 | #include <asm/setup.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <asm/mach/arch.h> | ||
25 | |||
26 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) | ||
27 | { | ||
28 | pr_err("%s(%llx, %llx)\n", | ||
29 | __func__, base, size); | ||
30 | } | ||
31 | |||
32 | void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) | ||
33 | { | ||
34 | return alloc_bootmem_align(size, align); | ||
35 | } | ||
36 | |||
37 | /** | ||
38 | * setup_machine_fdt - Machine setup when a dtb was passed to the kernel | ||
39 | * @dt: virtual address pointer to dt blob | ||
40 | * | ||
41 | * If a dtb was passed to the kernel, then use it to choose the correct | ||
42 | * machine_desc and to setup the system. | ||
43 | */ | ||
44 | struct machine_desc * __init setup_machine_fdt(void *dt) | ||
45 | { | ||
46 | struct boot_param_header *devtree = dt; | ||
47 | struct machine_desc *mdesc, *mdesc_best = NULL; | ||
48 | unsigned int score, mdesc_score = ~1; | ||
49 | unsigned long dt_root; | ||
50 | const char *model; | ||
51 | |||
52 | /* check device tree validity */ | ||
53 | if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) | ||
54 | return NULL; | ||
55 | |||
56 | /* Search the mdescs for the 'best' compatible value match */ | ||
57 | initial_boot_params = devtree; | ||
58 | dt_root = of_get_flat_dt_root(); | ||
59 | |||
60 | for_each_machine_desc(mdesc) { | ||
61 | score = of_flat_dt_match(dt_root, mdesc->dt_compat); | ||
62 | if (score > 0 && score < mdesc_score) { | ||
63 | mdesc_best = mdesc; | ||
64 | mdesc_score = score; | ||
65 | } | ||
66 | } | ||
67 | if (!mdesc_best) { | ||
68 | const char *prop; | ||
69 | long size; | ||
70 | |||
71 | pr_err("\nError: unrecognized/unsupported device tree compatible list:\n[ "); | ||
72 | |||
73 | prop = of_get_flat_dt_prop(dt_root, "compatible", &size); | ||
74 | if (prop) { | ||
75 | while (size > 0) { | ||
76 | printk("'%s' ", prop); | ||
77 | size -= strlen(prop) + 1; | ||
78 | prop += strlen(prop) + 1; | ||
79 | } | ||
80 | } | ||
81 | printk("]\n\n"); | ||
82 | |||
83 | dump_machine_table(); /* does not return */ | ||
84 | } | ||
85 | |||
86 | model = of_get_flat_dt_prop(dt_root, "model", NULL); | ||
87 | if (!model) | ||
88 | model = of_get_flat_dt_prop(dt_root, "compatible", NULL); | ||
89 | if (!model) | ||
90 | model = "<unknown>"; | ||
91 | pr_info("Machine: %s, model: %s\n", mdesc_best->name, model); | ||
92 | |||
93 | /* Retrieve various information from the /chosen node */ | ||
94 | of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line); | ||
95 | |||
96 | return mdesc_best; | ||
97 | } | ||
98 | |||
99 | /** | ||
100 | * copy_fdt - Copy device tree into non-init memory. | ||
101 | * | ||
102 | * We must copy the flattened device tree blob into non-init memory because the | ||
103 | * unflattened device tree will reference the strings in it directly. | ||
104 | */ | ||
105 | void __init copy_fdt(void) | ||
106 | { | ||
107 | void *alloc = early_init_dt_alloc_memory_arch( | ||
108 | be32_to_cpu(initial_boot_params->totalsize), 0x40); | ||
109 | if (alloc) { | ||
110 | memcpy(alloc, initial_boot_params, | ||
111 | be32_to_cpu(initial_boot_params->totalsize)); | ||
112 | initial_boot_params = alloc; | ||
113 | } | ||
114 | } | ||
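
A sketch of the call sequence the two functions above exist for (the real caller is the arch setup code elsewhere in this patch; unflatten_device_tree() is the generic OF helper): match a machine_desc from the blob, then copy the blob out of init memory before unflattening, since the unflattened tree references the blob's strings directly.

    #include <linux/of_fdt.h>

    static struct machine_desc * __init example_dt_init(void *dt)
    {
            struct machine_desc *mdesc = setup_machine_fdt(dt);

            copy_fdt();                      /* move blob to non-init memory */
            unflatten_device_tree();         /* strings now point at the copy */
            return mdesc;
    }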
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c new file mode 100644 index 000000000000..8c00dedadc54 --- /dev/null +++ b/arch/metag/kernel/dma.c | |||
@@ -0,0 +1,507 @@ | |||
1 | /* | ||
2 | * Meta version derived from arch/powerpc/lib/dma-noncoherent.c | ||
3 | * Copyright (C) 2008 Imagination Technologies Ltd. | ||
4 | * | ||
5 | * PowerPC version derived from arch/arm/mm/consistent.c | ||
6 | * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) | ||
7 | * | ||
8 | * Copyright (C) 2000 Russell King | ||
9 | * | ||
10 | * Consistent memory allocators. Used for DMA devices that want to | ||
11 | * share uncached memory with the processor core. The function return | ||
12 | * is the virtual address and 'dma_handle' is the physical address. | ||
13 | * Mostly stolen from the ARM port, with some changes for PowerPC. | ||
14 | * -- Dan | ||
15 | * | ||
16 | * Reorganized to get rid of the arch-specific consistent_* functions | ||
17 | * and provide non-coherent implementations for the DMA API. -Matt | ||
18 | * | ||
19 | * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent() | ||
20 | * implementation. This is pulled straight from ARM and barely | ||
21 | * modified. -Matt | ||
22 | * | ||
23 | * This program is free software; you can redistribute it and/or modify | ||
24 | * it under the terms of the GNU General Public License version 2 as | ||
25 | * published by the Free Software Foundation. | ||
26 | */ | ||
27 | |||
28 | #include <linux/sched.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/errno.h> | ||
31 | #include <linux/export.h> | ||
32 | #include <linux/string.h> | ||
33 | #include <linux/types.h> | ||
34 | #include <linux/highmem.h> | ||
35 | #include <linux/dma-mapping.h> | ||
36 | #include <linux/slab.h> | ||
37 | |||
38 | #include <asm/tlbflush.h> | ||
39 | #include <asm/mmu.h> | ||
40 | |||
41 | #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_START) \ | ||
42 | >> PAGE_SHIFT) | ||
43 | |||
44 | static u64 get_coherent_dma_mask(struct device *dev) | ||
45 | { | ||
46 | u64 mask = ~0ULL; | ||
47 | |||
48 | if (dev) { | ||
49 | mask = dev->coherent_dma_mask; | ||
50 | |||
51 | /* | ||
52 | * Sanity check the DMA mask - it must be non-zero, and | ||
53 | * must be able to be satisfied by a DMA allocation. | ||
54 | */ | ||
55 | if (mask == 0) { | ||
56 | dev_warn(dev, "coherent DMA mask is unset\n"); | ||
57 | return 0; | ||
58 | } | ||
59 | } | ||
60 | |||
61 | return mask; | ||
62 | } | ||
63 | /* | ||
64 | * This is the page table (2MB) covering uncached, DMA consistent allocations | ||
65 | */ | ||
66 | static pte_t *consistent_pte; | ||
67 | static DEFINE_SPINLOCK(consistent_lock); | ||
68 | |||
69 | /* | ||
70 | * VM region handling support. | ||
71 | * | ||
72 | * This should become something generic, handling VM region allocations for | ||
73 | * vmalloc and similar (ioremap, module space, etc). | ||
74 | * | ||
75 | * I envisage vmalloc()'s supporting vm_struct becoming: | ||
76 | * | ||
77 | * struct vm_struct { | ||
78 | * struct metag_vm_region region; | ||
79 | * unsigned long flags; | ||
80 | * struct page **pages; | ||
81 | * unsigned int nr_pages; | ||
82 | * unsigned long phys_addr; | ||
83 | * }; | ||
84 | * | ||
85 | * get_vm_area() would then call metag_vm_region_alloc with an appropriate | ||
86 | * struct metag_vm_region head (eg): | ||
87 | * | ||
88 | * struct metag_vm_region vmalloc_head = { | ||
89 | * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), | ||
90 | * .vm_start = VMALLOC_START, | ||
91 | * .vm_end = VMALLOC_END, | ||
92 | * }; | ||
93 | * | ||
94 | * However, vmalloc_head.vm_start is variable (typically, it is dependent on | ||
95 | * the amount of RAM found at boot time.) I would imagine that get_vm_area() | ||
96 | * would have to initialise this each time prior to calling | ||
97 | * metag_vm_region_alloc(). | ||
98 | */ | ||
99 | struct metag_vm_region { | ||
100 | struct list_head vm_list; | ||
101 | unsigned long vm_start; | ||
102 | unsigned long vm_end; | ||
103 | struct page *vm_pages; | ||
104 | int vm_active; | ||
105 | }; | ||
106 | |||
107 | static struct metag_vm_region consistent_head = { | ||
108 | .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), | ||
109 | .vm_start = CONSISTENT_START, | ||
110 | .vm_end = CONSISTENT_END, | ||
111 | }; | ||
112 | |||
113 | static struct metag_vm_region *metag_vm_region_alloc(struct metag_vm_region | ||
114 | *head, size_t size, | ||
115 | gfp_t gfp) | ||
116 | { | ||
117 | unsigned long addr = head->vm_start, end = head->vm_end - size; | ||
118 | unsigned long flags; | ||
119 | struct metag_vm_region *c, *new; | ||
120 | |||
121 | new = kmalloc(sizeof(struct metag_vm_region), gfp); | ||
122 | if (!new) | ||
123 | goto out; | ||
124 | |||
125 | spin_lock_irqsave(&consistent_lock, flags); | ||
126 | |||
127 | list_for_each_entry(c, &head->vm_list, vm_list) { | ||
128 | if ((addr + size) < addr) | ||
129 | goto nospc; | ||
130 | if ((addr + size) <= c->vm_start) | ||
131 | goto found; | ||
132 | addr = c->vm_end; | ||
133 | if (addr > end) | ||
134 | goto nospc; | ||
135 | } | ||
136 | |||
137 | found: | ||
138 | /* | ||
139 | * Insert this entry _before_ the one we found. | ||
140 | */ | ||
141 | list_add_tail(&new->vm_list, &c->vm_list); | ||
142 | new->vm_start = addr; | ||
143 | new->vm_end = addr + size; | ||
144 | new->vm_active = 1; | ||
145 | |||
146 | spin_unlock_irqrestore(&consistent_lock, flags); | ||
147 | return new; | ||
148 | |||
149 | nospc: | ||
150 | spin_unlock_irqrestore(&consistent_lock, flags); | ||
151 | kfree(new); | ||
152 | out: | ||
153 | return NULL; | ||
154 | } | ||
155 | |||
156 | static struct metag_vm_region *metag_vm_region_find(struct metag_vm_region | ||
157 | *head, unsigned long addr) | ||
158 | { | ||
159 | struct metag_vm_region *c; | ||
160 | |||
161 | list_for_each_entry(c, &head->vm_list, vm_list) { | ||
162 | if (c->vm_active && c->vm_start == addr) | ||
163 | goto out; | ||
164 | } | ||
165 | c = NULL; | ||
166 | out: | ||
167 | return c; | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * Allocate DMA-coherent memory space and return both the kernel remapped | ||
172 | * virtual and bus address for that space. | ||
173 | */ | ||
174 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
175 | dma_addr_t *handle, gfp_t gfp) | ||
176 | { | ||
177 | struct page *page; | ||
178 | struct metag_vm_region *c; | ||
179 | unsigned long order; | ||
180 | u64 mask = get_coherent_dma_mask(dev); | ||
181 | u64 limit; | ||
182 | |||
183 | if (!consistent_pte) { | ||
184 | pr_err("%s: not initialised\n", __func__); | ||
185 | dump_stack(); | ||
186 | return NULL; | ||
187 | } | ||
188 | |||
189 | if (!mask) | ||
190 | goto no_page; | ||
191 | size = PAGE_ALIGN(size); | ||
192 | limit = (mask + 1) & ~mask; | ||
193 | if ((limit && size >= limit) | ||
194 | || size >= (CONSISTENT_END - CONSISTENT_START)) { | ||
195 | pr_warn("coherent allocation too big (requested %#zx mask %#llx)\n", | ||
196 | size, mask); | ||
197 | return NULL; | ||
198 | } | ||
199 | |||
200 | order = get_order(size); | ||
201 | |||
202 | if (mask != 0xffffffff) | ||
203 | gfp |= GFP_DMA; | ||
204 | |||
205 | page = alloc_pages(gfp, order); | ||
206 | if (!page) | ||
207 | goto no_page; | ||
208 | |||
209 | /* | ||
210 | * Invalidate any data that might be lurking in the | ||
211 | * kernel direct-mapped region for device DMA. | ||
212 | */ | ||
213 | { | ||
214 | void *kaddr = page_address(page); | ||
215 | memset(kaddr, 0, size); | ||
216 | flush_dcache_region(kaddr, size); | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * Allocate a virtual address in the consistent mapping region. | ||
221 | */ | ||
222 | c = metag_vm_region_alloc(&consistent_head, size, | ||
223 | gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); | ||
224 | if (c) { | ||
225 | unsigned long vaddr = c->vm_start; | ||
226 | pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr); | ||
227 | struct page *end = page + (1 << order); | ||
228 | |||
229 | c->vm_pages = page; | ||
230 | split_page(page, order); | ||
231 | |||
232 | /* | ||
233 | * Set the "dma handle" | ||
234 | */ | ||
235 | *handle = page_to_bus(page); | ||
236 | |||
237 | do { | ||
238 | BUG_ON(!pte_none(*pte)); | ||
239 | |||
240 | SetPageReserved(page); | ||
241 | set_pte_at(&init_mm, vaddr, | ||
242 | pte, mk_pte(page, | ||
243 | pgprot_writecombine | ||
244 | (PAGE_KERNEL))); | ||
245 | page++; | ||
246 | pte++; | ||
247 | vaddr += PAGE_SIZE; | ||
248 | } while (size -= PAGE_SIZE); | ||
249 | |||
250 | /* | ||
251 | * Free the otherwise unused pages. | ||
252 | */ | ||
253 | while (page < end) { | ||
254 | __free_page(page); | ||
255 | page++; | ||
256 | } | ||
257 | |||
258 | return (void *)c->vm_start; | ||
259 | } | ||
260 | |||
261 | if (page) | ||
262 | __free_pages(page, order); | ||
263 | no_page: | ||
264 | return NULL; | ||
265 | } | ||
266 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
267 | |||
268 | /* | ||
269 | * Free an allocation made by dma_alloc_coherent() above. | ||
270 | */ | ||
271 | void dma_free_coherent(struct device *dev, size_t size, | ||
272 | void *vaddr, dma_addr_t dma_handle) | ||
273 | { | ||
274 | struct metag_vm_region *c; | ||
275 | unsigned long flags, addr; | ||
276 | pte_t *ptep; | ||
277 | |||
278 | size = PAGE_ALIGN(size); | ||
279 | |||
280 | spin_lock_irqsave(&consistent_lock, flags); | ||
281 | |||
282 | c = metag_vm_region_find(&consistent_head, (unsigned long)vaddr); | ||
283 | if (!c) | ||
284 | goto no_area; | ||
285 | |||
286 | c->vm_active = 0; | ||
287 | if ((c->vm_end - c->vm_start) != size) { | ||
288 | pr_err("%s: freeing wrong coherent size (%lu != %zu)\n", | ||
289 | __func__, c->vm_end - c->vm_start, size); | ||
290 | dump_stack(); | ||
291 | size = c->vm_end - c->vm_start; | ||
292 | } | ||
293 | |||
294 | ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start); | ||
295 | addr = c->vm_start; | ||
296 | do { | ||
297 | pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); | ||
298 | unsigned long pfn; | ||
299 | |||
300 | ptep++; | ||
301 | addr += PAGE_SIZE; | ||
302 | |||
303 | if (!pte_none(pte) && pte_present(pte)) { | ||
304 | pfn = pte_pfn(pte); | ||
305 | |||
306 | if (pfn_valid(pfn)) { | ||
307 | struct page *page = pfn_to_page(pfn); | ||
308 | ClearPageReserved(page); | ||
309 | |||
310 | __free_page(page); | ||
311 | continue; | ||
312 | } | ||
313 | } | ||
314 | |||
315 | pr_crit("%s: bad page in kernel page table\n", | ||
316 | __func__); | ||
317 | } while (size -= PAGE_SIZE); | ||
318 | |||
319 | flush_tlb_kernel_range(c->vm_start, c->vm_end); | ||
320 | |||
321 | list_del(&c->vm_list); | ||
322 | |||
323 | spin_unlock_irqrestore(&consistent_lock, flags); | ||
324 | |||
325 | kfree(c); | ||
326 | return; | ||
327 | |||
328 | no_area: | ||
329 | spin_unlock_irqrestore(&consistent_lock, flags); | ||
330 | pr_err("%s: trying to free invalid coherent area: %p\n", | ||
331 | __func__, vaddr); | ||
332 | dump_stack(); | ||
333 | } | ||
334 | EXPORT_SYMBOL(dma_free_coherent); | ||
335 | |||
336 | |||
337 | static int dma_mmap(struct device *dev, struct vm_area_struct *vma, | ||
338 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | ||
339 | { | ||
340 | int ret = -ENXIO; | ||
341 | |||
342 | unsigned long flags, user_size, kern_size; | ||
343 | struct metag_vm_region *c; | ||
344 | |||
345 | user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | ||
346 | |||
347 | spin_lock_irqsave(&consistent_lock, flags); | ||
348 | c = metag_vm_region_find(&consistent_head, (unsigned long)cpu_addr); | ||
349 | spin_unlock_irqrestore(&consistent_lock, flags); | ||
350 | |||
351 | if (c) { | ||
352 | unsigned long off = vma->vm_pgoff; | ||
353 | |||
354 | kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT; | ||
355 | |||
356 | if (off < kern_size && | ||
357 | user_size <= (kern_size - off)) { | ||
358 | ret = remap_pfn_range(vma, vma->vm_start, | ||
359 | page_to_pfn(c->vm_pages) + off, | ||
360 | user_size << PAGE_SHIFT, | ||
361 | vma->vm_page_prot); | ||
362 | } | ||
363 | } | ||
364 | |||
365 | |||
366 | return ret; | ||
367 | } | ||
368 | |||
369 | int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | ||
370 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | ||
371 | { | ||
372 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
373 | return dma_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
374 | } | ||
375 | EXPORT_SYMBOL(dma_mmap_coherent); | ||
376 | |||
377 | int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, | ||
378 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | ||
379 | { | ||
380 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
381 | return dma_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
382 | } | ||
383 | EXPORT_SYMBOL(dma_mmap_writecombine); | ||
384 | |||
385 | |||
386 | |||
387 | |||
388 | /* | ||
389 | * Initialise the consistent memory allocation. | ||
390 | */ | ||
391 | static int __init dma_alloc_init(void) | ||
392 | { | ||
393 | pgd_t *pgd, *pgd_k; | ||
394 | pud_t *pud, *pud_k; | ||
395 | pmd_t *pmd, *pmd_k; | ||
396 | pte_t *pte; | ||
397 | int ret = 0; | ||
398 | |||
399 | do { | ||
400 | int offset = pgd_index(CONSISTENT_START); | ||
401 | pgd = pgd_offset(&init_mm, CONSISTENT_START); | ||
402 | pud = pud_alloc(&init_mm, pgd, CONSISTENT_START); | ||
403 | pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START); | ||
404 | if (!pmd) { | ||
405 | pr_err("%s: no pmd tables\n", __func__); | ||
406 | ret = -ENOMEM; | ||
407 | break; | ||
408 | } | ||
409 | WARN_ON(!pmd_none(*pmd)); | ||
410 | |||
411 | pte = pte_alloc_kernel(pmd, CONSISTENT_START); | ||
412 | if (!pte) { | ||
413 | pr_err("%s: no pte tables\n", __func__); | ||
414 | ret = -ENOMEM; | ||
415 | break; | ||
416 | } | ||
417 | |||
418 | pgd_k = ((pgd_t *) mmu_get_base()) + offset; | ||
419 | pud_k = pud_offset(pgd_k, CONSISTENT_START); | ||
420 | pmd_k = pmd_offset(pud_k, CONSISTENT_START); | ||
421 | set_pmd(pmd_k, *pmd); | ||
422 | |||
423 | consistent_pte = pte; | ||
424 | } while (0); | ||
425 | |||
426 | return ret; | ||
427 | } | ||
428 | early_initcall(dma_alloc_init); | ||
429 | |||
430 | /* | ||
431 | * Make an area of memory consistent as seen by devices. | ||
432 | */ | ||
433 | void dma_sync_for_device(void *vaddr, size_t size, int dma_direction) | ||
434 | { | ||
435 | /* | ||
436 | * Ensure any writes get through the write combiner. This is necessary | ||
437 | * even with DMA_FROM_DEVICE, or the write may dirty the cache after | ||
438 | * we've invalidated it and get written back during the DMA. | ||
439 | */ | ||
440 | |||
441 | barrier(); | ||
442 | |||
443 | switch (dma_direction) { | ||
444 | case DMA_BIDIRECTIONAL: | ||
445 | /* | ||
446 | * Writeback to ensure the device can see our latest changes and | ||
447 | * so that we have no dirty lines, and invalidate the cache | ||
448 | * lines too in preparation for receiving the buffer back | ||
449 | * (dma_sync_for_cpu) later. | ||
450 | */ | ||
451 | flush_dcache_region(vaddr, size); | ||
452 | break; | ||
453 | case DMA_TO_DEVICE: | ||
454 | /* | ||
455 | * Writeback to ensure the device can see our latest changes. | ||
456 | * There's no need to invalidate as the device shouldn't write | ||
457 | * to the buffer. | ||
458 | */ | ||
459 | writeback_dcache_region(vaddr, size); | ||
460 | break; | ||
461 | case DMA_FROM_DEVICE: | ||
462 | /* | ||
463 | * Invalidate to ensure we have no dirty lines that could get | ||
464 | * written back during the DMA. It's also safe to flush | ||
465 | * (writeback) here if necessary. | ||
466 | */ | ||
467 | invalidate_dcache_region(vaddr, size); | ||
468 | break; | ||
469 | case DMA_NONE: | ||
470 | BUG(); | ||
471 | } | ||
472 | |||
473 | wmb(); | ||
474 | } | ||
475 | EXPORT_SYMBOL(dma_sync_for_device); | ||
476 | |||
477 | /* | ||
478 | * Make an area of memory consistent as seen by the core (CPU). | ||
479 | */ | ||
480 | void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction) | ||
481 | { | ||
482 | /* | ||
483 | * Hardware L2 cache prefetch doesn't occur across 4K physical | ||
484 | * boundaries, however according to Documentation/DMA-API-HOWTO.txt | ||
485 | * kmalloc'd memory is DMA'able, so accesses in nearby memory could | ||
486 | * trigger a cache fill in the DMA buffer. | ||
487 | * | ||
488 | * This should never cause dirty lines, so a flush or invalidate should | ||
489 | * be safe to allow us to see data from the device. | ||
490 | */ | ||
491 | if (_meta_l2c_pf_is_enabled()) { | ||
492 | switch (dma_direction) { | ||
493 | case DMA_BIDIRECTIONAL: | ||
494 | case DMA_FROM_DEVICE: | ||
495 | invalidate_dcache_region(vaddr, size); | ||
496 | break; | ||
497 | case DMA_TO_DEVICE: | ||
498 | /* The device shouldn't have written to the buffer */ | ||
499 | break; | ||
500 | case DMA_NONE: | ||
501 | BUG(); | ||
502 | } | ||
503 | } | ||
504 | |||
505 | rmb(); | ||
506 | } | ||
507 | EXPORT_SYMBOL(dma_sync_for_cpu); | ||
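
A sketch of typical driver-side usage of the coherent allocator above; the device pointer and buffer size are illustrative. The returned virtual address lies in the CONSISTENT_START..CONSISTENT_END window and is mapped write-combined, while the dma_addr_t is what the device gets programmed with.

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    static void *example_buf;
    static dma_addr_t example_bus;

    static int example_setup(struct device *dev)
    {
            example_buf = dma_alloc_coherent(dev, PAGE_SIZE, &example_bus,
                                             GFP_KERNEL);
            return example_buf ? 0 : -ENOMEM;
    }

    static void example_teardown(struct device *dev)
    {
            dma_free_coherent(dev, PAGE_SIZE, example_buf, example_bus);
    }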
diff --git a/arch/metag/kernel/ftrace.c b/arch/metag/kernel/ftrace.c new file mode 100644 index 000000000000..a774f321643f --- /dev/null +++ b/arch/metag/kernel/ftrace.c | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Imagination Technologies Ltd. | ||
3 | * Licensed under the GPL | ||
4 | * | ||
5 | * Dynamic ftrace support. | ||
6 | */ | ||
7 | |||
8 | #include <linux/ftrace.h> | ||
9 | #include <linux/io.h> | ||
10 | #include <linux/uaccess.h> | ||
11 | |||
12 | #include <asm/cacheflush.h> | ||
13 | |||
14 | #define D04_MOVT_TEMPLATE 0x02200005 | ||
15 | #define D04_CALL_TEMPLATE 0xAC200005 | ||
16 | #define D1RTP_MOVT_TEMPLATE 0x03200005 | ||
17 | #define D1RTP_CALL_TEMPLATE 0xAC200006 | ||
18 | |||
19 | static const unsigned long NOP[2] = {0xa0fffffe, 0xa0fffffe}; | ||
20 | static unsigned long movt_and_call_insn[2]; | ||
21 | |||
22 | static unsigned char *ftrace_nop_replace(void) | ||
23 | { | ||
24 | return (char *)&NOP[0]; | ||
25 | } | ||
26 | |||
27 | static unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr) | ||
28 | { | ||
29 | unsigned long hi16, low16; | ||
30 | |||
31 | hi16 = (addr & 0xffff0000) >> 13; | ||
32 | low16 = (addr & 0x0000ffff) << 3; | ||
33 | |||
34 | /* | ||
35 | * The compiler makes the call to mcount_wrapper() | ||
36 | * (Meta's wrapper around mcount()) through the register | ||
37 | * D0.4. So whenever we're patching one of those compiler-generated | ||
38 | * calls we also need to go through D0.4. Otherwise use D1RtP. | ||
39 | */ | ||
40 | if (pc == (unsigned long)&ftrace_call) { | ||
41 | writel(D1RTP_MOVT_TEMPLATE | hi16, &movt_and_call_insn[0]); | ||
42 | writel(D1RTP_CALL_TEMPLATE | low16, &movt_and_call_insn[1]); | ||
43 | } else { | ||
44 | writel(D04_MOVT_TEMPLATE | hi16, &movt_and_call_insn[0]); | ||
45 | writel(D04_CALL_TEMPLATE | low16, &movt_and_call_insn[1]); | ||
46 | } | ||
47 | |||
48 | return (unsigned char *)&movt_and_call_insn[0]; | ||
49 | } | ||
50 | |||
51 | static int ftrace_modify_code(unsigned long pc, unsigned char *old_code, | ||
52 | unsigned char *new_code) | ||
53 | { | ||
54 | unsigned char replaced[MCOUNT_INSN_SIZE]; | ||
55 | |||
56 | /* | ||
57 | * Note: Due to modules and __init, code can | ||
58 | * disappear and change; we need to protect against faulting | ||
59 | * as well as against the code changing. | ||
60 | * | ||
61 | * No real locking needed, this code is run through | ||
62 | * kstop_machine. | ||
63 | */ | ||
64 | |||
65 | /* read the text we want to modify */ | ||
66 | if (probe_kernel_read(replaced, (void *)pc, MCOUNT_INSN_SIZE)) | ||
67 | return -EFAULT; | ||
68 | |||
69 | /* Make sure it is what we expect it to be */ | ||
70 | if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) | ||
71 | return -EINVAL; | ||
72 | |||
73 | /* replace the text with the new text */ | ||
74 | if (probe_kernel_write((void *)pc, new_code, MCOUNT_INSN_SIZE)) | ||
75 | return -EPERM; | ||
76 | |||
77 | flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); | ||
78 | |||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | int ftrace_update_ftrace_func(ftrace_func_t func) | ||
83 | { | ||
84 | int ret; | ||
85 | unsigned long pc; | ||
86 | unsigned char old[MCOUNT_INSN_SIZE], *new; | ||
87 | |||
88 | pc = (unsigned long)&ftrace_call; | ||
89 | memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); | ||
90 | new = ftrace_call_replace(pc, (unsigned long)func); | ||
91 | ret = ftrace_modify_code(pc, old, new); | ||
92 | |||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | int ftrace_make_nop(struct module *mod, | ||
97 | struct dyn_ftrace *rec, unsigned long addr) | ||
98 | { | ||
99 | unsigned char *new, *old; | ||
100 | unsigned long ip = rec->ip; | ||
101 | |||
102 | old = ftrace_call_replace(ip, addr); | ||
103 | new = ftrace_nop_replace(); | ||
104 | |||
105 | return ftrace_modify_code(ip, old, new); | ||
106 | } | ||
107 | |||
108 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | ||
109 | { | ||
110 | unsigned char *new, *old; | ||
111 | unsigned long ip = rec->ip; | ||
112 | |||
113 | old = ftrace_nop_replace(); | ||
114 | new = ftrace_call_replace(ip, addr); | ||
115 | |||
116 | return ftrace_modify_code(ip, old, new); | ||
117 | } | ||
118 | |||
119 | /* run from kstop_machine */ | ||
120 | int __init ftrace_dyn_arch_init(void *data) | ||
121 | { | ||
122 | /* The return code is returned via data */ | ||
123 | writel(0, data); | ||
124 | |||
125 | return 0; | ||
126 | } | ||
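
A worked example of the immediate packing in ftrace_call_replace() above, for an illustrative target address (note that (addr & 0xffff0000) >> 13 is just (addr >> 16) << 3):

    /*
     * For addr == 0x40123458 with the D0.4 templates above:
     *
     *   hi16  = (0x40123458 & 0xffff0000) >> 13 = 0x00020090
     *   low16 = (0x40123458 & 0x0000ffff) <<  3 = 0x0001a2c0
     *
     *   insn[0] = 0x02200005 | 0x00020090 = 0x02220095   (MOVT half)
     *   insn[1] = 0xac200005 | 0x0001a2c0 = 0xac21a2c5   (CALL half)
     *
     * Both templates carry a 16-bit immediate in bits 3..18, so the two
     * instructions together materialise the full 32-bit call target.
     */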
diff --git a/arch/metag/kernel/ftrace_stub.S b/arch/metag/kernel/ftrace_stub.S new file mode 100644 index 000000000000..e70bff745bdd --- /dev/null +++ b/arch/metag/kernel/ftrace_stub.S | |||
@@ -0,0 +1,76 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Imagination Technologies Ltd. | ||
3 | * Licensed under the GPL | ||
4 | * | ||
5 | */ | ||
6 | |||
7 | #include <asm/ftrace.h> | ||
8 | |||
9 | .text | ||
10 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
11 | .global _mcount_wrapper | ||
12 | .type _mcount_wrapper,function | ||
13 | _mcount_wrapper: | ||
14 | MOV PC,D0.4 | ||
15 | |||
16 | .global _ftrace_caller | ||
17 | .type _ftrace_caller,function | ||
18 | _ftrace_caller: | ||
19 | MOVT D0Re0,#HI(_function_trace_stop) | ||
20 | ADD D0Re0,D0Re0,#LO(_function_trace_stop) | ||
21 | GETD D0Re0,[D0Re0] | ||
22 | CMP D0Re0,#0 | ||
23 | BEQ $Lcall_stub | ||
24 | MOV PC,D0.4 | ||
25 | $Lcall_stub: | ||
26 | MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4 | ||
27 | MOV D1Ar1, D0.4 | ||
28 | MOV D0Ar2, D1RtP | ||
29 | SUB D1Ar1,D1Ar1,#MCOUNT_INSN_SIZE | ||
30 | |||
31 | .global _ftrace_call | ||
32 | _ftrace_call: | ||
33 | MOVT D1RtP,#HI(_ftrace_stub) | ||
34 | CALL D1RtP,#LO(_ftrace_stub) | ||
35 | GETL D0.4, D1RtP, [A0StP++#(-8)] | ||
36 | GETL D0Ar2, D1Ar1, [A0StP++#(-8)] | ||
37 | GETL D0Ar4, D1Ar3, [A0StP++#(-8)] | ||
38 | GETL D0Ar6, D1Ar5, [A0StP++#(-8)] | ||
39 | MOV PC, D0.4 | ||
40 | #else | ||
41 | |||
42 | .global _mcount_wrapper | ||
43 | .type _mcount_wrapper,function | ||
44 | _mcount_wrapper: | ||
45 | MOVT D0Re0,#HI(_function_trace_stop) | ||
46 | ADD D0Re0,D0Re0,#LO(_function_trace_stop) | ||
47 | GETD D0Re0,[D0Re0] | ||
48 | CMP D0Re0,#0 | ||
49 | BEQ $Lcall_mcount | ||
50 | MOV PC,D0.4 | ||
51 | $Lcall_mcount: | ||
52 | MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4 | ||
53 | MOV D1Ar1, D0.4 | ||
54 | MOV D0Ar2, D1RtP | ||
55 | MOVT D0Re0,#HI(_ftrace_trace_function) | ||
56 | ADD D0Re0,D0Re0,#LO(_ftrace_trace_function) | ||
57 | GET D1Ar3,[D0Re0] | ||
58 | MOVT D1Re0,#HI(_ftrace_stub) | ||
59 | ADD D1Re0,D1Re0,#LO(_ftrace_stub) | ||
60 | CMP D1Ar3,D1Re0 | ||
61 | BEQ $Ltrace_exit | ||
62 | MOV D1RtP,D1Ar3 | ||
63 | SUB D1Ar1,D1Ar1,#MCOUNT_INSN_SIZE | ||
64 | SWAP PC,D1RtP | ||
65 | $Ltrace_exit: | ||
66 | GETL D0.4, D1RtP, [A0StP++#(-8)] | ||
67 | GETL D0Ar2, D1Ar1, [A0StP++#(-8)] | ||
68 | GETL D0Ar4, D1Ar3, [A0StP++#(-8)] | ||
69 | GETL D0Ar6, D1Ar5, [A0StP++#(-8)] | ||
70 | MOV PC, D0.4 | ||
71 | |||
72 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
73 | |||
74 | .global _ftrace_stub | ||
75 | _ftrace_stub: | ||
76 | MOV PC,D1RtP | ||
diff --git a/arch/metag/kernel/head.S b/arch/metag/kernel/head.S new file mode 100644 index 000000000000..969dffabc03a --- /dev/null +++ b/arch/metag/kernel/head.S | |||
@@ -0,0 +1,57 @@ | |||
1 | ! Copyright 2005,2006,2007,2009 Imagination Technologies | ||
2 | |||
3 | #include <linux/init.h> | ||
4 | #include <generated/asm-offsets.h> | ||
5 | #undef __exit | ||
6 | |||
7 | __HEAD | ||
8 | ! Setup the stack and get going into _metag_start_kernel | ||
9 | .global __start | ||
10 | .type __start,function | ||
11 | __start: | ||
12 | ! D1Ar1 contains pTBI (ISTAT) | ||
13 | ! D0Ar2 contains pTBI | ||
14 | ! D1Ar3 contains __pTBISegs | ||
15 | ! D0Ar4 contains kernel arglist pointer | ||
16 | |||
17 | MOVT D0Re0,#HI(___pTBIs) | ||
18 | ADD D0Re0,D0Re0,#LO(___pTBIs) | ||
19 | SETL [D0Re0],D0Ar2,D1Ar1 | ||
20 | MOVT D0Re0,#HI(___pTBISegs) | ||
21 | ADD D0Re0,D0Re0,#LO(___pTBISegs) | ||
22 | SETD [D0Re0],D1Ar3 | ||
23 | MOV A0FrP,#0 | ||
24 | MOV D0Re0,#0 | ||
25 | MOV D1Re0,#0 | ||
26 | MOV D1Ar3,#0 | ||
27 | MOV D1Ar1,D0Ar4 !Store kernel boot params | ||
28 | MOV D1Ar5,#0 | ||
29 | MOV D0Ar6,#0 | ||
30 | #ifdef CONFIG_METAG_DSP | ||
31 | MOV D0.8,#0 | ||
32 | #endif | ||
33 | MOVT A0StP,#HI(_init_thread_union) | ||
34 | ADD A0StP,A0StP,#LO(_init_thread_union) | ||
35 | ADD A0StP,A0StP,#THREAD_INFO_SIZE | ||
36 | MOVT D1RtP,#HI(_metag_start_kernel) | ||
37 | CALL D1RtP,#LO(_metag_start_kernel) | ||
38 | .size __start,.-__start | ||
39 | |||
40 | !! Needed by TBX | ||
41 | .global __exit | ||
42 | .type __exit,function | ||
43 | __exit: | ||
44 | XOR TXENABLE,D0Re0,D0Re0 | ||
45 | .size __exit,.-__exit | ||
46 | |||
47 | #ifdef CONFIG_SMP | ||
48 | .global _secondary_startup | ||
49 | .type _secondary_startup,function | ||
50 | _secondary_startup: | ||
51 | MOVT A0StP,#HI(_secondary_data_stack) | ||
52 | ADD A0StP,A0StP,#LO(_secondary_data_stack) | ||
53 | GETD A0StP,[A0StP] | ||
54 | ADD A0StP,A0StP,#THREAD_INFO_SIZE | ||
55 | B _secondary_start_kernel | ||
56 | .size _secondary_startup,.-_secondary_startup | ||
57 | #endif | ||
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c new file mode 100644 index 000000000000..87707efeb0a3 --- /dev/null +++ b/arch/metag/kernel/irq.c | |||
@@ -0,0 +1,323 @@ | |||
1 | /* | ||
2 | * Linux/Meta general interrupt handling code | ||
3 | * | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/interrupt.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/irqchip/metag-ext.h> | ||
10 | #include <linux/irqchip/metag.h> | ||
11 | #include <linux/irqdomain.h> | ||
12 | #include <linux/ratelimit.h> | ||
13 | |||
14 | #include <asm/core_reg.h> | ||
15 | #include <asm/mach/arch.h> | ||
16 | #include <asm/uaccess.h> | ||
17 | |||
18 | #ifdef CONFIG_4KSTACKS | ||
19 | union irq_ctx { | ||
20 | struct thread_info tinfo; | ||
21 | u32 stack[THREAD_SIZE/sizeof(u32)]; | ||
22 | }; | ||
23 | |||
24 | static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; | ||
25 | static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; | ||
26 | #endif | ||
27 | |||
28 | struct irq_domain *root_domain; | ||
29 | |||
30 | static unsigned int startup_meta_irq(struct irq_data *data) | ||
31 | { | ||
32 | tbi_startup_interrupt(data->hwirq); | ||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | static void shutdown_meta_irq(struct irq_data *data) | ||
37 | { | ||
38 | tbi_shutdown_interrupt(data->hwirq); | ||
39 | } | ||
40 | |||
41 | void do_IRQ(int irq, struct pt_regs *regs) | ||
42 | { | ||
43 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
44 | #ifdef CONFIG_4KSTACKS | ||
45 | struct irq_desc *desc; | ||
46 | union irq_ctx *curctx, *irqctx; | ||
47 | u32 *isp; | ||
48 | #endif | ||
49 | |||
50 | irq_enter(); | ||
51 | |||
52 | irq = irq_linear_revmap(root_domain, irq); | ||
53 | |||
54 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | ||
55 | /* Debugging check for stack overflow: is there less than 1KB free? */ | ||
56 | { | ||
57 | unsigned long sp; | ||
58 | |||
59 | sp = __core_reg_get(A0StP); | ||
60 | sp &= THREAD_SIZE - 1; | ||
61 | |||
62 | if (unlikely(sp > (THREAD_SIZE - 1024))) | ||
63 | pr_err("Stack overflow in do_IRQ: %ld\n", sp); | ||
64 | } | ||
65 | #endif | ||
66 | |||
67 | |||
68 | #ifdef CONFIG_4KSTACKS | ||
69 | curctx = (union irq_ctx *) current_thread_info(); | ||
70 | irqctx = hardirq_ctx[smp_processor_id()]; | ||
71 | |||
72 | /* | ||
73 | * this is where we switch to the IRQ stack. However, if we are | ||
74 | * already using the IRQ stack (because we interrupted a hardirq | ||
75 | * handler) we can't do that and just have to keep using the | ||
76 | * current stack (which is the irq stack already after all) | ||
77 | */ | ||
78 | if (curctx != irqctx) { | ||
79 | /* build the stack frame on the IRQ stack */ | ||
80 | isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info)); | ||
81 | irqctx->tinfo.task = curctx->tinfo.task; | ||
82 | |||
83 | /* | ||
84 | * Copy the softirq bits in preempt_count so that the | ||
85 | * softirq checks work in the hardirq context. | ||
86 | */ | ||
87 | irqctx->tinfo.preempt_count = | ||
88 | (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | | ||
89 | (curctx->tinfo.preempt_count & SOFTIRQ_MASK); | ||
90 | |||
91 | desc = irq_to_desc(irq); | ||
92 | |||
93 | asm volatile ( | ||
94 | "MOV D0.5,%0\n" | ||
95 | "MOV D1Ar1,%1\n" | ||
96 | "MOV D1RtP,%2\n" | ||
97 | "MOV D0Ar2,%3\n" | ||
98 | "SWAP A0StP,D0.5\n" | ||
99 | "SWAP PC,D1RtP\n" | ||
100 | "MOV A0StP,D0.5\n" | ||
101 | : | ||
102 | : "r" (isp), "r" (irq), "r" (desc->handle_irq), | ||
103 | "r" (desc) | ||
104 | : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4", | ||
105 | "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP", | ||
106 | "D0.5" | ||
107 | ); | ||
108 | } else | ||
109 | #endif | ||
110 | generic_handle_irq(irq); | ||
111 | |||
112 | irq_exit(); | ||
113 | |||
114 | set_irq_regs(old_regs); | ||
115 | } | ||
116 | |||
117 | #ifdef CONFIG_4KSTACKS | ||
118 | |||
119 | static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; | ||
120 | |||
121 | static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; | ||
122 | |||
123 | /* | ||
124 | * allocate per-cpu stacks for hardirq and for softirq processing | ||
125 | */ | ||
126 | void irq_ctx_init(int cpu) | ||
127 | { | ||
128 | union irq_ctx *irqctx; | ||
129 | |||
130 | if (hardirq_ctx[cpu]) | ||
131 | return; | ||
132 | |||
133 | irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE]; | ||
134 | irqctx->tinfo.task = NULL; | ||
135 | irqctx->tinfo.exec_domain = NULL; | ||
136 | irqctx->tinfo.cpu = cpu; | ||
137 | irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; | ||
138 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | ||
139 | |||
140 | hardirq_ctx[cpu] = irqctx; | ||
141 | |||
142 | irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE]; | ||
143 | irqctx->tinfo.task = NULL; | ||
144 | irqctx->tinfo.exec_domain = NULL; | ||
145 | irqctx->tinfo.cpu = cpu; | ||
146 | irqctx->tinfo.preempt_count = 0; | ||
147 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | ||
148 | |||
149 | softirq_ctx[cpu] = irqctx; | ||
150 | |||
151 | pr_info("CPU %u irqstacks, hard=%p soft=%p\n", | ||
152 | cpu, hardirq_ctx[cpu], softirq_ctx[cpu]); | ||
153 | } | ||
154 | |||
155 | void irq_ctx_exit(int cpu) | ||
156 | { | ||
157 | hardirq_ctx[smp_processor_id()] = NULL; | ||
158 | } | ||
159 | |||
160 | extern asmlinkage void __do_softirq(void); | ||
161 | |||
162 | asmlinkage void do_softirq(void) | ||
163 | { | ||
164 | unsigned long flags; | ||
165 | struct thread_info *curctx; | ||
166 | union irq_ctx *irqctx; | ||
167 | u32 *isp; | ||
168 | |||
169 | if (in_interrupt()) | ||
170 | return; | ||
171 | |||
172 | local_irq_save(flags); | ||
173 | |||
174 | if (local_softirq_pending()) { | ||
175 | curctx = current_thread_info(); | ||
176 | irqctx = softirq_ctx[smp_processor_id()]; | ||
177 | irqctx->tinfo.task = curctx->task; | ||
178 | |||
179 | /* build the stack frame on the softirq stack */ | ||
180 | isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info)); | ||
181 | |||
182 | asm volatile ( | ||
183 | "MOV D0.5,%0\n" | ||
184 | "SWAP A0StP,D0.5\n" | ||
185 | "CALLR D1RtP,___do_softirq\n" | ||
186 | "MOV A0StP,D0.5\n" | ||
187 | : | ||
188 | : "r" (isp) | ||
189 | : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4", | ||
190 | "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP", | ||
191 | "D0.5" | ||
192 | ); | ||
193 | /* | ||
194 | * Shouldn't happen; we returned above if in_interrupt(). | ||
195 | */ | ||
196 | WARN_ON_ONCE(softirq_count()); | ||
197 | } | ||
198 | |||
199 | local_irq_restore(flags); | ||
200 | } | ||
201 | #endif | ||
202 | |||
203 | static struct irq_chip meta_irq_type = { | ||
204 | .name = "META-IRQ", | ||
205 | .irq_startup = startup_meta_irq, | ||
206 | .irq_shutdown = shutdown_meta_irq, | ||
207 | }; | ||
208 | |||
209 | /** | ||
210 | * tbisig_map() - Map a TBI signal number to a virtual IRQ number. | ||
211 | * @hw: Number of the TBI signal. Must be in range. | ||
212 | * | ||
213 | * Returns: The virtual IRQ number of the TBI signal number IRQ specified by | ||
214 | * @hw. | ||
215 | */ | ||
216 | int tbisig_map(unsigned int hw) | ||
217 | { | ||
218 | return irq_create_mapping(root_domain, hw); | ||
219 | } | ||
220 | |||
221 | /** | ||
222 | * metag_tbisig_map() - map a tbi signal to a Linux virtual IRQ number | ||
223 | * @d: root irq domain | ||
224 | * @irq: virtual irq number | ||
225 | * @hw: hardware irq number (TBI signal number) | ||
226 | * | ||
227 | * This sets up a virtual irq for a specified TBI signal number. | ||
228 | */ | ||
229 | static int metag_tbisig_map(struct irq_domain *d, unsigned int irq, | ||
230 | irq_hw_number_t hw) | ||
231 | { | ||
232 | #ifdef CONFIG_SMP | ||
233 | irq_set_chip_and_handler(irq, &meta_irq_type, handle_percpu_irq); | ||
234 | #else | ||
235 | irq_set_chip_and_handler(irq, &meta_irq_type, handle_simple_irq); | ||
236 | #endif | ||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | static const struct irq_domain_ops metag_tbisig_domain_ops = { | ||
241 | .map = metag_tbisig_map, | ||
242 | }; | ||
243 | |||
244 | /* | ||
245 | * void init_IRQ(void) | ||
246 | * | ||
247 | * Parameters: None | ||
248 | * | ||
249 | * Returns: Nothing | ||
250 | * | ||
251 | * This function should be called during kernel startup to initialize | ||
252 | * the IRQ handling routines. | ||
253 | */ | ||
254 | void __init init_IRQ(void) | ||
255 | { | ||
256 | root_domain = irq_domain_add_linear(NULL, 32, | ||
257 | &metag_tbisig_domain_ops, NULL); | ||
258 | if (unlikely(!root_domain)) | ||
259 | panic("init_IRQ: cannot add root IRQ domain"); | ||
260 | |||
261 | irq_ctx_init(smp_processor_id()); | ||
262 | |||
263 | init_internal_IRQ(); | ||
264 | init_external_IRQ(); | ||
265 | |||
266 | if (machine_desc->init_irq) | ||
267 | machine_desc->init_irq(); | ||
268 | } | ||
269 | |||
270 | int __init arch_probe_nr_irqs(void) | ||
271 | { | ||
272 | if (machine_desc->nr_irqs) | ||
273 | nr_irqs = machine_desc->nr_irqs; | ||
274 | return 0; | ||
275 | } | ||
276 | |||
277 | #ifdef CONFIG_HOTPLUG_CPU | ||
278 | static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu) | ||
279 | { | ||
280 | struct irq_desc *desc = irq_to_desc(irq); | ||
281 | struct irq_chip *chip = irq_data_get_irq_chip(data); | ||
282 | |||
283 | raw_spin_lock_irq(&desc->lock); | ||
284 | if (chip->irq_set_affinity) | ||
285 | chip->irq_set_affinity(data, cpumask_of(cpu), false); | ||
286 | raw_spin_unlock_irq(&desc->lock); | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * The CPU has been marked offline. Migrate IRQs off this CPU. If | ||
291 | * the affinity settings do not allow other CPUs, force them onto any | ||
292 | * available CPU. | ||
293 | */ | ||
294 | void migrate_irqs(void) | ||
295 | { | ||
296 | unsigned int i, cpu = smp_processor_id(); | ||
297 | struct irq_desc *desc; | ||
298 | |||
299 | for_each_irq_desc(i, desc) { | ||
300 | struct irq_data *data = irq_desc_get_irq_data(desc); | ||
301 | unsigned int newcpu; | ||
302 | |||
303 | if (irqd_is_per_cpu(data)) | ||
304 | continue; | ||
305 | |||
306 | if (!cpumask_test_cpu(cpu, data->affinity)) | ||
307 | continue; | ||
308 | |||
309 | newcpu = cpumask_any_and(data->affinity, cpu_online_mask); | ||
310 | |||
311 | if (newcpu >= nr_cpu_ids) { | ||
312 | pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", | ||
313 | i, cpu); | ||
314 | |||
315 | cpumask_setall(data->affinity); | ||
316 | newcpu = cpumask_any_and(data->affinity, | ||
317 | cpu_online_mask); | ||
318 | } | ||
319 | |||
320 | route_irq(data, i, newcpu); | ||
321 | } | ||
322 | } | ||
323 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
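
A sketch of turning a TBI signal into a usable Linux interrupt with tbisig_map() above; TBID_SIGNUM_SW0 is assumed to be one of the TBI signal number defines, and the handler is illustrative.

    #include <linux/interrupt.h>

    static irqreturn_t example_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int __init example_irq_init(void)
    {
            int irq = tbisig_map(TBID_SIGNUM_SW0);   /* assumed signal define */

            return request_irq(irq, example_handler, 0, "example", NULL);
    }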
diff --git a/arch/metag/kernel/kick.c b/arch/metag/kernel/kick.c new file mode 100644 index 000000000000..50fcbec98cd2 --- /dev/null +++ b/arch/metag/kernel/kick.c | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 Imagination Technologies | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file COPYING in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * The Meta KICK interrupt mechanism is generally a useful feature, so | ||
9 | * we provide an interface for registering multiple interrupt | ||
10 | * handlers. All the registered interrupt handlers are "chained". When | ||
11 | * a KICK interrupt is received the first function in the list is | ||
12 | * called. If that interrupt handler cannot handle the KICK the next | ||
13 | * one is called, then the next until someone handles it (or we run | ||
14 | * out of functions). As soon as one function handles the interrupt no | ||
15 | * other handlers are called. | ||
16 | * | ||
17 | * The only downside of chaining interrupt handlers is that each | ||
18 | * handler must be able to detect whether the KICK was intended for it | ||
19 | * or not. For example, when the IPI handler runs and it sees that | ||
20 | * there are no IPI messages it must not signal that the KICK was | ||
21 | * handled, thereby giving the other handlers a chance to run. | ||
22 | * | ||
23 | * The reason that we provide our own interface for calling KICK | ||
24 | * handlers instead of using the generic kernel infrastructure is that | ||
25 | * the KICK handlers require access to a CPU's pTBI structure. So we | ||
26 | * pass it as an argument. | ||
27 | */ | ||
28 | #include <linux/export.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/mm.h> | ||
31 | #include <linux/types.h> | ||
32 | |||
33 | #include <asm/traps.h> | ||
34 | |||
35 | /* | ||
36 | * All accesses/manipulations of kick_handlers_list should be | ||
37 | * performed while holding kick_handlers_lock. | ||
38 | */ | ||
39 | static DEFINE_SPINLOCK(kick_handlers_lock); | ||
40 | static LIST_HEAD(kick_handlers_list); | ||
41 | |||
42 | void kick_register_func(struct kick_irq_handler *kh) | ||
43 | { | ||
44 | unsigned long flags; | ||
45 | |||
46 | spin_lock_irqsave(&kick_handlers_lock, flags); | ||
47 | |||
48 | list_add_tail(&kh->list, &kick_handlers_list); | ||
49 | |||
50 | spin_unlock_irqrestore(&kick_handlers_lock, flags); | ||
51 | } | ||
52 | EXPORT_SYMBOL(kick_register_func); | ||
53 | |||
54 | void kick_unregister_func(struct kick_irq_handler *kh) | ||
55 | { | ||
56 | unsigned long flags; | ||
57 | |||
58 | spin_lock_irqsave(&kick_handlers_lock, flags); | ||
59 | |||
60 | list_del(&kh->list); | ||
61 | |||
62 | spin_unlock_irqrestore(&kick_handlers_lock, flags); | ||
63 | } | ||
64 | EXPORT_SYMBOL(kick_unregister_func); | ||
65 | |||
66 | TBIRES | ||
67 | kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI) | ||
68 | { | ||
69 | struct kick_irq_handler *kh; | ||
70 | struct list_head *lh; | ||
71 | int handled = 0; | ||
72 | TBIRES ret; | ||
73 | |||
74 | head_end(State, ~INTS_OFF_MASK); | ||
75 | |||
76 | /* If we interrupted user code handle any critical sections. */ | ||
77 | if (State.Sig.SaveMask & TBICTX_PRIV_BIT) | ||
78 | restart_critical_section(State); | ||
79 | |||
80 | trace_hardirqs_off(); | ||
81 | |||
82 | /* | ||
83 | * There is no need to disable interrupts here because we | ||
84 | * can't nest KICK interrupts in a KICK interrupt handler. | ||
85 | */ | ||
86 | spin_lock(&kick_handlers_lock); | ||
87 | |||
88 | list_for_each(lh, &kick_handlers_list) { | ||
89 | kh = list_entry(lh, struct kick_irq_handler, list); | ||
90 | |||
91 | ret = kh->func(State, SigNum, Triggers, Inst, pTBI, &handled); | ||
92 | if (handled) | ||
93 | break; | ||
94 | } | ||
95 | |||
96 | spin_unlock(&kick_handlers_lock); | ||
97 | |||
98 | WARN_ON(!handled); | ||
99 | |||
100 | return tail_end(ret); | ||
101 | } | ||
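
A sketch of chaining onto the KICK interrupt through the interface above. The contract, per the comment at the top of the file, is that a handler sets *handled only when the KICK was really meant for it; the pending-work test below is an assumed helper.

    static TBIRES example_kick_func(TBIRES State, int SigNum, int Triggers,
                                    int Inst, PTBI pTBI, int *handled)
    {
            if (!example_work_pending()) {   /* assumed "is it ours?" check */
                    *handled = 0;            /* give the next handler a go */
                    return State;
            }

            /* ... handle the KICK here ... */
            *handled = 1;
            return State;
    }

    static struct kick_irq_handler example_kh = {
            .func = example_kick_func,
    };

    static int __init example_kick_init(void)
    {
            kick_register_func(&example_kh);
            return 0;
    }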
diff --git a/arch/metag/kernel/machines.c b/arch/metag/kernel/machines.c new file mode 100644 index 000000000000..1edf6ba193b1 --- /dev/null +++ b/arch/metag/kernel/machines.c | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * arch/metag/kernel/machines.c | ||
3 | * | ||
4 | * Copyright (C) 2012 Imagination Technologies Ltd. | ||
5 | * | ||
6 | * Generic Meta Boards. | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <asm/irq.h> | ||
11 | #include <asm/mach/arch.h> | ||
12 | |||
13 | static const char *meta_boards_compat[] __initdata = { | ||
14 | "img,meta", | ||
15 | NULL, | ||
16 | }; | ||
17 | |||
18 | MACHINE_START(META, "Generic Meta") | ||
19 | .dt_compat = meta_boards_compat, | ||
20 | MACHINE_END | ||
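
A sketch of what a board-specific machine description would look like next to the generic one above; the "acme" names and the init_irq hook (a machine_desc field used by init_IRQ() in irq.c) are illustrative.

    static const char *acme_board_compat[] __initdata = {
            "acme,meta-board",
            NULL,
    };

    MACHINE_START(ACME, "Acme Meta Board")
            .dt_compat = acme_board_compat,
            .init_irq  = acme_init_irq,      /* assumed board hook */
    MACHINE_END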
diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c new file mode 100644 index 000000000000..ec872ef14eb1 --- /dev/null +++ b/arch/metag/kernel/metag_ksyms.c | |||
@@ -0,0 +1,49 @@ | |||
1 | #include <linux/export.h> | ||
2 | |||
3 | #include <asm/div64.h> | ||
4 | #include <asm/ftrace.h> | ||
5 | #include <asm/page.h> | ||
6 | #include <asm/string.h> | ||
7 | #include <asm/tbx.h> | ||
8 | |||
9 | EXPORT_SYMBOL(clear_page); | ||
10 | EXPORT_SYMBOL(copy_page); | ||
11 | |||
12 | #ifdef CONFIG_FLATMEM | ||
13 | /* needed for the pfn_valid macro */ | ||
14 | EXPORT_SYMBOL(max_pfn); | ||
15 | EXPORT_SYMBOL(min_low_pfn); | ||
16 | #endif | ||
17 | |||
18 | /* TBI symbols */ | ||
19 | EXPORT_SYMBOL(__TBI); | ||
20 | EXPORT_SYMBOL(__TBIFindSeg); | ||
21 | EXPORT_SYMBOL(__TBIPoll); | ||
22 | EXPORT_SYMBOL(__TBITimeStamp); | ||
23 | |||
24 | #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) | ||
25 | |||
26 | /* libgcc functions */ | ||
27 | DECLARE_EXPORT(__ashldi3); | ||
28 | DECLARE_EXPORT(__ashrdi3); | ||
29 | DECLARE_EXPORT(__lshrdi3); | ||
30 | DECLARE_EXPORT(__udivsi3); | ||
31 | DECLARE_EXPORT(__divsi3); | ||
32 | DECLARE_EXPORT(__umodsi3); | ||
33 | DECLARE_EXPORT(__modsi3); | ||
34 | DECLARE_EXPORT(__muldi3); | ||
35 | DECLARE_EXPORT(__cmpdi2); | ||
36 | DECLARE_EXPORT(__ucmpdi2); | ||
37 | |||
38 | /* Maths functions */ | ||
39 | EXPORT_SYMBOL(div_u64); | ||
40 | EXPORT_SYMBOL(div_s64); | ||
41 | |||
42 | /* String functions */ | ||
43 | EXPORT_SYMBOL(memcpy); | ||
44 | EXPORT_SYMBOL(memset); | ||
45 | EXPORT_SYMBOL(memmove); | ||
46 | |||
47 | #ifdef CONFIG_FUNCTION_TRACER | ||
48 | EXPORT_SYMBOL(mcount_wrapper); | ||
49 | #endif | ||
diff --git a/arch/metag/kernel/module.c b/arch/metag/kernel/module.c new file mode 100644 index 000000000000..986331cd0a52 --- /dev/null +++ b/arch/metag/kernel/module.c | |||
@@ -0,0 +1,284 @@ | |||
1 | /* Kernel module help for Meta. | ||
2 | |||
3 | This program is free software; you can redistribute it and/or modify | ||
4 | it under the terms of the GNU General Public License as published by | ||
5 | the Free Software Foundation; either version 2 of the License, or | ||
6 | (at your option) any later version. | ||
7 | |||
8 | This program is distributed in the hope that it will be useful, | ||
9 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | GNU General Public License for more details. | ||
12 | */ | ||
13 | #include <linux/moduleloader.h> | ||
14 | #include <linux/elf.h> | ||
15 | #include <linux/vmalloc.h> | ||
16 | #include <linux/fs.h> | ||
17 | #include <linux/string.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/sort.h> | ||
20 | |||
21 | #include <asm/unaligned.h> | ||
22 | |||
23 | /* Count how many different relocations (different symbol, different | ||
24 | addend) */ | ||
25 | static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num) | ||
26 | { | ||
27 | unsigned int i, r_info, r_addend, _count_relocs; | ||
28 | |||
29 | _count_relocs = 0; | ||
30 | r_info = 0; | ||
31 | r_addend = 0; | ||
32 | for (i = 0; i < num; i++) | ||
33 | /* Only count relbranch relocs, others don't need stubs */ | ||
34 | if (ELF32_R_TYPE(rela[i].r_info) == R_METAG_RELBRANCH && | ||
35 | (r_info != ELF32_R_SYM(rela[i].r_info) || | ||
36 | r_addend != rela[i].r_addend)) { | ||
37 | _count_relocs++; | ||
38 | r_info = ELF32_R_SYM(rela[i].r_info); | ||
39 | r_addend = rela[i].r_addend; | ||
40 | } | ||
41 | |||
42 | return _count_relocs; | ||
43 | } | ||
44 | |||
45 | static int relacmp(const void *_x, const void *_y) | ||
46 | { | ||
47 | const Elf32_Rela *x, *y; | ||
48 | |||
49 | y = (Elf32_Rela *)_x; | ||
50 | x = (Elf32_Rela *)_y; | ||
51 | |||
52 | /* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to | ||
53 | * make the comparison cheaper/faster. It won't affect the sorting or | ||
54 | * the counting algorithms' performance. | ||
55 | */ | ||
56 | if (x->r_info < y->r_info) | ||
57 | return -1; | ||
58 | else if (x->r_info > y->r_info) | ||
59 | return 1; | ||
60 | else if (x->r_addend < y->r_addend) | ||
61 | return -1; | ||
62 | else if (x->r_addend > y->r_addend) | ||
63 | return 1; | ||
64 | else | ||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | static void relaswap(void *_x, void *_y, int size) | ||
69 | { | ||
70 | uint32_t *x, *y, tmp; | ||
71 | int i; | ||
72 | |||
73 | y = (uint32_t *)_x; | ||
74 | x = (uint32_t *)_y; | ||
75 | |||
76 | for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) { | ||
77 | tmp = x[i]; | ||
78 | x[i] = y[i]; | ||
79 | y[i] = tmp; | ||
80 | } | ||
81 | } | ||
82 | |||
83 | /* Get the potential trampolines size required of the init and | ||
84 | non-init sections */ | ||
85 | static unsigned long get_plt_size(const Elf32_Ehdr *hdr, | ||
86 | const Elf32_Shdr *sechdrs, | ||
87 | const char *secstrings, | ||
88 | int is_init) | ||
89 | { | ||
90 | unsigned long ret = 0; | ||
91 | unsigned i; | ||
92 | |||
93 | /* Everything marked ALLOC (this includes the exported | ||
94 | symbols) */ | ||
95 | for (i = 1; i < hdr->e_shnum; i++) { | ||
96 | /* If it's called *.init*, and we're not init, we're | ||
97 | not interested */ | ||
98 | if ((strstr(secstrings + sechdrs[i].sh_name, ".init") != NULL) | ||
99 | != is_init) | ||
100 | continue; | ||
101 | |||
102 | /* We don't want to look at debug sections. */ | ||
103 | if (strstr(secstrings + sechdrs[i].sh_name, ".debug") != NULL) | ||
104 | continue; | ||
105 | |||
106 | if (sechdrs[i].sh_type == SHT_RELA) { | ||
107 | pr_debug("Found relocations in section %u\n", i); | ||
108 | pr_debug("Ptr: %p. Number: %u\n", | ||
109 | (void *)hdr + sechdrs[i].sh_offset, | ||
110 | sechdrs[i].sh_size / sizeof(Elf32_Rela)); | ||
111 | |||
112 | /* Sort the relocation information based on a symbol and | ||
113 | * addend key. This is a stable O(n*log n) complexity | ||
114 | * algorithm, but it reduces the complexity of | ||
115 | * count_relocs() to linear, O(n). | ||
116 | */ | ||
117 | sort((void *)hdr + sechdrs[i].sh_offset, | ||
118 | sechdrs[i].sh_size / sizeof(Elf32_Rela), | ||
119 | sizeof(Elf32_Rela), relacmp, relaswap); | ||
120 | |||
121 | ret += count_relocs((void *)hdr | ||
122 | + sechdrs[i].sh_offset, | ||
123 | sechdrs[i].sh_size | ||
124 | / sizeof(Elf32_Rela)) | ||
125 | * sizeof(struct metag_plt_entry); | ||
126 | } | ||
127 | } | ||
128 | |||
129 | return ret; | ||
130 | } | ||
131 | |||
132 | int module_frob_arch_sections(Elf32_Ehdr *hdr, | ||
133 | Elf32_Shdr *sechdrs, | ||
134 | char *secstrings, | ||
135 | struct module *me) | ||
136 | { | ||
137 | unsigned int i; | ||
138 | |||
139 | /* Find .plt and .init.plt sections */ | ||
140 | for (i = 0; i < hdr->e_shnum; i++) { | ||
141 | if (strcmp(secstrings + sechdrs[i].sh_name, ".init.plt") == 0) | ||
142 | me->arch.init_plt_section = i; | ||
143 | else if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0) | ||
144 | me->arch.core_plt_section = i; | ||
145 | } | ||
146 | if (!me->arch.core_plt_section || !me->arch.init_plt_section) { | ||
147 | pr_err("Module doesn't contain .plt or .init.plt sections.\n"); | ||
148 | return -ENOEXEC; | ||
149 | } | ||
150 | |||
151 | /* Override their sizes */ | ||
152 | sechdrs[me->arch.core_plt_section].sh_size | ||
153 | = get_plt_size(hdr, sechdrs, secstrings, 0); | ||
154 | sechdrs[me->arch.core_plt_section].sh_type = SHT_NOBITS; | ||
155 | sechdrs[me->arch.init_plt_section].sh_size | ||
156 | = get_plt_size(hdr, sechdrs, secstrings, 1); | ||
157 | sechdrs[me->arch.init_plt_section].sh_type = SHT_NOBITS; | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | /* Set up a trampoline in the PLT to bounce us to the distant function */ | ||
162 | static uint32_t do_plt_call(void *location, Elf32_Addr val, | ||
163 | Elf32_Shdr *sechdrs, struct module *mod) | ||
164 | { | ||
165 | struct metag_plt_entry *entry; | ||
166 | /* Instructions used to do the indirect jump. */ | ||
167 | uint32_t tramp[2]; | ||
168 | |||
169 | /* We have to trash a register, so we assume that any control | ||
170 | transfer more than 21 bits away must be a function call | ||
171 | (so we can use a call-clobbered register). */ | ||
172 | |||
173 | /* MOVT D0Re0,#HI(v) */ | ||
174 | tramp[0] = 0x02000005 | (((val & 0xffff0000) >> 16) << 3); | ||
175 | /* JUMP D0Re0,#LO(v) */ | ||
176 | tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3); | ||
177 | |||
178 | /* Init, or core PLT? */ | ||
179 | if (location >= mod->module_core | ||
180 | && location < mod->module_core + mod->core_size) | ||
181 | entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; | ||
182 | else | ||
183 | entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; | ||
184 | |||
185 | /* Find this entry, or if that fails, the next avail. entry */ | ||
186 | while (entry->tramp[0]) | ||
187 | if (entry->tramp[0] == tramp[0] && entry->tramp[1] == tramp[1]) | ||
188 | return (uint32_t)entry; | ||
189 | else | ||
190 | entry++; | ||
191 | |||
192 | entry->tramp[0] = tramp[0]; | ||
193 | entry->tramp[1] = tramp[1]; | ||
194 | |||
195 | return (uint32_t)entry; | ||
196 | } | ||
197 | |||
198 | int apply_relocate_add(Elf32_Shdr *sechdrs, | ||
199 | const char *strtab, | ||
200 | unsigned int symindex, | ||
201 | unsigned int relsec, | ||
202 | struct module *me) | ||
203 | { | ||
204 | unsigned int i; | ||
205 | Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr; | ||
206 | Elf32_Sym *sym; | ||
207 | Elf32_Addr relocation; | ||
208 | uint32_t *location; | ||
209 | int32_t value; | ||
210 | |||
211 | pr_debug("Applying relocate section %u to %u\n", relsec, | ||
212 | sechdrs[relsec].sh_info); | ||
213 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
214 | /* This is where to make the change */ | ||
215 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
216 | + rel[i].r_offset; | ||
217 | /* This is the symbol it is referring to. Note that all | ||
218 | undefined symbols have been resolved. */ | ||
219 | sym = (Elf32_Sym *)sechdrs[symindex].sh_addr | ||
220 | + ELF32_R_SYM(rel[i].r_info); | ||
221 | relocation = sym->st_value + rel[i].r_addend; | ||
222 | |||
223 | switch (ELF32_R_TYPE(rel[i].r_info)) { | ||
224 | case R_METAG_NONE: | ||
225 | break; | ||
226 | case R_METAG_HIADDR16: | ||
227 | relocation >>= 16; | ||
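| /* fall through - R_METAG_LOADDR16 inserts the (now shifted) value */ | ||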
228 | case R_METAG_LOADDR16: | ||
229 | *location = (*location & 0xfff80007) | | ||
230 | ((relocation & 0xffff) << 3); | ||
231 | break; | ||
232 | case R_METAG_ADDR32: | ||
233 | /* | ||
234 | * Packed data structures may cause a misaligned | ||
235 | * R_METAG_ADDR32 to be emitted. | ||
236 | */ | ||
237 | put_unaligned(relocation, location); | ||
238 | break; | ||
239 | case R_METAG_GETSETOFF: | ||
240 | *location += ((relocation & 0xfff) << 7); | ||
241 | break; | ||
242 | case R_METAG_RELBRANCH: | ||
243 | if (*location & (0x7ffff << 5)) { | ||
244 | pr_err("bad relbranch relocation\n"); | ||
245 | break; | ||
246 | } | ||
247 | |||
248 | /* This jump is too big for the offset slot. Build | ||
249 | * a PLT to jump through to get to where we want to go. | ||
250 | * NB: 21-bit check - not scaled to 19-bit yet | ||
251 | */ | ||
252 | if (((int32_t)(relocation - | ||
253 | (uint32_t)location) > 0xfffff) || | ||
254 | ((int32_t)(relocation - | ||
255 | (uint32_t)location) < -0xfffff)) { | ||
256 | relocation = do_plt_call(location, relocation, | ||
257 | sechdrs, me); | ||
258 | } | ||
259 | |||
260 | value = relocation - (uint32_t)location; | ||
261 | |||
262 | /* branch instruction aligned */ | ||
263 | value /= 4; | ||
264 | |||
265 | if ((value > 0x7ffff) || (value < -0x7ffff)) { | ||
266 | /* | ||
267 | * this should have been caught by the code | ||
268 | * above! | ||
269 | */ | ||
270 | pr_err("overflow of relbranch reloc\n"); | ||
271 | } | ||
272 | |||
273 | *location = (*location & (~(0x7ffff << 5))) | | ||
274 | ((value & 0x7ffff) << 5); | ||
275 | break; | ||
276 | |||
277 | default: | ||
278 | pr_err("module %s: Unknown relocation: %u\n", | ||
279 | me->name, ELF32_R_TYPE(rel[i].r_info)); | ||
280 | return -ENOEXEC; | ||
281 | } | ||
282 | } | ||
283 | return 0; | ||
284 | } | ||
diff --git a/arch/metag/kernel/perf/Makefile b/arch/metag/kernel/perf/Makefile new file mode 100644 index 000000000000..b158cb27208d --- /dev/null +++ b/arch/metag/kernel/perf/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | # Makefile for performance event core | ||
2 | |||
3 | obj-y += perf_event.o | ||
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c new file mode 100644 index 000000000000..a876d5ff3897 --- /dev/null +++ b/arch/metag/kernel/perf/perf_event.c | |||
@@ -0,0 +1,861 @@ | |||
1 | /* | ||
2 | * Meta performance counter support. | ||
3 | * Copyright (C) 2012 Imagination Technologies Ltd | ||
4 | * | ||
5 | * This code is based on the sh pmu code: | ||
6 | * Copyright (C) 2009 Paul Mundt | ||
7 | * | ||
8 | * and on the arm pmu code: | ||
9 | * Copyright (C) 2009 picoChip Designs, Ltd., James Iles | ||
10 | * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com> | ||
11 | * | ||
12 | * This file is subject to the terms and conditions of the GNU General Public | ||
13 | * License. See the file "COPYING" in the main directory of this archive | ||
14 | * for more details. | ||
15 | */ | ||
16 | |||
17 | #include <linux/atomic.h> | ||
18 | #include <linux/export.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/irqchip/metag.h> | ||
21 | #include <linux/perf_event.h> | ||
22 | #include <linux/slab.h> | ||
23 | |||
24 | #include <asm/core_reg.h> | ||
25 | #include <asm/hwthread.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/irq.h> | ||
28 | |||
29 | #include "perf_event.h" | ||
30 | |||
31 | static int _hw_perf_event_init(struct perf_event *); | ||
32 | static void _hw_perf_event_destroy(struct perf_event *); | ||
33 | |||
34 | /* Determines which core type we are */ | ||
35 | static struct metag_pmu *metag_pmu __read_mostly; | ||
36 | |||
37 | /* Processor specific data */ | ||
38 | static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | ||
39 | |||
40 | /* PMU admin */ | ||
41 | const char *perf_pmu_name(void) | ||
42 | { | ||
43 | if (metag_pmu) | ||
44 | return metag_pmu->pmu.name; | ||
45 | |||
46 | return NULL; | ||
47 | } | ||
48 | EXPORT_SYMBOL_GPL(perf_pmu_name); | ||
49 | |||
50 | int perf_num_counters(void) | ||
51 | { | ||
52 | if (metag_pmu) | ||
53 | return metag_pmu->max_events; | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | EXPORT_SYMBOL_GPL(perf_num_counters); | ||
58 | |||
59 | static inline int metag_pmu_initialised(void) | ||
60 | { | ||
61 | return !!metag_pmu; | ||
62 | } | ||
63 | |||
64 | static void release_pmu_hardware(void) | ||
65 | { | ||
66 | int irq; | ||
67 | unsigned int version = (metag_pmu->version & | ||
68 | (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >> | ||
69 | METAC_ID_REV_S; | ||
70 | |||
71 | /* Early cores don't have overflow interrupts */ | ||
72 | if (version < 0x0104) | ||
73 | return; | ||
74 | |||
75 | irq = internal_irq_map(17); | ||
76 | if (irq >= 0) | ||
77 | free_irq(irq, (void *)1); | ||
78 | |||
79 | irq = internal_irq_map(16); | ||
80 | if (irq >= 0) | ||
81 | free_irq(irq, (void *)0); | ||
82 | } | ||
83 | |||
84 | static int reserve_pmu_hardware(void) | ||
85 | { | ||
86 | int err = 0, irq[2]; | ||
87 | unsigned int version = (metag_pmu->version & | ||
88 | (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >> | ||
89 | METAC_ID_REV_S; | ||
90 | |||
91 | /* Early cores don't have overflow interrupts */ | ||
92 | if (version < 0x0104) | ||
93 | goto out; | ||
94 | |||
95 | /* | ||
96 | * Bit 16 on HWSTATMETA is the interrupt for performance counter 0; | ||
97 | * similarly, 17 is the interrupt for performance counter 1. | ||
98 | * We can't (yet) interrupt on the cycle counter, because it's a | ||
99 | * register; however, it holds a 32-bit value rather than 24 bits. | ||
100 | */ | ||
101 | irq[0] = internal_irq_map(16); | ||
102 | if (irq[0] < 0) { | ||
103 | pr_err("unable to map internal IRQ %d\n", 16); | ||
104 | goto out; | ||
105 | } | ||
106 | err = request_irq(irq[0], metag_pmu->handle_irq, IRQF_NOBALANCING, | ||
107 | "metagpmu0", (void *)0); | ||
108 | if (err) { | ||
109 | pr_err("unable to request IRQ%d for metag PMU counters\n", | ||
110 | irq[0]); | ||
111 | goto out; | ||
112 | } | ||
113 | |||
114 | irq[1] = internal_irq_map(17); | ||
115 | if (irq[1] < 0) { | ||
116 | pr_err("unable to map internal IRQ %d\n", 17); | ||
117 | goto out_irq1; | ||
118 | } | ||
119 | err = request_irq(irq[1], metag_pmu->handle_irq, IRQF_NOBALANCING, | ||
120 | "metagpmu1", (void *)1); | ||
121 | if (err) { | ||
122 | pr_err("unable to request IRQ%d for metag PMU counters\n", | ||
123 | irq[1]); | ||
124 | goto out_irq1; | ||
125 | } | ||
126 | |||
127 | return 0; | ||
128 | |||
129 | out_irq1: | ||
130 | free_irq(irq[0], (void *)0); | ||
131 | out: | ||
132 | return err; | ||
133 | } | ||
134 | |||
135 | /* PMU operations */ | ||
136 | static void metag_pmu_enable(struct pmu *pmu) | ||
137 | { | ||
138 | } | ||
139 | |||
140 | static void metag_pmu_disable(struct pmu *pmu) | ||
141 | { | ||
142 | } | ||
143 | |||
144 | static int metag_pmu_event_init(struct perf_event *event) | ||
145 | { | ||
146 | int err = 0; | ||
147 | atomic_t *active_events = &metag_pmu->active_events; | ||
148 | |||
149 | if (!metag_pmu_initialised()) { | ||
150 | err = -ENODEV; | ||
151 | goto out; | ||
152 | } | ||
153 | |||
154 | if (has_branch_stack(event)) | ||
155 | return -EOPNOTSUPP; | ||
156 | |||
157 | event->destroy = _hw_perf_event_destroy; | ||
158 | |||
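| /* The first active event reserves the PMU hardware; later events just take a reference. */ | ||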
159 | if (!atomic_inc_not_zero(active_events)) { | ||
160 | mutex_lock(&metag_pmu->reserve_mutex); | ||
161 | if (atomic_read(active_events) == 0) | ||
162 | err = reserve_pmu_hardware(); | ||
163 | |||
164 | if (!err) | ||
165 | atomic_inc(active_events); | ||
166 | |||
167 | mutex_unlock(&metag_pmu->reserve_mutex); | ||
168 | } | ||
169 | |||
170 | /* Hardware and caches counters */ | ||
171 | switch (event->attr.type) { | ||
172 | case PERF_TYPE_HARDWARE: | ||
173 | case PERF_TYPE_HW_CACHE: | ||
174 | err = _hw_perf_event_init(event); | ||
175 | break; | ||
176 | |||
177 | default: | ||
178 | return -ENOENT; | ||
179 | } | ||
180 | |||
181 | if (err) | ||
182 | event->destroy(event); | ||
183 | |||
184 | out: | ||
185 | return err; | ||
186 | } | ||
187 | |||
188 | void metag_pmu_event_update(struct perf_event *event, | ||
189 | struct hw_perf_event *hwc, int idx) | ||
190 | { | ||
191 | u64 prev_raw_count, new_raw_count; | ||
192 | s64 delta; | ||
193 | |||
194 | /* | ||
195 | * If this counter is chained, it may be that the previous counter | ||
196 | * value has been changed beneath us. | ||
197 | * | ||
198 | * To get around this, we read and exchange the new raw count, then | ||
199 | * add the delta (new - prev) to the generic counter atomically. | ||
200 | * | ||
201 | * Without interrupts, this is the simplest approach. | ||
202 | */ | ||
203 | again: | ||
204 | prev_raw_count = local64_read(&hwc->prev_count); | ||
205 | new_raw_count = metag_pmu->read(idx); | ||
206 | |||
207 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
208 | new_raw_count) != prev_raw_count) | ||
209 | goto again; | ||
210 | |||
211 | /* | ||
212 | * Calculate the delta and add it to the counter. | ||
213 | */ | ||
214 | delta = new_raw_count - prev_raw_count; | ||
215 | |||
216 | local64_add(delta, &event->count); | ||
217 | } | ||
218 | |||
219 | int metag_pmu_event_set_period(struct perf_event *event, | ||
220 | struct hw_perf_event *hwc, int idx) | ||
221 | { | ||
222 | s64 left = local64_read(&hwc->period_left); | ||
223 | s64 period = hwc->sample_period; | ||
224 | int ret = 0; | ||
225 | |||
226 | if (unlikely(left <= -period)) { | ||
227 | left = period; | ||
228 | local64_set(&hwc->period_left, left); | ||
229 | hwc->last_period = period; | ||
230 | ret = 1; | ||
231 | } | ||
232 | |||
233 | if (unlikely(left <= 0)) { | ||
234 | left += period; | ||
235 | local64_set(&hwc->period_left, left); | ||
236 | hwc->last_period = period; | ||
237 | ret = 1; | ||
238 | } | ||
239 | |||
240 | if (left > (s64)metag_pmu->max_period) | ||
241 | left = metag_pmu->max_period; | ||
242 | |||
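| /* Program -left (mod 2^24) so the counter overflows after 'left' more events. */ | ||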
243 | if (metag_pmu->write) | ||
244 | metag_pmu->write(idx, (u64)(-left) & MAX_PERIOD); | ||
245 | |||
246 | perf_event_update_userpage(event); | ||
247 | |||
248 | return ret; | ||
249 | } | ||
250 | |||
251 | static void metag_pmu_start(struct perf_event *event, int flags) | ||
252 | { | ||
253 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
254 | struct hw_perf_event *hwc = &event->hw; | ||
255 | int idx = hwc->idx; | ||
256 | |||
257 | if (WARN_ON_ONCE(idx == -1)) | ||
258 | return; | ||
259 | |||
260 | /* | ||
261 | * We always have to reprogram the period, so ignore PERF_EF_RELOAD. | ||
262 | */ | ||
263 | if (flags & PERF_EF_RELOAD) | ||
264 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | ||
265 | |||
266 | hwc->state = 0; | ||
267 | |||
268 | /* | ||
269 | * Reset the period. | ||
270 | * Some counters can't be stopped (i.e. are core global), so when the | ||
271 | * counter was 'stopped' we merely disabled the IRQ. If we don't reset | ||
272 | * the period, then we'll either: a) get an overflow too soon; | ||
273 | * or b) get one too late if an overflow occurred while disabled. | ||
274 | * Obviously, this has little bearing on cores without the overflow | ||
275 | * interrupt, as the performance counter resets to zero on write | ||
276 | * anyway. | ||
277 | */ | ||
278 | if (metag_pmu->max_period) | ||
279 | metag_pmu_event_set_period(event, hwc, hwc->idx); | ||
280 | cpuc->events[idx] = event; | ||
281 | metag_pmu->enable(hwc, idx); | ||
282 | } | ||
283 | |||
284 | static void metag_pmu_stop(struct perf_event *event, int flags) | ||
285 | { | ||
286 | struct hw_perf_event *hwc = &event->hw; | ||
287 | |||
288 | /* | ||
289 | * We should always update the counter on stop; see comment above | ||
290 | * why. | ||
291 | */ | ||
292 | if (!(hwc->state & PERF_HES_STOPPED)) { | ||
293 | metag_pmu_event_update(event, hwc, hwc->idx); | ||
294 | metag_pmu->disable(hwc, hwc->idx); | ||
295 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
296 | } | ||
297 | } | ||
298 | |||
299 | static int metag_pmu_add(struct perf_event *event, int flags) | ||
300 | { | ||
301 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
302 | struct hw_perf_event *hwc = &event->hw; | ||
303 | int idx = 0, ret = 0; | ||
304 | |||
305 | perf_pmu_disable(event->pmu); | ||
306 | |||
307 | /* check whether we're counting instructions */ | ||
308 | if (hwc->config == 0x100) { | ||
309 | if (__test_and_set_bit(METAG_INST_COUNTER, | ||
310 | cpuc->used_mask)) { | ||
311 | ret = -EAGAIN; | ||
312 | goto out; | ||
313 | } | ||
314 | idx = METAG_INST_COUNTER; | ||
315 | } else { | ||
316 | /* Check whether we have a spare counter */ | ||
317 | idx = find_first_zero_bit(cpuc->used_mask, | ||
318 | atomic_read(&metag_pmu->active_events)); | ||
319 | if (idx >= METAG_INST_COUNTER) { | ||
320 | ret = -EAGAIN; | ||
321 | goto out; | ||
322 | } | ||
323 | |||
324 | __set_bit(idx, cpuc->used_mask); | ||
325 | } | ||
326 | hwc->idx = idx; | ||
327 | |||
328 | /* Make sure the counter is disabled */ | ||
329 | metag_pmu->disable(hwc, idx); | ||
330 | |||
331 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
332 | if (flags & PERF_EF_START) | ||
333 | metag_pmu_start(event, PERF_EF_RELOAD); | ||
334 | |||
335 | perf_event_update_userpage(event); | ||
336 | out: | ||
337 | perf_pmu_enable(event->pmu); | ||
338 | return ret; | ||
339 | } | ||
340 | |||
341 | static void metag_pmu_del(struct perf_event *event, int flags) | ||
342 | { | ||
343 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
344 | struct hw_perf_event *hwc = &event->hw; | ||
345 | int idx = hwc->idx; | ||
346 | |||
347 | WARN_ON(idx < 0); | ||
348 | metag_pmu_stop(event, PERF_EF_UPDATE); | ||
349 | cpuc->events[idx] = NULL; | ||
350 | __clear_bit(idx, cpuc->used_mask); | ||
351 | |||
352 | perf_event_update_userpage(event); | ||
353 | } | ||
354 | |||
355 | static void metag_pmu_read(struct perf_event *event) | ||
356 | { | ||
357 | struct hw_perf_event *hwc = &event->hw; | ||
358 | |||
359 | /* Don't read disabled counters! */ | ||
360 | if (hwc->idx < 0) | ||
361 | return; | ||
362 | |||
363 | metag_pmu_event_update(event, hwc, hwc->idx); | ||
364 | } | ||
365 | |||
366 | static struct pmu pmu = { | ||
367 | .pmu_enable = metag_pmu_enable, | ||
368 | .pmu_disable = metag_pmu_disable, | ||
369 | |||
370 | .event_init = metag_pmu_event_init, | ||
371 | |||
372 | .add = metag_pmu_add, | ||
373 | .del = metag_pmu_del, | ||
374 | .start = metag_pmu_start, | ||
375 | .stop = metag_pmu_stop, | ||
376 | .read = metag_pmu_read, | ||
377 | }; | ||
378 | |||
379 | /* Core counter specific functions */ | ||
380 | static const int metag_general_events[] = { | ||
381 | [PERF_COUNT_HW_CPU_CYCLES] = 0x03, | ||
382 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x100, | ||
383 | [PERF_COUNT_HW_CACHE_REFERENCES] = -1, | ||
384 | [PERF_COUNT_HW_CACHE_MISSES] = -1, | ||
385 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1, | ||
386 | [PERF_COUNT_HW_BRANCH_MISSES] = -1, | ||
387 | [PERF_COUNT_HW_BUS_CYCLES] = -1, | ||
388 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = -1, | ||
389 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = -1, | ||
390 | [PERF_COUNT_HW_REF_CPU_CYCLES] = -1, | ||
391 | }; | ||
392 | |||
393 | static const int metag_pmu_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | ||
394 | [C(L1D)] = { | ||
395 | [C(OP_READ)] = { | ||
396 | [C(RESULT_ACCESS)] = 0x08, | ||
397 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
398 | }, | ||
399 | [C(OP_WRITE)] = { | ||
400 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
401 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
402 | }, | ||
403 | [C(OP_PREFETCH)] = { | ||
404 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
405 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
406 | }, | ||
407 | }, | ||
408 | [C(L1I)] = { | ||
409 | [C(OP_READ)] = { | ||
410 | [C(RESULT_ACCESS)] = 0x09, | ||
411 | [C(RESULT_MISS)] = 0x0a, | ||
412 | }, | ||
413 | [C(OP_WRITE)] = { | ||
414 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
415 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
416 | }, | ||
417 | [C(OP_PREFETCH)] = { | ||
418 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
419 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
420 | }, | ||
421 | }, | ||
422 | [C(LL)] = { | ||
423 | [C(OP_READ)] = { | ||
424 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
425 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
426 | }, | ||
427 | [C(OP_WRITE)] = { | ||
428 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
429 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
430 | }, | ||
431 | [C(OP_PREFETCH)] = { | ||
432 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
433 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
434 | }, | ||
435 | }, | ||
436 | [C(DTLB)] = { | ||
437 | [C(OP_READ)] = { | ||
438 | [C(RESULT_ACCESS)] = 0xd0, | ||
439 | [C(RESULT_MISS)] = 0xd2, | ||
440 | }, | ||
441 | [C(OP_WRITE)] = { | ||
442 | [C(RESULT_ACCESS)] = 0xd4, | ||
443 | [C(RESULT_MISS)] = 0xd5, | ||
444 | }, | ||
445 | [C(OP_PREFETCH)] = { | ||
446 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
447 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
448 | }, | ||
449 | }, | ||
450 | [C(ITLB)] = { | ||
451 | [C(OP_READ)] = { | ||
452 | [C(RESULT_ACCESS)] = 0xd1, | ||
453 | [C(RESULT_MISS)] = 0xd3, | ||
454 | }, | ||
455 | [C(OP_WRITE)] = { | ||
456 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
457 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
458 | }, | ||
459 | [C(OP_PREFETCH)] = { | ||
460 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
461 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
462 | }, | ||
463 | }, | ||
464 | [C(BPU)] = { | ||
465 | [C(OP_READ)] = { | ||
466 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
467 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
468 | }, | ||
469 | [C(OP_WRITE)] = { | ||
470 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
471 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
472 | }, | ||
473 | [C(OP_PREFETCH)] = { | ||
474 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
475 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
476 | }, | ||
477 | }, | ||
478 | [C(NODE)] = { | ||
479 | [C(OP_READ)] = { | ||
480 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
481 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
482 | }, | ||
483 | [C(OP_WRITE)] = { | ||
484 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
485 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
486 | }, | ||
487 | [C(OP_PREFETCH)] = { | ||
488 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
489 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
490 | }, | ||
491 | }, | ||
492 | }; | ||
493 | |||
494 | |||
495 | static void _hw_perf_event_destroy(struct perf_event *event) | ||
496 | { | ||
497 | atomic_t *active_events = &metag_pmu->active_events; | ||
498 | struct mutex *pmu_mutex = &metag_pmu->reserve_mutex; | ||
499 | |||
500 | if (atomic_dec_and_mutex_lock(active_events, pmu_mutex)) { | ||
501 | release_pmu_hardware(); | ||
502 | mutex_unlock(pmu_mutex); | ||
503 | } | ||
504 | } | ||
505 | |||
506 | static int _hw_perf_cache_event(int config, int *evp) | ||
507 | { | ||
508 | unsigned long type, op, result; | ||
509 | int ev; | ||
510 | |||
511 | if (!metag_pmu->cache_events) | ||
512 | return -EINVAL; | ||
513 | |||
514 | /* Unpack config */ | ||
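| /* (attr->config is packed as: type | (op << 8) | (result << 16)) */ | ||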
515 | type = config & 0xff; | ||
516 | op = (config >> 8) & 0xff; | ||
517 | result = (config >> 16) & 0xff; | ||
518 | |||
519 | if (type >= PERF_COUNT_HW_CACHE_MAX || | ||
520 | op >= PERF_COUNT_HW_CACHE_OP_MAX || | ||
521 | result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
522 | return -EINVAL; | ||
523 | |||
524 | ev = (*metag_pmu->cache_events)[type][op][result]; | ||
525 | if (ev == 0) | ||
526 | return -EOPNOTSUPP; | ||
527 | if (ev == -1) | ||
528 | return -EINVAL; | ||
529 | *evp = ev; | ||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | static int _hw_perf_event_init(struct perf_event *event) | ||
534 | { | ||
535 | struct perf_event_attr *attr = &event->attr; | ||
536 | struct hw_perf_event *hwc = &event->hw; | ||
537 | int mapping = 0, err; | ||
538 | |||
539 | switch (attr->type) { | ||
540 | case PERF_TYPE_HARDWARE: | ||
541 | if (attr->config >= PERF_COUNT_HW_MAX) | ||
542 | return -EINVAL; | ||
543 | |||
544 | mapping = metag_pmu->event_map(attr->config); | ||
545 | break; | ||
546 | |||
547 | case PERF_TYPE_HW_CACHE: | ||
548 | err = _hw_perf_cache_event(attr->config, &mapping); | ||
549 | if (err) | ||
550 | return err; | ||
551 | break; | ||
552 | } | ||
553 | |||
554 | /* Return early if the event is unsupported */ | ||
555 | if (mapping == -1) | ||
556 | return -EINVAL; | ||
557 | |||
558 | /* | ||
559 | * Early cores have "limited" counters - they have no overflow | ||
560 | * interrupts - and so are unable to do sampling without extra work | ||
561 | * and timer assistance. | ||
562 | */ | ||
563 | if (metag_pmu->max_period == 0) { | ||
564 | if (hwc->sample_period) | ||
565 | return -EINVAL; | ||
566 | } | ||
567 | |||
568 | /* | ||
569 | * Don't assign an index until the event is placed into the hardware. | ||
570 | * -1 signifies that we're still deciding where to put it. On SMP | ||
571 | * systems each core has its own set of counters, so we can't do any | ||
572 | * constraint checking yet. | ||
573 | */ | ||
574 | hwc->idx = -1; | ||
575 | |||
576 | /* Store the event encoding */ | ||
577 | hwc->config |= (unsigned long)mapping; | ||
578 | |||
579 | /* | ||
580 | * For non-sampling runs, limit the sample_period to half of the | ||
581 | * counter width. This way, the new counter value should be less | ||
582 | * likely to overtake the previous one (unless there are IRQ latency | ||
583 | * issues...) | ||
584 | */ | ||
585 | if (metag_pmu->max_period) { | ||
586 | if (!hwc->sample_period) { | ||
587 | hwc->sample_period = metag_pmu->max_period >> 1; | ||
588 | hwc->last_period = hwc->sample_period; | ||
589 | local64_set(&hwc->period_left, hwc->sample_period); | ||
590 | } | ||
591 | } | ||
592 | |||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx) | ||
597 | { | ||
598 | struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events); | ||
599 | unsigned int config = event->config; | ||
600 | unsigned int tmp = config & 0xf0; | ||
601 | unsigned long flags; | ||
602 | |||
603 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | ||
604 | |||
605 | /* | ||
606 | * Check if we're enabling the instruction counter (index of | ||
607 | * MAX_HWEVENTS - 1) | ||
608 | */ | ||
609 | if (METAG_INST_COUNTER == idx) { | ||
610 | WARN_ONCE((config != 0x100), | ||
611 | "invalid configuration (%d) for counter (%d)\n", | ||
612 | config, idx); | ||
613 | |||
614 | /* Reset the cycle count */ | ||
615 | __core_reg_set(TXTACTCYC, 0); | ||
616 | goto unlock; | ||
617 | } | ||
618 | |||
619 | /* Check for a core internal or performance channel event. */ | ||
620 | if (tmp) { | ||
621 | void *perf_addr = (void *)PERF_COUNT(idx); | ||
622 | |||
623 | /* | ||
624 | * Anything other than a cycle count will write the low | ||
625 | * nibble to the correct counter register. | ||
626 | */ | ||
627 | switch (tmp) { | ||
628 | case 0xd0: | ||
629 | perf_addr = (void *)PERF_ICORE(idx); | ||
630 | break; | ||
631 | |||
632 | case 0xf0: | ||
633 | perf_addr = (void *)PERF_CHAN(idx); | ||
634 | break; | ||
635 | } | ||
636 | |||
637 | metag_out32((tmp & 0x0f), perf_addr); | ||
638 | |||
639 | /* | ||
640 | * Now we use the high nibble as the performance event to | ||
641 | * count. | ||
642 | */ | ||
643 | config = tmp >> 4; | ||
644 | } | ||
645 | |||
646 | /* | ||
647 | * Enabled counters start from 0. Early cores clear the count on | ||
648 | * write but newer cores don't, so we make sure that the count is | ||
649 | * set to 0. | ||
650 | */ | ||
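| /* | ||
| * PERF_COUNT layout as used here: bits 31:28 = event id, bits 27:24 | ||
| * = per-thread enable bits, bits 23:0 = the count itself (hence the | ||
| * 0x00ffffff masks used elsewhere in this file). | ||
| */ | ||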
651 | tmp = ((config & 0xf) << 28) | | ||
652 | ((1 << 24) << cpu_2_hwthread_id[get_cpu()]); | ||
653 | metag_out32(tmp, PERF_COUNT(idx)); | ||
654 | unlock: | ||
655 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | ||
656 | } | ||
657 | |||
658 | static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx) | ||
659 | { | ||
660 | struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events); | ||
661 | unsigned int tmp = 0; | ||
662 | unsigned long flags; | ||
663 | |||
664 | /* | ||
665 | * The cycle counter can't be disabled per se, as it's a hardware | ||
666 | * thread register which is always counting. We merely return if this | ||
667 | * is the counter we're attempting to disable. | ||
668 | */ | ||
669 | if (METAG_INST_COUNTER == idx) | ||
670 | return; | ||
671 | |||
672 | /* | ||
673 | * The counter value _should_ have been read prior to disabling, | ||
674 | * as if we're running on an early core then the value gets reset to | ||
675 | * 0, and any read after that would be useless. On the newer cores, | ||
676 | * however, it's better to read-modify-write this for purposes of | ||
677 | * the overflow interrupt. | ||
678 | * Here we remove the thread id AND the event nibble (there are at | ||
679 | * least two events that are core global and ignore | ||
680 | * the thread id mask). This only works because we don't mix thread | ||
681 | * performance counts, and event 0x00 requires a thread id mask! | ||
682 | */ | ||
683 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | ||
684 | |||
685 | tmp = metag_in32(PERF_COUNT(idx)); | ||
686 | tmp &= 0x00ffffff; | ||
687 | metag_out32(tmp, PERF_COUNT(idx)); | ||
688 | |||
689 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | ||
690 | } | ||
691 | |||
692 | static u64 metag_pmu_read_counter(int idx) | ||
693 | { | ||
694 | u32 tmp = 0; | ||
695 | |||
696 | /* The act of reading the cycle counter also clears it */ | ||
697 | if (METAG_INST_COUNTER == idx) { | ||
698 | __core_reg_swap(TXTACTCYC, tmp); | ||
699 | goto out; | ||
700 | } | ||
701 | |||
702 | tmp = metag_in32(PERF_COUNT(idx)) & 0x00ffffff; | ||
703 | out: | ||
704 | return tmp; | ||
705 | } | ||
706 | |||
707 | static void metag_pmu_write_counter(int idx, u32 val) | ||
708 | { | ||
709 | struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events); | ||
710 | u32 tmp = 0; | ||
711 | unsigned long flags; | ||
712 | |||
713 | /* | ||
714 | * This _shouldn't_ happen, but if it does, then we can just | ||
715 | * ignore the write, as the register is read-only and clear-on-write. | ||
716 | */ | ||
717 | if (METAG_INST_COUNTER == idx) | ||
718 | return; | ||
719 | |||
720 | /* | ||
721 | * We'll keep the thread mask and event id, and just update the | ||
722 | * counter itself. Also, we should bound the value to 24 bits. | ||
723 | */ | ||
724 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | ||
725 | |||
726 | val &= 0x00ffffff; | ||
727 | tmp = metag_in32(PERF_COUNT(idx)) & 0xff000000; | ||
728 | val |= tmp; | ||
729 | metag_out32(val, PERF_COUNT(idx)); | ||
730 | |||
731 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | ||
732 | } | ||
733 | |||
734 | static int metag_pmu_event_map(int idx) | ||
735 | { | ||
736 | return metag_general_events[idx]; | ||
737 | } | ||
738 | |||
739 | static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev) | ||
740 | { | ||
741 | int idx = (int)dev; | ||
742 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
743 | struct perf_event *event = cpuhw->events[idx]; | ||
744 | struct hw_perf_event *hwc = &event->hw; | ||
745 | struct pt_regs *regs = get_irq_regs(); | ||
746 | struct perf_sample_data sampledata; | ||
747 | unsigned long flags; | ||
748 | u32 counter = 0; | ||
749 | |||
750 | /* | ||
751 | * We need to stop the core temporarily from generating another | ||
752 | * interrupt while we disable this counter. However, we don't want | ||
753 | * to flag the counter as free. | ||
754 | */ | ||
755 | __global_lock2(flags); | ||
756 | counter = metag_in32(PERF_COUNT(idx)); | ||
757 | metag_out32((counter & 0x00ffffff), PERF_COUNT(idx)); | ||
758 | __global_unlock2(flags); | ||
759 | |||
760 | /* Update the counts and reset the sample period */ | ||
761 | metag_pmu_event_update(event, hwc, idx); | ||
762 | perf_sample_data_init(&sampledata, 0, hwc->last_period); | ||
763 | metag_pmu_event_set_period(event, hwc, idx); | ||
764 | |||
765 | /* | ||
766 | * Enable the counter again once core overflow processing has | ||
767 | * completed. | ||
768 | */ | ||
769 | if (!perf_event_overflow(event, &sampledata, regs)) | ||
770 | metag_out32(counter, PERF_COUNT(idx)); | ||
771 | |||
772 | return IRQ_HANDLED; | ||
773 | } | ||
774 | |||
775 | static struct metag_pmu _metag_pmu = { | ||
776 | .handle_irq = metag_pmu_counter_overflow, | ||
777 | .enable = metag_pmu_enable_counter, | ||
778 | .disable = metag_pmu_disable_counter, | ||
779 | .read = metag_pmu_read_counter, | ||
780 | .write = metag_pmu_write_counter, | ||
781 | .event_map = metag_pmu_event_map, | ||
782 | .cache_events = &metag_pmu_cache_events, | ||
783 | .max_period = MAX_PERIOD, | ||
784 | .max_events = MAX_HWEVENTS, | ||
785 | }; | ||
786 | |||
787 | /* PMU CPU hotplug notifier */ | ||
788 | static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b, | ||
789 | unsigned long action, void *hcpu) | ||
790 | { | ||
791 | unsigned int cpu = (unsigned int)hcpu; | ||
792 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | ||
793 | |||
794 | if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) | ||
795 | return NOTIFY_DONE; | ||
796 | |||
797 | memset(cpuc, 0, sizeof(struct cpu_hw_events)); | ||
798 | raw_spin_lock_init(&cpuc->pmu_lock); | ||
799 | |||
800 | return NOTIFY_OK; | ||
801 | } | ||
802 | |||
803 | static struct notifier_block __cpuinitdata metag_pmu_notifier = { | ||
804 | .notifier_call = metag_pmu_cpu_notify, | ||
805 | }; | ||
806 | |||
807 | /* PMU Initialisation */ | ||
808 | static int __init init_hw_perf_events(void) | ||
809 | { | ||
810 | int ret = 0, cpu; | ||
811 | u32 version = *(u32 *)METAC_ID; | ||
812 | int major = (version & METAC_ID_MAJOR_BITS) >> METAC_ID_MAJOR_S; | ||
813 | int min_rev = (version & (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) | ||
814 | >> METAC_ID_REV_S; | ||
815 | |||
816 | /* Not a Meta 2 core, so not supported */ | ||
817 | if (0x02 > major) { | ||
818 | pr_info("no hardware counter support available\n"); | ||
819 | goto out; | ||
820 | } else if (0x02 == major) { | ||
821 | metag_pmu = &_metag_pmu; | ||
822 | |||
823 | if (min_rev < 0x0104) { | ||
824 | /* | ||
825 | * A core without overflow interrupts, and clear-on- | ||
826 | * write counters. | ||
827 | */ | ||
828 | metag_pmu->handle_irq = NULL; | ||
829 | metag_pmu->write = NULL; | ||
830 | metag_pmu->max_period = 0; | ||
831 | } | ||
832 | |||
833 | metag_pmu->name = "Meta 2"; | ||
834 | metag_pmu->version = version; | ||
835 | metag_pmu->pmu = pmu; | ||
836 | } | ||
837 | |||
838 | pr_info("enabled with %s PMU driver, %d counters available\n", | ||
839 | metag_pmu->name, metag_pmu->max_events); | ||
840 | |||
841 | /* Initialise the active events and reservation mutex */ | ||
842 | atomic_set(&metag_pmu->active_events, 0); | ||
843 | mutex_init(&metag_pmu->reserve_mutex); | ||
844 | |||
845 | /* Clear the counters */ | ||
846 | metag_out32(0, PERF_COUNT(0)); | ||
847 | metag_out32(0, PERF_COUNT(1)); | ||
848 | |||
849 | for_each_possible_cpu(cpu) { | ||
850 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | ||
851 | |||
852 | memset(cpuc, 0, sizeof(struct cpu_hw_events)); | ||
853 | raw_spin_lock_init(&cpuc->pmu_lock); | ||
854 | } | ||
855 | |||
856 | register_cpu_notifier(&metag_pmu_notifier); | ||
857 | ret = perf_pmu_register(&pmu, (char *)metag_pmu->name, PERF_TYPE_RAW); | ||
858 | out: | ||
859 | return ret; | ||
860 | } | ||
861 | early_initcall(init_hw_perf_events); | ||
diff --git a/arch/metag/kernel/perf/perf_event.h b/arch/metag/kernel/perf/perf_event.h new file mode 100644 index 000000000000..fd10a1345b67 --- /dev/null +++ b/arch/metag/kernel/perf/perf_event.h | |||
@@ -0,0 +1,106 @@ | |||
1 | /* | ||
2 | * Meta performance counter support. | ||
3 | * Copyright (C) 2012 Imagination Technologies Ltd | ||
4 | * | ||
5 | * This file is subject to the terms and conditions of the GNU General Public | ||
6 | * License. See the file "COPYING" in the main directory of this archive | ||
7 | * for more details. | ||
8 | */ | ||
9 | |||
10 | #ifndef METAG_PERF_EVENT_H_ | ||
11 | #define METAG_PERF_EVENT_H_ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/perf_event.h> | ||
16 | |||
17 | /* For performance counter definitions */ | ||
18 | #include <asm/metag_mem.h> | ||
19 | |||
20 | /* | ||
21 | * The Meta core has two performance counters, with 24-bit resolution. Newer | ||
22 | * cores generate an overflow interrupt on transition from 0xffffff to 0. | ||
23 | * | ||
24 | * Each counter consists of the counter id, hardware thread id, and the count | ||
25 | * itself; each counter can be assigned to multiple hardware threads at any | ||
26 | * one time, with the returned count being an aggregate of events. A small | ||
27 | * number of events are thread global, i.e. they count the aggregate of all | ||
28 | * threads' events, regardless of the thread selected. | ||
29 | * | ||
30 | * Newer cores can store an arbitrary 24-bit number in the counter, whereas | ||
31 | * older cores will clear the counter bits on write. | ||
32 | * | ||
33 | * We also have a pseudo-counter in the form of the thread active cycles | ||
34 | * counter (which, incidentally, is backed by the TXTACTCYC register). | ||
35 | */ | ||
36 | |||
37 | #define MAX_HWEVENTS 3 | ||
38 | #define MAX_PERIOD ((1UL << 24) - 1) | ||
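| /* Index of the pseudo-counter backed by the TXTACTCYC core register. */ | ||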
39 | #define METAG_INST_COUNTER (MAX_HWEVENTS - 1) | ||
40 | |||
41 | /** | ||
42 | * struct cpu_hw_events - a processor core's performance events | ||
43 | * @events: an array of perf_events active for a given index. | ||
44 | * @used_mask: a bitmap of in-use counters. | ||
45 | * @pmu_lock: a perf counter lock | ||
46 | * | ||
47 | * This is a per-cpu/core structure that maintains a record of its | ||
48 | * performance counters' state. | ||
49 | */ | ||
50 | struct cpu_hw_events { | ||
51 | struct perf_event *events[MAX_HWEVENTS]; | ||
52 | unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; | ||
53 | raw_spinlock_t pmu_lock; | ||
54 | }; | ||
55 | |||
56 | /** | ||
57 | * struct metag_pmu - the Meta PMU structure | ||
58 | * @pmu: core pmu structure | ||
59 | * @name: pmu name | ||
60 | * @version: core version | ||
61 | * @handle_irq: overflow interrupt handler | ||
62 | * @enable: enable a counter | ||
63 | * @disable: disable a counter | ||
64 | * @read: read the value of a counter | ||
65 | * @write: write a value to a counter | ||
66 | * @event_map: kernel event to counter event id map | ||
67 | * @cache_events: kernel cache counter to core cache counter map | ||
68 | * @max_period: maximum value of the counter before overflow | ||
69 | * @max_events: maximum number of counters available at any one time | ||
70 | * @active_events: number of active counters | ||
71 | * @reserve_mutex: counter reservation mutex | ||
72 | * | ||
73 | * This describes the main functionality and data used by the performance | ||
74 | * event core. | ||
75 | */ | ||
76 | struct metag_pmu { | ||
77 | struct pmu pmu; | ||
78 | const char *name; | ||
79 | u32 version; | ||
80 | irqreturn_t (*handle_irq)(int irq_num, void *dev); | ||
81 | void (*enable)(struct hw_perf_event *evt, int idx); | ||
82 | void (*disable)(struct hw_perf_event *evt, int idx); | ||
83 | u64 (*read)(int idx); | ||
84 | void (*write)(int idx, u32 val); | ||
85 | int (*event_map)(int idx); | ||
86 | const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] | ||
87 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
88 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
89 | u32 max_period; | ||
90 | int max_events; | ||
91 | atomic_t active_events; | ||
92 | struct mutex reserve_mutex; | ||
93 | }; | ||
94 | |||
95 | /* Convenience macros for accessing the perf counters */ | ||
97 | #define PERF_COUNT(x) (PERF_COUNT0 + (sizeof(u64) * (x))) | ||
98 | #define PERF_ICORE(x) (PERF_ICORE0 + (sizeof(u64) * (x))) | ||
99 | #define PERF_CHAN(x) (PERF_CHAN0 + (sizeof(u64) * (x))) | ||
100 | |||
101 | /* Cache index macros */ | ||
102 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
103 | #define CACHE_OP_UNSUPPORTED 0xfffe | ||
104 | #define CACHE_OP_NONSENSE 0xffff | ||
105 | |||
106 | #endif | ||
diff --git a/arch/metag/kernel/perf_callchain.c b/arch/metag/kernel/perf_callchain.c new file mode 100644 index 000000000000..315633461a94 --- /dev/null +++ b/arch/metag/kernel/perf_callchain.c | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * Perf callchain handling code. | ||
3 | * | ||
4 | * Based on the ARM perf implementation. | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/perf_event.h> | ||
10 | #include <linux/uaccess.h> | ||
11 | #include <asm/ptrace.h> | ||
12 | #include <asm/stacktrace.h> | ||
13 | |||
14 | static bool is_valid_call(unsigned long calladdr) | ||
15 | { | ||
16 | unsigned int callinsn; | ||
17 | |||
18 | /* Check the possible return address is aligned. */ | ||
19 | if (!(calladdr & 0x3)) { | ||
20 | if (!get_user(callinsn, (unsigned int *)calladdr)) { | ||
21 | /* Check for CALLR or SWAP PC,D1RtP. */ | ||
22 | if ((callinsn & 0xff000000) == 0xab000000 || | ||
23 | callinsn == 0xa3200aa0) | ||
24 | return true; | ||
25 | } | ||
26 | } | ||
27 | return false; | ||
28 | } | ||
29 | |||
30 | static struct metag_frame __user * | ||
31 | user_backtrace(struct metag_frame __user *user_frame, | ||
32 | struct perf_callchain_entry *entry) | ||
33 | { | ||
34 | struct metag_frame frame; | ||
35 | unsigned long calladdr; | ||
36 | |||
37 | /* We cannot rely on having frame pointers in user code. */ | ||
38 | while (1) { | ||
39 | /* Also check accessibility of one struct frame beyond */ | ||
40 | if (!access_ok(VERIFY_READ, user_frame, sizeof(frame))) | ||
41 | return 0; | ||
42 | if (__copy_from_user_inatomic(&frame, user_frame, | ||
43 | sizeof(frame))) | ||
44 | return 0; | ||
45 | |||
46 | --user_frame; | ||
47 | |||
48 | calladdr = frame.lr - 4; | ||
49 | if (is_valid_call(calladdr)) { | ||
50 | perf_callchain_store(entry, calladdr); | ||
51 | return user_frame; | ||
52 | } | ||
53 | } | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | void | ||
59 | perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) | ||
60 | { | ||
61 | unsigned long sp = regs->ctx.AX[0].U0; | ||
62 | struct metag_frame __user *frame; | ||
63 | |||
64 | frame = (struct metag_frame __user *)sp; | ||
65 | |||
66 | --frame; | ||
67 | |||
68 | while ((entry->nr < PERF_MAX_STACK_DEPTH) && frame) | ||
69 | frame = user_backtrace(frame, entry); | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * Gets called by walk_stackframe() for every stackframe. This will be called | ||
74 | * whilst unwinding the stackframe and is like a subroutine return, so we use | ||
75 | * the PC. | ||
76 | */ | ||
77 | static int | ||
78 | callchain_trace(struct stackframe *fr, | ||
79 | void *data) | ||
80 | { | ||
81 | struct perf_callchain_entry *entry = data; | ||
82 | perf_callchain_store(entry, fr->pc); | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | void | ||
87 | perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) | ||
88 | { | ||
89 | struct stackframe fr; | ||
90 | |||
91 | fr.fp = regs->ctx.AX[1].U0; | ||
92 | fr.sp = regs->ctx.AX[0].U0; | ||
93 | fr.lr = regs->ctx.DX[4].U1; | ||
94 | fr.pc = regs->ctx.CurrPC; | ||
95 | walk_stackframe(&fr, callchain_trace, entry); | ||
96 | } | ||
diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c new file mode 100644 index 000000000000..c6efe62e5b76 --- /dev/null +++ b/arch/metag/kernel/process.c | |||
@@ -0,0 +1,461 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005,2006,2007,2008,2009,2010,2011 Imagination Technologies | ||
3 | * | ||
4 | * This file contains the architecture-dependent parts of process handling. | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/errno.h> | ||
9 | #include <linux/export.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/unistd.h> | ||
14 | #include <linux/ptrace.h> | ||
15 | #include <linux/user.h> | ||
16 | #include <linux/reboot.h> | ||
17 | #include <linux/elfcore.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/tick.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/mman.h> | ||
22 | #include <linux/pm.h> | ||
23 | #include <linux/syscalls.h> | ||
24 | #include <linux/uaccess.h> | ||
25 | #include <asm/core_reg.h> | ||
26 | #include <asm/user_gateway.h> | ||
27 | #include <asm/tcm.h> | ||
28 | #include <asm/traps.h> | ||
29 | #include <asm/switch_to.h> | ||
30 | |||
31 | /* | ||
32 | * Wait for the next interrupt and enable local interrupts | ||
33 | */ | ||
34 | static inline void arch_idle(void) | ||
35 | { | ||
36 | int tmp; | ||
37 | |||
38 | /* | ||
39 | * Quickly jump straight into the interrupt entry point without actually | ||
40 | * triggering an interrupt. When TXSTATI gets read the processor will | ||
41 | * block until an interrupt is triggered. | ||
42 | */ | ||
43 | asm volatile (/* Switch into ISTAT mode */ | ||
44 | "RTH\n\t" | ||
45 | /* Enable local interrupts */ | ||
46 | "MOV TXMASKI, %1\n\t" | ||
47 | /* | ||
48 | * We can't directly "SWAP PC, PCX", so we swap via a | ||
49 | * temporary. Essentially we do: | ||
50 | * PCX_new = 1f (the place to continue execution) | ||
51 | * PC = PCX_old | ||
52 | */ | ||
53 | "ADD %0, CPC0, #(1f-.)\n\t" | ||
54 | "SWAP PCX, %0\n\t" | ||
55 | "MOV PC, %0\n" | ||
56 | /* Continue execution here with interrupts enabled */ | ||
57 | "1:" | ||
58 | : "=a" (tmp) | ||
59 | : "r" (get_trigger_mask())); | ||
60 | } | ||
61 | |||
62 | void cpu_idle(void) | ||
63 | { | ||
64 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
65 | |||
66 | while (1) { | ||
67 | tick_nohz_idle_enter(); | ||
68 | rcu_idle_enter(); | ||
69 | |||
70 | while (!need_resched()) { | ||
71 | /* | ||
72 | * We need to disable interrupts here to ensure we don't | ||
73 | * miss a wakeup call. | ||
74 | */ | ||
75 | local_irq_disable(); | ||
76 | if (!need_resched()) { | ||
77 | #ifdef CONFIG_HOTPLUG_CPU | ||
78 | if (cpu_is_offline(smp_processor_id())) | ||
79 | cpu_die(); | ||
80 | #endif | ||
81 | arch_idle(); | ||
82 | } else { | ||
83 | local_irq_enable(); | ||
84 | } | ||
85 | } | ||
86 | |||
87 | rcu_idle_exit(); | ||
88 | tick_nohz_idle_exit(); | ||
89 | schedule_preempt_disabled(); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | void (*pm_power_off)(void); | ||
94 | EXPORT_SYMBOL(pm_power_off); | ||
95 | |||
96 | void (*soc_restart)(char *cmd); | ||
97 | void (*soc_halt)(void); | ||
98 | |||
99 | void machine_restart(char *cmd) | ||
100 | { | ||
101 | if (soc_restart) | ||
102 | soc_restart(cmd); | ||
103 | hard_processor_halt(HALT_OK); | ||
104 | } | ||
105 | |||
106 | void machine_halt(void) | ||
107 | { | ||
108 | if (soc_halt) | ||
109 | soc_halt(); | ||
110 | smp_send_stop(); | ||
111 | hard_processor_halt(HALT_OK); | ||
112 | } | ||
113 | |||
114 | void machine_power_off(void) | ||
115 | { | ||
116 | if (pm_power_off) | ||
117 | pm_power_off(); | ||
118 | smp_send_stop(); | ||
119 | hard_processor_halt(HALT_OK); | ||
120 | } | ||
121 | |||
122 | #define FLAG_Z 0x8 | ||
123 | #define FLAG_N 0x4 | ||
124 | #define FLAG_O 0x2 | ||
125 | #define FLAG_C 0x1 | ||
126 | |||
127 | void show_regs(struct pt_regs *regs) | ||
128 | { | ||
129 | int i; | ||
130 | const char *AX0_names[] = {"A0StP", "A0FrP"}; | ||
131 | const char *AX1_names[] = {"A1GbP", "A1LbP"}; | ||
132 | |||
133 | const char *DX0_names[] = { | ||
134 | "D0Re0", | ||
135 | "D0Ar6", | ||
136 | "D0Ar4", | ||
137 | "D0Ar2", | ||
138 | "D0FrT", | ||
139 | "D0.5 ", | ||
140 | "D0.6 ", | ||
141 | "D0.7 " | ||
142 | }; | ||
143 | |||
144 | const char *DX1_names[] = { | ||
145 | "D1Re0", | ||
146 | "D1Ar5", | ||
147 | "D1Ar3", | ||
148 | "D1Ar1", | ||
149 | "D1RtP", | ||
150 | "D1.5 ", | ||
151 | "D1.6 ", | ||
152 | "D1.7 " | ||
153 | }; | ||
154 | |||
155 | pr_info(" pt_regs @ %p\n", regs); | ||
156 | pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask); | ||
157 | pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags, | ||
158 | regs->ctx.Flags & FLAG_Z ? 'Z' : 'z', | ||
159 | regs->ctx.Flags & FLAG_N ? 'N' : 'n', | ||
160 | regs->ctx.Flags & FLAG_O ? 'O' : 'o', | ||
161 | regs->ctx.Flags & FLAG_C ? 'C' : 'c'); | ||
162 | pr_info(" TXRPT = 0x%08x\n", regs->ctx.CurrRPT); | ||
163 | pr_info(" PC = 0x%08x\n", regs->ctx.CurrPC); | ||
164 | |||
165 | /* AX regs */ | ||
166 | for (i = 0; i < 2; i++) { | ||
167 | pr_info(" %s = 0x%08x ", | ||
168 | AX0_names[i], | ||
169 | regs->ctx.AX[i].U0); | ||
170 | printk(" %s = 0x%08x\n", | ||
171 | AX1_names[i], | ||
172 | regs->ctx.AX[i].U1); | ||
173 | } | ||
174 | |||
175 | if (regs->ctx.SaveMask & TBICTX_XEXT_BIT) | ||
176 | pr_warn(" Extended state present - AX2.[01] will be WRONG\n"); | ||
177 | |||
178 | /* Special place with AXx.2 */ | ||
179 | pr_info(" A0.2 = 0x%08x ", | ||
180 | regs->ctx.Ext.AX2.U0); | ||
181 | printk(" A1.2 = 0x%08x\n", | ||
182 | regs->ctx.Ext.AX2.U1); | ||
183 | |||
184 | /* 'extended' AX regs (nominally, just AXx.3) */ | ||
185 | for (i = 0; i < (TBICTX_AX_REGS - 3); i++) { | ||
186 | pr_info(" A0.%d = 0x%08x ", i + 3, regs->ctx.AX3[i].U0); | ||
187 | printk(" A1.%d = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1); | ||
188 | } | ||
189 | |||
190 | for (i = 0; i < 8; i++) { | ||
191 | pr_info(" %s = 0x%08x ", DX0_names[i], regs->ctx.DX[i].U0); | ||
192 | printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1); | ||
193 | } | ||
194 | |||
195 | show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs); | ||
196 | } | ||
197 | |||
198 | int copy_thread(unsigned long clone_flags, unsigned long usp, | ||
199 | unsigned long arg, struct task_struct *tsk) | ||
200 | { | ||
201 | struct pt_regs *childregs = task_pt_regs(tsk); | ||
202 | void *kernel_context = ((void *) childregs + | ||
203 | sizeof(struct pt_regs)); | ||
204 | unsigned long global_base; | ||
205 | |||
206 | BUG_ON(((unsigned long)childregs) & 0x7); | ||
207 | BUG_ON(((unsigned long)kernel_context) & 0x7); | ||
208 | |||
209 | memset(&tsk->thread.kernel_context, 0, | ||
210 | sizeof(tsk->thread.kernel_context)); | ||
211 | |||
212 | tsk->thread.kernel_context = __TBISwitchInit(kernel_context, | ||
213 | ret_from_fork, | ||
214 | 0, 0); | ||
215 | |||
216 | if (unlikely(tsk->flags & PF_KTHREAD)) { | ||
217 | /* | ||
218 | * Make sure we don't leak any kernel data to the child's regs | ||
219 | * if the kernel thread becomes a userspace thread in the future | ||
220 | */ | ||
221 | memset(childregs, 0 , sizeof(struct pt_regs)); | ||
222 | |||
223 | global_base = __core_reg_get(A1GbP); | ||
224 | childregs->ctx.AX[0].U1 = (unsigned long) global_base; | ||
225 | childregs->ctx.AX[0].U0 = (unsigned long) kernel_context; | ||
226 | /* Set D1Ar1=arg and D1RtP=usp (fn) */ | ||
227 | childregs->ctx.DX[4].U1 = usp; | ||
228 | childregs->ctx.DX[3].U1 = arg; | ||
229 | tsk->thread.int_depth = 2; | ||
230 | return 0; | ||
231 | } | ||
232 | /* | ||
233 | * Get a pointer to where the new child's register block should have | ||
234 | * been pushed. | ||
235 | * The Meta's stack grows upwards, and the context is the first | ||
236 | * thing to be pushed by TBX (phew) | ||
237 | */ | ||
238 | *childregs = *current_pt_regs(); | ||
239 | /* Set the correct stack for the clone mode */ | ||
240 | if (usp) | ||
241 | childregs->ctx.AX[0].U0 = ALIGN(usp, 8); | ||
242 | tsk->thread.int_depth = 1; | ||
243 | |||
244 | /* set return value for child process */ | ||
245 | childregs->ctx.DX[0].U0 = 0; | ||
246 | |||
247 | /* The TLS pointer is passed as an argument to sys_clone. */ | ||
248 | if (clone_flags & CLONE_SETTLS) | ||
249 | tsk->thread.tls_ptr = | ||
250 | (__force void __user *)childregs->ctx.DX[1].U1; | ||
251 | |||
252 | #ifdef CONFIG_METAG_FPU | ||
253 | if (tsk->thread.fpu_context) { | ||
254 | struct meta_fpu_context *ctx; | ||
255 | |||
256 | ctx = kmemdup(tsk->thread.fpu_context, | ||
257 | sizeof(struct meta_fpu_context), GFP_ATOMIC); | ||
258 | tsk->thread.fpu_context = ctx; | ||
259 | } | ||
260 | #endif | ||
261 | |||
262 | #ifdef CONFIG_METAG_DSP | ||
263 | if (tsk->thread.dsp_context) { | ||
264 | struct meta_ext_context *ctx; | ||
265 | int i; | ||
266 | |||
267 | ctx = kmemdup(tsk->thread.dsp_context, | ||
268 | sizeof(struct meta_ext_context), GFP_ATOMIC); | ||
269 | for (i = 0; i < 2; i++) | ||
270 | ctx->ram[i] = kmemdup(ctx->ram[i], ctx->ram_sz[i], | ||
271 | GFP_ATOMIC); | ||
272 | tsk->thread.dsp_context = ctx; | ||
273 | } | ||
274 | #endif | ||
275 | |||
276 | return 0; | ||
277 | } | ||
278 | |||
279 | #ifdef CONFIG_METAG_FPU | ||
280 | static void alloc_fpu_context(struct thread_struct *thread) | ||
281 | { | ||
282 | thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context), | ||
283 | GFP_ATOMIC); | ||
284 | } | ||
285 | |||
286 | static void clear_fpu(struct thread_struct *thread) | ||
287 | { | ||
288 | thread->user_flags &= ~TBICTX_FPAC_BIT; | ||
289 | kfree(thread->fpu_context); | ||
290 | thread->fpu_context = NULL; | ||
291 | } | ||
292 | #else | ||
293 | static void clear_fpu(struct thread_struct *thread) | ||
294 | { | ||
295 | } | ||
296 | #endif | ||
297 | |||
298 | #ifdef CONFIG_METAG_DSP | ||
299 | static void clear_dsp(struct thread_struct *thread) | ||
300 | { | ||
301 | if (thread->dsp_context) { | ||
302 | kfree(thread->dsp_context->ram[0]); | ||
303 | kfree(thread->dsp_context->ram[1]); | ||
304 | |||
305 | kfree(thread->dsp_context); | ||
306 | |||
307 | thread->dsp_context = NULL; | ||
308 | } | ||
309 | |||
310 | __core_reg_set(D0.8, 0); | ||
311 | } | ||
312 | #else | ||
313 | static void clear_dsp(struct thread_struct *thread) | ||
314 | { | ||
315 | } | ||
316 | #endif | ||
317 | |||
318 | struct task_struct *__sched __switch_to(struct task_struct *prev, | ||
319 | struct task_struct *next) | ||
320 | { | ||
321 | TBIRES to, from; | ||
322 | |||
323 | to.Switch.pCtx = next->thread.kernel_context; | ||
324 | to.Switch.pPara = prev; | ||
325 | |||
326 | #ifdef CONFIG_METAG_FPU | ||
327 | if (prev->thread.user_flags & TBICTX_FPAC_BIT) { | ||
328 | struct pt_regs *regs = task_pt_regs(prev); | ||
329 | TBIRES state; | ||
330 | |||
331 | state.Sig.SaveMask = prev->thread.user_flags; | ||
332 | state.Sig.pCtx = ®s->ctx; | ||
333 | |||
334 | if (!prev->thread.fpu_context) | ||
335 | alloc_fpu_context(&prev->thread); | ||
336 | if (prev->thread.fpu_context) | ||
337 | __TBICtxFPUSave(state, prev->thread.fpu_context); | ||
338 | } | ||
339 | /* | ||
340 | * Force a restore of the FPU context next time this process is | ||
341 | * scheduled. | ||
342 | */ | ||
343 | if (prev->thread.fpu_context) | ||
344 | prev->thread.fpu_context->needs_restore = true; | ||
345 | #endif | ||
346 | |||
347 | |||
348 | from = __TBISwitch(to, &prev->thread.kernel_context); | ||
349 | |||
350 | /* Restore TLS pointer for this process. */ | ||
351 | set_gateway_tls(current->thread.tls_ptr); | ||
352 | |||
353 | return (struct task_struct *) from.Switch.pPara; | ||
354 | } | ||
355 | |||
356 | void flush_thread(void) | ||
357 | { | ||
358 | clear_fpu(¤t->thread); | ||
359 | clear_dsp(¤t->thread); | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * Free current thread data structures etc. | ||
364 | */ | ||
365 | void exit_thread(void) | ||
366 | { | ||
367 | clear_fpu(¤t->thread); | ||
368 | clear_dsp(¤t->thread); | ||
369 | } | ||
370 | |||
371 | /* TODO: unwind the kernel stack here to figure out | ||
372 | * where we went to sleep. */ | ||
373 | unsigned long get_wchan(struct task_struct *p) | ||
374 | { | ||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | ||
379 | { | ||
380 | /* Returning 0 indicates that the FPU state was not stored (as it was | ||
381 | * not in use) */ | ||
382 | return 0; | ||
383 | } | ||
384 | |||
385 | #ifdef CONFIG_METAG_USER_TCM | ||
386 | |||
387 | #define ELF_MIN_ALIGN PAGE_SIZE | ||
388 | |||
389 | #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1)) | ||
390 | #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1)) | ||
391 | #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1)) | ||
392 | |||
393 | #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE) | ||
394 | |||
395 | unsigned long __metag_elf_map(struct file *filep, unsigned long addr, | ||
396 | struct elf_phdr *eppnt, int prot, int type, | ||
397 | unsigned long total_size) | ||
398 | { | ||
399 | unsigned long map_addr, size; | ||
400 | unsigned long page_off = ELF_PAGEOFFSET(eppnt->p_vaddr); | ||
401 | unsigned long raw_size = eppnt->p_filesz + page_off; | ||
402 | unsigned long off = eppnt->p_offset - page_off; | ||
403 | unsigned int tcm_tag; | ||
404 | addr = ELF_PAGESTART(addr); | ||
405 | size = ELF_PAGEALIGN(raw_size); | ||
406 | |||
407 | /* mmap() will return -EINVAL if given a zero size, but a | ||
408 | * segment with zero filesize is perfectly valid */ | ||
409 | if (!size) | ||
410 | return addr; | ||
411 | |||
412 | tcm_tag = tcm_lookup_tag(addr); | ||
413 | |||
414 | if (tcm_tag != TCM_INVALID_TAG) | ||
415 | type &= ~MAP_FIXED; | ||
416 | |||
417 | /* | ||
418 | * total_size is the size of the ELF (interpreter) image. | ||
419 | * The _first_ mmap needs to know the full size, otherwise | ||
420 | * randomization might put this image into an overlapping | ||
421 | * position with the ELF binary image. (since size < total_size) | ||
422 | * So we first map the 'big' image - and unmap the remainder at | ||
423 | * the end (the unmap is needed for ELF images with holes). | ||
424 | */ | ||
425 | if (total_size) { | ||
426 | total_size = ELF_PAGEALIGN(total_size); | ||
427 | map_addr = vm_mmap(filep, addr, total_size, prot, type, off); | ||
428 | if (!BAD_ADDR(map_addr)) | ||
429 | vm_munmap(map_addr+size, total_size-size); | ||
430 | } else | ||
431 | map_addr = vm_mmap(filep, addr, size, prot, type, off); | ||
432 | |||
433 | if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) { | ||
434 | struct tcm_allocation *tcm; | ||
435 | unsigned long tcm_addr; | ||
436 | |||
437 | tcm = kmalloc(sizeof(*tcm), GFP_KERNEL); | ||
438 | if (!tcm) | ||
439 | return -ENOMEM; | ||
440 | |||
441 | tcm_addr = tcm_alloc(tcm_tag, raw_size); | ||
442 | if (tcm_addr != addr) { | ||
443 | kfree(tcm); | ||
444 | return -ENOMEM; | ||
445 | } | ||
446 | |||
447 | tcm->tag = tcm_tag; | ||
448 | tcm->addr = tcm_addr; | ||
449 | tcm->size = raw_size; | ||
450 | |||
451 | list_add(&tcm->list, ¤t->mm->context.tcm); | ||
452 | |||
453 | eppnt->p_vaddr = map_addr; | ||
454 | if (copy_from_user((void *) addr, (void __user *) map_addr, | ||
455 | raw_size)) | ||
456 | return -EFAULT; | ||
457 | } | ||
458 | |||
459 | return map_addr; | ||
460 | } | ||
461 | #endif | ||
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c new file mode 100644 index 000000000000..47a8828615a5 --- /dev/null +++ b/arch/metag/kernel/ptrace.c | |||
@@ -0,0 +1,380 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005-2012 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General | ||
5 | * Public License. See the file COPYING in the main directory of | ||
6 | * this archive for more details. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/ptrace.h> | ||
13 | #include <linux/user.h> | ||
14 | #include <linux/regset.h> | ||
15 | #include <linux/tracehook.h> | ||
16 | #include <linux/elf.h> | ||
17 | #include <linux/uaccess.h> | ||
18 | #include <trace/syscall.h> | ||
19 | |||
20 | #define CREATE_TRACE_POINTS | ||
21 | #include <trace/events/syscalls.h> | ||
22 | |||
23 | /* | ||
24 | * user_regset definitions. | ||
25 | */ | ||
26 | |||
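| /* | ||
| * The GP regset exported below is 30 words: 0-15 D{0-1}.{0-7}, | ||
| * 16-19 A{0-1}.{0-1}, 20-21 A{0-1}.2, 22-23 A{0-1}.3, 24 PC, | ||
| * 25 TXSTATUS, 26-28 TXRPT/TXBPOBITS/TXMODE, 29 padding. | ||
| */ | ||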
27 | int metag_gp_regs_copyout(const struct pt_regs *regs, | ||
28 | unsigned int pos, unsigned int count, | ||
29 | void *kbuf, void __user *ubuf) | ||
30 | { | ||
31 | const void *ptr; | ||
32 | unsigned long data; | ||
33 | int ret; | ||
34 | |||
35 | /* D{0-1}.{0-7} */ | ||
36 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
37 | regs->ctx.DX, 0, 4*16); | ||
38 | if (ret) | ||
39 | goto out; | ||
40 | /* A{0-1}.{0-1} */ | ||
41 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
42 | regs->ctx.AX, 4*16, 4*20); | ||
43 | if (ret) | ||
44 | goto out; | ||
45 | /* A{0-1}.2 */ | ||
46 | if (regs->ctx.SaveMask & TBICTX_XEXT_BIT) | ||
47 | ptr = regs->ctx.Ext.Ctx.pExt; | ||
48 | else | ||
49 | ptr = ®s->ctx.Ext.AX2; | ||
50 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
51 | ptr, 4*20, 4*22); | ||
52 | if (ret) | ||
53 | goto out; | ||
54 | /* A{0-1}.3 */ | ||
55 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
56 | ®s->ctx.AX3, 4*22, 4*24); | ||
57 | if (ret) | ||
58 | goto out; | ||
59 | /* PC */ | ||
60 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
61 | ®s->ctx.CurrPC, 4*24, 4*25); | ||
62 | if (ret) | ||
63 | goto out; | ||
64 | /* TXSTATUS */ | ||
65 | data = (unsigned long)regs->ctx.Flags; | ||
66 | if (regs->ctx.SaveMask & TBICTX_CBUF_BIT) | ||
67 | data |= USER_GP_REGS_STATUS_CATCH_BIT; | ||
68 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
69 | &data, 4*25, 4*26); | ||
70 | if (ret) | ||
71 | goto out; | ||
72 | /* TXRPT, TXBPOBITS, TXMODE */ | ||
73 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
74 | ®s->ctx.CurrRPT, 4*26, 4*29); | ||
75 | if (ret) | ||
76 | goto out; | ||
77 | /* Padding */ | ||
78 | ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, | ||
79 | 4*29, 4*30); | ||
80 | out: | ||
81 | return ret; | ||
82 | } | ||
83 | |||
84 | int metag_gp_regs_copyin(struct pt_regs *regs, | ||
85 | unsigned int pos, unsigned int count, | ||
86 | const void *kbuf, const void __user *ubuf) | ||
87 | { | ||
88 | void *ptr; | ||
89 | unsigned long data; | ||
90 | int ret; | ||
91 | |||
92 | /* D{0-1}.{0-7} */ | ||
93 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
94 | regs->ctx.DX, 0, 4*16); | ||
95 | if (ret) | ||
96 | goto out; | ||
97 | /* A{0-1}.{0-1} */ | ||
98 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
99 | regs->ctx.AX, 4*16, 4*20); | ||
100 | if (ret) | ||
101 | goto out; | ||
102 | /* A{0-1}.2 */ | ||
103 | if (regs->ctx.SaveMask & TBICTX_XEXT_BIT) | ||
104 | ptr = regs->ctx.Ext.Ctx.pExt; | ||
105 | else | ||
106 | ptr = ®s->ctx.Ext.AX2; | ||
107 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
108 | ptr, 4*20, 4*22); | ||
109 | if (ret) | ||
110 | goto out; | ||
111 | /* A{0-1}.3 */ | ||
112 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
113 | ®s->ctx.AX3, 4*22, 4*24); | ||
114 | if (ret) | ||
115 | goto out; | ||
116 | /* PC */ | ||
117 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
118 | ®s->ctx.CurrPC, 4*24, 4*25); | ||
119 | if (ret) | ||
120 | goto out; | ||
121 | /* TXSTATUS */ | ||
122 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
123 | &data, 4*25, 4*26); | ||
124 | if (ret) | ||
125 | goto out; | ||
126 | regs->ctx.Flags = data & 0xffff; | ||
127 | if (data & USER_GP_REGS_STATUS_CATCH_BIT) | ||
128 | regs->ctx.SaveMask |= TBICTX_XCBF_BIT | TBICTX_CBUF_BIT; | ||
129 | else | ||
130 | regs->ctx.SaveMask &= ~TBICTX_CBUF_BIT; | ||
131 | /* TXRPT, TXBPOBITS, TXMODE */ | ||
132 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
133 | ®s->ctx.CurrRPT, 4*26, 4*29); | ||
134 | out: | ||
135 | return ret; | ||
136 | } | ||
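
Both copy routines above hard-code the same word offsets (4*16, 4*20 and so
on), which pins down the layout of the user-visible GP register block. Below
is a compile-checkable sketch of that implied layout; the real struct
user_gp_regs lives in the uapi headers and its field names may differ:

    #include <stdint.h>

    struct user_gp_regs_sketch {
        uint32_t dx[2][8];    /* words  0-15: D{0-1}.{0-7}      */
        uint32_t ax[2][2];    /* words 16-19: A{0-1}.{0-1}      */
        uint32_t ax2[2];      /* words 20-21: A{0-1}.2          */
        uint32_t ax3[2];      /* words 22-23: A{0-1}.3          */
        uint32_t pc;          /* word  24:    PC                */
        uint32_t txstatus;    /* word  25:    TXSTATUS + catch  */
        uint32_t txrpt;       /* word  26:    TXRPT             */
        uint32_t txbpobits;   /* word  27:    TXBPOBITS         */
        uint32_t txmode;      /* word  28:    TXMODE            */
        uint32_t pad;         /* word  29:    padding           */
    };

    _Static_assert(sizeof(struct user_gp_regs_sketch) == 4 * 30,
                   "regset layout must span exactly 30 words");
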
137 | |||
138 | static int metag_gp_regs_get(struct task_struct *target, | ||
139 | const struct user_regset *regset, | ||
140 | unsigned int pos, unsigned int count, | ||
141 | void *kbuf, void __user *ubuf) | ||
142 | { | ||
143 | const struct pt_regs *regs = task_pt_regs(target); | ||
144 | return metag_gp_regs_copyout(regs, pos, count, kbuf, ubuf); | ||
145 | } | ||
146 | |||
147 | static int metag_gp_regs_set(struct task_struct *target, | ||
148 | const struct user_regset *regset, | ||
149 | unsigned int pos, unsigned int count, | ||
150 | const void *kbuf, const void __user *ubuf) | ||
151 | { | ||
152 | struct pt_regs *regs = task_pt_regs(target); | ||
153 | return metag_gp_regs_copyin(regs, pos, count, kbuf, ubuf); | ||
154 | } | ||
155 | |||
156 | int metag_cb_regs_copyout(const struct pt_regs *regs, | ||
157 | unsigned int pos, unsigned int count, | ||
158 | void *kbuf, void __user *ubuf) | ||
159 | { | ||
160 | int ret; | ||
161 | |||
162 | /* TXCATCH{0-3} */ | ||
163 | if (regs->ctx.SaveMask & TBICTX_XCBF_BIT) | ||
164 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
165 | regs->extcb0, 0, 4*4); | ||
166 | else | ||
167 | ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, | ||
168 | 0, 4*4); | ||
169 | return ret; | ||
170 | } | ||
171 | |||
172 | int metag_cb_regs_copyin(struct pt_regs *regs, | ||
173 | unsigned int pos, unsigned int count, | ||
174 | const void *kbuf, const void __user *ubuf) | ||
175 | { | ||
176 | int ret; | ||
177 | |||
178 | /* TXCATCH{0-3} */ | ||
179 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
180 | regs->extcb0, 0, 4*4); | ||
181 | return ret; | ||
182 | } | ||
183 | |||
184 | static int metag_cb_regs_get(struct task_struct *target, | ||
185 | const struct user_regset *regset, | ||
186 | unsigned int pos, unsigned int count, | ||
187 | void *kbuf, void __user *ubuf) | ||
188 | { | ||
189 | const struct pt_regs *regs = task_pt_regs(target); | ||
190 | return metag_cb_regs_copyout(regs, pos, count, kbuf, ubuf); | ||
191 | } | ||
192 | |||
193 | static int metag_cb_regs_set(struct task_struct *target, | ||
194 | const struct user_regset *regset, | ||
195 | unsigned int pos, unsigned int count, | ||
196 | const void *kbuf, const void __user *ubuf) | ||
197 | { | ||
198 | struct pt_regs *regs = task_pt_regs(target); | ||
199 | return metag_cb_regs_copyin(regs, pos, count, kbuf, ubuf); | ||
200 | } | ||
201 | |||
202 | int metag_rp_state_copyout(const struct pt_regs *regs, | ||
203 | unsigned int pos, unsigned int count, | ||
204 | void *kbuf, void __user *ubuf) | ||
205 | { | ||
206 | unsigned long mask; | ||
207 | u64 *ptr; | ||
208 | int ret, i; | ||
209 | |||
210 | /* Empty read pipeline */ | ||
211 | if (!(regs->ctx.SaveMask & TBICTX_CBRP_BIT)) { | ||
212 | ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, | ||
213 | 0, 4*13); | ||
214 | goto out; | ||
215 | } | ||
216 | |||
217 | mask = (regs->ctx.CurrDIVTIME & TXDIVTIME_RPMASK_BITS) >> | ||
218 | TXDIVTIME_RPMASK_S; | ||
219 | |||
220 | /* Read pipeline entries */ | ||
221 | ptr = (void *)®s->extcb0[1]; | ||
222 | for (i = 0; i < 6; ++i, ++ptr) { | ||
223 | if (mask & (1 << i)) | ||
224 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
225 | ptr, 8*i, 8*(i + 1)); | ||
226 | else | ||
227 | ret = user_regset_copyout_zero(&pos, &count, &kbuf, | ||
228 | &ubuf, 8*i, 8*(i + 1)); | ||
229 | if (ret) | ||
230 | goto out; | ||
231 | } | ||
232 | /* Mask of entries */ | ||
233 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
234 | &mask, 4*12, 4*13); | ||
235 | out: | ||
236 | return ret; | ||
237 | } | ||
238 | |||
239 | int metag_rp_state_copyin(struct pt_regs *regs, | ||
240 | unsigned int pos, unsigned int count, | ||
241 | const void *kbuf, const void __user *ubuf) | ||
242 | { | ||
243 | struct user_rp_state rp; | ||
244 | unsigned long long *ptr; | ||
245 | int ret, i; | ||
246 | |||
247 | /* Read the entire pipeline before making any changes */ | ||
248 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
249 | &rp, 0, 4*13); | ||
250 | if (ret) | ||
251 | goto out; | ||
252 | |||
253 | /* Write pipeline entries */ | ||
254 | ptr = (void *)®s->extcb0[1]; | ||
255 | for (i = 0; i < 6; ++i, ++ptr) | ||
256 | if (rp.mask & (1 << i)) | ||
257 | *ptr = rp.entries[i]; | ||
258 | |||
259 | /* Update RPMask in TXDIVTIME */ | ||
260 | regs->ctx.CurrDIVTIME &= ~TXDIVTIME_RPMASK_BITS; | ||
261 | regs->ctx.CurrDIVTIME |= (rp.mask << TXDIVTIME_RPMASK_S) | ||
262 | & TXDIVTIME_RPMASK_BITS; | ||
263 | |||
264 | /* Set/clear flags to indicate catch/read pipeline state */ | ||
265 | if (rp.mask) | ||
266 | regs->ctx.SaveMask |= TBICTX_XCBF_BIT | TBICTX_CBRP_BIT; | ||
267 | else | ||
268 | regs->ctx.SaveMask &= ~TBICTX_CBRP_BIT; | ||
269 | out: | ||
270 | return ret; | ||
271 | } | ||
272 | |||
273 | static int metag_rp_state_get(struct task_struct *target, | ||
274 | const struct user_regset *regset, | ||
275 | unsigned int pos, unsigned int count, | ||
276 | void *kbuf, void __user *ubuf) | ||
277 | { | ||
278 | const struct pt_regs *regs = task_pt_regs(target); | ||
279 | return metag_rp_state_copyout(regs, pos, count, kbuf, ubuf); | ||
280 | } | ||
281 | |||
282 | static int metag_rp_state_set(struct task_struct *target, | ||
283 | const struct user_regset *regset, | ||
284 | unsigned int pos, unsigned int count, | ||
285 | const void *kbuf, const void __user *ubuf) | ||
286 | { | ||
287 | struct pt_regs *regs = task_pt_regs(target); | ||
288 | return metag_rp_state_copyin(regs, pos, count, kbuf, ubuf); | ||
289 | } | ||
290 | |||
291 | enum metag_regset { | ||
292 | REGSET_GENERAL, | ||
293 | REGSET_CBUF, | ||
294 | REGSET_READPIPE, | ||
295 | }; | ||
296 | |||
297 | static const struct user_regset metag_regsets[] = { | ||
298 | [REGSET_GENERAL] = { | ||
299 | .core_note_type = NT_PRSTATUS, | ||
300 | .n = ELF_NGREG, | ||
301 | .size = sizeof(long), | ||
302 | .align = sizeof(long long), | ||
303 | .get = metag_gp_regs_get, | ||
304 | .set = metag_gp_regs_set, | ||
305 | }, | ||
306 | [REGSET_CBUF] = { | ||
307 | .core_note_type = NT_METAG_CBUF, | ||
308 | .n = sizeof(struct user_cb_regs) / sizeof(long), | ||
309 | .size = sizeof(long), | ||
310 | .align = sizeof(long long), | ||
311 | .get = metag_cb_regs_get, | ||
312 | .set = metag_cb_regs_set, | ||
313 | }, | ||
314 | [REGSET_READPIPE] = { | ||
315 | .core_note_type = NT_METAG_RPIPE, | ||
316 | .n = sizeof(struct user_rp_state) / sizeof(long), | ||
317 | .size = sizeof(long), | ||
318 | .align = sizeof(long long), | ||
319 | .get = metag_rp_state_get, | ||
320 | .set = metag_rp_state_set, | ||
321 | }, | ||
322 | }; | ||
323 | |||
324 | static const struct user_regset_view user_metag_view = { | ||
325 | .name = "metag", | ||
326 | .e_machine = EM_METAG, | ||
327 | .regsets = metag_regsets, | ||
328 | .n = ARRAY_SIZE(metag_regsets) | ||
329 | }; | ||
330 | |||
331 | const struct user_regset_view *task_user_regset_view(struct task_struct *task) | ||
332 | { | ||
333 | return &user_metag_view; | ||
334 | } | ||
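
The regset table above is what generic code consults both when writing core
dump notes and when servicing PTRACE_GETREGSET/PTRACE_SETREGSET. A hedged
userspace sketch that fetches the NT_PRSTATUS block of a thread this process
has already ptrace-attached and stopped:

    #include <stdio.h>
    #include <stdint.h>
    #include <elf.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    int dump_gp_regs(pid_t pid)
    {
        uint32_t regs[30];    /* 30 words, per the layout sketched above */
        struct iovec iov = { .iov_base = regs, .iov_len = sizeof(regs) };

        if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) < 0)
            return -1;
        /* iov.iov_len now holds the byte count the kernel filled in */
        printf("PC = 0x%08x\n", regs[24]);    /* word 24 is the PC */
        return 0;
    }
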
335 | |||
336 | /* | ||
337 | * Called by kernel/ptrace.c when detaching.. | ||
338 | * | ||
339 | * Make sure single step bits etc are not set. | ||
340 | */ | ||
341 | void ptrace_disable(struct task_struct *child) | ||
342 | { | ||
343 | /* nothing to do.. */ | ||
344 | } | ||
345 | |||
346 | long arch_ptrace(struct task_struct *child, long request, unsigned long addr, | ||
347 | unsigned long data) | ||
348 | { | ||
349 | int ret; | ||
350 | |||
351 | switch (request) { | ||
352 | default: | ||
353 | ret = ptrace_request(child, request, addr, data); | ||
354 | break; | ||
355 | } | ||
356 | |||
357 | return ret; | ||
358 | } | ||
359 | |||
360 | int syscall_trace_enter(struct pt_regs *regs) | ||
361 | { | ||
362 | int ret = 0; | ||
363 | |||
364 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | ||
365 | ret = tracehook_report_syscall_entry(regs); | ||
366 | |||
367 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | ||
368 | trace_sys_enter(regs, regs->ctx.DX[0].U1); | ||
369 | |||
370 | return ret ? -1 : regs->ctx.DX[0].U1; | ||
371 | } | ||
372 | |||
373 | void syscall_trace_leave(struct pt_regs *regs) | ||
374 | { | ||
375 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | ||
376 | trace_sys_exit(regs, regs->ctx.DX[0].U1); | ||
377 | |||
378 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | ||
379 | tracehook_report_syscall_exit(regs, 0); | ||
380 | } | ||
diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c new file mode 100644 index 000000000000..879246170aec --- /dev/null +++ b/arch/metag/kernel/setup.c | |||
@@ -0,0 +1,631 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005-2012 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * This file contains the architecture-dependent parts of system setup. | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/export.h> | ||
9 | #include <linux/bootmem.h> | ||
10 | #include <linux/console.h> | ||
11 | #include <linux/cpu.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/fs.h> | ||
15 | #include <linux/genhd.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/initrd.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/memblock.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/of_fdt.h> | ||
23 | #include <linux/pfn.h> | ||
24 | #include <linux/root_dev.h> | ||
25 | #include <linux/sched.h> | ||
26 | #include <linux/seq_file.h> | ||
27 | #include <linux/start_kernel.h> | ||
28 | #include <linux/string.h> | ||
29 | |||
30 | #include <asm/cachepart.h> | ||
31 | #include <asm/clock.h> | ||
32 | #include <asm/core_reg.h> | ||
33 | #include <asm/cpu.h> | ||
34 | #include <asm/da.h> | ||
35 | #include <asm/highmem.h> | ||
36 | #include <asm/hwthread.h> | ||
37 | #include <asm/l2cache.h> | ||
38 | #include <asm/mach/arch.h> | ||
39 | #include <asm/metag_mem.h> | ||
40 | #include <asm/metag_regs.h> | ||
41 | #include <asm/mmu.h> | ||
42 | #include <asm/mmzone.h> | ||
43 | #include <asm/processor.h> | ||
44 | #include <asm/prom.h> | ||
45 | #include <asm/sections.h> | ||
46 | #include <asm/setup.h> | ||
47 | #include <asm/traps.h> | ||
48 | |||
49 | /* Priv protect as many registers as possible. */ | ||
50 | #define DEFAULT_PRIV (TXPRIVEXT_COPRO_BITS | \ | ||
51 | TXPRIVEXT_TXTRIGGER_BIT | \ | ||
52 | TXPRIVEXT_TXGBLCREG_BIT | \ | ||
53 | TXPRIVEXT_ILOCK_BIT | \ | ||
54 | TXPRIVEXT_TXITACCYC_BIT | \ | ||
55 | TXPRIVEXT_TXDIVTIME_BIT | \ | ||
56 | TXPRIVEXT_TXAMAREGX_BIT | \ | ||
57 | TXPRIVEXT_TXTIMERI_BIT | \ | ||
58 | TXPRIVEXT_TXSTATUS_BIT | \ | ||
59 | TXPRIVEXT_TXDISABLE_BIT) | ||
60 | |||
61 | /* Meta2 specific bits. */ | ||
62 | #ifdef CONFIG_METAG_META12 | ||
63 | #define META2_PRIV 0 | ||
64 | #else | ||
65 | #define META2_PRIV (TXPRIVEXT_TXTIMER_BIT | \ | ||
66 | TXPRIVEXT_TRACE_BIT) | ||
67 | #endif | ||
68 | |||
69 | /* Unaligned access checking bits. */ | ||
70 | #ifdef CONFIG_METAG_UNALIGNED | ||
71 | #define UNALIGNED_PRIV TXPRIVEXT_ALIGNREW_BIT | ||
72 | #else | ||
73 | #define UNALIGNED_PRIV 0 | ||
74 | #endif | ||
75 | |||
76 | #define PRIV_BITS (DEFAULT_PRIV | \ | ||
77 | META2_PRIV | \ | ||
78 | UNALIGNED_PRIV) | ||
79 | |||
80 | /* | ||
81 | * Protect access to: | ||
82 | * 0x06000000-0x07ffffff Direct mapped region | ||
83 | * 0x05000000-0x05ffffff MMU table region (Meta1) | ||
84 | * 0x04400000-0x047fffff Cache flush region | ||
85 | * 0x84000000-0x87ffffff Core cache memory region (Meta2) | ||
86 | * | ||
87 | * Allow access to: | ||
88 | * 0x80000000-0x81ffffff Core code memory region (Meta2) | ||
89 | */ | ||
90 | #ifdef CONFIG_METAG_META12 | ||
91 | #define PRIVSYSR_BITS TXPRIVSYSR_ALL_BITS | ||
92 | #else | ||
93 | #define PRIVSYSR_BITS (TXPRIVSYSR_ALL_BITS & ~TXPRIVSYSR_CORECODE_BIT) | ||
94 | #endif | ||
95 | |||
96 | /* Protect all 0x02xxxxxx and 0x048xxxxx. */ | ||
97 | #define PIOREG_BITS 0xffffffff | ||
98 | |||
99 | /* | ||
100 | * Protect all 0x04000xx0 (system events) | ||
101 | * except write combiner flush and write fence (system events 4 and 5). | ||
102 | */ | ||
103 | #define PSYREG_BITS 0xfffffffb | ||
104 | |||
105 | |||
106 | extern char _heap_start[]; | ||
107 | |||
108 | #ifdef CONFIG_METAG_BUILTIN_DTB | ||
109 | extern u32 __dtb_start[]; | ||
110 | #endif | ||
111 | |||
112 | #ifdef CONFIG_DA_CONSOLE | ||
113 | /* Our early channel based console driver */ | ||
114 | extern struct console dash_console; | ||
115 | #endif | ||
116 | |||
117 | struct machine_desc *machine_desc __initdata; | ||
118 | |||
119 | /* | ||
120 | * Map a Linux CPU number to a hardware thread ID | ||
121 | * In SMP this will be set up with the correct mapping at startup; in UP this | ||
122 | * will map to the HW thread on which we are running. | ||
123 | */ | ||
124 | u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = { | ||
125 | [0 ... NR_CPUS-1] = BAD_HWTHREAD_ID | ||
126 | }; | ||
127 | |||
128 | /* | ||
129 | * Map a hardware thread ID to a Linux CPU number | ||
130 | * In SMP this will be fleshed out with the correct CPU ID for a particular | ||
131 | * hardware thread. In UP this will be initialised with the boot CPU ID. | ||
132 | */ | ||
133 | u8 hwthread_id_2_cpu[4] __read_mostly = { | ||
134 | [0 ... 3] = BAD_CPU_ID | ||
135 | }; | ||
136 | |||
137 | /* The relative offset of the MMU mapped memory (from ldlk or bootloader) | ||
138 | * to the real physical memory. This is needed as we have to use the | ||
139 | * physical addresses in the MMU tables (pte entries), and not the virtual | ||
140 | * addresses. | ||
141 | * This variable is used in the __pa() and __va() macros, and should | ||
142 | * probably only be used via them. | ||
143 | */ | ||
144 | unsigned int meta_memoffset; | ||
145 | EXPORT_SYMBOL(meta_memoffset); | ||
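
Given the assignment later in setup_arch(), meta_memoffset ends up equal to
virtual minus physical, so the conversion macros presumably reduce to a single
add or subtract. This is a sketch of that reading only; the real definitions
live in asm/page.h and may differ:

    /* sketch: derived from meta_memoffset = text_start - phys(text_start) */
    #define sketch_pa(v) ((unsigned long)(v) - meta_memoffset)
    #define sketch_va(p) ((void *)((unsigned long)(p) + meta_memoffset))
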
146 | |||
147 | static char __initdata *original_cmd_line; | ||
148 | |||
149 | DEFINE_PER_CPU(PTBI, pTBI); | ||
150 | |||
151 | /* | ||
152 | * Mappings are specified as "CPU_ID:HWTHREAD_ID", e.g. | ||
153 | * | ||
154 | * "hwthread_map=0:1,1:2,2:3,3:0" | ||
155 | * | ||
156 | * Linux CPU ID HWTHREAD_ID | ||
157 | * --------------------------- | ||
158 | * 0 1 | ||
159 | * 1 2 | ||
160 | * 2 3 | ||
161 | * 3 0 | ||
162 | */ | ||
163 | static int __init parse_hwthread_map(char *p) | ||
164 | { | ||
165 | int cpu; | ||
166 | |||
167 | while (*p) { | ||
168 | cpu = (*p++) - '0'; | ||
169 | if (cpu < 0 || cpu > 9) | ||
170 | goto err_cpu; | ||
171 | |||
172 | p++; /* skip colon */ | ||
173 | cpu_2_hwthread_id[cpu] = (*p++) - '0'; | ||
174 | if (cpu_2_hwthread_id[cpu] >= 4) | ||
175 | goto err_thread; | ||
176 | hwthread_id_2_cpu[cpu_2_hwthread_id[cpu]] = cpu; | ||
177 | |||
178 | if (*p == ',') | ||
179 | p++; /* skip comma */ | ||
180 | } | ||
181 | |||
182 | return 0; | ||
183 | err_cpu: | ||
184 | pr_err("%s: hwthread_map cpu argument out of range\n", __func__); | ||
185 | return -EINVAL; | ||
186 | err_thread: | ||
187 | pr_err("%s: hwthread_map thread argument out of range\n", __func__); | ||
188 | return -EINVAL; | ||
189 | } | ||
190 | early_param("hwthread_map", parse_hwthread_map); | ||
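
To make the format concrete: booting with hwthread_map=0:1,1:2 leaves
cpu_2_hwthread_id[0] == 1 and cpu_2_hwthread_id[1] == 2, with the reverse
table filled to match and every other entry still at its BAD_* default. A
standalone re-run of the same parse loop, with 0xff standing in for the BAD_*
markers:

    #include <stdio.h>

    int main(void)
    {
        unsigned char cpu_to_hw[10], hw_to_cpu[4];
        const char *p = "0:1,1:2";    /* the hwthread_map= payload */
        int cpu;

        for (cpu = 0; cpu < 10; cpu++)
            cpu_to_hw[cpu] = 0xff;
        for (cpu = 0; cpu < 4; cpu++)
            hw_to_cpu[cpu] = 0xff;

        while (*p) {
            cpu = *p++ - '0';         /* single-digit Linux CPU id */
            p++;                      /* skip ':' */
            cpu_to_hw[cpu] = *p++ - '0';
            hw_to_cpu[cpu_to_hw[cpu]] = cpu;
            if (*p == ',')
                p++;
        }
        printf("cpu0 -> hwthread %d, cpu1 -> hwthread %d\n",
               cpu_to_hw[0], cpu_to_hw[1]);
        return 0;
    }
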
191 | |||
192 | void __init dump_machine_table(void) | ||
193 | { | ||
194 | struct machine_desc *p; | ||
195 | const char **compat; | ||
196 | |||
197 | pr_info("Available machine support:\n\tNAME\t\tCOMPATIBLE LIST\n"); | ||
198 | for_each_machine_desc(p) { | ||
199 | pr_info("\t%s\t[", p->name); | ||
200 | for (compat = p->dt_compat; compat && *compat; ++compat) | ||
201 | printk(" '%s'", *compat); | ||
202 | printk(" ]\n"); | ||
203 | } | ||
204 | |||
205 | pr_info("\nPlease check your kernel config and/or bootloader.\n"); | ||
206 | |||
207 | hard_processor_halt(HALT_PANIC); | ||
208 | } | ||
209 | |||
210 | #ifdef CONFIG_METAG_HALT_ON_PANIC | ||
211 | static int metag_panic_event(struct notifier_block *this, unsigned long event, | ||
212 | void *ptr) | ||
213 | { | ||
214 | hard_processor_halt(HALT_PANIC); | ||
215 | return NOTIFY_DONE; | ||
216 | } | ||
217 | |||
218 | static struct notifier_block metag_panic_block = { | ||
219 | .notifier_call = metag_panic_event, | ||
220 | .next = NULL, | ||
221 | .priority = 0, | ||
222 | }; | ||
223 | #endif | ||
224 | |||
225 | void __init setup_arch(char **cmdline_p) | ||
226 | { | ||
227 | unsigned long start_pfn; | ||
228 | unsigned long text_start = (unsigned long)(&_stext); | ||
229 | unsigned long cpu = smp_processor_id(); | ||
230 | unsigned long heap_start, heap_end; | ||
231 | unsigned long start_pte; | ||
232 | PTBI _pTBI; | ||
233 | PTBISEG p_heap; | ||
234 | int heap_id, i; | ||
235 | |||
236 | metag_cache_probe(); | ||
237 | |||
238 | metag_da_probe(); | ||
239 | #ifdef CONFIG_DA_CONSOLE | ||
240 | if (metag_da_enabled()) { | ||
241 | /* An early channel based console driver */ | ||
242 | register_console(&dash_console); | ||
243 | add_preferred_console("ttyDA", 1, NULL); | ||
244 | } | ||
245 | #endif | ||
246 | |||
247 | /* try interpreting the argument as a device tree */ | ||
248 | machine_desc = setup_machine_fdt(original_cmd_line); | ||
249 | /* if it doesn't look like a device tree it must be a command line */ | ||
250 | if (!machine_desc) { | ||
251 | #ifdef CONFIG_METAG_BUILTIN_DTB | ||
252 | /* try the embedded device tree */ | ||
253 | machine_desc = setup_machine_fdt(__dtb_start); | ||
254 | if (!machine_desc) | ||
255 | panic("Invalid embedded device tree."); | ||
256 | #else | ||
257 | /* use the default machine description */ | ||
258 | machine_desc = default_machine_desc(); | ||
259 | #endif | ||
260 | #ifndef CONFIG_CMDLINE_FORCE | ||
261 | /* append the bootloader cmdline to any builtin fdt cmdline */ | ||
262 | if (boot_command_line[0] && original_cmd_line[0]) | ||
263 | strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); | ||
264 | strlcat(boot_command_line, original_cmd_line, | ||
265 | COMMAND_LINE_SIZE); | ||
266 | #endif | ||
267 | } | ||
268 | setup_meta_clocks(machine_desc->clocks); | ||
269 | |||
270 | *cmdline_p = boot_command_line; | ||
271 | parse_early_param(); | ||
272 | |||
273 | /* | ||
274 | * Make sure we don't alias in dcache or icache | ||
275 | */ | ||
276 | check_for_cache_aliasing(cpu); | ||
277 | |||
278 | |||
279 | #ifdef CONFIG_METAG_HALT_ON_PANIC | ||
280 | atomic_notifier_chain_register(&panic_notifier_list, | ||
281 | &metag_panic_block); | ||
282 | #endif | ||
283 | |||
284 | #ifdef CONFIG_DUMMY_CONSOLE | ||
285 | conswitchp = &dummy_con; | ||
286 | #endif | ||
287 | |||
288 | if (!(__core_reg_get(TXSTATUS) & TXSTATUS_PSTAT_BIT)) | ||
289 | panic("Privilege must be enabled for this thread."); | ||
290 | |||
291 | _pTBI = __TBI(TBID_ISTAT_BIT); | ||
292 | |||
293 | per_cpu(pTBI, cpu) = _pTBI; | ||
294 | |||
295 | if (!per_cpu(pTBI, cpu)) | ||
296 | panic("No TBI found!"); | ||
297 | |||
298 | /* | ||
299 | * Initialize all interrupt vectors to our copy of __TBIUnExpXXX, | ||
300 | * rather than the version from the bootloader. This makes call | ||
301 | * stacks easier to understand and may allow us to unmap the | ||
302 | * bootloader at some point. | ||
303 | * | ||
304 | * We need to keep the LWK handler that TBI installed in order to | ||
305 | * be able to do inter-thread comms. | ||
306 | */ | ||
307 | for (i = 0; i <= TBID_SIGNUM_MAX; i++) | ||
308 | if (i != TBID_SIGNUM_LWK) | ||
309 | _pTBI->fnSigs[i] = __TBIUnExpXXX; | ||
310 | |||
311 | /* A Meta requirement is that the kernel is loaded (virtually) | ||
312 | * at the PAGE_OFFSET. | ||
313 | */ | ||
314 | if (PAGE_OFFSET != text_start) | ||
315 | panic("Kernel not loaded at PAGE_OFFSET (%#x) but at %#lx.", | ||
316 | PAGE_OFFSET, text_start); | ||
317 | |||
318 | start_pte = mmu_read_second_level_page(text_start); | ||
319 | |||
320 | /* | ||
321 | * Kernel pages should have the PRIV bit set by the bootloader. | ||
322 | */ | ||
323 | if (!(start_pte & _PAGE_KERNEL)) | ||
324 | panic("kernel pte does not have PRIV set"); | ||
325 | |||
326 | /* | ||
327 | * See __pa and __va in include/asm/page.h. | ||
328 | * This value is negative when running in local space but the | ||
329 | * calculations work anyway. | ||
330 | */ | ||
331 | meta_memoffset = text_start - (start_pte & PAGE_MASK); | ||
332 | |||
333 | /* Now let's look at the heap space */ | ||
334 | heap_id = (__TBIThreadId() & TBID_THREAD_BITS) | ||
335 | + TBID_SEG(0, TBID_SEGSCOPE_LOCAL, TBID_SEGTYPE_HEAP); | ||
336 | |||
337 | p_heap = __TBIFindSeg(NULL, heap_id); | ||
338 | |||
339 | if (!p_heap) | ||
340 | panic("Could not find heap from TBI!"); | ||
341 | |||
342 | /* The heap begins at the first full page after the kernel data. */ | ||
343 | heap_start = (unsigned long) &_heap_start; | ||
344 | |||
345 | /* The heap ends at the end of the heap segment specified with | ||
346 | * ldlk. | ||
347 | */ | ||
348 | if (is_global_space(text_start)) { | ||
349 | pr_debug("WARNING: running in global space!\n"); | ||
350 | heap_end = (unsigned long)p_heap->pGAddr + p_heap->Bytes; | ||
351 | } else { | ||
352 | heap_end = (unsigned long)p_heap->pLAddr + p_heap->Bytes; | ||
353 | } | ||
354 | |||
355 | ROOT_DEV = Root_RAM0; | ||
356 | |||
357 | /* init_mm is the mm struct used for the first task. It is then | ||
358 | * cloned for all other tasks spawned from that task. | ||
359 | * | ||
360 | * Note - we are using the virtual addresses here. | ||
361 | */ | ||
362 | init_mm.start_code = (unsigned long)(&_stext); | ||
363 | init_mm.end_code = (unsigned long)(&_etext); | ||
364 | init_mm.end_data = (unsigned long)(&_edata); | ||
365 | init_mm.brk = (unsigned long)heap_start; | ||
366 | |||
367 | min_low_pfn = PFN_UP(__pa(text_start)); | ||
368 | max_low_pfn = PFN_DOWN(__pa(heap_end)); | ||
369 | |||
370 | pfn_base = min_low_pfn; | ||
371 | |||
372 | /* Round max_pfn up to a (1 << MAX_ORDER) page boundary. The free_bootmem_node() | ||
373 | * call later makes sure to keep the rounded up pages marked reserved. | ||
374 | */ | ||
375 | max_pfn = max_low_pfn + ((1 << MAX_ORDER) - 1); | ||
376 | max_pfn &= ~((1 << MAX_ORDER) - 1); | ||
377 | |||
378 | start_pfn = PFN_UP(__pa(heap_start)); | ||
379 | |||
380 | if (min_low_pfn & ((1 << MAX_ORDER) - 1)) { | ||
381 | /* Theoretically, we could expand the space that the | ||
382 | * bootmem allocator covers - much as we do for the | ||
383 | * 'high' address, and then tell the bootmem system | ||
384 | * that the lowest chunk is 'not available'. Right | ||
385 | * now it is just much easier to constrain the | ||
386 | * user to always MAX_ORDER align their kernel space. | ||
387 | */ | ||
388 | |||
389 | panic("Kernel must be %d byte aligned, currently at %#lx.", | ||
390 | 1 << (MAX_ORDER + PAGE_SHIFT), | ||
391 | min_low_pfn << PAGE_SHIFT); | ||
392 | } | ||
393 | |||
394 | #ifdef CONFIG_HIGHMEM | ||
395 | highstart_pfn = highend_pfn = max_pfn; | ||
396 | high_memory = (void *) __va(PFN_PHYS(highstart_pfn)); | ||
397 | #else | ||
398 | high_memory = (void *)__va(PFN_PHYS(max_pfn)); | ||
399 | #endif | ||
400 | |||
401 | paging_init(heap_end); | ||
402 | |||
403 | setup_priv(); | ||
404 | |||
405 | /* Setup the boot cpu's mapping. The rest will be setup below. */ | ||
406 | cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id(); | ||
407 | hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id(); | ||
408 | |||
409 | /* Copy device tree blob into non-init memory before unflattening */ | ||
410 | copy_fdt(); | ||
411 | unflatten_device_tree(); | ||
412 | |||
413 | #ifdef CONFIG_SMP | ||
414 | smp_init_cpus(); | ||
415 | #endif | ||
416 | |||
417 | if (machine_desc->init_early) | ||
418 | machine_desc->init_early(); | ||
419 | } | ||
420 | |||
421 | static int __init customize_machine(void) | ||
422 | { | ||
423 | /* customizes platform devices, or adds new ones */ | ||
424 | if (machine_desc->init_machine) | ||
425 | machine_desc->init_machine(); | ||
426 | return 0; | ||
427 | } | ||
428 | arch_initcall(customize_machine); | ||
429 | |||
430 | static int __init init_machine_late(void) | ||
431 | { | ||
432 | if (machine_desc->init_late) | ||
433 | machine_desc->init_late(); | ||
434 | return 0; | ||
435 | } | ||
436 | late_initcall(init_machine_late); | ||
437 | |||
438 | #ifdef CONFIG_PROC_FS | ||
439 | /* | ||
440 | * Get CPU information for use by the procfs. | ||
441 | */ | ||
442 | static const char *get_cpu_capabilities(unsigned int txenable) | ||
443 | { | ||
444 | #ifdef CONFIG_METAG_META21 | ||
445 | /* See CORE_ID in META HTP.GP TRM - Architecture Overview 2.1.238 */ | ||
446 | int coreid = metag_in32(METAC_CORE_ID); | ||
447 | unsigned int dsp_type = (coreid >> 3) & 7; | ||
448 | unsigned int fpu_type = (coreid >> 7) & 3; | ||
449 | |||
450 | switch (dsp_type | fpu_type << 3) { | ||
451 | case (0x00): return "EDSP"; | ||
452 | case (0x01): return "DSP"; | ||
453 | case (0x08): return "EDSP+LFPU"; | ||
454 | case (0x09): return "DSP+LFPU"; | ||
455 | case (0x10): return "EDSP+FPU"; | ||
456 | case (0x11): return "DSP+FPU"; | ||
457 | } | ||
458 | return "UNKNOWN"; | ||
459 | |||
460 | #else | ||
461 | if (!(txenable & TXENABLE_CLASS_BITS)) | ||
462 | return "DSP"; | ||
463 | else | ||
464 | return ""; | ||
465 | #endif | ||
466 | } | ||
467 | |||
468 | static int show_cpuinfo(struct seq_file *m, void *v) | ||
469 | { | ||
470 | const char *cpu; | ||
471 | unsigned int txenable, thread_id, major, minor; | ||
472 | unsigned long clockfreq = get_coreclock(); | ||
473 | #ifdef CONFIG_SMP | ||
474 | int i; | ||
475 | unsigned long lpj; | ||
476 | #endif | ||
477 | |||
478 | cpu = "META"; | ||
479 | |||
480 | txenable = __core_reg_get(TXENABLE); | ||
481 | major = (txenable & TXENABLE_MAJOR_REV_BITS) >> TXENABLE_MAJOR_REV_S; | ||
482 | minor = (txenable & TXENABLE_MINOR_REV_BITS) >> TXENABLE_MINOR_REV_S; | ||
483 | thread_id = (txenable >> 8) & 0x3; | ||
484 | |||
485 | #ifdef CONFIG_SMP | ||
486 | for_each_online_cpu(i) { | ||
487 | lpj = per_cpu(cpu_data, i).loops_per_jiffy; | ||
488 | txenable = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, | ||
489 | cpu_2_hwthread_id[i]); | ||
490 | |||
491 | seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n" | ||
492 | "Clocking:\t%lu.%1luMHz\n" | ||
493 | "BogoMips:\t%lu.%02lu\n" | ||
494 | "Calibration:\t%lu loops\n" | ||
495 | "Capabilities:\t%s\n\n", | ||
496 | cpu, major, minor, i, | ||
497 | clockfreq / 1000000, (clockfreq / 100000) % 10, | ||
498 | lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100, | ||
499 | lpj, | ||
500 | get_cpu_capabilities(txenable)); | ||
501 | } | ||
502 | #else | ||
503 | seq_printf(m, "CPU:\t\t%s %d.%d (thread %d)\n" | ||
504 | "Clocking:\t%lu.%1luMHz\n" | ||
505 | "BogoMips:\t%lu.%02lu\n" | ||
506 | "Calibration:\t%lu loops\n" | ||
507 | "Capabilities:\t%s\n", | ||
508 | cpu, major, minor, thread_id, | ||
509 | clockfreq / 1000000, (clockfreq / 100000) % 10, | ||
510 | loops_per_jiffy / (500000 / HZ), | ||
511 | (loops_per_jiffy / (5000 / HZ)) % 100, | ||
512 | loops_per_jiffy, | ||
513 | get_cpu_capabilities(txenable)); | ||
514 | #endif /* CONFIG_SMP */ | ||
515 | |||
516 | #ifdef CONFIG_METAG_L2C | ||
517 | if (meta_l2c_is_present()) { | ||
518 | seq_printf(m, "L2 cache:\t%s\n" | ||
519 | "L2 cache size:\t%d KB\n", | ||
520 | meta_l2c_is_enabled() ? "enabled" : "disabled", | ||
521 | meta_l2c_size() >> 10); | ||
522 | } | ||
523 | #endif | ||
524 | return 0; | ||
525 | } | ||
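
The BogoMips lines above are pure integer arithmetic: lpj / (500000 / HZ) is
the integer part of lpj * HZ / 500000, and lpj / (5000 / HZ) % 100 recovers
the two decimal places. A worked check with example values:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long HZ = 100, lpj = 498000;    /* example values */

        /* 498000 / 5000 = 99; (498000 / 50) % 100 = 60 -> "99.60" */
        printf("BogoMips:\t%lu.%02lu\n",
               lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100);
        return 0;
    }
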
526 | |||
527 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
528 | { | ||
529 | return (void *)(*pos == 0); | ||
530 | } | ||
531 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
532 | { | ||
533 | return NULL; | ||
534 | } | ||
535 | static void c_stop(struct seq_file *m, void *v) | ||
536 | { | ||
537 | } | ||
538 | const struct seq_operations cpuinfo_op = { | ||
539 | .start = c_start, | ||
540 | .next = c_next, | ||
541 | .stop = c_stop, | ||
542 | .show = show_cpuinfo, | ||
543 | }; | ||
544 | #endif /* CONFIG_PROC_FS */ | ||
545 | |||
546 | void __init metag_start_kernel(char *args) | ||
547 | { | ||
548 | /* Zero the timer register so timestamps are from the point at | ||
549 | * which the kernel started running. | ||
550 | */ | ||
551 | __core_reg_set(TXTIMER, 0); | ||
552 | |||
553 | /* Clear the bss. */ | ||
554 | memset(__bss_start, 0, | ||
555 | (unsigned long)__bss_stop - (unsigned long)__bss_start); | ||
556 | |||
557 | /* Remember where these are for use in setup_arch */ | ||
558 | original_cmd_line = args; | ||
559 | |||
560 | current_thread_info()->cpu = hard_processor_id(); | ||
561 | |||
562 | start_kernel(); | ||
563 | } | ||
564 | |||
565 | /** | ||
566 | * setup_priv() - Set up privilege protection registers. | ||
567 | * | ||
568 | * Set up privilege protection registers such as TXPRIVEXT to prevent userland | ||
569 | * from touching our precious registers and sensitive memory areas. | ||
570 | */ | ||
571 | void setup_priv(void) | ||
572 | { | ||
573 | unsigned int offset = hard_processor_id() << TXPRIVREG_STRIDE_S; | ||
574 | |||
575 | __core_reg_set(TXPRIVEXT, PRIV_BITS); | ||
576 | |||
577 | metag_out32(PRIVSYSR_BITS, T0PRIVSYSR + offset); | ||
578 | metag_out32(PIOREG_BITS, T0PIOREG + offset); | ||
579 | metag_out32(PSYREG_BITS, T0PSYREG + offset); | ||
580 | } | ||
581 | |||
582 | PTBI pTBI_get(unsigned int cpu) | ||
583 | { | ||
584 | return per_cpu(pTBI, cpu); | ||
585 | } | ||
586 | EXPORT_SYMBOL(pTBI_get); | ||
587 | |||
588 | #if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU) | ||
589 | char capabilities[] = "dsp fpu"; | ||
590 | #elif defined(CONFIG_METAG_DSP) | ||
591 | char capabilities[] = "dsp"; | ||
592 | #elif defined(CONFIG_METAG_FPU) | ||
593 | char capabilities[] = "fpu"; | ||
594 | #else | ||
595 | char capabilities[] = ""; | ||
596 | #endif | ||
597 | |||
598 | static struct ctl_table caps_kern_table[] = { | ||
599 | { | ||
600 | .procname = "capabilities", | ||
601 | .data = capabilities, | ||
602 | .maxlen = sizeof(capabilities), | ||
603 | .mode = 0444, | ||
604 | .proc_handler = proc_dostring, | ||
605 | }, | ||
606 | {} | ||
607 | }; | ||
608 | |||
609 | static struct ctl_table caps_root_table[] = { | ||
610 | { | ||
611 | .procname = "kernel", | ||
612 | .mode = 0555, | ||
613 | .child = caps_kern_table, | ||
614 | }, | ||
615 | {} | ||
616 | }; | ||
617 | |||
618 | static int __init capabilities_register_sysctl(void) | ||
619 | { | ||
620 | struct ctl_table_header *caps_table_header; | ||
621 | |||
622 | caps_table_header = register_sysctl_table(caps_root_table); | ||
623 | if (!caps_table_header) { | ||
624 | pr_err("Unable to register CAPABILITIES sysctl\n"); | ||
625 | return -ENOMEM; | ||
626 | } | ||
627 | |||
628 | return 0; | ||
629 | } | ||
630 | |||
631 | core_initcall(capabilities_register_sysctl); | ||
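
Once registered, the string shows up under /proc/sys/kernel/capabilities. A
minimal reader; on a kernel built with both DSP and FPU support it should
print "dsp fpu":

    #include <stdio.h>

    int main(void)
    {
        char buf[32];
        FILE *f = fopen("/proc/sys/kernel/capabilities", "r");

        if (!f)
            return 1;
        if (fgets(buf, sizeof(buf), f))
            printf("capabilities: %s\n", buf);
        fclose(f);
        return 0;
    }
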
diff --git a/arch/metag/kernel/signal.c b/arch/metag/kernel/signal.c new file mode 100644 index 000000000000..3be61cf0b147 --- /dev/null +++ b/arch/metag/kernel/signal.c | |||
@@ -0,0 +1,344 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1991,1992 Linus Torvalds | ||
3 | * Copyright (C) 2005-2012 Imagination Technologies Ltd. | ||
4 | * | ||
5 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #include <linux/sched.h> | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/smp.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/signal.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/wait.h> | ||
16 | #include <linux/ptrace.h> | ||
17 | #include <linux/unistd.h> | ||
18 | #include <linux/stddef.h> | ||
19 | #include <linux/personality.h> | ||
20 | #include <linux/uaccess.h> | ||
21 | #include <linux/tracehook.h> | ||
22 | |||
23 | #include <asm/ucontext.h> | ||
24 | #include <asm/cacheflush.h> | ||
25 | #include <asm/switch.h> | ||
26 | #include <asm/syscall.h> | ||
27 | #include <asm/syscalls.h> | ||
28 | |||
29 | #define REG_FLAGS ctx.SaveMask | ||
30 | #define REG_RETVAL ctx.DX[0].U0 | ||
31 | #define REG_SYSCALL ctx.DX[0].U1 | ||
32 | #define REG_SP ctx.AX[0].U0 | ||
33 | #define REG_ARG1 ctx.DX[3].U1 | ||
34 | #define REG_ARG2 ctx.DX[3].U0 | ||
35 | #define REG_ARG3 ctx.DX[2].U1 | ||
36 | #define REG_PC ctx.CurrPC | ||
37 | #define REG_RTP ctx.DX[4].U1 | ||
38 | |||
39 | struct rt_sigframe { | ||
40 | struct siginfo info; | ||
41 | struct ucontext uc; | ||
42 | unsigned long retcode[2]; | ||
43 | }; | ||
44 | |||
45 | static int restore_sigcontext(struct pt_regs *regs, | ||
46 | struct sigcontext __user *sc) | ||
47 | { | ||
48 | int err; | ||
49 | |||
50 | /* Always make any pending restarted system calls return -EINTR */ | ||
51 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
52 | |||
53 | err = metag_gp_regs_copyin(regs, 0, sizeof(struct user_gp_regs), NULL, | ||
54 | &sc->regs); | ||
55 | if (!err) | ||
56 | err = metag_cb_regs_copyin(regs, 0, | ||
57 | sizeof(struct user_cb_regs), NULL, | ||
58 | &sc->cb); | ||
59 | if (!err) | ||
60 | err = metag_rp_state_copyin(regs, 0, | ||
61 | sizeof(struct user_rp_state), NULL, | ||
62 | &sc->rp); | ||
63 | |||
64 | /* This is a user-mode context. */ | ||
65 | regs->REG_FLAGS |= TBICTX_PRIV_BIT; | ||
66 | |||
67 | return err; | ||
68 | } | ||
69 | |||
70 | long sys_rt_sigreturn(void) | ||
71 | { | ||
72 | /* NOTE - Meta stack goes UPWARDS - so we wind the stack back */ | ||
73 | struct pt_regs *regs = current_pt_regs(); | ||
74 | struct rt_sigframe __user *frame; | ||
75 | sigset_t set; | ||
76 | |||
77 | frame = (__force struct rt_sigframe __user *)(regs->REG_SP - | ||
78 | sizeof(*frame)); | ||
79 | |||
80 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
81 | goto badframe; | ||
82 | |||
83 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | ||
84 | goto badframe; | ||
85 | |||
86 | set_current_blocked(&set); | ||
87 | |||
88 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) | ||
89 | goto badframe; | ||
90 | |||
91 | if (restore_altstack(&frame->uc.uc_stack)) | ||
92 | goto badframe; | ||
93 | |||
94 | return regs->REG_RETVAL; | ||
95 | |||
96 | badframe: | ||
97 | force_sig(SIGSEGV, current); | ||
98 | |||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | ||
103 | unsigned long mask) | ||
104 | { | ||
105 | int err; | ||
106 | |||
107 | err = metag_gp_regs_copyout(regs, 0, sizeof(struct user_gp_regs), NULL, | ||
108 | &sc->regs); | ||
109 | |||
110 | if (!err) | ||
111 | err = metag_cb_regs_copyout(regs, 0, | ||
112 | sizeof(struct user_cb_regs), NULL, | ||
113 | &sc->cb); | ||
114 | if (!err) | ||
115 | err = metag_rp_state_copyout(regs, 0, | ||
116 | sizeof(struct user_rp_state), NULL, | ||
117 | &sc->rp); | ||
118 | |||
119 | /* OK, clear that cbuf flag in the old context, or our stored | ||
120 | * catch buffer will be restored when we go to call the signal | ||
121 | * handler. Also clear out the CBRP RA/RD pipe bit in case | ||
122 | * that is pending as well! | ||
123 | * Note that as we have already stored this context, these | ||
124 | * flags will get restored on sigreturn to their original | ||
125 | * state. | ||
126 | */ | ||
127 | regs->REG_FLAGS &= ~(TBICTX_XCBF_BIT | TBICTX_CBUF_BIT | | ||
128 | TBICTX_CBRP_BIT); | ||
129 | |||
130 | /* Clear out the LSM_STEP bits in case we are in the middle of | ||
131 | * an MSET/MGET. | ||
132 | */ | ||
133 | regs->ctx.Flags &= ~TXSTATUS_LSM_STEP_BITS; | ||
134 | |||
135 | err |= __put_user(mask, &sc->oldmask); | ||
136 | |||
137 | return err; | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * Determine which stack to use.. | ||
142 | */ | ||
143 | static void __user *get_sigframe(struct k_sigaction *ka, unsigned long sp, | ||
144 | size_t frame_size) | ||
145 | { | ||
146 | /* Meta stacks grow upwards */ | ||
147 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0)) | ||
148 | sp = current->sas_ss_sp; | ||
149 | |||
150 | sp = (sp + 7) & ~7; /* 8-byte align stack */ | ||
151 | |||
152 | return (void __user *)sp; | ||
153 | } | ||
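
Because the stack grows upwards, the frame is placed at the current SP rounded
up: (sp + 7) & ~7 bumps any address to the next 8-byte boundary while leaving
an already-aligned one untouched. A quick check:

    #include <stdio.h>

    int main(void)
    {
        unsigned long sp;

        /* 0x1001..0x1007 all round up to 0x1008; 0x1008 stays put */
        for (sp = 0x1001; sp <= 0x1008; sp++)
            printf("%#lx -> %#lx\n", sp, (sp + 7) & ~7UL);
        return 0;
    }
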
154 | |||
155 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
156 | sigset_t *set, struct pt_regs *regs) | ||
157 | { | ||
158 | struct rt_sigframe __user *frame; | ||
159 | int err = -EFAULT; | ||
160 | unsigned long code; | ||
161 | |||
162 | frame = get_sigframe(ka, regs->REG_SP, sizeof(*frame)); | ||
163 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
164 | goto out; | ||
165 | |||
166 | err = copy_siginfo_to_user(&frame->info, info); | ||
167 | |||
168 | /* Create the ucontext. */ | ||
169 | err |= __put_user(0, &frame->uc.uc_flags); | ||
170 | err |= __put_user(0, (unsigned long __user *)&frame->uc.uc_link); | ||
171 | err |= __save_altstack(&frame->uc.uc_stack, regs->REG_SP); | ||
172 | err |= setup_sigcontext(&frame->uc.uc_mcontext, | ||
173 | regs, set->sig[0]); | ||
174 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
175 | |||
176 | if (err) | ||
177 | goto out; | ||
178 | |||
179 | /* Set up to return from userspace. */ | ||
180 | |||
181 | /* MOV D1Re0 (D1.0), #__NR_rt_sigreturn */ | ||
182 | code = 0x03000004 | (__NR_rt_sigreturn << 3); | ||
183 | err |= __put_user(code, (unsigned long __user *)(&frame->retcode[0])); | ||
184 | |||
185 | /* SWITCH #__METAG_SW_SYS */ | ||
186 | code = __METAG_SW_ENCODING(SYS); | ||
187 | err |= __put_user(code, (unsigned long __user *)(&frame->retcode[1])); | ||
188 | |||
189 | if (err) | ||
190 | goto out; | ||
191 | |||
192 | /* Set up registers for signal handler */ | ||
193 | regs->REG_RTP = (unsigned long) frame->retcode; | ||
194 | regs->REG_SP = (unsigned long) frame + sizeof(*frame); | ||
195 | regs->REG_ARG1 = sig; | ||
196 | regs->REG_ARG2 = (unsigned long) &frame->info; | ||
197 | regs->REG_ARG3 = (unsigned long) &frame->uc; | ||
198 | regs->REG_PC = (unsigned long) ka->sa.sa_handler; | ||
199 | |||
200 | pr_debug("SIG deliver (%s:%d): sp=%p pc=%08x pr=%08x\n", | ||
201 | current->comm, current->pid, frame, regs->REG_PC, | ||
202 | regs->REG_RTP); | ||
203 | |||
204 | /* Now pass size of 'new code' into sigtramp so we can do a more | ||
205 | * effective cache flush - directed rather than 'full flush'. | ||
206 | */ | ||
207 | flush_cache_sigtramp(regs->REG_RTP, sizeof(frame->retcode)); | ||
208 | out: | ||
209 | if (err) { | ||
210 | force_sigsegv(sig, current); | ||
211 | return -EFAULT; | ||
212 | } | ||
213 | return 0; | ||
214 | } | ||
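
The trampoline written above is two 32-bit words: the syscall number OR-ed
into a MOV-immediate encoding, then the SWITCH word that __METAG_SW_ENCODING
produces. A worked example of the first word, using 173 purely as a
hypothetical value for __NR_rt_sigreturn:

    #include <stdio.h>

    int main(void)
    {
        unsigned long nr = 173;    /* hypothetical __NR_rt_sigreturn */
        unsigned long mov = 0x03000004 | (nr << 3);

        /* 173 << 3 = 0x568, so this prints 0x0300056c */
        printf("MOV D1Re0,#nr word: %#010lx\n", mov);
        return 0;
    }
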
215 | |||
216 | static void handle_signal(unsigned long sig, siginfo_t *info, | ||
217 | struct k_sigaction *ka, struct pt_regs *regs) | ||
218 | { | ||
219 | sigset_t *oldset = sigmask_to_save(); | ||
220 | |||
221 | /* Set up the stack frame */ | ||
222 | if (setup_rt_frame(sig, ka, info, oldset, regs)) | ||
223 | return; | ||
224 | |||
225 | signal_delivered(sig, info, ka, regs, test_thread_flag(TIF_SINGLESTEP)); | ||
226 | } | ||
227 | |||
228 | /* | ||
229 | * Notes for Meta. | ||
230 | * We have moved from the old 2.4.9 SH way of using syscall_nr (in the stored | ||
231 | * context) to passing in the syscall flag on the stack. | ||
232 | * This is because having syscall_nr in our context does not fit with TBX, and | ||
233 | * corrupted the stack. | ||
234 | */ | ||
235 | static int do_signal(struct pt_regs *regs, int syscall) | ||
236 | { | ||
237 | unsigned int retval = 0, continue_addr = 0, restart_addr = 0; | ||
238 | struct k_sigaction ka; | ||
239 | siginfo_t info; | ||
240 | int signr; | ||
241 | int restart = 0; | ||
242 | |||
243 | /* | ||
244 | * By the end of rt_sigreturn the context describes the point that the | ||
245 | * signal was taken (which may happen to be just before a syscall if | ||
246 | * it's already been restarted). This should *never* be mistaken for a | ||
247 | * system call in need of restarting. | ||
248 | */ | ||
249 | if (syscall == __NR_rt_sigreturn) | ||
250 | syscall = -1; | ||
251 | |||
252 | /* Did we come from a system call? */ | ||
253 | if (syscall >= 0) { | ||
254 | continue_addr = regs->REG_PC; | ||
255 | restart_addr = continue_addr - 4; | ||
256 | retval = regs->REG_RETVAL; | ||
257 | |||
258 | /* | ||
259 | * Prepare for system call restart. We do this here so that a | ||
260 | * debugger will see the already changed PC. | ||
261 | */ | ||
262 | switch (retval) { | ||
263 | case -ERESTART_RESTARTBLOCK: | ||
264 | restart = -2; /* fall through */ | ||
265 | case -ERESTARTNOHAND: | ||
266 | case -ERESTARTSYS: | ||
267 | case -ERESTARTNOINTR: | ||
268 | ++restart; | ||
269 | regs->REG_PC = restart_addr; | ||
270 | break; | ||
271 | } | ||
272 | } | ||
273 | |||
274 | /* | ||
275 | * Get the signal to deliver. When running under ptrace, at this point | ||
276 | * the debugger may change all our registers ... | ||
277 | */ | ||
278 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
279 | /* | ||
280 | * Depending on the signal settings we may need to revert the decision | ||
281 | * to restart the system call. But skip this if a debugger has chosen to | ||
282 | * restart at a different PC. | ||
283 | */ | ||
284 | if (regs->REG_PC != restart_addr) | ||
285 | restart = 0; | ||
286 | if (signr > 0) { | ||
287 | if (unlikely(restart)) { | ||
288 | if (retval == -ERESTARTNOHAND | ||
289 | || retval == -ERESTART_RESTARTBLOCK | ||
290 | || (retval == -ERESTARTSYS | ||
291 | && !(ka.sa.sa_flags & SA_RESTART))) { | ||
292 | regs->REG_RETVAL = -EINTR; | ||
293 | regs->REG_PC = continue_addr; | ||
294 | } | ||
295 | } | ||
296 | |||
297 | /* Whee! Actually deliver the signal. */ | ||
298 | handle_signal(signr, &info, &ka, regs); | ||
299 | return 0; | ||
300 | } | ||
301 | |||
302 | /* Handlerless -ERESTART_RESTARTBLOCK re-enters via restart_syscall */ | ||
303 | if (unlikely(restart < 0)) | ||
304 | regs->REG_SYSCALL = __NR_restart_syscall; | ||
305 | |||
306 | /* | ||
307 | * If there's no signal to deliver, we just put the saved sigmask back. | ||
308 | */ | ||
309 | restore_saved_sigmask(); | ||
310 | |||
311 | return restart; | ||
312 | } | ||
313 | |||
314 | int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, | ||
315 | int syscall) | ||
316 | { | ||
317 | do { | ||
318 | if (likely(thread_flags & _TIF_NEED_RESCHED)) { | ||
319 | schedule(); | ||
320 | } else { | ||
321 | if (unlikely(!user_mode(regs))) | ||
322 | return 0; | ||
323 | local_irq_enable(); | ||
324 | if (thread_flags & _TIF_SIGPENDING) { | ||
325 | int restart = do_signal(regs, syscall); | ||
326 | if (unlikely(restart)) { | ||
327 | /* | ||
328 | * Restart without handlers. | ||
329 | * Deal with it without leaving | ||
330 | * the kernel space. | ||
331 | */ | ||
332 | return restart; | ||
333 | } | ||
334 | syscall = -1; | ||
335 | } else { | ||
336 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
337 | tracehook_notify_resume(regs); | ||
338 | } | ||
339 | } | ||
340 | local_irq_disable(); | ||
341 | thread_flags = current_thread_info()->flags; | ||
342 | } while (thread_flags & _TIF_WORK_MASK); | ||
343 | return 0; | ||
344 | } | ||
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c new file mode 100644 index 000000000000..4b6d1f14df32 --- /dev/null +++ b/arch/metag/kernel/smp.c | |||
@@ -0,0 +1,575 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009,2010,2011 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * Copyright (C) 2002 ARM Limited, All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/atomic.h> | ||
11 | #include <linux/delay.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/cache.h> | ||
17 | #include <linux/profile.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/cpu.h> | ||
22 | #include <linux/smp.h> | ||
23 | #include <linux/seq_file.h> | ||
24 | #include <linux/irq.h> | ||
25 | #include <linux/bootmem.h> | ||
26 | |||
27 | #include <asm/cacheflush.h> | ||
28 | #include <asm/cachepart.h> | ||
29 | #include <asm/core_reg.h> | ||
30 | #include <asm/cpu.h> | ||
31 | #include <asm/mmu_context.h> | ||
32 | #include <asm/pgtable.h> | ||
33 | #include <asm/pgalloc.h> | ||
34 | #include <asm/processor.h> | ||
35 | #include <asm/setup.h> | ||
36 | #include <asm/tlbflush.h> | ||
37 | #include <asm/hwthread.h> | ||
38 | #include <asm/traps.h> | ||
39 | |||
40 | DECLARE_PER_CPU(PTBI, pTBI); | ||
41 | |||
42 | void *secondary_data_stack; | ||
43 | |||
44 | /* | ||
45 | * structures for inter-processor calls | ||
46 | * - A collection of single bit ipi messages. | ||
47 | */ | ||
48 | struct ipi_data { | ||
49 | spinlock_t lock; | ||
50 | unsigned long ipi_count; | ||
51 | unsigned long bits; | ||
52 | }; | ||
53 | |||
54 | static DEFINE_PER_CPU(struct ipi_data, ipi_data) = { | ||
55 | .lock = __SPIN_LOCK_UNLOCKED(ipi_data.lock), | ||
56 | }; | ||
57 | |||
58 | static DEFINE_SPINLOCK(boot_lock); | ||
59 | |||
60 | /* | ||
61 | * "thread" is assumed to be a valid Meta hardware thread ID. | ||
62 | */ | ||
63 | int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle) | ||
64 | { | ||
65 | u32 val; | ||
66 | |||
67 | /* | ||
68 | * set synchronisation state between this boot processor | ||
69 | * and the secondary one | ||
70 | */ | ||
71 | spin_lock(&boot_lock); | ||
72 | |||
73 | core_reg_write(TXUPC_ID, 0, thread, (unsigned int)secondary_startup); | ||
74 | core_reg_write(TXUPC_ID, 1, thread, 0); | ||
75 | |||
76 | /* | ||
77 | * Give the thread privilege (PSTAT) and clear potentially problematic | ||
78 | * bits in the process (namely ISTAT, CBMarker, CBMarkerI, LSM_STEP). | ||
79 | */ | ||
80 | core_reg_write(TXUCT_ID, TXSTATUS_REGNUM, thread, TXSTATUS_PSTAT_BIT); | ||
81 | |||
82 | /* Clear the minim enable bit. */ | ||
83 | val = core_reg_read(TXUCT_ID, TXPRIVEXT_REGNUM, thread); | ||
84 | core_reg_write(TXUCT_ID, TXPRIVEXT_REGNUM, thread, val & ~0x80); | ||
85 | |||
86 | /* | ||
87 | * set the ThreadEnable bit (0x1) in the TXENABLE register | ||
88 | * for the specified thread - off it goes! | ||
89 | */ | ||
90 | val = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, thread); | ||
91 | core_reg_write(TXUCT_ID, TXENABLE_REGNUM, thread, val | 0x1); | ||
92 | |||
93 | /* | ||
94 | * now that the secondary core is starting up, let it run its | ||
95 | * calibrations, then wait for it to finish | ||
96 | */ | ||
97 | spin_unlock(&boot_lock); | ||
98 | |||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) | ||
103 | { | ||
104 | unsigned int thread = cpu_2_hwthread_id[cpu]; | ||
105 | int ret; | ||
106 | |||
107 | load_pgd(swapper_pg_dir, thread); | ||
108 | |||
109 | flush_tlb_all(); | ||
110 | |||
111 | /* | ||
112 | * Tell the secondary CPU where to find its idle thread's stack. | ||
113 | */ | ||
114 | secondary_data_stack = task_stack_page(idle); | ||
115 | |||
116 | wmb(); | ||
117 | |||
118 | /* | ||
119 | * Now bring the CPU into our world. | ||
120 | */ | ||
121 | ret = boot_secondary(thread, idle); | ||
122 | if (ret == 0) { | ||
123 | unsigned long timeout; | ||
124 | |||
125 | /* | ||
126 | * CPU was successfully started, wait for it | ||
127 | * to come online or time out. | ||
128 | */ | ||
129 | timeout = jiffies + HZ; | ||
130 | while (time_before(jiffies, timeout)) { | ||
131 | if (cpu_online(cpu)) | ||
132 | break; | ||
133 | |||
134 | udelay(10); | ||
135 | barrier(); | ||
136 | } | ||
137 | |||
138 | if (!cpu_online(cpu)) | ||
139 | ret = -EIO; | ||
140 | } | ||
141 | |||
142 | secondary_data_stack = NULL; | ||
143 | |||
144 | if (ret) { | ||
145 | pr_crit("CPU%u: processor failed to boot\n", cpu); | ||
146 | |||
147 | /* | ||
148 | * FIXME: We need to clean up the new idle thread. --rmk | ||
149 | */ | ||
150 | } | ||
151 | |||
152 | return ret; | ||
153 | } | ||
154 | |||
155 | #ifdef CONFIG_HOTPLUG_CPU | ||
156 | static DECLARE_COMPLETION(cpu_killed); | ||
157 | |||
158 | /* | ||
159 | * __cpu_disable runs on the processor to be shutdown. | ||
160 | */ | ||
161 | int __cpuexit __cpu_disable(void) | ||
162 | { | ||
163 | unsigned int cpu = smp_processor_id(); | ||
164 | struct task_struct *p; | ||
165 | |||
166 | /* | ||
167 | * Take this CPU offline. Once we clear this, we can't return, | ||
168 | * and we must not schedule until we're ready to give up the cpu. | ||
169 | */ | ||
170 | set_cpu_online(cpu, false); | ||
171 | |||
172 | /* | ||
173 | * OK - migrate IRQs away from this CPU | ||
174 | */ | ||
175 | migrate_irqs(); | ||
176 | |||
177 | /* | ||
178 | * Flush user cache and TLB mappings, and then remove this CPU | ||
179 | * from the vm mask set of all processes. | ||
180 | */ | ||
181 | flush_cache_all(); | ||
182 | local_flush_tlb_all(); | ||
183 | |||
184 | read_lock(&tasklist_lock); | ||
185 | for_each_process(p) { | ||
186 | if (p->mm) | ||
187 | cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); | ||
188 | } | ||
189 | read_unlock(&tasklist_lock); | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | /* | ||
195 | * called on the thread which is asking for a CPU to be shutdown - | ||
196 | * waits until shutdown has completed, or it is timed out. | ||
197 | */ | ||
198 | void __cpuexit __cpu_die(unsigned int cpu) | ||
199 | { | ||
200 | if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1))) | ||
201 | pr_err("CPU%u: unable to kill\n", cpu); | ||
202 | } | ||
203 | |||
204 | /* | ||
205 | * Called from the idle thread for the CPU which has been shutdown. | ||
206 | * | ||
207 | * Note that we do not return from this function. If this cpu is | ||
208 | * brought online again it will need to run secondary_startup(). | ||
209 | */ | ||
210 | void __cpuexit cpu_die(void) | ||
211 | { | ||
212 | local_irq_disable(); | ||
213 | idle_task_exit(); | ||
214 | |||
215 | complete(&cpu_killed); | ||
216 | |||
217 | asm ("XOR TXENABLE, D0Re0,D0Re0\n"); /* TXENABLE := 0: stop this hw thread */ | ||
218 | } | ||
219 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
220 | |||
221 | /* | ||
222 | * Called by both boot and secondaries to move global data into | ||
223 | * per-processor storage. | ||
224 | */ | ||
225 | void __cpuinit smp_store_cpu_info(unsigned int cpuid) | ||
226 | { | ||
227 | struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid); | ||
228 | |||
229 | cpu_info->loops_per_jiffy = loops_per_jiffy; | ||
230 | } | ||
231 | |||
232 | /* | ||
233 | * This is the secondary CPU boot entry. We're using this CPU's | ||
234 | * idle thread stack and the global page tables. | ||
235 | */ | ||
236 | asmlinkage void secondary_start_kernel(void) | ||
237 | { | ||
238 | struct mm_struct *mm = &init_mm; | ||
239 | unsigned int cpu = smp_processor_id(); | ||
240 | |||
241 | /* | ||
242 | * All kernel threads share the same mm context; grab a | ||
243 | * reference and switch to it. | ||
244 | */ | ||
245 | atomic_inc(&mm->mm_users); | ||
246 | atomic_inc(&mm->mm_count); | ||
247 | current->active_mm = mm; | ||
248 | cpumask_set_cpu(cpu, mm_cpumask(mm)); | ||
249 | enter_lazy_tlb(mm, current); | ||
250 | local_flush_tlb_all(); | ||
251 | |||
252 | /* | ||
253 | * TODO: Some day it might be useful for each Linux CPU to | ||
254 | * have its own TBI structure. That would allow each Linux CPU | ||
255 | * to run different interrupt handlers for the same IRQ | ||
256 | * number. | ||
257 | * | ||
258 | * For now, simply copying the pointer to the boot CPU's TBI | ||
259 | * structure is sufficient because we always want to run the | ||
260 | * same interrupt handler whatever CPU takes the interrupt. | ||
261 | */ | ||
262 | per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT); | ||
263 | |||
264 | if (!per_cpu(pTBI, cpu)) | ||
265 | panic("No TBI found!"); | ||
266 | |||
267 | per_cpu_trap_init(cpu); | ||
268 | |||
269 | preempt_disable(); | ||
270 | |||
271 | setup_priv(); | ||
272 | |||
273 | /* | ||
274 | * Enable local interrupts. | ||
275 | */ | ||
276 | tbi_startup_interrupt(TBID_SIGNUM_TRT); | ||
277 | notify_cpu_starting(cpu); | ||
278 | local_irq_enable(); | ||
279 | |||
280 | pr_info("CPU%u (thread %u): Booted secondary processor\n", | ||
281 | cpu, cpu_2_hwthread_id[cpu]); | ||
282 | |||
283 | calibrate_delay(); | ||
284 | smp_store_cpu_info(cpu); | ||
285 | |||
286 | /* | ||
287 | * OK, now it's safe to let the boot CPU continue | ||
288 | */ | ||
289 | set_cpu_online(cpu, true); | ||
290 | |||
291 | /* | ||
292 | * Check for cache aliasing. | ||
293 | * Preemption is disabled | ||
294 | */ | ||
295 | check_for_cache_aliasing(cpu); | ||
296 | |||
297 | /* | ||
298 | * OK, it's off to the idle thread for us | ||
299 | */ | ||
300 | cpu_idle(); | ||
301 | } | ||
302 | |||
303 | void __init smp_cpus_done(unsigned int max_cpus) | ||
304 | { | ||
305 | int cpu; | ||
306 | unsigned long bogosum = 0; | ||
307 | |||
308 | for_each_online_cpu(cpu) | ||
309 | bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; | ||
310 | |||
311 | pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n", | ||
312 | num_online_cpus(), | ||
313 | bogosum / (500000/HZ), | ||
314 | (bogosum / (5000/HZ)) % 100); | ||
315 | } | ||
316 | |||
317 | void __init smp_prepare_cpus(unsigned int max_cpus) | ||
318 | { | ||
319 | unsigned int cpu = smp_processor_id(); | ||
320 | |||
321 | init_new_context(current, &init_mm); | ||
322 | current_thread_info()->cpu = cpu; | ||
323 | |||
324 | smp_store_cpu_info(cpu); | ||
325 | init_cpu_present(cpu_possible_mask); | ||
326 | } | ||
327 | |||
328 | void __init smp_prepare_boot_cpu(void) | ||
329 | { | ||
330 | unsigned int cpu = smp_processor_id(); | ||
331 | |||
332 | per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT); | ||
333 | |||
334 | if (!per_cpu(pTBI, cpu)) | ||
335 | panic("No TBI found!"); | ||
336 | } | ||
337 | |||
338 | static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg); | ||
339 | |||
340 | static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg) | ||
341 | { | ||
342 | unsigned long flags; | ||
343 | unsigned int cpu; | ||
344 | cpumask_t map; | ||
345 | |||
346 | cpumask_clear(&map); | ||
347 | local_irq_save(flags); | ||
348 | |||
349 | for_each_cpu(cpu, mask) { | ||
350 | struct ipi_data *ipi = &per_cpu(ipi_data, cpu); | ||
351 | |||
352 | spin_lock(&ipi->lock); | ||
353 | |||
354 | /* | ||
355 | * KICK interrupts are queued in hardware so we'll get | ||
356 | * multiple interrupts if we call smp_cross_call() | ||
357 | * multiple times for one msg. The problem is that we | ||
358 | * only have one bit for each message - we can't queue | ||
359 | * them in software. | ||
360 | * | ||
361 | * The first time through ipi_handler() we'll clear | ||
362 | * the msg bit, having done all the work. But when we | ||
363 | * return we'll get _another_ interrupt (and another, | ||
364 | * and another until we've handled all the queued | ||
365 | * KICKs). Running ipi_handler() when there's no work | ||
366 | * to do is bad because that's how kick handler | ||
367 | * chaining detects who the KICK was intended for. | ||
368 | * See arch/metag/kernel/kick.c for more details. | ||
369 | * | ||
370 | * So only add 'cpu' to 'map' if we haven't already | ||
371 | * queued a KICK interrupt for 'msg'. | ||
372 | */ | ||
373 | if (!(ipi->bits & (1 << msg))) { | ||
374 | ipi->bits |= 1 << msg; | ||
375 | cpumask_set_cpu(cpu, &map); | ||
376 | } | ||
377 | |||
378 | spin_unlock(&ipi->lock); | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * Call the platform specific cross-CPU call function. | ||
383 | */ | ||
384 | smp_cross_call(map, msg); | ||
385 | |||
386 | local_irq_restore(flags); | ||
387 | } | ||
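Because each message type has only a single pending bit, the long comment above boils down to a test-and-set: a CPU is added to the cross-call map only when its bit for 'msg' goes from clear to set. A standalone sketch of just that rule (the type and function names are hypothetical, and the real code holds ipi->lock around the update):

    #include <stdio.h>

    struct ipi_state {
        unsigned long bits;      /* one pending bit per message type */
    };

    /* Returns 1 if the caller should raise a KICK for 'msg', 0 if one
     * is already queued in hardware for this CPU. */
    static int queue_ipi(struct ipi_state *ipi, unsigned int msg)
    {
        if (ipi->bits & (1UL << msg))
            return 0;
        ipi->bits |= 1UL << msg;
        return 1;
    }

    int main(void)
    {
        struct ipi_state ipi = { 0 };

        printf("%d\n", queue_ipi(&ipi, 2)); /* 1: raise a KICK */
        printf("%d\n", queue_ipi(&ipi, 2)); /* 0: already pending */
        return 0;
    }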
388 | |||
389 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | ||
390 | { | ||
391 | send_ipi_message(mask, IPI_CALL_FUNC); | ||
392 | } | ||
393 | |||
394 | void arch_send_call_function_single_ipi(int cpu) | ||
395 | { | ||
396 | send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); | ||
397 | } | ||
398 | |||
399 | void show_ipi_list(struct seq_file *p) | ||
400 | { | ||
401 | unsigned int cpu; | ||
402 | |||
403 | seq_puts(p, "IPI:"); | ||
404 | |||
405 | for_each_present_cpu(cpu) | ||
406 | seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count); | ||
407 | |||
408 | seq_putc(p, '\n'); | ||
409 | } | ||
410 | |||
411 | static DEFINE_SPINLOCK(stop_lock); | ||
412 | |||
413 | /* | ||
414 | * Main handler for inter-processor interrupts | ||
415 | * | ||
416 | * For Meta, the ipimask now only identifies a single | ||
417 | * category of IPI (Bit 1 IPIs have been replaced by a | ||
418 | * different mechanism): | ||
419 | * | ||
420 | * Bit 0 - Inter-processor function call | ||
421 | */ | ||
422 | static int do_IPI(struct pt_regs *regs) | ||
423 | { | ||
424 | unsigned int cpu = smp_processor_id(); | ||
425 | struct ipi_data *ipi = &per_cpu(ipi_data, cpu); | ||
426 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
427 | unsigned long msgs, nextmsg; | ||
428 | int handled = 0; | ||
429 | |||
430 | ipi->ipi_count++; | ||
431 | |||
432 | spin_lock(&ipi->lock); | ||
433 | msgs = ipi->bits; | ||
434 | nextmsg = msgs & -msgs; | ||
435 | ipi->bits &= ~nextmsg; | ||
436 | spin_unlock(&ipi->lock); | ||
437 | |||
438 | if (nextmsg) { | ||
439 | handled = 1; | ||
440 | |||
441 | nextmsg = ffz(~nextmsg); | ||
442 | switch (nextmsg) { | ||
443 | case IPI_RESCHEDULE: | ||
444 | scheduler_ipi(); | ||
445 | break; | ||
446 | |||
447 | case IPI_CALL_FUNC: | ||
448 | generic_smp_call_function_interrupt(); | ||
449 | break; | ||
450 | |||
451 | case IPI_CALL_FUNC_SINGLE: | ||
452 | generic_smp_call_function_single_interrupt(); | ||
453 | break; | ||
454 | |||
455 | default: | ||
456 | pr_crit("CPU%u: Unknown IPI message 0x%lx\n", | ||
457 | cpu, nextmsg); | ||
458 | break; | ||
459 | } | ||
460 | } | ||
461 | |||
462 | set_irq_regs(old_regs); | ||
463 | |||
464 | return handled; | ||
465 | } | ||
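The msgs & -msgs idiom above isolates the lowest set bit of the pending mask, and ffz(~x) then converts that single-bit mask into a bit index, so each pass through do_IPI() consumes exactly one queued message. A quick userspace check of the same idiom (using POSIX ffs() in place of the kernel's ffz(~x)):

    #include <stdio.h>
    #include <strings.h>        /* ffs() */

    int main(void)
    {
        unsigned long msgs = 0x14;             /* messages 2 and 4 pending */
        unsigned long nextmsg = msgs & -msgs;  /* 0x4: lowest set bit only */
        int index = ffs((int)nextmsg) - 1;     /* 2: same as ffz(~nextmsg) */

        printf("handle message %d (mask %#lx)\n", index, nextmsg);
        return 0;
    }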
466 | |||
467 | void smp_send_reschedule(int cpu) | ||
468 | { | ||
469 | send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); | ||
470 | } | ||
471 | |||
472 | static void stop_this_cpu(void *data) | ||
473 | { | ||
474 | unsigned int cpu = smp_processor_id(); | ||
475 | |||
476 | if (system_state == SYSTEM_BOOTING || | ||
477 | system_state == SYSTEM_RUNNING) { | ||
478 | spin_lock(&stop_lock); | ||
479 | pr_crit("CPU%u: stopping\n", cpu); | ||
480 | dump_stack(); | ||
481 | spin_unlock(&stop_lock); | ||
482 | } | ||
483 | |||
484 | set_cpu_online(cpu, false); | ||
485 | |||
486 | local_irq_disable(); | ||
487 | |||
488 | hard_processor_halt(HALT_OK); | ||
489 | } | ||
490 | |||
491 | void smp_send_stop(void) | ||
492 | { | ||
493 | smp_call_function(stop_this_cpu, NULL, 0); | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * not supported here | ||
498 | */ | ||
499 | int setup_profiling_timer(unsigned int multiplier) | ||
500 | { | ||
501 | return -EINVAL; | ||
502 | } | ||
503 | |||
504 | /* | ||
505 | * We use KICKs for inter-processor interrupts. | ||
506 | * | ||
507 | * For every CPU in "callmap" the IPI data must already have been | ||
508 | * stored in that CPU's "ipi_data" member prior to calling this | ||
509 | * function. | ||
510 | */ | ||
511 | static void kick_raise_softirq(cpumask_t callmap, unsigned int irq) | ||
512 | { | ||
513 | int cpu; | ||
514 | |||
515 | for_each_cpu(cpu, &callmap) { | ||
516 | unsigned int thread; | ||
517 | |||
518 | thread = cpu_2_hwthread_id[cpu]; | ||
519 | |||
520 | BUG_ON(thread == BAD_HWTHREAD_ID); | ||
521 | |||
522 | metag_out32(1, T0KICKI + (thread * TnXKICK_STRIDE)); | ||
523 | } | ||
524 | } | ||
525 | |||
526 | static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers, | ||
527 | int Inst, PTBI pTBI, int *handled) | ||
528 | { | ||
529 | *handled = do_IPI((struct pt_regs *)State.Sig.pCtx); | ||
530 | |||
531 | return State; | ||
532 | } | ||
533 | |||
534 | static struct kick_irq_handler ipi_irq = { | ||
535 | .func = ipi_handler, | ||
536 | }; | ||
537 | |||
538 | static void smp_cross_call(cpumask_t callmap, enum ipi_msg_type msg) | ||
539 | { | ||
540 | kick_raise_softirq(callmap, 1); | ||
541 | } | ||
542 | |||
543 | static inline unsigned int get_core_count(void) | ||
544 | { | ||
545 | int i; | ||
546 | unsigned int ret = 0; | ||
547 | |||
548 | for (i = 0; i < CONFIG_NR_CPUS; i++) { | ||
549 | if (core_reg_read(TXUCT_ID, TXENABLE_REGNUM, i)) | ||
550 | ret++; | ||
551 | } | ||
552 | |||
553 | return ret; | ||
554 | } | ||
555 | |||
556 | /* | ||
557 | * Initialise the CPU possible map early - this describes the CPUs | ||
558 | * which may be present or become present in the system. | ||
559 | */ | ||
560 | void __init smp_init_cpus(void) | ||
561 | { | ||
562 | unsigned int i, ncores = get_core_count(); | ||
563 | |||
564 | /* If no hwthread_map early param was set, use the default mapping */ | ||
565 | for (i = 0; i < NR_CPUS; i++) | ||
566 | if (cpu_2_hwthread_id[i] == BAD_HWTHREAD_ID) { | ||
567 | cpu_2_hwthread_id[i] = i; | ||
568 | hwthread_id_2_cpu[i] = i; | ||
569 | } | ||
570 | |||
571 | for (i = 0; i < ncores; i++) | ||
572 | set_cpu_possible(i, true); | ||
573 | |||
574 | kick_register_func(&ipi_irq); | ||
575 | } | ||
diff --git a/arch/metag/kernel/stacktrace.c b/arch/metag/kernel/stacktrace.c new file mode 100644 index 000000000000..5510361d5bea --- /dev/null +++ b/arch/metag/kernel/stacktrace.c | |||
@@ -0,0 +1,187 @@ | |||
1 | #include <linux/export.h> | ||
2 | #include <linux/sched.h> | ||
3 | #include <linux/stacktrace.h> | ||
4 | |||
5 | #include <asm/stacktrace.h> | ||
6 | |||
7 | #if defined(CONFIG_FRAME_POINTER) | ||
8 | |||
9 | #ifdef CONFIG_KALLSYMS | ||
10 | #include <linux/kallsyms.h> | ||
11 | #include <linux/module.h> | ||
12 | |||
13 | static unsigned long tbi_boing_addr; | ||
14 | static unsigned long tbi_boing_size; | ||
15 | |||
16 | static void tbi_boing_init(void) | ||
17 | { | ||
18 | /* We need to know where TBIBoingVec is and its size */ | ||
19 | unsigned long size; | ||
20 | unsigned long offset; | ||
21 | char modname[MODULE_NAME_LEN]; | ||
22 | char name[KSYM_NAME_LEN]; | ||
23 | tbi_boing_addr = kallsyms_lookup_name("___TBIBoingVec"); | ||
24 | if (!tbi_boing_addr) | ||
25 | tbi_boing_addr = 1; | ||
26 | else if (!lookup_symbol_attrs(tbi_boing_addr, &size, | ||
27 | &offset, modname, name)) | ||
28 | tbi_boing_size = size; | ||
29 | } | ||
30 | #endif | ||
31 | |||
32 | #define ALIGN_DOWN(addr, size) ((addr)&(~((size)-1))) | ||
33 | |||
34 | /* | ||
35 | * Unwind the current stack frame and store the new register values in the | ||
36 | * structure passed as argument. Unwinding is equivalent to a function return, | ||
37 | * hence the new PC value, rather than the LR, should be used for the backtrace. | ||
38 | */ | ||
39 | int notrace unwind_frame(struct stackframe *frame) | ||
40 | { | ||
41 | struct metag_frame *fp = (struct metag_frame *)frame->fp; | ||
42 | unsigned long lr; | ||
43 | unsigned long fpnew; | ||
44 | |||
45 | if (frame->fp & 0x7) | ||
46 | return -EINVAL; | ||
47 | |||
48 | fpnew = fp->fp; | ||
49 | lr = fp->lr - 4; | ||
50 | |||
51 | #ifdef CONFIG_KALLSYMS | ||
52 | /* If we've reached TBIBoingVec then we're at an interrupt | ||
53 | * entry point or a syscall entry point. The frame pointer | ||
54 | * points to a pt_regs which can be used to continue tracing on | ||
55 | * the other side of the boing. | ||
56 | */ | ||
57 | if (!tbi_boing_addr) | ||
58 | tbi_boing_init(); | ||
59 | if (tbi_boing_size && lr >= tbi_boing_addr && | ||
60 | lr < tbi_boing_addr + tbi_boing_size) { | ||
61 | struct pt_regs *regs = (struct pt_regs *)fpnew; | ||
62 | if (user_mode(regs)) | ||
63 | return -EINVAL; | ||
64 | fpnew = regs->ctx.AX[1].U0; | ||
65 | lr = regs->ctx.DX[4].U1; | ||
66 | } | ||
67 | #endif | ||
68 | |||
69 | /* stack grows up, so frame pointers must decrease */ | ||
70 | if (fpnew < (ALIGN_DOWN((unsigned long)fp, THREAD_SIZE) + | ||
71 | sizeof(struct thread_info)) || fpnew >= (unsigned long)fp) | ||
72 | return -EINVAL; | ||
73 | |||
74 | /* restore the registers from the stack frame */ | ||
75 | frame->fp = fpnew; | ||
76 | frame->pc = lr; | ||
77 | |||
78 | return 0; | ||
79 | } | ||
80 | #else | ||
81 | int notrace unwind_frame(struct stackframe *frame) | ||
82 | { | ||
83 | struct metag_frame *sp = (struct metag_frame *)frame->sp; | ||
84 | |||
85 | if (frame->sp & 0x7) | ||
86 | return -EINVAL; | ||
87 | |||
88 | while (!kstack_end(sp)) { | ||
89 | unsigned long addr = sp->lr - 4; | ||
90 | sp--; | ||
91 | |||
92 | if (__kernel_text_address(addr)) { | ||
93 | frame->sp = (unsigned long)sp; | ||
94 | frame->pc = addr; | ||
95 | return 0; | ||
96 | } | ||
97 | } | ||
98 | return -EINVAL; | ||
99 | } | ||
100 | #endif | ||
101 | |||
102 | void notrace walk_stackframe(struct stackframe *frame, | ||
103 | int (*fn)(struct stackframe *, void *), void *data) | ||
104 | { | ||
105 | while (1) { | ||
106 | int ret; | ||
107 | |||
108 | if (fn(frame, data)) | ||
109 | break; | ||
110 | ret = unwind_frame(frame); | ||
111 | if (ret < 0) | ||
112 | break; | ||
113 | } | ||
114 | } | ||
115 | EXPORT_SYMBOL(walk_stackframe); | ||
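walk_stackframe() simply drives unwind_frame() in a loop, handing each frame to the callback and stopping when the callback returns nonzero or the unwind fails. A hedged sketch of a caller, assuming CONFIG_FRAME_POINTER (the dumper and its names are hypothetical; a real user would seed the frame the way save_stack_trace_tsk() does below, and the sp/lr fields matter for the non-frame-pointer unwinder):

    /* Hypothetical dumper built on the API above: print every kernel
     * PC reachable from the current frame. */
    static int print_frame(struct stackframe *frame, void *data)
    {
        int *count = data;

        pr_info(" #%d: pc %08lx\n", (*count)++, frame->pc);
        return 0;               /* 0 = keep unwinding */
    }

    static void dump_current_stack(void)
    {
        struct stackframe frame;
        int count = 0;

        frame.fp = (unsigned long)__builtin_frame_address(0);
        frame.sp = 0;           /* unused by the frame-pointer unwinder */
        frame.lr = 0;
        frame.pc = (unsigned long)dump_current_stack;

        walk_stackframe(&frame, print_frame, &count);
    }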
116 | |||
117 | #ifdef CONFIG_STACKTRACE | ||
118 | struct stack_trace_data { | ||
119 | struct stack_trace *trace; | ||
120 | unsigned int no_sched_functions; | ||
121 | unsigned int skip; | ||
122 | }; | ||
123 | |||
124 | static int save_trace(struct stackframe *frame, void *d) | ||
125 | { | ||
126 | struct stack_trace_data *data = d; | ||
127 | struct stack_trace *trace = data->trace; | ||
128 | unsigned long addr = frame->pc; | ||
129 | |||
130 | if (data->no_sched_functions && in_sched_functions(addr)) | ||
131 | return 0; | ||
132 | if (data->skip) { | ||
133 | data->skip--; | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | trace->entries[trace->nr_entries++] = addr; | ||
138 | |||
139 | return trace->nr_entries >= trace->max_entries; | ||
140 | } | ||
141 | |||
142 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | ||
143 | { | ||
144 | struct stack_trace_data data; | ||
145 | struct stackframe frame; | ||
146 | |||
147 | data.trace = trace; | ||
148 | data.skip = trace->skip; | ||
149 | |||
150 | if (tsk != current) { | ||
151 | #ifdef CONFIG_SMP | ||
152 | /* | ||
153 | * What guarantees do we have here that 'tsk' is not | ||
154 | * running on another CPU? For now, ignore it as we | ||
155 | * can't guarantee we won't explode. | ||
156 | */ | ||
157 | if (trace->nr_entries < trace->max_entries) | ||
158 | trace->entries[trace->nr_entries++] = ULONG_MAX; | ||
159 | return; | ||
160 | #else | ||
161 | data.no_sched_functions = 1; | ||
162 | frame.fp = thread_saved_fp(tsk); | ||
163 | frame.sp = thread_saved_sp(tsk); | ||
164 | frame.lr = 0; /* recovered from the stack */ | ||
165 | frame.pc = thread_saved_pc(tsk); | ||
166 | #endif | ||
167 | } else { | ||
168 | register unsigned long current_sp asm ("A0StP"); | ||
169 | |||
170 | data.no_sched_functions = 0; | ||
171 | frame.fp = (unsigned long)__builtin_frame_address(0); | ||
172 | frame.sp = current_sp; | ||
173 | frame.lr = (unsigned long)__builtin_return_address(0); | ||
174 | frame.pc = (unsigned long)save_stack_trace_tsk; | ||
175 | } | ||
176 | |||
177 | walk_stackframe(&frame, save_trace, &data); | ||
178 | if (trace->nr_entries < trace->max_entries) | ||
179 | trace->entries[trace->nr_entries++] = ULONG_MAX; | ||
180 | } | ||
181 | |||
182 | void save_stack_trace(struct stack_trace *trace) | ||
183 | { | ||
184 | save_stack_trace_tsk(current, trace); | ||
185 | } | ||
186 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
187 | #endif | ||
diff --git a/arch/metag/kernel/sys_metag.c b/arch/metag/kernel/sys_metag.c new file mode 100644 index 000000000000..efe833a452f7 --- /dev/null +++ b/arch/metag/kernel/sys_metag.c | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * This file contains various random system calls that | ||
3 | * have a non-standard calling sequence on the Linux/Meta | ||
4 | * platform. | ||
5 | */ | ||
6 | |||
7 | #include <linux/errno.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/syscalls.h> | ||
11 | #include <linux/mman.h> | ||
12 | #include <linux/file.h> | ||
13 | #include <linux/fs.h> | ||
14 | #include <linux/uaccess.h> | ||
15 | #include <linux/unistd.h> | ||
16 | #include <asm/cacheflush.h> | ||
17 | #include <asm/core_reg.h> | ||
18 | #include <asm/global_lock.h> | ||
19 | #include <asm/switch.h> | ||
20 | #include <asm/syscall.h> | ||
21 | #include <asm/syscalls.h> | ||
22 | #include <asm/user_gateway.h> | ||
23 | |||
24 | #define merge_64(hi, lo) ((((unsigned long long)(hi)) << 32) + \ | ||
25 | ((lo) & 0xffffffffUL)) | ||
26 | |||
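Several syscalls below take 64-bit arguments split across two 32-bit registers, and merge_64() glues the halves back together. A standalone check of the macro:

    #include <stdio.h>

    #define merge_64(hi, lo) ((((unsigned long long)(hi)) << 32) + \
                              ((lo) & 0xffffffffUL))

    int main(void)
    {
        /* a 36-bit file offset split across two 32-bit registers */
        unsigned long hi = 0x1, lo = 0x23456789;

        printf("%#llx\n", merge_64(hi, lo));   /* prints 0x123456789 */
        return 0;
    }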
27 | int metag_mmap_check(unsigned long addr, unsigned long len, | ||
28 | unsigned long flags) | ||
29 | { | ||
30 | /* We can't have people trying to write to the bottom of the | ||
31 | * memory map, there are mysterious unspecified things there that | ||
32 | * we don't want people trampling on. | ||
33 | */ | ||
34 | if ((flags & MAP_FIXED) && (addr < TASK_UNMAPPED_BASE)) | ||
35 | return -EINVAL; | ||
36 | |||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
41 | unsigned long prot, unsigned long flags, | ||
42 | unsigned long fd, unsigned long pgoff) | ||
43 | { | ||
44 | /* The shift for mmap2 is constant, regardless of PAGE_SIZE setting. */ | ||
45 | if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1)) | ||
46 | return -EINVAL; | ||
47 | |||
48 | pgoff >>= PAGE_SHIFT - 12; | ||
49 | |||
50 | return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); | ||
51 | } | ||
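mmap2 offsets are always counted in 4096-byte units regardless of the kernel's page size, so the code above rejects offsets that are not page-aligned and rescales the rest into real pages. A worked sketch with a hypothetical 16KiB page size, where the shift actually does something:

    #include <stdio.h>

    #define PAGE_SHIFT 14      /* hypothetical 16KiB pages */

    int main(void)
    {
        unsigned long pgoff = 8;   /* 8 * 4096 bytes = 32KiB into the file */

        /* low (PAGE_SHIFT - 12) bits must be clear: 32KiB is 16KiB-aligned */
        if (pgoff & ((1UL << (PAGE_SHIFT - 12)) - 1))
            return 1;

        pgoff >>= PAGE_SHIFT - 12; /* rescale to 16KiB pages: 2 */
        printf("pgoff in pages: %lu\n", pgoff);
        return 0;
    }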
52 | |||
53 | asmlinkage int sys_metag_setglobalbit(char __user *addr, int mask) | ||
54 | { | ||
55 | char tmp; | ||
56 | int ret = 0; | ||
57 | unsigned int flags; | ||
58 | |||
59 | if (!((__force unsigned int)addr >= LINCORE_BASE)) | ||
60 | return -EFAULT; | ||
61 | |||
62 | __global_lock2(flags); | ||
63 | |||
64 | metag_data_cache_flush((__force void *)addr, sizeof(mask)); | ||
65 | |||
66 | ret = __get_user(tmp, addr); | ||
67 | if (ret) | ||
68 | goto out; | ||
69 | tmp |= mask; | ||
70 | ret = __put_user(tmp, addr); | ||
71 | |||
72 | metag_data_cache_flush((__force void *)addr, sizeof(mask)); | ||
73 | |||
74 | out: | ||
75 | __global_unlock2(flags); | ||
76 | |||
77 | return ret; | ||
78 | } | ||
79 | |||
80 | #define TXDEFR_FPU_MASK ((0x1f << 16) | 0x1f) | ||
81 | |||
82 | asmlinkage void sys_metag_set_fpu_flags(unsigned int flags) | ||
83 | { | ||
84 | unsigned int temp; | ||
85 | |||
86 | flags &= TXDEFR_FPU_MASK; | ||
87 | |||
88 | temp = __core_reg_get(TXDEFR); | ||
89 | temp &= ~TXDEFR_FPU_MASK; | ||
90 | temp |= flags; | ||
91 | __core_reg_set(TXDEFR, temp); | ||
92 | } | ||
93 | |||
94 | asmlinkage int sys_metag_set_tls(void __user *ptr) | ||
95 | { | ||
96 | current->thread.tls_ptr = ptr; | ||
97 | set_gateway_tls(ptr); | ||
98 | |||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | asmlinkage void *sys_metag_get_tls(void) | ||
103 | { | ||
104 | return (__force void *)current->thread.tls_ptr; | ||
105 | } | ||
106 | |||
107 | asmlinkage long sys_truncate64_metag(const char __user *path, unsigned long lo, | ||
108 | unsigned long hi) | ||
109 | { | ||
110 | return sys_truncate64(path, merge_64(hi, lo)); | ||
111 | } | ||
112 | |||
113 | asmlinkage long sys_ftruncate64_metag(unsigned int fd, unsigned long lo, | ||
114 | unsigned long hi) | ||
115 | { | ||
116 | return sys_ftruncate64(fd, merge_64(hi, lo)); | ||
117 | } | ||
118 | |||
119 | asmlinkage long sys_fadvise64_64_metag(int fd, unsigned long offs_lo, | ||
120 | unsigned long offs_hi, | ||
121 | unsigned long len_lo, | ||
122 | unsigned long len_hi, int advice) | ||
123 | { | ||
124 | return sys_fadvise64_64(fd, merge_64(offs_hi, offs_lo), | ||
125 | merge_64(len_hi, len_lo), advice); | ||
126 | } | ||
127 | |||
128 | asmlinkage long sys_readahead_metag(int fd, unsigned long lo, unsigned long hi, | ||
129 | size_t count) | ||
130 | { | ||
131 | return sys_readahead(fd, merge_64(hi, lo), count); | ||
132 | } | ||
133 | |||
134 | asmlinkage ssize_t sys_pread64_metag(unsigned long fd, char __user *buf, | ||
135 | size_t count, unsigned long lo, | ||
136 | unsigned long hi) | ||
137 | { | ||
138 | return sys_pread64(fd, buf, count, merge_64(hi, lo)); | ||
139 | } | ||
140 | |||
141 | asmlinkage ssize_t sys_pwrite64_metag(unsigned long fd, char __user *buf, | ||
142 | size_t count, unsigned long lo, | ||
143 | unsigned long hi) | ||
144 | { | ||
145 | return sys_pwrite64(fd, buf, count, merge_64(hi, lo)); | ||
146 | } | ||
147 | |||
148 | asmlinkage long sys_sync_file_range_metag(int fd, unsigned long offs_lo, | ||
149 | unsigned long offs_hi, | ||
150 | unsigned long len_lo, | ||
151 | unsigned long len_hi, | ||
152 | unsigned int flags) | ||
153 | { | ||
154 | return sys_sync_file_range(fd, merge_64(offs_hi, offs_lo), | ||
155 | merge_64(len_hi, len_lo), flags); | ||
156 | } | ||
157 | |||
158 | /* Provide the actual syscall number to call mapping. */ | ||
159 | #undef __SYSCALL | ||
160 | #define __SYSCALL(nr, call) [nr] = (call), | ||
161 | |||
162 | /* | ||
163 | * We need wrappers for anything with unaligned 64bit arguments | ||
164 | */ | ||
165 | #define sys_truncate64 sys_truncate64_metag | ||
166 | #define sys_ftruncate64 sys_ftruncate64_metag | ||
167 | #define sys_fadvise64_64 sys_fadvise64_64_metag | ||
168 | #define sys_readahead sys_readahead_metag | ||
169 | #define sys_pread64 sys_pread64_metag | ||
170 | #define sys_pwrite64 sys_pwrite64_metag | ||
171 | #define sys_sync_file_range sys_sync_file_range_metag | ||
172 | |||
173 | /* | ||
174 | * Note that we can't include <linux/unistd.h> here since the header | ||
175 | * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well. | ||
176 | */ | ||
177 | const void *sys_call_table[__NR_syscalls] = { | ||
178 | [0 ... __NR_syscalls-1] = sys_ni_syscall, | ||
179 | #include <asm/unistd.h> | ||
180 | }; | ||
diff --git a/arch/metag/kernel/tbiunexp.S b/arch/metag/kernel/tbiunexp.S new file mode 100644 index 000000000000..907bbe0b2e68 --- /dev/null +++ b/arch/metag/kernel/tbiunexp.S | |||
@@ -0,0 +1,22 @@ | |||
1 | /* Pass a breakpoint through to Codescape */ | ||
2 | |||
3 | #include <asm/tbx.h> | ||
4 | |||
5 | .text | ||
6 | .global ___TBIUnExpXXX | ||
7 | .type ___TBIUnExpXXX,function | ||
8 | ___TBIUnExpXXX: | ||
9 | TSTT D0Ar2,#TBICTX_CRIT_BIT ! Result of nestable int call? | ||
10 | BZ $LTBINormCase ! UnExpXXX at background level | ||
11 | MOV D0Re0,TXMASKI ! Read TXMASKI | ||
12 | XOR TXMASKI,D1Re0,D1Re0 ! Turn off BGNDHALT handling! | ||
13 | OR D0Ar2,D0Ar2,D0Re0 ! Preserve bits cleared | ||
14 | $LTBINormCase: | ||
15 | MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2 ! Save args on stack | ||
16 | SETL [A0StP++],D0Ar2,D1Ar1 ! Init area for returned values | ||
17 | SWITCH #0xC20208 ! Total stack frame size 8 Dwords | ||
18 | ! write back size 2 Dwords | ||
19 | GETL D0Re0,D1Re0,[--A0StP] ! Get result | ||
20 | SUB A0StP,A0StP,#(8*3) ! Recover stack frame | ||
21 | MOV PC,D1RtP | ||
22 | .size ___TBIUnExpXXX,.-___TBIUnExpXXX | ||
diff --git a/arch/metag/kernel/tcm.c b/arch/metag/kernel/tcm.c new file mode 100644 index 000000000000..5d102b31ce84 --- /dev/null +++ b/arch/metag/kernel/tcm.c | |||
@@ -0,0 +1,151 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Imagination Technologies Ltd. | ||
3 | */ | ||
4 | |||
5 | #include <linux/init.h> | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/spinlock.h> | ||
8 | #include <linux/stddef.h> | ||
9 | #include <linux/genalloc.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <linux/list.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <asm/page.h> | ||
14 | #include <asm/tcm.h> | ||
15 | |||
16 | struct tcm_pool { | ||
17 | struct list_head list; | ||
18 | unsigned int tag; | ||
19 | unsigned long start; | ||
20 | unsigned long end; | ||
21 | struct gen_pool *pool; | ||
22 | }; | ||
23 | |||
24 | static LIST_HEAD(pool_list); | ||
25 | |||
26 | static struct tcm_pool *find_pool(unsigned int tag) | ||
27 | { | ||
28 | struct list_head *lh; | ||
29 | struct tcm_pool *pool; | ||
30 | |||
31 | list_for_each(lh, &pool_list) { | ||
32 | pool = list_entry(lh, struct tcm_pool, list); | ||
33 | if (pool->tag == tag) | ||
34 | return pool; | ||
35 | } | ||
36 | |||
37 | return NULL; | ||
38 | } | ||
39 | |||
40 | /** | ||
41 | * tcm_alloc - allocate memory from a TCM pool | ||
42 | * @tag: tag of the pool to allocate memory from | ||
43 | * @len: number of bytes to be allocated | ||
44 | * | ||
45 | * Allocate the requested number of bytes from the pool matching | ||
46 | * the specified tag. Returns the address of the allocated memory | ||
47 | * or zero on failure. | ||
48 | */ | ||
49 | unsigned long tcm_alloc(unsigned int tag, size_t len) | ||
50 | { | ||
51 | unsigned long vaddr; | ||
52 | struct tcm_pool *pool; | ||
53 | |||
54 | pool = find_pool(tag); | ||
55 | if (!pool) | ||
56 | return 0; | ||
57 | |||
58 | vaddr = gen_pool_alloc(pool->pool, len); | ||
59 | if (!vaddr) | ||
60 | return 0; | ||
61 | |||
62 | return vaddr; | ||
63 | } | ||
64 | |||
65 | /** | ||
66 | * tcm_free - free a block of memory to a TCM pool | ||
67 | * @tag: tag of the pool to free memory to | ||
68 | * @addr: address of the memory to be freed | ||
69 | * @len: number of bytes to be freed | ||
70 | * | ||
71 | * Free the requested number of bytes at a specific address to the | ||
72 | * pool matching the specified tag. | ||
73 | */ | ||
74 | void tcm_free(unsigned int tag, unsigned long addr, size_t len) | ||
75 | { | ||
76 | struct tcm_pool *pool; | ||
77 | |||
78 | pool = find_pool(tag); | ||
79 | if (!pool) | ||
80 | return; | ||
81 | gen_pool_free(pool->pool, addr, len); | ||
82 | } | ||
83 | |||
84 | /** | ||
85 | * tcm_lookup_tag - find the tag matching an address | ||
86 | * @p: memory address to lookup the tag for | ||
87 | * | ||
88 | * Find the tag of the tcm memory region that contains the | ||
89 | * specified address. Returns %TCM_INVALID_TAG if no such | ||
90 | * memory region could be found. | ||
91 | */ | ||
92 | unsigned int tcm_lookup_tag(unsigned long p) | ||
93 | { | ||
94 | struct list_head *lh; | ||
95 | struct tcm_pool *pool; | ||
96 | unsigned long addr = (unsigned long) p; | ||
97 | |||
98 | list_for_each(lh, &pool_list) { | ||
99 | pool = list_entry(lh, struct tcm_pool, list); | ||
100 | if (addr >= pool->start && addr < pool->end) | ||
101 | return pool->tag; | ||
102 | } | ||
103 | |||
104 | return TCM_INVALID_TAG; | ||
105 | } | ||
106 | |||
107 | /** | ||
108 | * tcm_add_region - add a memory region to TCM pool list | ||
109 | * @reg: descriptor of region to be added | ||
110 | * | ||
111 | * Add a region of memory to the TCM pool list. Returns 0 on success. | ||
112 | */ | ||
113 | int __init tcm_add_region(struct tcm_region *reg) | ||
114 | { | ||
115 | struct tcm_pool *pool; | ||
116 | |||
117 | pool = kmalloc(sizeof(*pool), GFP_KERNEL); | ||
118 | if (!pool) { | ||
119 | pr_err("Failed to alloc memory for TCM pool!\n"); | ||
120 | return -ENOMEM; | ||
121 | } | ||
122 | |||
123 | pool->tag = reg->tag; | ||
124 | pool->start = reg->res.start; | ||
125 | pool->end = reg->res.end; | ||
126 | |||
127 | /* | ||
128 | * 2^3 = 8 bytes granularity to allow for 64bit access alignment. | ||
129 | * -1 = NUMA node specifier. | ||
130 | */ | ||
131 | pool->pool = gen_pool_create(3, -1); | ||
132 | |||
133 | if (!pool->pool) { | ||
134 | pr_err("Failed to create TCM pool!\n"); | ||
135 | kfree(pool); | ||
136 | return -ENOMEM; | ||
137 | } | ||
138 | |||
139 | if (gen_pool_add(pool->pool, reg->res.start, | ||
140 | reg->res.end - reg->res.start + 1, -1)) { | ||
141 | pr_err("Failed to add memory to TCM pool!\n"); | ||
142 | return -ENOMEM; | ||
143 | } | ||
144 | pr_info("Added %s TCM pool (%08x bytes @ %08x)\n", | ||
145 | reg->res.name, reg->res.end - reg->res.start + 1, | ||
146 | reg->res.start); | ||
147 | |||
148 | list_add_tail(&pool->list, &pool_list); | ||
149 | |||
150 | return 0; | ||
151 | } | ||
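A hedged sketch of how platform code might use this pool API (the region contents are placeholders; a real caller fills reg.tag and reg.res from the SoC-specific memory map before registering):

    /* Hypothetical boot-time user of the TCM pool API above. */
    static int __init example_tcm_init(void)
    {
        static struct tcm_region reg; /* .tag and .res set by platform code */
        unsigned long block;

        if (tcm_add_region(&reg))
            return -ENOMEM;

        /* 8-byte aligned allocation from the pool registered above */
        block = tcm_alloc(reg.tag, 256);
        if (!block)
            return -ENOMEM;

        /* ... place hot data in the on-chip memory at 'block' ... */

        tcm_free(reg.tag, block, 256);
        return 0;
    }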
diff --git a/arch/metag/kernel/time.c b/arch/metag/kernel/time.c new file mode 100644 index 000000000000..17dc10733b2f --- /dev/null +++ b/arch/metag/kernel/time.c | |||
@@ -0,0 +1,15 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005-2013 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * This file contains the Meta-specific time handling details. | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | |||
10 | #include <clocksource/metag_generic.h> | ||
11 | |||
12 | void __init time_init(void) | ||
13 | { | ||
14 | metag_generic_timer_init(); | ||
15 | } | ||
diff --git a/arch/metag/kernel/topology.c b/arch/metag/kernel/topology.c new file mode 100644 index 000000000000..bec3dec4922e --- /dev/null +++ b/arch/metag/kernel/topology.c | |||
@@ -0,0 +1,77 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Paul Mundt | ||
3 | * Copyright (C) 2010 Imagination Technologies Ltd. | ||
4 | * | ||
5 | * This file is subject to the terms and conditions of the GNU General Public | ||
6 | * License. See the file "COPYING" in the main directory of this archive | ||
7 | * for more details. | ||
8 | */ | ||
9 | #include <linux/cpu.h> | ||
10 | #include <linux/cpumask.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/percpu.h> | ||
13 | #include <linux/node.h> | ||
14 | #include <linux/nodemask.h> | ||
15 | #include <linux/topology.h> | ||
16 | |||
17 | #include <asm/cpu.h> | ||
18 | |||
19 | DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data); | ||
20 | |||
21 | cpumask_t cpu_core_map[NR_CPUS]; | ||
22 | |||
23 | static cpumask_t cpu_coregroup_map(unsigned int cpu) | ||
24 | { | ||
25 | return *cpu_possible_mask; | ||
26 | } | ||
27 | |||
28 | const struct cpumask *cpu_coregroup_mask(unsigned int cpu) | ||
29 | { | ||
30 | return &cpu_core_map[cpu]; | ||
31 | } | ||
32 | |||
33 | int arch_update_cpu_topology(void) | ||
34 | { | ||
35 | unsigned int cpu; | ||
36 | |||
37 | for_each_possible_cpu(cpu) | ||
38 | cpu_core_map[cpu] = cpu_coregroup_map(cpu); | ||
39 | |||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | static int __init topology_init(void) | ||
44 | { | ||
45 | int i, ret; | ||
46 | |||
47 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
48 | for_each_online_node(i) | ||
49 | register_one_node(i); | ||
50 | #endif | ||
51 | |||
52 | for_each_present_cpu(i) { | ||
53 | struct cpuinfo_metag *cpuinfo = &per_cpu(cpu_data, i); | ||
54 | #ifdef CONFIG_HOTPLUG_CPU | ||
55 | cpuinfo->cpu.hotpluggable = 1; | ||
56 | #endif | ||
57 | ret = register_cpu(&cpuinfo->cpu, i); | ||
58 | if (unlikely(ret)) | ||
59 | pr_warn("%s: register_cpu %d failed (%d)\n", | ||
60 | __func__, i, ret); | ||
61 | } | ||
62 | |||
63 | #if defined(CONFIG_NUMA) && !defined(CONFIG_SMP) | ||
64 | /* | ||
65 | * In the UP case, make sure the CPU association is still | ||
66 | * registered under each node. Without this, sysfs fails | ||
67 | * to make the connection between nodes other than node0 | ||
68 | * and cpu0. | ||
69 | */ | ||
70 | for_each_online_node(i) | ||
71 | if (i != numa_node_id()) | ||
72 | register_cpu_under_node(raw_smp_processor_id(), i); | ||
73 | #endif | ||
74 | |||
75 | return 0; | ||
76 | } | ||
77 | subsys_initcall(topology_init); | ||
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c new file mode 100644 index 000000000000..8961f247b500 --- /dev/null +++ b/arch/metag/kernel/traps.c | |||
@@ -0,0 +1,995 @@ | |||
1 | /* | ||
2 | * Meta exception handling. | ||
3 | * | ||
4 | * Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file COPYING in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #include <linux/export.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/signal.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/preempt.h> | ||
20 | #include <linux/ptrace.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/kallsyms.h> | ||
23 | #include <linux/kdebug.h> | ||
24 | #include <linux/kexec.h> | ||
25 | #include <linux/unistd.h> | ||
26 | #include <linux/smp.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/syscalls.h> | ||
29 | |||
30 | #include <asm/bug.h> | ||
31 | #include <asm/core_reg.h> | ||
32 | #include <asm/irqflags.h> | ||
33 | #include <asm/siginfo.h> | ||
34 | #include <asm/traps.h> | ||
35 | #include <asm/hwthread.h> | ||
36 | #include <asm/switch.h> | ||
37 | #include <asm/user_gateway.h> | ||
38 | #include <asm/syscall.h> | ||
39 | #include <asm/syscalls.h> | ||
40 | |||
41 | /* Passing syscall arguments as long long is quicker. */ | ||
42 | typedef unsigned int (*LPSYSCALL) (unsigned long long, | ||
43 | unsigned long long, | ||
44 | unsigned long long); | ||
45 | |||
46 | /* | ||
47 | * Users of LNKSET should compare the bus error bits obtained from DEFR | ||
48 | * against TXDEFR_LNKSET_SUCCESS only as the failure code will vary between | ||
49 | * different core revisions. | ||
50 | */ | ||
51 | #define TXDEFR_LNKSET_SUCCESS 0x02000000 | ||
52 | #define TXDEFR_LNKSET_FAILURE 0x04000000 | ||
53 | |||
54 | /* | ||
55 | * Our global TBI handle. Initialised from setup.c/setup_arch. | ||
56 | */ | ||
57 | DECLARE_PER_CPU(PTBI, pTBI); | ||
58 | |||
59 | #ifdef CONFIG_SMP | ||
60 | static DEFINE_PER_CPU(unsigned int, trigger_mask); | ||
61 | #else | ||
62 | unsigned int global_trigger_mask; | ||
63 | EXPORT_SYMBOL(global_trigger_mask); | ||
64 | #endif | ||
65 | |||
66 | unsigned long per_cpu__stack_save[NR_CPUS]; | ||
67 | |||
68 | static const char * const trap_names[] = { | ||
69 | [TBIXXF_SIGNUM_IIF] = "Illegal instruction fault", | ||
70 | [TBIXXF_SIGNUM_PGF] = "Privilege violation", | ||
71 | [TBIXXF_SIGNUM_DHF] = "Unaligned data access fault", | ||
72 | [TBIXXF_SIGNUM_IGF] = "Code fetch general read failure", | ||
73 | [TBIXXF_SIGNUM_DGF] = "Data access general read/write fault", | ||
74 | [TBIXXF_SIGNUM_IPF] = "Code fetch page fault", | ||
75 | [TBIXXF_SIGNUM_DPF] = "Data access page fault", | ||
76 | [TBIXXF_SIGNUM_IHF] = "Instruction breakpoint", | ||
77 | [TBIXXF_SIGNUM_DWF] = "Read-only data access fault", | ||
78 | }; | ||
79 | |||
80 | const char *trap_name(int trapno) | ||
81 | { | ||
82 | if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names) | ||
83 | && trap_names[trapno]) | ||
84 | return trap_names[trapno]; | ||
85 | return "Unknown fault"; | ||
86 | } | ||
87 | |||
88 | static DEFINE_SPINLOCK(die_lock); | ||
89 | |||
90 | void die(const char *str, struct pt_regs *regs, long err, | ||
91 | unsigned long addr) | ||
92 | { | ||
93 | static int die_counter; | ||
94 | |||
95 | oops_enter(); | ||
96 | |||
97 | spin_lock_irq(&die_lock); | ||
98 | console_verbose(); | ||
99 | bust_spinlocks(1); | ||
100 | pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff, | ||
101 | trap_name(err & 0xffff), addr, ++die_counter); | ||
102 | |||
103 | print_modules(); | ||
104 | show_regs(regs); | ||
105 | |||
106 | pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm, | ||
107 | task_pid_nr(current), task_stack_page(current) + THREAD_SIZE); | ||
108 | |||
109 | bust_spinlocks(0); | ||
110 | add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); | ||
111 | if (kexec_should_crash(current)) | ||
112 | crash_kexec(regs); | ||
113 | |||
114 | if (in_interrupt()) | ||
115 | panic("Fatal exception in interrupt"); | ||
116 | |||
117 | if (panic_on_oops) | ||
118 | panic("Fatal exception"); | ||
119 | |||
120 | spin_unlock_irq(&die_lock); | ||
121 | oops_exit(); | ||
122 | do_exit(SIGSEGV); | ||
123 | } | ||
124 | |||
125 | #ifdef CONFIG_METAG_DSP | ||
126 | /* | ||
127 | * The ECH encoding specifies the size of a DSPRAM as, | ||
128 | * | ||
129 | * "slots" / 4 | ||
130 | * | ||
131 | * A "slot" is the size of two DSPRAM bank entries; an entry from | ||
132 | * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank | ||
133 | * entry is 4 bytes. | ||
134 | */ | ||
135 | #define SLOT_SZ 8 | ||
136 | static inline unsigned int decode_dspram_size(unsigned int size) | ||
137 | { | ||
138 | unsigned int _sz = size & 0x7f; | ||
139 | |||
140 | return _sz * SLOT_SZ * 4; | ||
141 | } | ||
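To make the encoding concrete: one slot is 8 bytes (a bank A entry plus a bank B entry), and the encoded field counts slots in units of four, so a field value of 0x10 describes 0x10 * 4 = 64 slots, i.e. 512 bytes. A standalone restatement of the decode:

    #include <assert.h>

    #define SLOT_SZ 8   /* one bank A entry + one bank B entry */

    static unsigned int decode_dspram_size(unsigned int size)
    {
        return (size & 0x7f) * SLOT_SZ * 4; /* field counts slots in fours */
    }

    int main(void)
    {
        /* field 0x10 -> 64 slots -> 512 bytes */
        assert(decode_dspram_size(0x10) == 512);
        return 0;
    }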
142 | |||
143 | static void dspram_save(struct meta_ext_context *dsp_ctx, | ||
144 | unsigned int ramA_sz, unsigned int ramB_sz) | ||
145 | { | ||
146 | unsigned int ram_sz[2]; | ||
147 | int i; | ||
148 | |||
149 | ram_sz[0] = ramA_sz; | ||
150 | ram_sz[1] = ramB_sz; | ||
151 | |||
152 | for (i = 0; i < 2; i++) { | ||
153 | if (ram_sz[i] != 0) { | ||
154 | unsigned int sz; | ||
155 | |||
156 | if (i == 0) | ||
157 | sz = decode_dspram_size(ram_sz[i] >> 8); | ||
158 | else | ||
159 | sz = decode_dspram_size(ram_sz[i]); | ||
160 | |||
161 | if (dsp_ctx->ram[i] == NULL) { | ||
162 | dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL); | ||
163 | |||
164 | if (dsp_ctx->ram[i] == NULL) | ||
165 | panic("couldn't save DSP context"); | ||
166 | } else { | ||
167 | if (ram_sz[i] > dsp_ctx->ram_sz[i]) { | ||
168 | kfree(dsp_ctx->ram[i]); | ||
169 | |||
170 | dsp_ctx->ram[i] = kmalloc(sz, | ||
171 | GFP_KERNEL); | ||
172 | |||
173 | if (dsp_ctx->ram[i] == NULL) | ||
174 | panic("couldn't save DSP context"); | ||
175 | } | ||
176 | } | ||
177 | |||
178 | if (i == 0) | ||
179 | __TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]); | ||
180 | else | ||
181 | __TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]); | ||
182 | |||
183 | dsp_ctx->ram_sz[i] = ram_sz[i]; | ||
184 | } | ||
185 | } | ||
186 | } | ||
187 | #endif /* CONFIG_METAG_DSP */ | ||
188 | |||
189 | /* | ||
190 | * Allow interrupts to be nested and save any "extended" register | ||
191 | * context state, e.g. DSP regs and RAMs. | ||
192 | */ | ||
193 | static void nest_interrupts(TBIRES State, unsigned long mask) | ||
194 | { | ||
195 | #ifdef CONFIG_METAG_DSP | ||
196 | struct meta_ext_context *dsp_ctx; | ||
197 | unsigned int D0_8; | ||
198 | |||
199 | /* | ||
200 | * D0.8 may contain an ECH encoding. The upper 16 bits | ||
201 | * tell us what DSP resources the current process is | ||
202 | * using. OR the bits into the SaveMask so that | ||
203 | * __TBINestInts() knows what resources to save as | ||
204 | * part of this context. | ||
205 | * | ||
206 | * Don't save the context if we're nesting interrupts in the | ||
207 | * kernel because the kernel doesn't use DSP hardware. | ||
208 | */ | ||
209 | D0_8 = __core_reg_get(D0.8); | ||
210 | |||
211 | if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) { | ||
212 | State.Sig.SaveMask |= (D0_8 >> 16); | ||
213 | |||
214 | dsp_ctx = current->thread.dsp_context; | ||
215 | if (dsp_ctx == NULL) { | ||
216 | dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL); | ||
217 | if (dsp_ctx == NULL) | ||
218 | panic("couldn't save DSP context: ENOMEM"); | ||
219 | |||
220 | current->thread.dsp_context = dsp_ctx; | ||
221 | } | ||
222 | |||
223 | current->thread.user_flags |= (D0_8 & 0xffff0000); | ||
224 | __TBINestInts(State, &dsp_ctx->regs, mask); | ||
225 | dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f); | ||
226 | } else | ||
227 | __TBINestInts(State, NULL, mask); | ||
228 | #else | ||
229 | __TBINestInts(State, NULL, mask); | ||
230 | #endif | ||
231 | } | ||
232 | |||
233 | void head_end(TBIRES State, unsigned long mask) | ||
234 | { | ||
235 | unsigned int savemask = (unsigned short)State.Sig.SaveMask; | ||
236 | unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask; | ||
237 | |||
238 | if (savemask & TBICTX_PRIV_BIT) { | ||
239 | ctx_savemask |= TBICTX_PRIV_BIT; | ||
240 | current->thread.user_flags = savemask; | ||
241 | } | ||
242 | |||
243 | /* Always undo the sleep bit */ | ||
244 | ctx_savemask &= ~TBICTX_WAIT_BIT; | ||
245 | |||
246 | /* Always save the catch buffer and RD pipe if they are dirty */ | ||
247 | savemask |= TBICTX_XCBF_BIT; | ||
248 | |||
249 | /* Only save the catch and RD if we have not already done so. | ||
250 | * Note - the RD bits are in the pCtx only, and not in the | ||
251 | * State.SaveMask. | ||
252 | */ | ||
253 | if ((savemask & TBICTX_CBUF_BIT) || | ||
254 | (ctx_savemask & TBICTX_CBRP_BIT)) { | ||
255 | /* Have we already saved the buffers though? | ||
256 | * - See TestTrack 5071 */ | ||
257 | if (ctx_savemask & TBICTX_XCBF_BIT) { | ||
258 | /* Strip off the bits so the call to __TBINestInts | ||
259 | * won't save the buffers again. */ | ||
260 | savemask &= ~TBICTX_CBUF_BIT; | ||
261 | ctx_savemask &= ~TBICTX_CBRP_BIT; | ||
262 | } | ||
263 | } | ||
264 | |||
265 | #ifdef CONFIG_METAG_META21 | ||
266 | { | ||
267 | unsigned int depth, txdefr; | ||
268 | |||
269 | /* | ||
270 | * Save TXDEFR state. | ||
271 | * | ||
272 | * The process may have been interrupted after a LNKSET, but | ||
273 | * before it could read the DEFR state, so we mustn't lose that | ||
274 | * state or it could end up retrying an atomic operation that | ||
275 | * succeeded. | ||
276 | * | ||
277 | * All interrupts are disabled at this point so we | ||
278 | * don't need to perform any locking. We must do this | ||
279 | * dance before we use LNKGET or LNKSET. | ||
280 | */ | ||
281 | BUG_ON(current->thread.int_depth > HARDIRQ_BITS); | ||
282 | |||
283 | depth = current->thread.int_depth++; | ||
284 | |||
285 | txdefr = __core_reg_get(TXDEFR); | ||
286 | |||
287 | txdefr &= TXDEFR_BUS_STATE_BITS; | ||
288 | if (txdefr & TXDEFR_LNKSET_SUCCESS) | ||
289 | current->thread.txdefr_failure &= ~(1 << depth); | ||
290 | else | ||
291 | current->thread.txdefr_failure |= (1 << depth); | ||
292 | } | ||
293 | #endif | ||
294 | |||
295 | State.Sig.SaveMask = savemask; | ||
296 | State.Sig.pCtx->SaveMask = ctx_savemask; | ||
297 | |||
298 | nest_interrupts(State, mask); | ||
299 | |||
300 | #ifdef CONFIG_METAG_POISON_CATCH_BUFFERS | ||
301 | /* Poison the catch registers. This shows up any mistakes we have | ||
302 | * made in their handling MUCH quicker. | ||
303 | */ | ||
304 | __core_reg_set(TXCATCH0, 0x87650021); | ||
305 | __core_reg_set(TXCATCH1, 0x87654322); | ||
306 | __core_reg_set(TXCATCH2, 0x87654323); | ||
307 | __core_reg_set(TXCATCH3, 0x87654324); | ||
308 | #endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */ | ||
309 | } | ||
310 | |||
311 | TBIRES tail_end_sys(TBIRES State, int syscall, int *restart) | ||
312 | { | ||
313 | struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; | ||
314 | unsigned long flags; | ||
315 | |||
316 | local_irq_disable(); | ||
317 | |||
318 | if (user_mode(regs)) { | ||
319 | flags = current_thread_info()->flags; | ||
320 | if (flags & _TIF_WORK_MASK && | ||
321 | do_work_pending(regs, flags, syscall)) { | ||
322 | *restart = 1; | ||
323 | return State; | ||
324 | } | ||
325 | |||
326 | #ifdef CONFIG_METAG_FPU | ||
327 | if (current->thread.fpu_context && | ||
328 | current->thread.fpu_context->needs_restore) { | ||
329 | __TBICtxFPURestore(State, current->thread.fpu_context); | ||
330 | /* | ||
331 | * Clearing this bit ensures the FP unit is not made | ||
332 | * active again unless it is used. | ||
333 | */ | ||
334 | State.Sig.SaveMask &= ~TBICTX_FPAC_BIT; | ||
335 | current->thread.fpu_context->needs_restore = false; | ||
336 | } | ||
337 | State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR); | ||
338 | #endif | ||
339 | } | ||
340 | |||
341 | /* TBI will turn interrupts back on at some point. */ | ||
342 | if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask)) | ||
343 | trace_hardirqs_on(); | ||
344 | |||
345 | #ifdef CONFIG_METAG_DSP | ||
346 | /* | ||
347 | * If we previously saved an extended context then restore it | ||
348 | * now. Otherwise, clear D0.8 because this process is not | ||
349 | * using DSP hardware. | ||
350 | */ | ||
351 | if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) { | ||
352 | unsigned int D0_8; | ||
353 | struct meta_ext_context *dsp_ctx = current->thread.dsp_context; | ||
354 | |||
355 | /* Make sure we're going to return to userland. */ | ||
356 | BUG_ON(current->thread.int_depth != 1); | ||
357 | |||
358 | if (dsp_ctx->ram_sz[0] > 0) | ||
359 | __TBIDspramRestoreA(dsp_ctx->ram_sz[0], | ||
360 | dsp_ctx->ram[0]); | ||
361 | if (dsp_ctx->ram_sz[1] > 0) | ||
362 | __TBIDspramRestoreB(dsp_ctx->ram_sz[1], | ||
363 | dsp_ctx->ram[1]); | ||
364 | |||
365 | State.Sig.SaveMask |= State.Sig.pCtx->SaveMask; | ||
366 | __TBICtxRestore(State, current->thread.dsp_context); | ||
367 | D0_8 = __core_reg_get(D0.8); | ||
368 | D0_8 |= current->thread.user_flags & 0xffff0000; | ||
369 | D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff; | ||
370 | __core_reg_set(D0.8, D0_8); | ||
371 | } else | ||
372 | __core_reg_set(D0.8, 0); | ||
373 | #endif /* CONFIG_METAG_DSP */ | ||
374 | |||
375 | #ifdef CONFIG_METAG_META21 | ||
376 | { | ||
377 | unsigned int depth, txdefr; | ||
378 | |||
379 | /* | ||
380 | * If there hasn't been a LNKSET since the last LNKGET then the | ||
381 | * link flag will be set, causing the next LNKSET to succeed if | ||
382 | * the addresses match. The two LNK operations may not be a pair | ||
383 | * (e.g. see atomic_read()), so the LNKSET should fail. | ||
384 | * We use a conditional-never LNKSET to clear the link flag | ||
385 | * without side effects. | ||
386 | */ | ||
387 | asm volatile("LNKSETDNV [D0Re0],D0Re0"); | ||
388 | |||
389 | depth = --current->thread.int_depth; | ||
390 | |||
391 | BUG_ON(user_mode(regs) && depth); | ||
392 | |||
393 | txdefr = __core_reg_get(TXDEFR); | ||
394 | |||
395 | txdefr &= ~TXDEFR_BUS_STATE_BITS; | ||
396 | |||
397 | /* Do we need to restore a failure code into TXDEFR? */ | ||
398 | if (current->thread.txdefr_failure & (1 << depth)) | ||
399 | txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT); | ||
400 | else | ||
401 | txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT); | ||
402 | |||
403 | __core_reg_set(TXDEFR, txdefr); | ||
404 | } | ||
405 | #endif | ||
406 | return State; | ||
407 | } | ||
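head_end() and tail_end_sys() together treat thread.txdefr_failure as a small stack of LNKSET outcomes, one bit per interrupt nesting level: the outcome is recorded on entry (at depth int_depth++) and replayed into TXDEFR on exit (at --int_depth). A minimal model of just that bookkeeping (the names are stand-ins, not the kernel structures):

    #include <assert.h>

    struct lnk_state {
        unsigned int depth;
        unsigned int failure_bits; /* bit n: LNKSET had failed at depth n */
    };

    static void enter(struct lnk_state *s, int lnkset_succeeded)
    {
        unsigned int d = s->depth++;

        if (lnkset_succeeded)
            s->failure_bits &= ~(1u << d);
        else
            s->failure_bits |= 1u << d;
    }

    /* Returns the success flag to put back into TXDEFR for this level. */
    static int leave(struct lnk_state *s)
    {
        unsigned int d = --s->depth;

        return !(s->failure_bits & (1u << d));
    }

    int main(void)
    {
        struct lnk_state s = { 0, 0 };

        enter(&s, 0);           /* interrupted context: LNKSET had failed */
        enter(&s, 1);           /* nested interrupt: LNKSET had succeeded */
        assert(leave(&s) == 1); /* unwinds in LIFO order */
        assert(leave(&s) == 0);
        return 0;
    }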
408 | |||
409 | #ifdef CONFIG_SMP | ||
410 | /* | ||
411 | * If we took an interrupt in the middle of __kuser_get_tls then we need | ||
412 | * to rewind the PC to the start of the function in case the process | ||
413 | * gets migrated to another thread (SMP only) and it reads the wrong tls | ||
414 | * data. | ||
415 | */ | ||
416 | static inline void _restart_critical_section(TBIRES State) | ||
417 | { | ||
418 | unsigned long get_tls_start; | ||
419 | unsigned long get_tls_end; | ||
420 | |||
421 | get_tls_start = (unsigned long)__kuser_get_tls - | ||
422 | (unsigned long)&__user_gateway_start; | ||
423 | |||
424 | get_tls_start += USER_GATEWAY_PAGE; | ||
425 | |||
426 | get_tls_end = (unsigned long)__kuser_get_tls_end - | ||
427 | (unsigned long)&__user_gateway_start; | ||
428 | |||
429 | get_tls_end += USER_GATEWAY_PAGE; | ||
430 | |||
431 | if ((State.Sig.pCtx->CurrPC >= get_tls_start) && | ||
432 | (State.Sig.pCtx->CurrPC < get_tls_end)) | ||
433 | State.Sig.pCtx->CurrPC = get_tls_start; | ||
434 | } | ||
435 | #else | ||
436 | /* | ||
437 | * If we took an interrupt in the middle of | ||
438 | * __kuser_cmpxchg then we need to rewind the PC to the | ||
439 | * start of the function. | ||
440 | */ | ||
441 | static inline void _restart_critical_section(TBIRES State) | ||
442 | { | ||
443 | unsigned long cmpxchg_start; | ||
444 | unsigned long cmpxchg_end; | ||
445 | |||
446 | cmpxchg_start = (unsigned long)__kuser_cmpxchg - | ||
447 | (unsigned long)&__user_gateway_start; | ||
448 | |||
449 | cmpxchg_start += USER_GATEWAY_PAGE; | ||
450 | |||
451 | cmpxchg_end = (unsigned long)__kuser_cmpxchg_end - | ||
452 | (unsigned long)&__user_gateway_start; | ||
453 | |||
454 | cmpxchg_end += USER_GATEWAY_PAGE; | ||
455 | |||
456 | if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) && | ||
457 | (State.Sig.pCtx->CurrPC < cmpxchg_end)) | ||
458 | State.Sig.pCtx->CurrPC = cmpxchg_start; | ||
459 | } | ||
460 | #endif | ||
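Both variants reduce to the same range test: translate the handler's link-time addresses into user-gateway-page addresses, then rewind the saved PC to the start of the region if it lies anywhere inside it. A standalone sketch with made-up addresses:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical gateway-page bounds of the critical sequence */
        unsigned long start = 0x6ffff000, end = 0x6ffff040;
        unsigned long pc = 0x6ffff018;   /* interrupted mid-sequence */

        if (pc >= start && pc < end)
            pc = start;                  /* rerun the whole sequence */

        printf("resume at %#lx\n", pc);
        return 0;
    }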
461 | |||
462 | /* Used by kick_handler() */ | ||
463 | void restart_critical_section(TBIRES State) | ||
464 | { | ||
465 | _restart_critical_section(State); | ||
466 | } | ||
467 | |||
468 | TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst, | ||
469 | PTBI pTBI) | ||
470 | { | ||
471 | head_end(State, ~INTS_OFF_MASK); | ||
472 | |||
473 | /* If we interrupted user code handle any critical sections. */ | ||
474 | if (State.Sig.SaveMask & TBICTX_PRIV_BIT) | ||
475 | _restart_critical_section(State); | ||
476 | |||
477 | trace_hardirqs_off(); | ||
478 | |||
479 | do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx); | ||
480 | |||
481 | return tail_end(State); | ||
482 | } | ||
483 | |||
484 | static unsigned int load_fault(PTBICTXEXTCB0 pbuf) | ||
485 | { | ||
486 | return pbuf->CBFlags & TXCATCH0_READ_BIT; | ||
487 | } | ||
488 | |||
489 | static unsigned long fault_address(PTBICTXEXTCB0 pbuf) | ||
490 | { | ||
491 | return pbuf->CBAddr; | ||
492 | } | ||
493 | |||
494 | static void unhandled_fault(struct pt_regs *regs, unsigned long addr, | ||
495 | int signo, int code, int trapno) | ||
496 | { | ||
497 | if (user_mode(regs)) { | ||
498 | siginfo_t info; | ||
499 | |||
500 | if (show_unhandled_signals && unhandled_signal(current, signo) | ||
501 | && printk_ratelimit()) { | ||
502 | |||
503 | pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n", | ||
504 | current->pid, regs->ctx.CurrPC, addr, | ||
505 | trapno, trap_name(trapno)); | ||
506 | print_vma_addr(" in ", regs->ctx.CurrPC); | ||
507 | print_vma_addr(" rtp in ", regs->ctx.DX[4].U1); | ||
508 | printk("\n"); | ||
509 | show_regs(regs); | ||
510 | } | ||
511 | |||
512 | info.si_signo = signo; | ||
513 | info.si_errno = 0; | ||
514 | info.si_code = code; | ||
515 | info.si_addr = (__force void __user *)addr; | ||
516 | info.si_trapno = trapno; | ||
517 | force_sig_info(signo, &info, current); | ||
518 | } else { | ||
519 | die("Oops", regs, trapno, addr); | ||
520 | } | ||
521 | } | ||
522 | |||
523 | static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs, | ||
524 | unsigned int data_address, int trapno) | ||
525 | { | ||
526 | int ret; | ||
527 | |||
528 | ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno); | ||
529 | |||
530 | return ret; | ||
531 | } | ||
532 | |||
533 | static unsigned long get_inst_fault_address(struct pt_regs *regs) | ||
534 | { | ||
535 | return regs->ctx.CurrPC; | ||
536 | } | ||
537 | |||
538 | TBIRES fault_handler(TBIRES State, int SigNum, int Triggers, | ||
539 | int Inst, PTBI pTBI) | ||
540 | { | ||
541 | struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; | ||
542 | PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)&regs->extcb0; | ||
543 | unsigned long data_address; | ||
544 | |||
545 | head_end(State, ~INTS_OFF_MASK); | ||
546 | |||
547 | /* Hardware breakpoint or data watch */ | ||
548 | if ((SigNum == TBIXXF_SIGNUM_IHF) || | ||
549 | ((SigNum == TBIXXF_SIGNUM_DHF) && | ||
550 | (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT | | ||
551 | TXCATCH0_WATCH0_BIT)))) { | ||
552 | State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, | ||
553 | pTBI); | ||
554 | return tail_end(State); | ||
555 | } | ||
556 | |||
557 | local_irq_enable(); | ||
558 | |||
559 | data_address = fault_address(pcbuf); | ||
560 | |||
561 | switch (SigNum) { | ||
562 | case TBIXXF_SIGNUM_IGF: | ||
563 | /* 1st-level entry invalid (instruction fetch) */ | ||
564 | case TBIXXF_SIGNUM_IPF: { | ||
565 | /* 2nd-level entry invalid (instruction fetch) */ | ||
566 | unsigned long addr = get_inst_fault_address(regs); | ||
567 | do_page_fault(regs, addr, 0, SigNum); | ||
568 | break; | ||
569 | } | ||
570 | |||
571 | case TBIXXF_SIGNUM_DGF: | ||
572 | /* 1st-level entry invalid (data access) */ | ||
573 | case TBIXXF_SIGNUM_DPF: | ||
574 | /* 2nd-level entry invalid (data access) */ | ||
575 | case TBIXXF_SIGNUM_DWF: | ||
576 | /* Write to read only page */ | ||
577 | handle_data_fault(pcbuf, regs, data_address, SigNum); | ||
578 | break; | ||
579 | |||
580 | case TBIXXF_SIGNUM_IIF: | ||
581 | /* Illegal instruction */ | ||
582 | unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC, | ||
583 | SigNum); | ||
584 | break; | ||
585 | |||
586 | case TBIXXF_SIGNUM_DHF: | ||
587 | /* Unaligned access */ | ||
588 | unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN, | ||
589 | SigNum); | ||
590 | break; | ||
591 | case TBIXXF_SIGNUM_PGF: | ||
592 | /* Privilege violation */ | ||
593 | unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR, | ||
594 | SigNum); | ||
595 | break; | ||
596 | default: | ||
597 | BUG(); | ||
598 | break; | ||
599 | } | ||
600 | |||
601 | return tail_end(State); | ||
602 | } | ||
603 | |||
604 | static bool switch_is_syscall(unsigned int inst) | ||
605 | { | ||
606 | return inst == __METAG_SW_ENCODING(SYS); | ||
607 | } | ||
608 | |||
609 | static bool switch_is_legacy_syscall(unsigned int inst) | ||
610 | { | ||
611 | return inst == __METAG_SW_ENCODING(SYS_LEGACY); | ||
612 | } | ||
613 | |||
614 | static inline void step_over_switch(struct pt_regs *regs, unsigned int inst) | ||
615 | { | ||
616 | regs->ctx.CurrPC += 4; | ||
617 | } | ||
618 | |||
619 | static inline int test_syscall_work(void) | ||
620 | { | ||
621 | return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK; | ||
622 | } | ||
623 | |||
624 | TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers, | ||
625 | int Inst, PTBI pTBI) | ||
626 | { | ||
627 | struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; | ||
628 | unsigned int sysnumber; | ||
629 | unsigned long long a1_a2, a3_a4, a5_a6; | ||
630 | LPSYSCALL syscall_entry; | ||
631 | int restart; | ||
632 | |||
633 | head_end(State, ~INTS_OFF_MASK); | ||
634 | |||
635 | /* | ||
636 | * If this is not a syscall SWITCH it could be a breakpoint. | ||
637 | */ | ||
638 | if (!switch_is_syscall(Inst)) { | ||
639 | /* | ||
640 | * Alert the user if they're trying to use legacy system | ||
641 | * calls. This suggests they need to update their C | ||
642 | * library and build against up to date kernel headers. | ||
643 | */ | ||
644 | if (switch_is_legacy_syscall(Inst)) | ||
645 | pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n"); | ||
646 | /* | ||
647 | * We don't know how to handle the SWITCH and cannot | ||
648 | * safely ignore it, so treat all unknown switches | ||
649 | * (including breakpoints) as traps. | ||
650 | */ | ||
651 | force_sig(SIGTRAP, current); | ||
652 | return tail_end(State); | ||
653 | } | ||
654 | |||
655 | local_irq_enable(); | ||
656 | |||
657 | restart_syscall: | ||
658 | restart = 0; | ||
659 | sysnumber = regs->ctx.DX[0].U1; | ||
660 | |||
661 | if (test_syscall_work()) | ||
662 | sysnumber = syscall_trace_enter(regs); | ||
663 | |||
664 | /* Skip over the SWITCH instruction - or you just get 'stuck' on it! */ | ||
665 | step_over_switch(regs, Inst); | ||
666 | |||
667 | if (sysnumber >= __NR_syscalls) { | ||
668 | pr_debug("unknown syscall number: %u\n", sysnumber); | ||
669 | syscall_entry = (LPSYSCALL) sys_ni_syscall; | ||
670 | } else { | ||
671 | syscall_entry = (LPSYSCALL) sys_call_table[sysnumber]; | ||
672 | } | ||
673 | |||
674 | /* Use 64bit loads for speed. */ | ||
675 | a5_a6 = *(unsigned long long *)&regs->ctx.DX[1]; | ||
676 | a3_a4 = *(unsigned long long *)&regs->ctx.DX[2]; | ||
677 | a1_a2 = *(unsigned long long *)&regs->ctx.DX[3]; | ||
678 | |||
679 | /* here is the actual call to the syscall handler functions */ | ||
680 | regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6); | ||
681 | |||
682 | if (test_syscall_work()) | ||
683 | syscall_trace_leave(regs); | ||
684 | |||
685 | State = tail_end_sys(State, sysnumber, &restart); | ||
686 | /* Handlerless restarts shouldn't go via userland */ | ||
687 | if (restart) | ||
688 | goto restart_syscall; | ||
689 | return State; | ||
690 | } | ||
691 | |||
692 | TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers, | ||
693 | int Inst, PTBI pTBI) | ||
694 | { | ||
695 | struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; | ||
696 | |||
697 | /* | ||
698 | * This can be caused by any user process simply executing an unusual | ||
699 | * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the | ||
700 | * thread to stop, so signal a SIGTRAP instead. | ||
701 | */ | ||
702 | head_end(State, ~INTS_OFF_MASK); | ||
703 | if (user_mode(regs)) | ||
704 | force_sig(SIGTRAP, current); | ||
705 | else | ||
706 | State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI); | ||
707 | return tail_end(State); | ||
708 | } | ||
709 | |||
710 | #ifdef CONFIG_METAG_META21 | ||
711 | TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI) | ||
712 | { | ||
713 | struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; | ||
714 | unsigned int error_state = Triggers; | ||
715 | siginfo_t info; | ||
716 | |||
717 | head_end(State, ~INTS_OFF_MASK); | ||
718 | |||
719 | local_irq_enable(); | ||
720 | |||
721 | info.si_signo = SIGFPE; | ||
722 | |||
723 | if (error_state & TXSTAT_FPE_INVALID_BIT) | ||
724 | info.si_code = FPE_FLTINV; | ||
725 | else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT) | ||
726 | info.si_code = FPE_FLTDIV; | ||
727 | else if (error_state & TXSTAT_FPE_OVERFLOW_BIT) | ||
728 | info.si_code = FPE_FLTOVF; | ||
729 | else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT) | ||
730 | info.si_code = FPE_FLTUND; | ||
731 | else if (error_state & TXSTAT_FPE_INEXACT_BIT) | ||
732 | info.si_code = FPE_FLTRES; | ||
733 | else | ||
734 | info.si_code = 0; | ||
735 | info.si_errno = 0; | ||
736 | info.si_addr = (__force void __user *)regs->ctx.CurrPC; | ||
737 | force_sig_info(SIGFPE, &info, current); | ||
738 | |||
739 | return tail_end(State); | ||
740 | } | ||
741 | #endif | ||
742 | |||
743 | #ifdef CONFIG_METAG_SUSPEND_MEM | ||
744 | struct traps_context { | ||
745 | PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1]; | ||
746 | }; | ||
747 | |||
748 | static struct traps_context *metag_traps_context; | ||
749 | |||
750 | int traps_save_context(void) | ||
751 | { | ||
752 | unsigned long cpu = smp_processor_id(); | ||
753 | PTBI _pTBI = per_cpu(pTBI, cpu); | ||
754 | struct traps_context *context; | ||
755 | |||
756 | context = kzalloc(sizeof(*context), GFP_ATOMIC); | ||
757 | if (!context) | ||
758 | return -ENOMEM; | ||
759 | |||
760 | memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs)); | ||
761 | |||
762 | metag_traps_context = context; | ||
763 | return 0; | ||
764 | } | ||
765 | |||
766 | int traps_restore_context(void) | ||
767 | { | ||
768 | unsigned long cpu = smp_processor_id(); | ||
769 | PTBI _pTBI = per_cpu(pTBI, cpu); | ||
770 | struct traps_context *context = metag_traps_context; | ||
771 | |||
772 | metag_traps_context = NULL; | ||
773 | |||
774 | memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs)); | ||
775 | |||
776 | kfree(context); | ||
777 | return 0; | ||
778 | } | ||
779 | #endif | ||
780 | |||
781 | #ifdef CONFIG_SMP | ||
782 | static inline unsigned int _get_trigger_mask(void) | ||
783 | { | ||
784 | unsigned long cpu = smp_processor_id(); | ||
785 | return per_cpu(trigger_mask, cpu); | ||
786 | } | ||
787 | |||
788 | unsigned int get_trigger_mask(void) | ||
789 | { | ||
790 | return _get_trigger_mask(); | ||
791 | } | ||
792 | EXPORT_SYMBOL(get_trigger_mask); | ||
793 | |||
794 | static void set_trigger_mask(unsigned int mask) | ||
795 | { | ||
796 | unsigned long cpu = smp_processor_id(); | ||
797 | per_cpu(trigger_mask, cpu) = mask; | ||
798 | } | ||
799 | |||
800 | void arch_local_irq_enable(void) | ||
801 | { | ||
802 | preempt_disable(); | ||
803 | arch_local_irq_restore(_get_trigger_mask()); | ||
804 | preempt_enable_no_resched(); | ||
805 | } | ||
806 | EXPORT_SYMBOL(arch_local_irq_enable); | ||
807 | #else | ||
808 | static void set_trigger_mask(unsigned int mask) | ||
809 | { | ||
810 | global_trigger_mask = mask; | ||
811 | } | ||
812 | #endif | ||
813 | |||
814 | void __cpuinit per_cpu_trap_init(unsigned long cpu) | ||
815 | { | ||
816 | TBIRES int_context; | ||
817 | unsigned int thread = cpu_2_hwthread_id[cpu]; | ||
818 | |||
819 | set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */ | ||
820 | TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */ | ||
821 | TBI_TRIG_BIT(TBID_SIGNUM_SW1) | | ||
822 | TBI_TRIG_BIT(TBID_SIGNUM_SWS)); | ||
823 | |||
824 | /* non-priv - use current stack */ | ||
825 | int_context.Sig.pCtx = NULL; | ||
826 | /* Start with interrupts off */ | ||
827 | int_context.Sig.TrigMask = INTS_OFF_MASK; | ||
828 | int_context.Sig.SaveMask = 0; | ||
829 | |||
830 | /* And call __TBIASyncTrigger() */ | ||
831 | __TBIASyncTrigger(int_context); | ||
832 | } | ||
833 | |||
834 | void __init trap_init(void) | ||
835 | { | ||
836 | unsigned long cpu = smp_processor_id(); | ||
837 | PTBI _pTBI = per_cpu(pTBI, cpu); | ||
838 | |||
839 | _pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler; | ||
840 | _pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler; | ||
841 | _pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler; | ||
842 | _pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler; | ||
843 | _pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler; | ||
844 | _pTBI->fnSigs[TBID_SIGNUM_SWK] = kick_handler; | ||
845 | |||
846 | #ifdef CONFIG_METAG_META21 | ||
847 | _pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR; | ||
848 | _pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler; | ||
849 | #endif | ||
850 | |||
851 | per_cpu_trap_init(cpu); | ||
852 | } | ||
853 | |||
854 | void tbi_startup_interrupt(int irq) | ||
855 | { | ||
856 | unsigned long cpu = smp_processor_id(); | ||
857 | PTBI _pTBI = per_cpu(pTBI, cpu); | ||
858 | |||
859 | BUG_ON(irq > TBID_SIGNUM_MAX); | ||
860 | |||
861 | /* For TR1 and TR2, the thread id is encoded in the irq number */ | ||
862 | if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3) | ||
863 | cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4]; | ||
864 | |||
865 | set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq)); | ||
866 | |||
867 | _pTBI->fnSigs[irq] = trigger_handler; | ||
868 | } | ||
869 | |||
870 | void tbi_shutdown_interrupt(int irq) | ||
871 | { | ||
872 | unsigned long cpu = smp_processor_id(); | ||
873 | PTBI _pTBI = per_cpu(pTBI, cpu); | ||
874 | |||
875 | BUG_ON(irq > TBID_SIGNUM_MAX); | ||
876 | |||
877 | set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq)); | ||
878 | |||
879 | _pTBI->fnSigs[irq] = __TBIUnExpXXX; | ||
880 | } | ||
881 | |||
882 | int ret_from_fork(TBIRES arg) | ||
883 | { | ||
884 | struct task_struct *prev = arg.Switch.pPara; | ||
885 | struct task_struct *tsk = current; | ||
886 | struct pt_regs *regs = task_pt_regs(tsk); | ||
887 | int (*fn)(void *); | ||
888 | TBIRES Next; | ||
889 | |||
890 | schedule_tail(prev); | ||
891 | |||
892 | if (tsk->flags & PF_KTHREAD) { | ||
893 | fn = (void *)regs->ctx.DX[4].U1; | ||
894 | BUG_ON(!fn); | ||
895 | |||
896 | fn((void *)regs->ctx.DX[3].U1); | ||
897 | } | ||
898 | |||
899 | if (test_syscall_work()) | ||
900 | syscall_trace_leave(regs); | ||
901 | |||
902 | preempt_disable(); | ||
903 | |||
904 | Next.Sig.TrigMask = get_trigger_mask(); | ||
905 | Next.Sig.SaveMask = 0; | ||
906 | Next.Sig.pCtx = ®s->ctx; | ||
907 | |||
908 | set_gateway_tls(current->thread.tls_ptr); | ||
909 | |||
910 | preempt_enable_no_resched(); | ||
911 | |||
912 | /* And interrupts should come back on when we resume the real usermode | ||
913 | * code. Call __TBIASyncResume() | ||
914 | */ | ||
915 | __TBIASyncResume(tail_end(Next)); | ||
916 | /* ASyncResume should NEVER return */ | ||
917 | BUG(); | ||
918 | return 0; | ||
919 | } | ||
920 | |||
921 | void show_trace(struct task_struct *tsk, unsigned long *sp, | ||
922 | struct pt_regs *regs) | ||
923 | { | ||
924 | unsigned long addr; | ||
925 | #ifdef CONFIG_FRAME_POINTER | ||
926 | unsigned long fp, fpnew; | ||
927 | unsigned long stack; | ||
928 | #endif | ||
929 | |||
930 | if (regs && user_mode(regs)) | ||
931 | return; | ||
932 | |||
933 | printk("\nCall trace: "); | ||
934 | #ifdef CONFIG_KALLSYMS | ||
935 | printk("\n"); | ||
936 | #endif | ||
937 | |||
938 | if (!tsk) | ||
939 | tsk = current; | ||
940 | |||
941 | #ifdef CONFIG_FRAME_POINTER | ||
942 | if (regs) { | ||
943 | print_ip_sym(regs->ctx.CurrPC); | ||
944 | fp = regs->ctx.AX[1].U0; | ||
945 | } else { | ||
946 | fp = __core_reg_get(A0FrP); | ||
947 | } | ||
948 | |||
949 | /* detect when the frame pointer has been used for other purposes and | ||
950 | * doesn't point to the stack (it may point completely elsewhere, which | ||
951 | * kstack_end may not detect). | ||
952 | */ | ||
953 | stack = (unsigned long)task_stack_page(tsk); | ||
954 | while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) { | ||
955 | addr = __raw_readl((unsigned long *)(fp + 4)) - 4; | ||
956 | if (kernel_text_address(addr)) | ||
957 | print_ip_sym(addr); | ||
958 | else | ||
959 | break; | ||
960 | /* stack grows up, so frame pointers must decrease */ | ||
961 | fpnew = __raw_readl((unsigned long *)(fp + 0)); | ||
962 | if (fpnew >= fp) | ||
963 | break; | ||
964 | fp = fpnew; | ||
965 | } | ||
966 | #else | ||
967 | while (!kstack_end(sp)) { | ||
968 | addr = (*sp--) - 4; | ||
969 | if (kernel_text_address(addr)) | ||
970 | print_ip_sym(addr); | ||
971 | } | ||
972 | #endif | ||
973 | |||
974 | printk("\n"); | ||
975 | |||
976 | debug_show_held_locks(tsk); | ||
977 | } | ||
978 | |||
979 | void show_stack(struct task_struct *tsk, unsigned long *sp) | ||
980 | { | ||
981 | if (!tsk) | ||
982 | tsk = current; | ||
983 | if (tsk == current) | ||
984 | sp = (unsigned long *)current_stack_pointer; | ||
985 | else | ||
986 | sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0; | ||
987 | |||
988 | show_trace(tsk, sp, NULL); | ||
989 | } | ||
990 | |||
991 | void dump_stack(void) | ||
992 | { | ||
993 | show_stack(NULL, NULL); | ||
994 | } | ||
995 | EXPORT_SYMBOL(dump_stack); | ||
diff --git a/arch/metag/kernel/user_gateway.S b/arch/metag/kernel/user_gateway.S new file mode 100644 index 000000000000..7167f3e8db6b --- /dev/null +++ b/arch/metag/kernel/user_gateway.S | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * This file contains code that can be accessed from userspace and can | ||
5 | * access certain kernel data structures without the overhead of a system | ||
6 | * call. | ||
7 | */ | ||
8 | |||
9 | #include <asm/metag_regs.h> | ||
10 | #include <asm/user_gateway.h> | ||
11 | |||
12 | /* | ||
13 | * User helpers. | ||
14 | * | ||
15 | * These are segments of kernel-provided user code reachable from user space | ||
16 | * at a fixed address in kernel memory. This is used to provide user space | ||
17 | * with some operations which require kernel help because of unimplemented | ||
18 | * native features and/or instructions in some Meta CPUs. The idea is for | ||
19 | * this code to be executed directly in user mode for best efficiency, even | ||
20 | * though it is too intimate with the kernel counterpart to be left to user | ||
21 | * libraries. The kernel reserves the right to change this code as needed | ||
22 | * without warning. Only the entry points and their results are guaranteed | ||
23 | * to be stable. | ||
24 | * | ||
25 | * Each segment is 64-byte aligned. This mechanism should be used only | ||
26 | * for things that are really small and justified, and not be abused freely. | ||
27 | */ | ||
28 | .text | ||
29 | .global ___user_gateway_start | ||
30 | ___user_gateway_start: | ||
31 | |||
32 | /* get_tls | ||
33 | * Offset: 0 | ||
34 | * Description: Get the TLS pointer for this process. | ||
35 | */ | ||
36 | .global ___kuser_get_tls | ||
37 | .type ___kuser_get_tls,function | ||
38 | ___kuser_get_tls: | ||
39 | MOVT D1Ar1,#HI(USER_GATEWAY_PAGE + USER_GATEWAY_TLS) | ||
40 | ADD D1Ar1,D1Ar1,#LO(USER_GATEWAY_PAGE + USER_GATEWAY_TLS) | ||
41 | MOV D1Ar3,TXENABLE | ||
42 | AND D1Ar3,D1Ar3,#(TXENABLE_THREAD_BITS) | ||
43 | LSR D1Ar3,D1Ar3,#(TXENABLE_THREAD_S - 2) | ||
44 | GETD D0Re0,[D1Ar1+D1Ar3] | ||
45 | ___kuser_get_tls_end: /* Beyond this point the read will complete */ | ||
46 | MOV PC,D1RtP | ||
47 | .size ___kuser_get_tls,.-___kuser_get_tls | ||
48 | .global ___kuser_get_tls_end | ||
49 | |||
50 | /* cmpxchg | ||
51 | * Offset: 64 | ||
52 | * Description: Replace the value at 'ptr' with 'newval' if the current | ||
53 | * value is 'oldval'. Return zero if we succeeded, | ||
54 | * non-zero otherwise. | ||
55 | * | ||
56 | * Reference prototype: | ||
57 | * | ||
58 | * int __kuser_cmpxchg(int oldval, int newval, unsigned long *ptr) | ||
59 | * | ||
60 | */ | ||
61 | .balign 64 | ||
62 | .global ___kuser_cmpxchg | ||
63 | .type ___kuser_cmpxchg,function | ||
64 | ___kuser_cmpxchg: | ||
65 | #ifdef CONFIG_SMP | ||
66 | /* | ||
67 | * We must use LNKGET/LNKSET with an SMP kernel because the other method | ||
68 | * does not provide atomicity across multiple CPUs. | ||
69 | */ | ||
70 | 0: LNKGETD D0Re0,[D1Ar3] | ||
71 | CMP D0Re0,D1Ar1 | ||
72 | LNKSETDZ [D1Ar3],D0Ar2 | ||
73 | BNZ 1f | ||
74 | DEFR D0Re0,TXSTAT | ||
75 | ANDT D0Re0,D0Re0,#HI(0x3f000000) | ||
76 | CMPT D0Re0,#HI(0x02000000) | ||
77 | BNE 0b | ||
78 | #ifdef CONFIG_METAG_LNKGET_AROUND_CACHE | ||
79 | DCACHE [D1Ar3], D0Re0 | ||
80 | #endif | ||
81 | 1: MOV D0Re0,#1 | ||
82 | XORZ D0Re0,D0Re0,D0Re0 | ||
83 | MOV PC,D1RtP | ||
84 | #else | ||
85 | GETD D0Re0,[D1Ar3] | ||
86 | CMP D0Re0,D1Ar1 | ||
87 | SETDZ [D1Ar3],D0Ar2 | ||
88 | ___kuser_cmpxchg_end: /* Beyond this point the write will complete */ | ||
89 | MOV D0Re0,#1 | ||
90 | XORZ D0Re0,D0Re0,D0Re0 | ||
91 | MOV PC,D1RtP | ||
92 | #endif /* CONFIG_SMP */ | ||
93 | .size ___kuser_cmpxchg,.-___kuser_cmpxchg | ||
94 | .global ___kuser_cmpxchg_end | ||
95 | |||
96 | .global ___user_gateway_end | ||
97 | ___user_gateway_end: | ||
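The reference prototype in the comment above is enough to sketch how user code might call the cmpxchg helper. A minimal illustrative sketch, assuming a hypothetical GATEWAY_BASE constant for the fixed gateway page address (the real address comes from the ABI, not this file); only the offset 64 is taken from the comment above:

	/* Hypothetical userspace wrapper; GATEWAY_BASE is an assumed value. */
	typedef int (*kuser_cmpxchg_fn)(int oldval, int newval, unsigned long *ptr);

	#define GATEWAY_BASE	0x6ffff000UL	/* assumption, not from this patch */
	#define KUSER_CMPXCHG	((kuser_cmpxchg_fn)(GATEWAY_BASE + 64))

	/* Atomically increment *val by retrying the compare-and-swap. */
	static int atomic_add_one(unsigned long *val)
	{
		int old;

		do {
			old = (int)*val;
		} while (KUSER_CMPXCHG(old, old + 1, val) != 0);
		return old + 1;
	}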
diff --git a/arch/metag/kernel/vmlinux.lds.S b/arch/metag/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..e12055e88bfe --- /dev/null +++ b/arch/metag/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,71 @@ | |||
1 | /* ld script to make Meta Linux kernel */ | ||
2 | |||
3 | #include <asm/thread_info.h> | ||
4 | #include <asm/page.h> | ||
5 | #include <asm/cache.h> | ||
6 | |||
7 | #include <asm-generic/vmlinux.lds.h> | ||
8 | |||
9 | OUTPUT_FORMAT("elf32-metag", "elf32-metag", "elf32-metag") | ||
10 | OUTPUT_ARCH(metag) | ||
11 | ENTRY(__start) | ||
12 | |||
13 | _jiffies = _jiffies_64; | ||
14 | SECTIONS | ||
15 | { | ||
16 | . = CONFIG_PAGE_OFFSET; | ||
17 | _text = .; | ||
18 | __text = .; | ||
19 | __stext = .; | ||
20 | HEAD_TEXT_SECTION | ||
21 | .text : { | ||
22 | TEXT_TEXT | ||
23 | SCHED_TEXT | ||
24 | LOCK_TEXT | ||
25 | KPROBES_TEXT | ||
26 | IRQENTRY_TEXT | ||
27 | *(.text.*) | ||
28 | *(.gnu.warning) | ||
29 | } | ||
30 | |||
31 | __etext = .; /* End of text section */ | ||
32 | |||
33 | __sdata = .; | ||
34 | RO_DATA_SECTION(PAGE_SIZE) | ||
35 | RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) | ||
36 | __edata = .; /* End of data section */ | ||
37 | |||
38 | EXCEPTION_TABLE(16) | ||
39 | NOTES | ||
40 | |||
41 | . = ALIGN(PAGE_SIZE); /* Init code and data */ | ||
42 | ___init_begin = .; | ||
43 | INIT_TEXT_SECTION(PAGE_SIZE) | ||
44 | INIT_DATA_SECTION(16) | ||
45 | |||
46 | .init.arch.info : { | ||
47 | ___arch_info_begin = .; | ||
48 | *(.arch.info.init) | ||
49 | ___arch_info_end = .; | ||
50 | } | ||
51 | |||
52 | PERCPU_SECTION(L1_CACHE_BYTES) | ||
53 | |||
54 | ___init_end = .; | ||
55 | |||
56 | BSS_SECTION(0, PAGE_SIZE, 0) | ||
57 | |||
58 | __end = .; | ||
59 | |||
60 | . = ALIGN(PAGE_SIZE); | ||
61 | __heap_start = .; | ||
62 | |||
63 | DWARF_DEBUG | ||
64 | |||
65 | /* When something in the kernel is NOT compiled as a module, its | ||
66 | * module cleanup code and data are put into these sections. Both | ||
67 | * can then be thrown away, as cleanup code is never called unless | ||
68 | * the code was built as a module. | ||
69 | */ | ||
70 | DISCARDS | ||
71 | } | ||
diff --git a/arch/metag/lib/Makefile b/arch/metag/lib/Makefile new file mode 100644 index 000000000000..a41d24e270e6 --- /dev/null +++ b/arch/metag/lib/Makefile | |||
@@ -0,0 +1,22 @@ | |||
1 | # | ||
2 | # Makefile for Meta-specific library files. | ||
3 | # | ||
4 | |||
5 | lib-y += usercopy.o | ||
6 | lib-y += copy_page.o | ||
7 | lib-y += clear_page.o | ||
8 | lib-y += memcpy.o | ||
9 | lib-y += memmove.o | ||
10 | lib-y += memset.o | ||
11 | lib-y += delay.o | ||
12 | lib-y += div64.o | ||
13 | lib-y += muldi3.o | ||
14 | lib-y += ashrdi3.o | ||
15 | lib-y += ashldi3.o | ||
16 | lib-y += lshrdi3.o | ||
17 | lib-y += divsi3.o | ||
18 | lib-y += modsi3.o | ||
19 | lib-y += cmpdi2.o | ||
20 | lib-y += ucmpdi2.o | ||
21 | lib-y += ip_fast_csum.o | ||
22 | lib-y += checksum.o | ||
diff --git a/arch/metag/lib/ashldi3.S b/arch/metag/lib/ashldi3.S new file mode 100644 index 000000000000..78d6974cffef --- /dev/null +++ b/arch/metag/lib/ashldi3.S | |||
@@ -0,0 +1,33 @@ | |||
1 | ! Copyright (C) 2012 by Imagination Technologies Ltd. | ||
2 | ! | ||
3 | ! 64-bit arithmetic shift left routine. | ||
4 | ! | ||
5 | |||
6 | .text | ||
7 | .global ___ashldi3 | ||
8 | .type ___ashldi3,function | ||
9 | |||
10 | ___ashldi3: | ||
11 | MOV D0Re0,D0Ar2 | ||
12 | MOV D1Re0,D1Ar1 | ||
13 | CMP D1Ar3,#0 ! COUNT == 0 | ||
14 | MOVEQ PC,D1RtP ! Yes, return | ||
15 | |||
16 | SUBS D0Ar4,D1Ar3,#32 ! N = COUNT - 32 | ||
17 | BGE $L10 | ||
18 | |||
19 | !! Shift < 32 | ||
20 | NEG D0Ar4,D0Ar4 ! N = - N | ||
21 | LSL D1Re0,D1Re0,D1Ar3 ! HI = HI << COUNT | ||
22 | LSR D0Ar6,D0Re0,D0Ar4 ! TMP= LO >> -(COUNT - 32) | ||
23 | OR D1Re0,D1Re0,D0Ar6 ! HI = HI | TMP | ||
24 | SWAP D0Ar4,D1Ar3 | ||
25 | LSL D0Re0,D0Re0,D0Ar4 ! LO = LO << COUNT | ||
26 | MOV PC,D1RtP | ||
27 | |||
28 | $L10: | ||
29 | !! Shift >= 32 | ||
30 | LSL D1Re0,D0Re0,D0Ar4 ! HI = LO << N | ||
31 | MOV D0Re0,#0 ! LO = 0 | ||
32 | MOV PC,D1RtP | ||
33 | .size ___ashldi3,.-___ashldi3 | ||
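A rough C model of the two cases above may help; the asm returns early for a zero count, so the sketch assumes a count of 1..63 (shifting a 32-bit value by 32 would be undefined in C):

	#include <stdint.h>

	static uint64_t ashldi3_sketch(uint64_t v, unsigned int count)
	{
		uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);

		if (count < 32) {
			hi = (hi << count) | (lo >> (32 - count));	/* spill low bits up */
			lo <<= count;
		} else {
			hi = lo << (count - 32);	/* low word supplies everything */
			lo = 0;
		}
		return ((uint64_t)hi << 32) | lo;
	}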
diff --git a/arch/metag/lib/ashrdi3.S b/arch/metag/lib/ashrdi3.S new file mode 100644 index 000000000000..7cb7ed3bb1ad --- /dev/null +++ b/arch/metag/lib/ashrdi3.S | |||
@@ -0,0 +1,33 @@ | |||
1 | ! Copyright (C) 2012 by Imagination Technologies Ltd. | ||
2 | ! | ||
3 | ! 64-bit arithmetic shift right routine. | ||
4 | ! | ||
5 | |||
6 | .text | ||
7 | .global ___ashrdi3 | ||
8 | .type ___ashrdi3,function | ||
9 | |||
10 | ___ashrdi3: | ||
11 | MOV D0Re0,D0Ar2 | ||
12 | MOV D1Re0,D1Ar1 | ||
13 | CMP D1Ar3,#0 ! COUNT == 0 | ||
14 | MOVEQ PC,D1RtP ! Yes, return | ||
15 | |||
16 | MOV D0Ar4,D1Ar3 | ||
17 | SUBS D1Ar3,D1Ar3,#32 ! N = COUNT - 32 | ||
18 | BGE $L20 | ||
19 | |||
20 | !! Shift < 32 | ||
21 | NEG D1Ar3,D1Ar3 ! N = - N | ||
22 | LSR D0Re0,D0Re0,D0Ar4 ! LO = LO >> COUNT | ||
23 | LSL D0Ar6,D1Re0,D1Ar3 ! TMP= HI << -(COUNT - 32) | ||
24 | OR D0Re0,D0Re0,D0Ar6 ! LO = LO | TMP | ||
25 | SWAP D1Ar3,D0Ar4 | ||
26 | ASR D1Re0,D1Re0,D1Ar3 ! HI = HI >> COUNT | ||
27 | MOV PC,D1RtP | ||
28 | $L20: | ||
29 | !! Shift >= 32 | ||
30 | ASR D0Re0,D1Re0,D1Ar3 ! LO = HI >> N | ||
31 | ASR D1Re0,D1Re0,#31 ! HI = HI >> 31 | ||
32 | MOV PC,D1RtP | ||
33 | .size ___ashrdi3,.-___ashrdi3 | ||
diff --git a/arch/metag/lib/checksum.c b/arch/metag/lib/checksum.c new file mode 100644 index 000000000000..44d2e1913560 --- /dev/null +++ b/arch/metag/lib/checksum.c | |||
@@ -0,0 +1,168 @@ | |||
1 | /* | ||
2 | * | ||
3 | * INET An implementation of the TCP/IP protocol suite for the LINUX | ||
4 | * operating system. INET is implemented using the BSD Socket | ||
5 | * interface as the means of communication with the user level. | ||
6 | * | ||
7 | * IP/TCP/UDP checksumming routines | ||
8 | * | ||
9 | * Authors: Jorge Cwik, <jorge@laser.satlink.net> | ||
10 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> | ||
11 | * Tom May, <ftom@netcom.com> | ||
12 | * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de> | ||
13 | * Lots of code moved from tcp.c and ip.c; see those files | ||
14 | * for more names. | ||
15 | * | ||
16 | * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek: | ||
17 | * Fixed some nasty bugs, causing some horrible crashes. | ||
18 | * A: At some points, the sum (%0) was used as | ||
19 | * length-counter instead of the length counter | ||
20 | * (%1). Thanks to Roman Hodek for pointing this out. | ||
21 | * B: GCC seems to mess up if one uses too many | ||
22 | * data-registers to hold input values and one tries to | ||
23 | * specify d0 and d1 as scratch registers. Letting gcc | ||
24 | * choose these registers itself solves the problem. | ||
25 | * | ||
26 | * This program is free software; you can redistribute it and/or | ||
27 | * modify it under the terms of the GNU General Public License | ||
28 | * as published by the Free Software Foundation; either version | ||
29 | * 2 of the License, or (at your option) any later version. | ||
30 | */ | ||
31 | |||
32 | /* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access | ||
33 | kills, so most of the assembly has to go. */ | ||
34 | |||
35 | #include <linux/module.h> | ||
36 | #include <net/checksum.h> | ||
37 | |||
38 | #include <asm/byteorder.h> | ||
39 | |||
40 | static inline unsigned short from32to16(unsigned int x) | ||
41 | { | ||
42 | /* add up 16-bit and 16-bit for 16+c bit */ | ||
43 | x = (x & 0xffff) + (x >> 16); | ||
44 | /* add up carry.. */ | ||
45 | x = (x & 0xffff) + (x >> 16); | ||
46 | return x; | ||
47 | } | ||
48 | |||
49 | static unsigned int do_csum(const unsigned char *buff, int len) | ||
50 | { | ||
51 | int odd; | ||
52 | unsigned int result = 0; | ||
53 | |||
54 | if (len <= 0) | ||
55 | goto out; | ||
56 | odd = 1 & (unsigned long) buff; | ||
57 | if (odd) { | ||
58 | #ifdef __LITTLE_ENDIAN | ||
59 | result += (*buff << 8); | ||
60 | #else | ||
61 | result = *buff; | ||
62 | #endif | ||
63 | len--; | ||
64 | buff++; | ||
65 | } | ||
66 | if (len >= 2) { | ||
67 | if (2 & (unsigned long) buff) { | ||
68 | result += *(unsigned short *) buff; | ||
69 | len -= 2; | ||
70 | buff += 2; | ||
71 | } | ||
72 | if (len >= 4) { | ||
73 | const unsigned char *end = buff + ((unsigned)len & ~3); | ||
74 | unsigned int carry = 0; | ||
75 | do { | ||
76 | unsigned int w = *(unsigned int *) buff; | ||
77 | buff += 4; | ||
78 | result += carry; | ||
79 | result += w; | ||
80 | carry = (w > result); | ||
81 | } while (buff < end); | ||
82 | result += carry; | ||
83 | result = (result & 0xffff) + (result >> 16); | ||
84 | } | ||
85 | if (len & 2) { | ||
86 | result += *(unsigned short *) buff; | ||
87 | buff += 2; | ||
88 | } | ||
89 | } | ||
90 | if (len & 1) | ||
91 | #ifdef __LITTLE_ENDIAN | ||
92 | result += *buff; | ||
93 | #else | ||
94 | result += (*buff << 8); | ||
95 | #endif | ||
96 | result = from32to16(result); | ||
97 | if (odd) | ||
98 | result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); | ||
99 | out: | ||
100 | return result; | ||
101 | } | ||
102 | EXPORT_SYMBOL(ip_fast_csum); | ||
103 | |||
104 | /* | ||
105 | * computes the checksum of a memory block at buff, length len, | ||
106 | * and adds in "sum" (32-bit) | ||
107 | * | ||
108 | * returns a 32-bit number suitable for feeding into itself | ||
109 | * or csum_tcpudp_magic | ||
110 | * | ||
111 | * this function must be called with even lengths, except | ||
112 | * for the last fragment, which may be odd | ||
113 | * | ||
114 | * it's best to have buff aligned on a 32-bit boundary | ||
115 | */ | ||
116 | __wsum csum_partial(const void *buff, int len, __wsum wsum) | ||
117 | { | ||
118 | unsigned int sum = (__force unsigned int)wsum; | ||
119 | unsigned int result = do_csum(buff, len); | ||
120 | |||
121 | /* add in old sum, and carry.. */ | ||
122 | result += sum; | ||
123 | if (sum > result) | ||
124 | result += 1; | ||
125 | return (__force __wsum)result; | ||
126 | } | ||
127 | EXPORT_SYMBOL(csum_partial); | ||
128 | |||
129 | /* | ||
130 | * this routine is used for miscellaneous IP-like checksums, mainly | ||
131 | * in icmp.c | ||
132 | */ | ||
133 | __sum16 ip_compute_csum(const void *buff, int len) | ||
134 | { | ||
135 | return (__force __sum16)~do_csum(buff, len); | ||
136 | } | ||
137 | EXPORT_SYMBOL(ip_compute_csum); | ||
138 | |||
139 | /* | ||
140 | * copy from fs while checksumming, otherwise like csum_partial | ||
141 | */ | ||
142 | __wsum | ||
143 | csum_partial_copy_from_user(const void __user *src, void *dst, int len, | ||
144 | __wsum sum, int *csum_err) | ||
145 | { | ||
146 | int missing; | ||
147 | |||
148 | missing = __copy_from_user(dst, src, len); | ||
149 | if (missing) { | ||
150 | memset(dst + len - missing, 0, missing); | ||
151 | *csum_err = -EFAULT; | ||
152 | } else | ||
153 | *csum_err = 0; | ||
154 | |||
155 | return csum_partial(dst, len, sum); | ||
156 | } | ||
157 | EXPORT_SYMBOL(csum_partial_copy_from_user); | ||
158 | |||
159 | /* | ||
160 | * copy from ds while checksumming, otherwise like csum_partial | ||
161 | */ | ||
162 | __wsum | ||
163 | csum_partial_copy(const void *src, void *dst, int len, __wsum sum) | ||
164 | { | ||
165 | memcpy(dst, src, len); | ||
166 | return csum_partial(dst, len, sum); | ||
167 | } | ||
168 | EXPORT_SYMBOL(csum_partial_copy); | ||
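A quick way to see why the double fold in from32to16() is safe: the first fold can itself produce a carry out of bit 16, and the second absorbs it. A self-contained illustrative check:

	#include <stdio.h>
	#include <stdint.h>

	static uint16_t fold(uint32_t x)
	{
		x = (x & 0xffff) + (x >> 16);	/* may still carry into bit 16 */
		x = (x & 0xffff) + (x >> 16);	/* absorbs it; fits in 16 bits */
		return (uint16_t)x;
	}

	int main(void)
	{
		printf("%#x\n", fold(0xffffu + 0x0001u));	/* end-around carry: 0x1 */
		printf("%#x\n", fold(0xffff0001u));		/* folds to 0x1 as well */
		return 0;
	}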
diff --git a/arch/metag/lib/clear_page.S b/arch/metag/lib/clear_page.S new file mode 100644 index 000000000000..43144eebec55 --- /dev/null +++ b/arch/metag/lib/clear_page.S | |||
@@ -0,0 +1,17 @@ | |||
1 | ! Copyright 2007,2008,2009 Imagination Technologies Ltd. | ||
2 | |||
3 | #include <asm/page.h> | ||
4 | |||
5 | .text | ||
6 | .global _clear_page | ||
7 | .type _clear_page,function | ||
8 | !! D1Ar1 - page | ||
9 | _clear_page: | ||
10 | MOV TXRPT,#((PAGE_SIZE / 8) - 1) | ||
11 | MOV D0Re0,#0 | ||
12 | MOV D1Re0,#0 | ||
13 | $Lclear_page_loop: | ||
14 | SETL [D1Ar1++],D0Re0,D1Re0 | ||
15 | BR $Lclear_page_loop | ||
16 | MOV PC,D1RtP | ||
17 | .size _clear_page,.-_clear_page | ||
diff --git a/arch/metag/lib/cmpdi2.S b/arch/metag/lib/cmpdi2.S new file mode 100644 index 000000000000..9c5c663c5aea --- /dev/null +++ b/arch/metag/lib/cmpdi2.S | |||
@@ -0,0 +1,32 @@ | |||
1 | ! Copyright (C) 2012 by Imagination Technologies Ltd. | ||
2 | ! | ||
3 | ! 64-bit signed compare routine. | ||
4 | ! | ||
5 | |||
6 | .text | ||
7 | .global ___cmpdi2 | ||
8 | .type ___cmpdi2,function | ||
9 | |||
10 | ! low high | ||
11 | ! s64 a (D0Ar2, D1Ar1) | ||
12 | ! s64 b (D0Ar4, D1Ar3) | ||
13 | ___cmpdi2: | ||
14 | ! start at 1 (equal) and conditionally increment or decrement | ||
15 | MOV D0Re0,#1 | ||
16 | |||
17 | ! high words differ? | ||
18 | CMP D1Ar1,D1Ar3 | ||
19 | BNE $Lhigh_differ | ||
20 | |||
21 | ! unsigned compare low words | ||
22 | CMP D0Ar2,D0Ar4 | ||
23 | SUBLO D0Re0,D0Re0,#1 | ||
24 | ADDHI D0Re0,D0Re0,#1 | ||
25 | MOV PC,D1RtP | ||
26 | |||
27 | $Lhigh_differ: | ||
28 | ! signed compare high words | ||
29 | SUBLT D0Re0,D0Re0,#1 | ||
30 | ADDGT D0Re0,D0Re0,#1 | ||
31 | MOV PC,D1RtP | ||
32 | .size ___cmpdi2,.-___cmpdi2 | ||
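The "start at 1" trick implements libgcc's comparison convention: the result is 0, 1 or 2 for less-than, equal and greater-than respectively (___ucmpdi2 later in this patch uses the same scheme with unsigned compares throughout). In C terms:

	/* Illustrative C equivalent of ___cmpdi2's return convention. */
	static int cmpdi2_sketch(long long a, long long b)
	{
		if (a < b)
			return 0;	/* asm: start at 1, then decrement */
		if (a > b)
			return 2;	/* asm: start at 1, then increment */
		return 1;		/* asm: keep the initial 1 */
	}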
diff --git a/arch/metag/lib/copy_page.S b/arch/metag/lib/copy_page.S new file mode 100644 index 000000000000..91f7d461239c --- /dev/null +++ b/arch/metag/lib/copy_page.S | |||
@@ -0,0 +1,20 @@ | |||
1 | ! Copyright 2007,2008 Imagination Technologies Ltd. | ||
2 | |||
3 | #include <asm/page.h> | ||
4 | |||
5 | .text | ||
6 | .global _copy_page | ||
7 | .type _copy_page,function | ||
8 | !! D1Ar1 - to | ||
9 | !! D0Ar2 - from | ||
10 | _copy_page: | ||
11 | MOV D0FrT,#PAGE_SIZE | ||
12 | $Lcopy_page_loop: | ||
13 | GETL D0Re0,D1Re0,[D0Ar2++] | ||
14 | GETL D0Ar6,D1Ar5,[D0Ar2++] | ||
15 | SETL [D1Ar1++],D0Re0,D1Re0 | ||
16 | SETL [D1Ar1++],D0Ar6,D1Ar5 | ||
17 | SUBS D0FrT,D0FrT,#16 | ||
18 | BNZ $Lcopy_page_loop | ||
19 | MOV PC,D1RtP | ||
20 | .size _copy_page,.-_copy_page | ||
diff --git a/arch/metag/lib/delay.c b/arch/metag/lib/delay.c new file mode 100644 index 000000000000..0b308f48b37a --- /dev/null +++ b/arch/metag/lib/delay.c | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Precise Delay Loops for Meta | ||
3 | * | ||
4 | * Copyright (C) 1993 Linus Torvalds | ||
5 | * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> | ||
6 | * Copyright (C) 2007,2009 Imagination Technologies Ltd. | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <linux/export.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/delay.h> | ||
13 | |||
14 | #include <asm/core_reg.h> | ||
15 | #include <asm/processor.h> | ||
16 | |||
17 | /* | ||
18 | * TXTACTCYC is only 24 bits, so on chips with fast clocks it will wrap | ||
19 | * many times per second. If it does wrap, __delay will return prematurely, | ||
20 | * but this is only likely with large delay values. | ||
21 | * | ||
22 | * We also can't implement read_current_timer() with TXTACTCYC due to | ||
23 | * this wrapping behaviour. | ||
24 | */ | ||
25 | #define rdtimer(t) t = __core_reg_get(TXTACTCYC) | ||
26 | |||
27 | void __delay(unsigned long loops) | ||
28 | { | ||
29 | unsigned long bclock, now; | ||
30 | |||
31 | rdtimer(bclock); | ||
32 | do { | ||
33 | asm("NOP"); | ||
34 | rdtimer(now); | ||
35 | } while ((now-bclock) < loops); | ||
36 | } | ||
37 | EXPORT_SYMBOL(__delay); | ||
38 | |||
39 | inline void __const_udelay(unsigned long xloops) | ||
40 | { | ||
41 | u64 loops = (u64)xloops * (u64)loops_per_jiffy * HZ; | ||
42 | __delay(loops >> 32); | ||
43 | } | ||
44 | EXPORT_SYMBOL(__const_udelay); | ||
45 | |||
46 | void __udelay(unsigned long usecs) | ||
47 | { | ||
48 | __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */ | ||
49 | } | ||
50 | EXPORT_SYMBOL(__udelay); | ||
51 | |||
52 | void __ndelay(unsigned long nsecs) | ||
53 | { | ||
54 | __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ | ||
55 | } | ||
56 | EXPORT_SYMBOL(__ndelay); | ||
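The magic constants are fixed-point reciprocals, so the ">> 32" in __const_udelay() turns a multiply into a division by the time base. An illustrative check of the two constants used above:

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		uint64_t two32 = 1ULL << 32;

		/* ceil(2^32 / 10^6) == 0x10c7 and ceil(2^32 / 10^9) == 0x5 */
		assert((two32 + 1000000 - 1) / 1000000 == 0x10c7);
		assert((two32 + 1000000000 - 1) / 1000000000 == 0x5);
		return 0;
	}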
diff --git a/arch/metag/lib/div64.S b/arch/metag/lib/div64.S new file mode 100644 index 000000000000..1cfc93498f70 --- /dev/null +++ b/arch/metag/lib/div64.S | |||
@@ -0,0 +1,108 @@ | |||
1 | ! Copyright (C) 2012 Imagination Technologies Ltd. | ||
2 | ! | ||
3 | ! Signed/unsigned 64-bit division routines. | ||
4 | ! | ||
5 | |||
6 | .text | ||
7 | .global _div_u64 | ||
8 | .type _div_u64,function | ||
9 | |||
10 | _div_u64: | ||
11 | $L1: | ||
12 | ORS A0.3,D1Ar3,D0Ar4 | ||
13 | BNE $L3 | ||
14 | $L2: | ||
15 | MOV D0Re0,D0Ar2 | ||
16 | MOV D1Re0,D1Ar1 | ||
17 | MOV PC,D1RtP | ||
18 | $L3: | ||
19 | CMP D1Ar3,D1Ar1 | ||
20 | CMPEQ D0Ar4,D0Ar2 | ||
21 | MOV D0Re0,#1 | ||
22 | MOV D1Re0,#0 | ||
23 | BHS $L6 | ||
24 | $L4: | ||
25 | ADDS D0Ar6,D0Ar4,D0Ar4 | ||
26 | ADD D1Ar5,D1Ar3,D1Ar3 | ||
27 | ADDCS D1Ar5,D1Ar5,#1 | ||
28 | CMP D1Ar5,D1Ar3 | ||
29 | CMPEQ D0Ar6,D0Ar4 | ||
30 | BLO $L6 | ||
31 | $L5: | ||
32 | MOV D0Ar4,D0Ar6 | ||
33 | MOV D1Ar3,D1Ar5 | ||
34 | ADDS D0Re0,D0Re0,D0Re0 | ||
35 | ADD D1Re0,D1Re0,D1Re0 | ||
36 | ADDCS D1Re0,D1Re0,#1 | ||
37 | CMP D1Ar3,D1Ar1 | ||
38 | CMPEQ D0Ar4,D0Ar2 | ||
39 | BLO $L4 | ||
40 | $L6: | ||
41 | ORS A0.3,D1Re0,D0Re0 | ||
42 | MOV D0Ar6,#0 | ||
43 | MOV D1Ar5,D0Ar6 | ||
44 | BEQ $L10 | ||
45 | $L7: | ||
46 | CMP D1Ar1,D1Ar3 | ||
47 | CMPEQ D0Ar2,D0Ar4 | ||
48 | BLO $L9 | ||
49 | $L8: | ||
50 | ADDS D0Ar6,D0Ar6,D0Re0 | ||
51 | ADD D1Ar5,D1Ar5,D1Re0 | ||
52 | ADDCS D1Ar5,D1Ar5,#1 | ||
53 | |||
54 | SUBS D0Ar2,D0Ar2,D0Ar4 | ||
55 | SUB D1Ar1,D1Ar1,D1Ar3 | ||
56 | SUBCS D1Ar1,D1Ar1,#1 | ||
57 | $L9: | ||
58 | LSL A0.3,D1Re0,#31 | ||
59 | LSR D0Re0,D0Re0,#1 | ||
60 | LSR D1Re0,D1Re0,#1 | ||
61 | OR D0Re0,D0Re0,A0.3 | ||
62 | LSL A0.3,D1Ar3,#31 | ||
63 | LSR D0Ar4,D0Ar4,#1 | ||
64 | LSR D1Ar3,D1Ar3,#1 | ||
65 | OR D0Ar4,D0Ar4,A0.3 | ||
66 | ORS A0.3,D1Re0,D0Re0 | ||
67 | BNE $L7 | ||
68 | $L10: | ||
69 | MOV D0Re0,D0Ar6 | ||
70 | MOV D1Re0,D1Ar5 | ||
71 | MOV PC,D1RtP | ||
72 | .size _div_u64,.-_div_u64 | ||
73 | |||
74 | .text | ||
75 | .global _div_s64 | ||
76 | .type _div_s64,function | ||
77 | _div_s64: | ||
78 | MSETL [A0StP],D0FrT,D0.5 | ||
79 | XOR D0.5,D0Ar2,D0Ar4 | ||
80 | XOR D1.5,D1Ar1,D1Ar3 | ||
81 | TSTT D1Ar1,#HI(0x80000000) | ||
82 | BZ $L25 | ||
83 | |||
84 | NEGS D0Ar2,D0Ar2 | ||
85 | NEG D1Ar1,D1Ar1 | ||
86 | SUBCS D1Ar1,D1Ar1,#1 | ||
87 | $L25: | ||
88 | TSTT D1Ar3,#HI(0x80000000) | ||
89 | BZ $L27 | ||
90 | |||
91 | NEGS D0Ar4,D0Ar4 | ||
92 | NEG D1Ar3,D1Ar3 | ||
93 | SUBCS D1Ar3,D1Ar3,#1 | ||
94 | $L27: | ||
95 | CALLR D1RtP,_div_u64 | ||
96 | TSTT D1.5,#HI(0x80000000) | ||
97 | BZ $L29 | ||
98 | |||
99 | NEGS D0Re0,D0Re0 | ||
100 | NEG D1Re0,D1Re0 | ||
101 | SUBCS D1Re0,D1Re0,#1 | ||
102 | $L29: | ||
103 | |||
104 | GETL D0FrT,D1RtP,[A0StP+#(-16)] | ||
105 | GETL D0.5,D1.5,[A0StP+#(-8)] | ||
106 | SUB A0StP,A0StP,#16 | ||
107 | MOV PC,D1RtP | ||
108 | .size _div_s64,.-_div_s64 | ||
diff --git a/arch/metag/lib/divsi3.S b/arch/metag/lib/divsi3.S new file mode 100644 index 000000000000..7c8a8ae9a0a1 --- /dev/null +++ b/arch/metag/lib/divsi3.S | |||
@@ -0,0 +1,100 @@ | |||
1 | ! Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007 | ||
2 | ! Imagination Technologies Ltd | ||
3 | ! | ||
4 | ! Integer divide routines. | ||
5 | ! | ||
6 | |||
7 | .text | ||
8 | .global ___udivsi3 | ||
9 | .type ___udivsi3,function | ||
10 | .align 2 | ||
11 | ___udivsi3: | ||
12 | !! | ||
13 | !! The core below handles the signed divide case, so just set the control variable | ||
14 | !! | ||
15 | MOV D1Re0,D0Ar2 ! Au already in A1Ar1, Bu -> D1Re0 | ||
16 | MOV D0Re0,#0 ! Result is 0 | ||
17 | MOV D0Ar4,#0 ! Return positive result | ||
18 | B $LIDMCUStart | ||
19 | .size ___udivsi3,.-___udivsi3 | ||
20 | |||
21 | !! | ||
22 | !! 32-bit division signed i/p - passed signed 32-bit numbers | ||
23 | !! | ||
24 | .global ___divsi3 | ||
25 | .type ___divsi3,function | ||
26 | .align 2 | ||
27 | ___divsi3: | ||
28 | !! | ||
29 | !! A already in D1Ar1, B already in D0Ar2 -> make B abs(B) | ||
30 | !! | ||
31 | MOV D1Re0,D0Ar2 ! A already in A1Ar1, B -> D1Re0 | ||
32 | MOV D0Re0,#0 ! Result is 0 | ||
33 | XOR D0Ar4,D1Ar1,D1Re0 ! D0Ar4 -ive if result is -ive | ||
34 | ABS D1Ar1,D1Ar1 ! abs(A) -> Au | ||
35 | ABS D1Re0,D1Re0 ! abs(B) -> Bu | ||
36 | $LIDMCUStart: | ||
37 | CMP D1Ar1,D1Re0 ! Is ( Au > Bu )? | ||
38 | LSR D1Ar3,D1Ar1,#2 ! Calculate (Au & (~3)) >> 2 | ||
39 | CMPHI D1Re0,D1Ar3 ! OR ( (Au & (~3)) <= (Bu << 2) )? | ||
40 | LSLSHI D1Ar3,D1Re0,#1 ! Buq = Bu << 1 | ||
41 | BLS $LIDMCUSetup ! Yes: Do normal divide | ||
42 | !! | ||
43 | !! Quick divide setup can assume that CurBit only needs to start at 2 | ||
44 | !! | ||
45 | $LIDMCQuick: | ||
46 | CMP D1Ar1,D1Ar3 ! ( A >= Buq )? | ||
47 | ADDCC D0Re0,D0Re0,#2 ! If yes result += 2 | ||
48 | SUBCC D1Ar1,D1Ar1,D1Ar3 ! and A -= Buq | ||
49 | CMP D1Ar1,D1Re0 ! ( A >= Bu )? | ||
50 | ADDCC D0Re0,D0Re0,#1 ! If yes result += 1 | ||
51 | SUBCC D1Ar1,D1Ar1,D1Re0 ! and A -= Bu | ||
52 | ORS D0Ar4,D0Ar4,D0Ar4 ! Return neg result? | ||
53 | NEG D0Ar2,D0Re0 ! Calculate neg result | ||
54 | MOVMI D0Re0,D0Ar2 ! Yes: Take neg result | ||
55 | $LIDMCRet: | ||
56 | MOV PC,D1RtP | ||
57 | !! | ||
58 | !! Setup for general unsigned divide code | ||
59 | !! | ||
60 | !! D0Re0 is used to form the result, already set to Zero | ||
61 | !! D1Re0 is the input Bu value, this gets trashed | ||
62 | !! D0Ar6 is curbit which is set to 1 at the start and shifted up | ||
63 | !! D0Ar4 is negative if we should return a negative result | ||
64 | !! D1Ar1 is the input Au value, eventually this holds the remainder | ||
65 | !! | ||
66 | $LIDMCUSetup: | ||
67 | CMP D1Ar1,D1Re0 ! Is ( Au < Bu )? | ||
68 | MOV D0Ar6,#1 ! Set curbit to 1 | ||
69 | BCS $LIDMCRet ! Yes: Return 0 remainder Au | ||
70 | !! | ||
71 | !! Calculate alignment using FFB instruction | ||
72 | !! | ||
73 | FFB D1Ar5,D1Ar1 ! Find first bit of Au | ||
74 | ANDN D1Ar5,D1Ar5,#31 ! Handle exceptional case. | ||
75 | ORN D1Ar5,D1Ar5,#31 ! if N bit set, set to 31 | ||
76 | FFB D1Ar3,D1Re0 ! Find first bit of Bu | ||
77 | ANDN D1Ar3,D1Ar3,#31 ! Handle exceptional case. | ||
78 | ORN D1Ar3,D1Ar3,#31 ! if N bit set, set to 31 | ||
79 | SUBS D1Ar3,D1Ar5,D1Ar3 ! calculate diff, ffbA - ffbB | ||
80 | MOV D0Ar2,D1Ar3 ! copy into bank 0 | ||
81 | LSLGT D1Re0,D1Re0,D1Ar3 ! ( > 0) ? left shift B | ||
82 | LSLGT D0Ar6,D0Ar6,D0Ar2 ! ( > 0) ? left shift curbit | ||
83 | !! | ||
84 | !! Now we start the divide proper, logic is | ||
85 | !! | ||
86 | !! if ( A >= B ) add curbit to result and subtract B from A | ||
87 | !! shift curbit and B down by 1 in either case | ||
88 | !! | ||
89 | $LIDMCLoop: | ||
90 | CMP D1Ar1, D1Re0 ! ( A >= B )? | ||
91 | ADDCC D0Re0, D0Re0, D0Ar6 ! If yes result += curbit | ||
92 | SUBCC D1Ar1, D1Ar1, D1Re0 ! and A -= B | ||
93 | LSRS D0Ar6, D0Ar6, #1 ! Shift down curbit, is it zero? | ||
94 | LSR D1Re0, D1Re0, #1 ! Shift down B | ||
95 | BNZ $LIDMCLoop ! Was single bit in curbit lost? | ||
96 | ORS D0Ar4,D0Ar4,D0Ar4 ! Return neg result? | ||
97 | NEG D0Ar2,D0Re0 ! Calculate neg result | ||
98 | MOVMI D0Re0,D0Ar2 ! Yes: Take neg result | ||
99 | MOV PC,D1RtP | ||
100 | .size ___divsi3,.-___divsi3 | ||
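$LIDMCUSetup and $LIDMCLoop together are classic restoring division: align the divisor under the dividend's top bit, then peel off one quotient bit per step. A rough C equivalent of the unsigned core (divisor must be non-zero; the quick path and sign handling are left out):

	static unsigned int udiv_sketch(unsigned int a, unsigned int b)
	{
		unsigned int result = 0, curbit = 1;

		if (a < b)
			return 0;			/* remainder is simply a */
		while (b < a && !(b & 0x80000000u)) {	/* FFB-style alignment */
			b <<= 1;
			curbit <<= 1;
		}
		while (curbit) {
			if (a >= b) {			/* take this quotient bit */
				result += curbit;
				a -= b;
			}
			curbit >>= 1;
			b >>= 1;
		}
		return result;				/* remainder left in a */
	}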
diff --git a/arch/metag/lib/ip_fast_csum.S b/arch/metag/lib/ip_fast_csum.S new file mode 100644 index 000000000000..533b1e73deac --- /dev/null +++ b/arch/metag/lib/ip_fast_csum.S | |||
@@ -0,0 +1,32 @@ | |||
1 | |||
2 | .text | ||
3 | /* | ||
4 | * This is a version of ip_compute_csum() optimized for IP headers, | ||
5 | * which always checksum on 4 octet boundaries. | ||
6 | * | ||
7 | * extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); | ||
8 | * | ||
9 | */ | ||
10 | .global _ip_fast_csum | ||
11 | .type _ip_fast_csum,function | ||
12 | _ip_fast_csum: | ||
13 | !! TXRPT needs loops - 1 | ||
14 | SUBS TXRPT,D0Ar2,#1 | ||
15 | MOV D0Re0,#0 | ||
16 | BLO $Lfast_csum_exit | ||
17 | $Lfast_csum_loop: | ||
18 | GETD D1Ar3,[D1Ar1++] | ||
19 | ADDS D0Re0,D0Re0,D1Ar3 | ||
20 | ADDCS D0Re0,D0Re0,#1 | ||
21 | BR $Lfast_csum_loop | ||
22 | LSR D0Ar4,D0Re0,#16 | ||
23 | AND D0Re0,D0Re0,#0xffff | ||
24 | AND D0Ar4,D0Ar4,#0xffff | ||
25 | ADD D0Re0,D0Re0,D0Ar4 | ||
26 | LSR D0Ar4,D0Re0,#16 | ||
27 | ADD D0Re0,D0Re0,D0Ar4 | ||
28 | XOR D0Re0,D0Re0,#-1 | ||
29 | AND D0Re0,D0Re0,#0xffff | ||
30 | $Lfast_csum_exit: | ||
31 | MOV PC,D1RtP | ||
32 | .size _ip_fast_csum,.-_ip_fast_csum | ||
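Functionally this is the standard Internet checksum restricted to whole 32-bit words. A C reference of the same computation, useful when testing the assembly (illustrative, and assuming iph is 4-byte aligned):

	#include <stdint.h>

	static uint16_t ip_fast_csum_ref(const void *iph, unsigned int ihl)
	{
		const uint32_t *w = iph;
		uint64_t sum = 0;
		unsigned int i;

		for (i = 0; i < ihl; i++)	/* ihl counts 32-bit words */
			sum += w[i];		/* wide accumulator keeps carries */
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}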
diff --git a/arch/metag/lib/lshrdi3.S b/arch/metag/lib/lshrdi3.S new file mode 100644 index 000000000000..47f720283077 --- /dev/null +++ b/arch/metag/lib/lshrdi3.S | |||
@@ -0,0 +1,33 @@ | |||
1 | ! Copyright (C) 2012 by Imagination Technologies Ltd. | ||
2 | ! | ||
3 | ! 64-bit logical shift right routine. | ||
4 | ! | ||
5 | |||
6 | .text | ||
7 | .global ___lshrdi3 | ||
8 | .type ___lshrdi3,function | ||
9 | |||
10 | ___lshrdi3: | ||
11 | MOV D0Re0,D0Ar2 | ||
12 | MOV D1Re0,D1Ar1 | ||
13 | CMP D1Ar3,#0 ! COUNT == 0 | ||
14 | MOVEQ PC,D1RtP ! Yes, return | ||
15 | |||
16 | MOV D0Ar4,D1Ar3 | ||
17 | SUBS D1Ar3,D1Ar3,#32 ! N = COUNT - 32 | ||
18 | BGE $L30 | ||
19 | |||
20 | !! Shift < 32 | ||
21 | NEG D1Ar3,D1Ar3 ! N = - N | ||
22 | LSR D0Re0,D0Re0,D0Ar4 ! LO = LO >> COUNT | ||
23 | LSL D0Ar6,D1Re0,D1Ar3 ! TMP= HI << -(COUNT - 32) | ||
24 | OR D0Re0,D0Re0,D0Ar6 ! LO = LO | TMP | ||
25 | SWAP D1Ar3,D0Ar4 | ||
26 | LSR D1Re0,D1Re0,D1Ar3 ! HI = HI >> COUNT | ||
27 | MOV PC,D1RtP | ||
28 | $L30: | ||
29 | !! Shift >= 32 | ||
30 | LSR D0Re0,D1Re0,D1Ar3 ! LO = HI >> N | ||
31 | MOV D1Re0,#0 ! HI = 0 | ||
32 | MOV PC,D1RtP | ||
33 | .size ___lshrdi3,.-___lshrdi3 | ||
diff --git a/arch/metag/lib/memcpy.S b/arch/metag/lib/memcpy.S new file mode 100644 index 000000000000..46b7a2b9479e --- /dev/null +++ b/arch/metag/lib/memcpy.S | |||
@@ -0,0 +1,185 @@ | |||
1 | ! Copyright (C) 2008-2012 Imagination Technologies Ltd. | ||
2 | |||
3 | .text | ||
4 | .global _memcpy | ||
5 | .type _memcpy,function | ||
6 | ! D1Ar1 dst | ||
7 | ! D0Ar2 src | ||
8 | ! D1Ar3 cnt | ||
9 | ! D0Re0 dst | ||
10 | _memcpy: | ||
11 | CMP D1Ar3, #16 | ||
12 | MOV A1.2, D0Ar2 ! source pointer | ||
13 | MOV A0.2, D1Ar1 ! destination pointer | ||
14 | MOV A0.3, D1Ar1 ! for return value | ||
15 | ! If there are less than 16 bytes to copy use the byte copy loop | ||
16 | BGE $Llong_copy | ||
17 | |||
18 | $Lbyte_copy: | ||
19 | ! Simply copy a byte at a time | ||
20 | SUBS TXRPT, D1Ar3, #1 | ||
21 | BLT $Lend | ||
22 | $Lloop_byte: | ||
23 | GETB D1Re0, [A1.2++] | ||
24 | SETB [A0.2++], D1Re0 | ||
25 | BR $Lloop_byte | ||
26 | |||
27 | $Lend: | ||
28 | ! Finally set return value and return | ||
29 | MOV D0Re0, A0.3 | ||
30 | MOV PC, D1RtP | ||
31 | |||
32 | $Llong_copy: | ||
33 | ANDS D1Ar5, D1Ar1, #7 ! test destination alignment | ||
34 | BZ $Laligned_dst | ||
35 | |||
36 | ! The destination address is not 8 byte aligned. We will copy bytes from | ||
37 | ! the source to the destination until the remaining data has an 8 byte | ||
38 | ! destination address alignment (i.e. we should never copy more than 7 | ||
39 | ! bytes here). | ||
40 | $Lalign_dst: | ||
41 | GETB D0Re0, [A1.2++] | ||
42 | ADD D1Ar5, D1Ar5, #1 ! dest is aligned when D1Ar5 reaches #8 | ||
43 | SUB D1Ar3, D1Ar3, #1 ! decrement count of remaining bytes | ||
44 | SETB [A0.2++], D0Re0 | ||
45 | CMP D1Ar5, #8 | ||
46 | BNE $Lalign_dst | ||
47 | |||
48 | ! We have at least (16 - 7) = 9 bytes to copy - calculate the number of 8 byte | ||
49 | ! blocks, then jump to the unaligned copy loop or fall through to the aligned | ||
50 | ! copy loop as appropriate. | ||
51 | $Laligned_dst: | ||
52 | MOV D0Ar4, A1.2 | ||
53 | LSR D1Ar5, D1Ar3, #3 ! D1Ar5 = number of 8 byte blocks | ||
54 | ANDS D0Ar4, D0Ar4, #7 ! test source alignment | ||
55 | BNZ $Lunaligned_copy ! if unaligned, use unaligned copy loop | ||
56 | |||
57 | ! Both source and destination are 8 byte aligned - the easy case. | ||
58 | $Laligned_copy: | ||
59 | LSRS D1Ar5, D1Ar3, #5 ! D1Ar5 = number of 32 byte blocks | ||
60 | BZ $Lbyte_copy | ||
61 | SUB TXRPT, D1Ar5, #1 | ||
62 | |||
63 | $Laligned_32: | ||
64 | GETL D0Re0, D1Re0, [A1.2++] | ||
65 | GETL D0Ar6, D1Ar5, [A1.2++] | ||
66 | SETL [A0.2++], D0Re0, D1Re0 | ||
67 | SETL [A0.2++], D0Ar6, D1Ar5 | ||
68 | GETL D0Re0, D1Re0, [A1.2++] | ||
69 | GETL D0Ar6, D1Ar5, [A1.2++] | ||
70 | SETL [A0.2++], D0Re0, D1Re0 | ||
71 | SETL [A0.2++], D0Ar6, D1Ar5 | ||
72 | BR $Laligned_32 | ||
73 | |||
74 | ! If there are any remaining bytes use the byte copy loop, otherwise we are done | ||
75 | ANDS D1Ar3, D1Ar3, #0x1f | ||
76 | BNZ $Lbyte_copy | ||
77 | B $Lend | ||
78 | |||
79 | ! The destination is 8 byte aligned but the source is not, and there are 8 | ||
80 | ! or more bytes to be copied. | ||
81 | $Lunaligned_copy: | ||
82 | ! Adjust the source pointer (A1.2) to the 8 byte boundary before its | ||
83 | ! current value | ||
84 | MOV D0Ar4, A1.2 | ||
85 | MOV D0Ar6, A1.2 | ||
86 | ANDMB D0Ar4, D0Ar4, #0xfff8 | ||
87 | MOV A1.2, D0Ar4 | ||
88 | ! Save the number of bytes of mis-alignment in D0Ar4 for use later | ||
89 | SUBS D0Ar6, D0Ar6, D0Ar4 | ||
90 | MOV D0Ar4, D0Ar6 | ||
91 | ! if there is no mis-alignment after all, use the aligned copy loop | ||
92 | BZ $Laligned_copy | ||
93 | |||
94 | ! prefetch 8 bytes | ||
95 | GETL D0Re0, D1Re0, [A1.2] | ||
96 | |||
97 | SUB TXRPT, D1Ar5, #1 | ||
98 | |||
99 | ! There are 3 mis-alignment cases to be considered: less than 4 bytes, exactly | ||
100 | ! 4 bytes, and more than 4 bytes. | ||
101 | CMP D0Ar6, #4 | ||
102 | BLT $Lunaligned_1_2_3 ! use 1-3 byte mis-alignment loop | ||
103 | BZ $Lunaligned_4 ! use 4 byte mis-alignment loop | ||
104 | |||
105 | ! The mis-alignment is more than 4 bytes | ||
106 | $Lunaligned_5_6_7: | ||
107 | SUB D0Ar6, D0Ar6, #4 | ||
108 | ! Calculate the bit offsets required for the shift operations necessary | ||
109 | ! to align the data. | ||
110 | ! D0Ar6 = bit offset, D1Ar5 = (32 - bit offset) | ||
111 | MULW D0Ar6, D0Ar6, #8 | ||
112 | MOV D1Ar5, #32 | ||
113 | SUB D1Ar5, D1Ar5, D0Ar6 | ||
114 | ! Move data 4 bytes before we enter the main loop | ||
115 | MOV D0Re0, D1Re0 | ||
116 | |||
117 | $Lloop_5_6_7: | ||
118 | GETL D0Ar2, D1Ar1, [++A1.2] | ||
119 | ! form 64-bit data in D0Re0, D1Re0 | ||
120 | LSR D0Re0, D0Re0, D0Ar6 | ||
121 | MOV D1Re0, D0Ar2 | ||
122 | LSL D1Re0, D1Re0, D1Ar5 | ||
123 | ADD D0Re0, D0Re0, D1Re0 | ||
124 | |||
125 | LSR D0Ar2, D0Ar2, D0Ar6 | ||
126 | LSL D1Re0, D1Ar1, D1Ar5 | ||
127 | ADD D1Re0, D1Re0, D0Ar2 | ||
128 | |||
129 | SETL [A0.2++], D0Re0, D1Re0 | ||
130 | MOV D0Re0, D1Ar1 | ||
131 | BR $Lloop_5_6_7 | ||
132 | |||
133 | B $Lunaligned_end | ||
134 | |||
135 | $Lunaligned_1_2_3: | ||
136 | ! Calculate the bit offsets required for the shift operations necessary | ||
137 | ! to align the data. | ||
138 | ! D0Ar6 = bit offset, D1Ar5 = (32 - bit offset) | ||
139 | MULW D0Ar6, D0Ar6, #8 | ||
140 | MOV D1Ar5, #32 | ||
141 | SUB D1Ar5, D1Ar5, D0Ar6 | ||
142 | |||
143 | $Lloop_1_2_3: | ||
144 | ! form 64-bit data in D0Re0,D1Re0 | ||
145 | LSR D0Re0, D0Re0, D0Ar6 | ||
146 | LSL D1Ar1, D1Re0, D1Ar5 | ||
147 | ADD D0Re0, D0Re0, D1Ar1 | ||
148 | MOV D0Ar2, D1Re0 | ||
149 | LSR D0FrT, D0Ar2, D0Ar6 | ||
150 | GETL D0Ar2, D1Ar1, [++A1.2] | ||
151 | |||
152 | MOV D1Re0, D0Ar2 | ||
153 | LSL D1Re0, D1Re0, D1Ar5 | ||
154 | ADD D1Re0, D1Re0, D0FrT | ||
155 | |||
156 | SETL [A0.2++], D0Re0, D1Re0 | ||
157 | MOV D0Re0, D0Ar2 | ||
158 | MOV D1Re0, D1Ar1 | ||
159 | BR $Lloop_1_2_3 | ||
160 | |||
161 | B $Lunaligned_end | ||
162 | |||
163 | ! The 4 byte mis-alignment case - this does not require any shifting, just a | ||
164 | ! shuffling of registers. | ||
165 | $Lunaligned_4: | ||
166 | MOV D0Re0, D1Re0 | ||
167 | $Lloop_4: | ||
168 | GETL D0Ar2, D1Ar1, [++A1.2] | ||
169 | MOV D1Re0, D0Ar2 | ||
170 | SETL [A0.2++], D0Re0, D1Re0 | ||
171 | MOV D0Re0, D1Ar1 | ||
172 | BR $Lloop_4 | ||
173 | |||
174 | $Lunaligned_end: | ||
175 | ! If there are no remaining bytes to copy, we are done. | ||
176 | ANDS D1Ar3, D1Ar3, #7 | ||
177 | BZ $Lend | ||
178 | ! Re-adjust the source pointer (A1.2) back to the actual (unaligned) byte | ||
179 | ! address of the remaining bytes, and fall through to the byte copy loop. | ||
180 | MOV D0Ar6, A1.2 | ||
181 | ADD D1Ar5, D0Ar4, D0Ar6 | ||
182 | MOV A1.2, D1Ar5 | ||
183 | B $Lbyte_copy | ||
184 | |||
185 | .size _memcpy,.-_memcpy | ||
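All three unaligned loops follow one pattern: fetch aligned source words, then rebuild each aligned destination word from two neighbours using complementary shifts. A simplified 32-bit little-endian C model of that inner step (the asm works on 64-bit GETL/SETL pairs; byte_off must be 1..3 here, since the 4-byte case needs no shifting at all, as $Lunaligned_4 shows):

	#include <stdint.h>

	/* dst is word-aligned; the data begins byte_off bytes into src[0]. */
	static void shifted_copy(uint32_t *dst, const uint32_t *src,
				 unsigned int byte_off, unsigned int nwords)
	{
		unsigned int lo = byte_off * 8;	/* the MULW #8 in the asm */
		unsigned int hi = 32 - lo;
		uint32_t prev = src[0];		/* the "prefetch" word */
		unsigned int i;

		for (i = 0; i < nwords; i++) {
			uint32_t next = src[i + 1];

			dst[i] = (prev >> lo) | (next << hi);
			prev = next;
		}
	}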
diff --git a/arch/metag/lib/memmove.S b/arch/metag/lib/memmove.S new file mode 100644 index 000000000000..228ea04d7b39 --- /dev/null +++ b/arch/metag/lib/memmove.S | |||
@@ -0,0 +1,345 @@ | |||
1 | ! Copyright (C) 2008-2012 Imagination Technologies Ltd. | ||
2 | |||
3 | .text | ||
4 | .global _memmove | ||
5 | .type _memmove,function | ||
6 | ! D1Ar1 dst | ||
7 | ! D0Ar2 src | ||
8 | ! D1Ar3 cnt | ||
9 | ! D0Re0 dst | ||
10 | _memmove: | ||
11 | CMP D1Ar3, #0 | ||
12 | MOV D0Re0, D1Ar1 | ||
13 | BZ $LEND2 | ||
14 | MSETL [A0StP], D0.5, D0.6, D0.7 | ||
15 | MOV D1Ar5, D0Ar2 | ||
16 | CMP D1Ar1, D1Ar5 | ||
17 | BLT $Lforwards_copy | ||
18 | SUB D0Ar4, D1Ar1, D1Ar3 | ||
19 | ADD D0Ar4, D0Ar4, #1 | ||
20 | CMP D0Ar2, D0Ar4 | ||
21 | BLT $Lforwards_copy | ||
22 | ! should copy backwards | ||
23 | MOV D1Re0, D0Ar2 | ||
24 | ! adjust pointer to the end of mem | ||
25 | ADD D0Ar2, D1Re0, D1Ar3 | ||
26 | ADD D1Ar1, D1Ar1, D1Ar3 | ||
27 | |||
28 | MOV A1.2, D0Ar2 | ||
29 | MOV A0.2, D1Ar1 | ||
30 | CMP D1Ar3, #8 | ||
31 | BLT $Lbbyte_loop | ||
32 | |||
33 | MOV D0Ar4, D0Ar2 | ||
34 | MOV D1Ar5, D1Ar1 | ||
35 | |||
36 | ! test 8 byte alignment | ||
37 | ANDS D1Ar5, D1Ar5, #7 | ||
38 | BNE $Lbdest_unaligned | ||
39 | |||
40 | ANDS D0Ar4, D0Ar4, #7 | ||
41 | BNE $Lbsrc_unaligned | ||
42 | |||
43 | LSR D1Ar5, D1Ar3, #3 | ||
44 | |||
45 | $Lbaligned_loop: | ||
46 | GETL D0Re0, D1Re0, [--A1.2] | ||
47 | SETL [--A0.2], D0Re0, D1Re0 | ||
48 | SUBS D1Ar5, D1Ar5, #1 | ||
49 | BNE $Lbaligned_loop | ||
50 | |||
51 | ANDS D1Ar3, D1Ar3, #7 | ||
52 | BZ $Lbbyte_loop_exit | ||
53 | $Lbbyte_loop: | ||
54 | GETB D1Re0, [--A1.2] | ||
55 | SETB [--A0.2], D1Re0 | ||
56 | SUBS D1Ar3, D1Ar3, #1 | ||
57 | BNE $Lbbyte_loop | ||
58 | $Lbbyte_loop_exit: | ||
59 | MOV D0Re0, A0.2 | ||
60 | $LEND: | ||
61 | SUB A0.2, A0StP, #24 | ||
62 | MGETL D0.5, D0.6, D0.7, [A0.2] | ||
63 | SUB A0StP, A0StP, #24 | ||
64 | $LEND2: | ||
65 | MOV PC, D1RtP | ||
66 | |||
67 | $Lbdest_unaligned: | ||
68 | GETB D0Re0, [--A1.2] | ||
69 | SETB [--A0.2], D0Re0 | ||
70 | SUBS D1Ar5, D1Ar5, #1 | ||
71 | SUB D1Ar3, D1Ar3, #1 | ||
72 | BNE $Lbdest_unaligned | ||
73 | CMP D1Ar3, #8 | ||
74 | BLT $Lbbyte_loop | ||
75 | $Lbsrc_unaligned: | ||
76 | LSR D1Ar5, D1Ar3, #3 | ||
77 | ! adjust A1.2 | ||
78 | MOV D0Ar4, A1.2 | ||
79 | ! save original address | ||
80 | MOV D0Ar6, A1.2 | ||
81 | |||
82 | ADD D0Ar4, D0Ar4, #7 | ||
83 | ANDMB D0Ar4, D0Ar4, #0xfff8 | ||
84 | ! new address is the 8-byte aligned one above the original | ||
85 | MOV A1.2, D0Ar4 | ||
86 | |||
87 | ! A0.2 dst 64-bit is aligned | ||
88 | ! measure the gap size | ||
89 | SUB D0Ar6, D0Ar4, D0Ar6 | ||
90 | MOVS D0Ar4, D0Ar6 | ||
91 | ! keep this information for the later adjustment | ||
92 | ! both aligned | ||
93 | BZ $Lbaligned_loop | ||
94 | |||
95 | ! prefetch | ||
96 | GETL D0Re0, D1Re0, [--A1.2] | ||
97 | |||
98 | CMP D0Ar6, #4 | ||
99 | BLT $Lbunaligned_1_2_3 | ||
100 | ! 32-bit aligned | ||
101 | BZ $Lbaligned_4 | ||
102 | |||
103 | SUB D0Ar6, D0Ar6, #4 | ||
104 | ! D1.6 stores the gap size in bits | ||
105 | MULW D1.6, D0Ar6, #8 | ||
106 | MOV D0.6, #32 | ||
107 | ! D0.6 stores the complement of the gap size | ||
108 | SUB D0.6, D0.6, D1.6 | ||
109 | |||
110 | $Lbunaligned_5_6_7: | ||
111 | GETL D0.7, D1.7, [--A1.2] | ||
112 | ! form 64-bit data in D0Re0, D1Re0 | ||
113 | MOV D1Re0, D0Re0 | ||
114 | ! D1Re0 << gap-size | ||
115 | LSL D1Re0, D1Re0, D1.6 | ||
116 | MOV D0Re0, D1.7 | ||
117 | ! D0Re0 >> complement | ||
118 | LSR D0Re0, D0Re0, D0.6 | ||
119 | MOV D1.5, D0Re0 | ||
120 | ! combine the two | ||
121 | ADD D1Re0, D1Re0, D1.5 | ||
122 | |||
123 | MOV D1.5, D1.7 | ||
124 | LSL D1.5, D1.5, D1.6 | ||
125 | MOV D0Re0, D0.7 | ||
126 | LSR D0Re0, D0Re0, D0.6 | ||
127 | MOV D0.5, D1.5 | ||
128 | ADD D0Re0, D0Re0, D0.5 | ||
129 | |||
130 | SETL [--A0.2], D0Re0, D1Re0 | ||
131 | MOV D0Re0, D0.7 | ||
132 | MOV D1Re0, D1.7 | ||
133 | SUBS D1Ar5, D1Ar5, #1 | ||
134 | BNE $Lbunaligned_5_6_7 | ||
135 | |||
136 | ANDS D1Ar3, D1Ar3, #7 | ||
137 | BZ $Lbbyte_loop_exit | ||
138 | ! Adjust A1.2 | ||
139 | ! A1.2 <- A1.2 + 8 - gapsize | ||
140 | ADD A1.2, A1.2, #8 | ||
141 | SUB A1.2, A1.2, D0Ar4 | ||
142 | B $Lbbyte_loop | ||
143 | |||
144 | $Lbunaligned_1_2_3: | ||
145 | MULW D1.6, D0Ar6, #8 | ||
146 | MOV D0.6, #32 | ||
147 | SUB D0.6, D0.6, D1.6 | ||
148 | |||
149 | $Lbunaligned_1_2_3_loop: | ||
150 | GETL D0.7, D1.7, [--A1.2] | ||
151 | ! form 64-bit data in D0Re0, D1Re0 | ||
152 | LSL D1Re0, D1Re0, D1.6 | ||
153 | ! save D0Re0 for later use | ||
154 | MOV D0.5, D0Re0 | ||
155 | LSR D0Re0, D0Re0, D0.6 | ||
156 | MOV D1.5, D0Re0 | ||
157 | ADD D1Re0, D1Re0, D1.5 | ||
158 | |||
159 | ! original data in D0Re0 | ||
160 | MOV D1.5, D0.5 | ||
161 | LSL D1.5, D1.5, D1.6 | ||
162 | MOV D0Re0, D1.7 | ||
163 | LSR D0Re0, D0Re0, D0.6 | ||
164 | MOV D0.5, D1.5 | ||
165 | ADD D0Re0, D0Re0, D0.5 | ||
166 | |||
167 | SETL [--A0.2], D0Re0, D1Re0 | ||
168 | MOV D0Re0, D0.7 | ||
169 | MOV D1Re0, D1.7 | ||
170 | SUBS D1Ar5, D1Ar5, #1 | ||
171 | BNE $Lbunaligned_1_2_3_loop | ||
172 | |||
173 | ANDS D1Ar3, D1Ar3, #7 | ||
174 | BZ $Lbbyte_loop_exit | ||
175 | ! Adjust A1.2 | ||
176 | ADD A1.2, A1.2, #8 | ||
177 | SUB A1.2, A1.2, D0Ar4 | ||
178 | B $Lbbyte_loop | ||
179 | |||
180 | $Lbaligned_4: | ||
181 | GETL D0.7, D1.7, [--A1.2] | ||
182 | MOV D1Re0, D0Re0 | ||
183 | MOV D0Re0, D1.7 | ||
184 | SETL [--A0.2], D0Re0, D1Re0 | ||
185 | MOV D0Re0, D0.7 | ||
186 | MOV D1Re0, D1.7 | ||
187 | SUBS D1Ar5, D1Ar5, #1 | ||
188 | BNE $Lbaligned_4 | ||
189 | ANDS D1Ar3, D1Ar3, #7 | ||
190 | BZ $Lbbyte_loop_exit | ||
191 | ! Adjust A1.2 | ||
192 | ADD A1.2, A1.2, #8 | ||
193 | SUB A1.2, A1.2, D0Ar4 | ||
194 | B $Lbbyte_loop | ||
195 | |||
196 | $Lforwards_copy: | ||
197 | MOV A1.2, D0Ar2 | ||
198 | MOV A0.2, D1Ar1 | ||
199 | CMP D1Ar3, #8 | ||
200 | BLT $Lfbyte_loop | ||
201 | |||
202 | MOV D0Ar4, D0Ar2 | ||
203 | MOV D1Ar5, D1Ar1 | ||
204 | |||
205 | ANDS D1Ar5, D1Ar5, #7 | ||
206 | BNE $Lfdest_unaligned | ||
207 | |||
208 | ANDS D0Ar4, D0Ar4, #7 | ||
209 | BNE $Lfsrc_unaligned | ||
210 | |||
211 | LSR D1Ar5, D1Ar3, #3 | ||
212 | |||
213 | $Lfaligned_loop: | ||
214 | GETL D0Re0, D1Re0, [A1.2++] | ||
215 | SUBS D1Ar5, D1Ar5, #1 | ||
216 | SETL [A0.2++], D0Re0, D1Re0 | ||
217 | BNE $Lfaligned_loop | ||
218 | |||
219 | ANDS D1Ar3, D1Ar3, #7 | ||
220 | BZ $Lfbyte_loop_exit | ||
221 | $Lfbyte_loop: | ||
222 | GETB D1Re0, [A1.2++] | ||
223 | SETB [A0.2++], D1Re0 | ||
224 | SUBS D1Ar3, D1Ar3, #1 | ||
225 | BNE $Lfbyte_loop | ||
226 | $Lfbyte_loop_exit: | ||
227 | MOV D0Re0, D1Ar1 | ||
228 | B $LEND | ||
229 | |||
230 | $Lfdest_unaligned: | ||
231 | GETB D0Re0, [A1.2++] | ||
232 | ADD D1Ar5, D1Ar5, #1 | ||
233 | SUB D1Ar3, D1Ar3, #1 | ||
234 | SETB [A0.2++], D0Re0 | ||
235 | CMP D1Ar5, #8 | ||
236 | BNE $Lfdest_unaligned | ||
237 | CMP D1Ar3, #8 | ||
238 | BLT $Lfbyte_loop | ||
239 | $Lfsrc_unaligned: | ||
240 | ! adjust A1.2 | ||
241 | LSR D1Ar5, D1Ar3, #3 | ||
242 | |||
243 | MOV D0Ar4, A1.2 | ||
244 | MOV D0Ar6, A1.2 | ||
245 | ANDMB D0Ar4, D0Ar4, #0xfff8 | ||
246 | MOV A1.2, D0Ar4 | ||
247 | |||
248 | ! A0.2 dst 64-bit is aligned | ||
249 | SUB D0Ar6, D0Ar6, D0Ar4 | ||
250 | ! keep the information for the later adjustment | ||
251 | MOVS D0Ar4, D0Ar6 | ||
252 | |||
253 | ! both aligned | ||
254 | BZ $Lfaligned_loop | ||
255 | |||
256 | ! prefetch | ||
257 | GETL D0Re0, D1Re0, [A1.2] | ||
258 | |||
259 | CMP D0Ar6, #4 | ||
260 | BLT $Lfunaligned_1_2_3 | ||
261 | BZ $Lfaligned_4 | ||
262 | |||
263 | SUB D0Ar6, D0Ar6, #4 | ||
264 | MULW D0.6, D0Ar6, #8 | ||
265 | MOV D1.6, #32 | ||
266 | SUB D1.6, D1.6, D0.6 | ||
267 | |||
268 | $Lfunaligned_5_6_7: | ||
269 | GETL D0.7, D1.7, [++A1.2] | ||
270 | ! form 64-bit data in D0Re0, D1Re0 | ||
271 | MOV D0Re0, D1Re0 | ||
272 | LSR D0Re0, D0Re0, D0.6 | ||
273 | MOV D1Re0, D0.7 | ||
274 | LSL D1Re0, D1Re0, D1.6 | ||
275 | MOV D0.5, D1Re0 | ||
276 | ADD D0Re0, D0Re0, D0.5 | ||
277 | |||
278 | MOV D0.5, D0.7 | ||
279 | LSR D0.5, D0.5, D0.6 | ||
280 | MOV D1Re0, D1.7 | ||
281 | LSL D1Re0, D1Re0, D1.6 | ||
282 | MOV D1.5, D0.5 | ||
283 | ADD D1Re0, D1Re0, D1.5 | ||
284 | |||
285 | SETL [A0.2++], D0Re0, D1Re0 | ||
286 | MOV D0Re0, D0.7 | ||
287 | MOV D1Re0, D1.7 | ||
288 | SUBS D1Ar5, D1Ar5, #1 | ||
289 | BNE $Lfunaligned_5_6_7 | ||
290 | |||
291 | ANDS D1Ar3, D1Ar3, #7 | ||
292 | BZ $Lfbyte_loop_exit | ||
293 | ! Adjust A1.2 | ||
294 | ADD A1.2, A1.2, D0Ar4 | ||
295 | B $Lfbyte_loop | ||
296 | |||
297 | $Lfunaligned_1_2_3: | ||
298 | MULW D0.6, D0Ar6, #8 | ||
299 | MOV D1.6, #32 | ||
300 | SUB D1.6, D1.6, D0.6 | ||
301 | |||
302 | $Lfunaligned_1_2_3_loop: | ||
303 | GETL D0.7, D1.7, [++A1.2] | ||
304 | ! form 64-bit data in D0Re0, D1Re0 | ||
305 | LSR D0Re0, D0Re0, D0.6 | ||
306 | MOV D1.5, D1Re0 | ||
307 | LSL D1Re0, D1Re0, D1.6 | ||
308 | MOV D0.5, D1Re0 | ||
309 | ADD D0Re0, D0Re0, D0.5 | ||
310 | |||
311 | MOV D0.5, D1.5 | ||
312 | LSR D0.5, D0.5, D0.6 | ||
313 | MOV D1Re0, D0.7 | ||
314 | LSL D1Re0, D1Re0, D1.6 | ||
315 | MOV D1.5, D0.5 | ||
316 | ADD D1Re0, D1Re0, D1.5 | ||
317 | |||
318 | SETL [A0.2++], D0Re0, D1Re0 | ||
319 | MOV D0Re0, D0.7 | ||
320 | MOV D1Re0, D1.7 | ||
321 | SUBS D1Ar5, D1Ar5, #1 | ||
322 | BNE $Lfunaligned_1_2_3_loop | ||
323 | |||
324 | ANDS D1Ar3, D1Ar3, #7 | ||
325 | BZ $Lfbyte_loop_exit | ||
326 | ! Adjust A1.2 | ||
327 | ADD A1.2, A1.2, D0Ar4 | ||
328 | B $Lfbyte_loop | ||
329 | |||
330 | $Lfaligned_4: | ||
331 | GETL D0.7, D1.7, [++A1.2] | ||
332 | MOV D0Re0, D1Re0 | ||
333 | MOV D1Re0, D0.7 | ||
334 | SETL [A0.2++], D0Re0, D1Re0 | ||
335 | MOV D0Re0, D0.7 | ||
336 | MOV D1Re0, D1.7 | ||
337 | SUBS D1Ar5, D1Ar5, #1 | ||
338 | BNE $Lfaligned_4 | ||
339 | ANDS D1Ar3, D1Ar3, #7 | ||
340 | BZ $Lfbyte_loop_exit | ||
341 | ! Adjust A1.2 | ||
342 | ADD A1.2, A1.2, D0Ar4 | ||
343 | B $Lfbyte_loop | ||
344 | |||
345 | .size _memmove,.-_memmove | ||
diff --git a/arch/metag/lib/memset.S b/arch/metag/lib/memset.S new file mode 100644 index 000000000000..721085bad1d2 --- /dev/null +++ b/arch/metag/lib/memset.S | |||
@@ -0,0 +1,86 @@ | |||
1 | ! Copyright (C) 2008-2012 Imagination Technologies Ltd. | ||
2 | |||
3 | .text | ||
4 | .global _memset | ||
5 | .type _memset,function | ||
6 | ! D1Ar1 dst | ||
7 | ! D0Ar2 c | ||
8 | ! D1Ar3 cnt | ||
9 | ! D0Re0 dst | ||
10 | _memset: | ||
11 | AND D0Ar2,D0Ar2,#0xFF ! Ensure a byte input value | ||
12 | MULW D0Ar2,D0Ar2,#0x0101 ! Duplicate byte value into 0-15 | ||
13 | ANDS D0Ar4,D1Ar1,#7 ! Extract bottom LSBs of dst | ||
14 | LSL D0Re0,D0Ar2,#16 ! Duplicate byte value into 16-31 | ||
15 | ADD A0.2,D0Ar2,D0Re0 ! Duplicate byte value into 4 (A0.2) | ||
16 | MOV D0Re0,D1Ar1 ! Return dst | ||
17 | BZ $LLongStub ! if start address is aligned | ||
18 | ! start address is not aligned on an 8 byte boundary, so we | ||
19 | ! need the number of bytes up to the next 8 byte address | ||
20 | ! boundary, or the total byte count if less than 8, in D1Ar5 | ||
21 | MOV D0Ar2,#8 ! Need 8 - N in D1Ar5 ... | ||
22 | SUB D1Ar5,D0Ar2,D0Ar4 ! ... subtract N | ||
23 | CMP D1Ar3,D1Ar5 | ||
24 | MOVMI D1Ar5,D1Ar3 | ||
25 | B $LByteStub ! dst is mis-aligned, do $LByteStub | ||
26 | |||
27 | ! | ||
28 | ! Preamble to LongLoop which generates 4*8 bytes per iteration (5 cycles) | ||
29 | ! | ||
30 | $LLongStub: | ||
31 | LSRS D0Ar2,D1Ar3,#5 | ||
32 | AND D1Ar3,D1Ar3,#0x1F | ||
33 | MOV A1.2,A0.2 | ||
34 | BEQ $LLongishStub | ||
35 | SUB TXRPT,D0Ar2,#1 | ||
36 | CMP D1Ar3,#0 | ||
37 | $LLongLoop: | ||
38 | SETL [D1Ar1++],A0.2,A1.2 | ||
39 | SETL [D1Ar1++],A0.2,A1.2 | ||
40 | SETL [D1Ar1++],A0.2,A1.2 | ||
41 | SETL [D1Ar1++],A0.2,A1.2 | ||
42 | BR $LLongLoop | ||
43 | BZ $Lexit | ||
44 | ! | ||
45 | ! Preamble to LongishLoop which generates 1*8 bytes per iteration (2 cycles) | ||
46 | ! | ||
47 | $LLongishStub: | ||
48 | LSRS D0Ar2,D1Ar3,#3 | ||
49 | AND D1Ar3,D1Ar3,#0x7 | ||
50 | MOV D1Ar5,D1Ar3 | ||
51 | BEQ $LByteStub | ||
52 | SUB TXRPT,D0Ar2,#1 | ||
53 | CMP D1Ar3,#0 | ||
54 | $LLongishLoop: | ||
55 | SETL [D1Ar1++],A0.2,A1.2 | ||
56 | BR $LLongishLoop | ||
57 | BZ $Lexit | ||
58 | ! | ||
59 | ! This does a byte structured burst of up to 7 bytes | ||
60 | ! | ||
61 | ! D1Ar1 should point to the location required | ||
62 | ! D1Ar3 should be the remaining total byte count | ||
63 | ! D1Ar5 should be burst size (<= D1Ar3) | ||
64 | ! | ||
65 | $LByteStub: | ||
66 | SUBS D1Ar3,D1Ar3,D1Ar5 ! Reduce count | ||
67 | ADD D1Ar1,D1Ar1,D1Ar5 ! Advance pointer to end of area | ||
68 | MULW D1Ar5,D1Ar5,#4 ! Scale to (1*4), (2*4), (3*4) | ||
69 | SUB D1Ar5,D1Ar5,#(8*4) ! Rebase to -(7*4), -(6*4), -(5*4), ... | ||
70 | MOV A1.2,D1Ar5 | ||
71 | SUB PC,CPC1,A1.2 ! Jump into table below | ||
72 | SETB [D1Ar1+#(-7)],A0.2 | ||
73 | SETB [D1Ar1+#(-6)],A0.2 | ||
74 | SETB [D1Ar1+#(-5)],A0.2 | ||
75 | SETB [D1Ar1+#(-4)],A0.2 | ||
76 | SETB [D1Ar1+#(-3)],A0.2 | ||
77 | SETB [D1Ar1+#(-2)],A0.2 | ||
78 | SETB [D1Ar1+#(-1)],A0.2 | ||
79 | ! | ||
80 | ! Return if all data has been output, otherwise do $LLongStub | ||
81 | ! | ||
82 | BNZ $LLongStub | ||
83 | $Lexit: | ||
84 | MOV PC,D1RtP | ||
85 | .size _memset,.-_memset | ||
86 | |||
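The first four instructions build the 32-bit fill pattern without a loop: the MULW by 0x0101 copies the byte into the low half-word, and the shifted add copies that pair into the top half. The equivalent C, for illustration:

	#include <stdint.h>

	static uint32_t replicate_byte(uint8_t c)
	{
		uint32_t x = c;

		x *= 0x0101;		/* 0xAB -> 0x0000ABAB (the MULW) */
		x += x << 16;		/* 0x0000ABAB -> 0xABABABAB (LSL + ADD) */
		return x;
	}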
diff --git a/arch/metag/lib/modsi3.S b/arch/metag/lib/modsi3.S new file mode 100644 index 000000000000..210cfa856593 --- /dev/null +++ b/arch/metag/lib/modsi3.S | |||
@@ -0,0 +1,38 @@ | |||
1 | ! Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007 | ||
2 | ! Imagination Technologies Ltd | ||
3 | ! | ||
4 | ! Integer modulus routines. | ||
5 | ! | ||
6 | !! | ||
7 | !! 32-bit modulus unsigned i/p - passed unsigned 32-bit numbers | ||
8 | !! | ||
9 | .text | ||
10 | .global ___umodsi3 | ||
11 | .type ___umodsi3,function | ||
12 | .align 2 | ||
13 | ___umodsi3: | ||
14 | MOV D0FrT,D1RtP ! Save original return address | ||
15 | CALLR D1RtP,___udivsi3 | ||
16 | MOV D1RtP,D0FrT ! Recover return address | ||
17 | MOV D0Re0,D1Ar1 ! Return remainder | ||
18 | MOV PC,D1RtP | ||
19 | .size ___umodsi3,.-___umodsi3 | ||
20 | |||
21 | !! | ||
22 | !! 32-bit modulus signed i/p - passed signed 32-bit numbers | ||
23 | !! | ||
24 | .global ___modsi3 | ||
25 | .type ___modsi3,function | ||
26 | .align 2 | ||
27 | ___modsi3: | ||
28 | MOV D0FrT,D1RtP ! Save original return address | ||
29 | MOV A0.2,D1Ar1 ! Save A in A0.2 | ||
30 | CALLR D1RtP,___divsi3 | ||
31 | MOV D1RtP,D0FrT ! Recover return address | ||
32 | MOV D1Re0,A0.2 ! Recover A | ||
33 | MOV D0Re0,D1Ar1 ! Return remainder | ||
34 | ORS D1Re0,D1Re0,D1Re0 ! Was A negative? | ||
35 | NEG D1Ar1,D1Ar1 ! Negate remainder | ||
36 | MOVMI D0Re0,D1Ar1 ! Return neg remainder | ||
37 | MOV PC, D1RtP | ||
38 | .size ___modsi3,.-___modsi3 | ||
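For reference, a hedged C model of the same semantics (not the kernel's code; it assumes ___divsi3/___udivsi3 leave the magnitude remainder in D1Ar1, as the register comments above indicate). C's % operator requires the remainder to take the sign of the dividend, which is what the ORS/MOVMI pair implements:

unsigned int umodsi3_model(unsigned int a, unsigned int b)
{
        return a - (a / b) * b;
}

int modsi3_model(int a, int b)
{
        unsigned int ua = a < 0 ? 0u - (unsigned int)a : (unsigned int)a;
        unsigned int ub = b < 0 ? 0u - (unsigned int)b : (unsigned int)b;
        unsigned int r = umodsi3_model(ua, ub);

        return a < 0 ? -(int)r : (int)r;        /* the MOVMI negation */
}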
diff --git a/arch/metag/lib/muldi3.S b/arch/metag/lib/muldi3.S new file mode 100644 index 000000000000..ee66ca8644d0 --- /dev/null +++ b/arch/metag/lib/muldi3.S | |||
@@ -0,0 +1,44 @@ | |||
1 | ! Copyright (C) 2012 by Imagination Technologies Ltd. | ||
2 | ! | ||
3 | ! 64-bit multiply routine. | ||
4 | ! | ||
5 | |||
6 | ! | ||
7 | ! 64-bit signed/unsigned multiply | ||
8 | ! | ||
9 | ! A = D1Ar1:D0Ar2 = a 2^48 + b 2^32 + c 2^16 + d 2^0 | ||
10 | ! | ||
11 | ! B = D1Ar3:D0Ar4 = w 2^48 + x 2^32 + y 2^16 + z 2^0 | ||
12 | ! | ||
13 | .text | ||
14 | .global ___muldi3 | ||
15 | .type ___muldi3,function | ||
16 | |||
17 | ___muldi3: | ||
18 | MULD D1Re0,D1Ar1,D0Ar4 ! (a 2^48 + b 2^32)(y 2^16 + z 2^0) | ||
19 | MULD D0Re0,D0Ar2,D1Ar3 ! (w 2^48 + x 2^32)(c 2^16 + d 2^0) | ||
20 | ADD D1Re0,D1Re0,D0Re0 | ||
21 | |||
22 | MULW D0Re0,D0Ar2,D0Ar4 ! (d 2^0) * (z 2^0) | ||
23 | |||
24 | RTDW D0Ar2,D0Ar2 | ||
25 | MULW D0Ar6,D0Ar2,D0Ar4 ! (c 2^16)(z 2^0) | ||
26 | LSR D1Ar5,D0Ar6,#16 | ||
27 | LSL D0Ar6,D0Ar6,#16 | ||
28 | ADDS D0Re0,D0Re0,D0Ar6 | ||
29 | ADDCS D1Re0,D1Re0,#1 | ||
30 | RTDW D0Ar4,D0Ar4 | ||
31 | ADD D1Re0,D1Re0,D1Ar5 | ||
32 | |||
33 | MULW D0Ar6,D0Ar2,D0Ar4 ! (c 2^16)(y 2^16) | ||
34 | ADD D1Re0,D1Re0,D0Ar6 | ||
35 | |||
36 | RTDW D0Ar2,D0Ar2 | ||
37 | MULW D0Ar6,D0Ar2,D0Ar4 ! (d 2^0)(y 2^16) | ||
38 | LSR D1Ar5,D0Ar6,#16 | ||
39 | LSL D0Ar6,D0Ar6,#16 | ||
40 | ADDS D0Re0,D0Re0,D0Ar6 | ||
41 | ADD D1Re0,D1Re0,D1Ar5 | ||
42 | ADDCS D1Re0,D1Re0,#1 | ||
43 | MOV PC, D1RtP | ||
44 | .size ___muldi3,.-___muldi3 | ||
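The decomposition in the header comment reads more naturally in C. A hedged sketch of the arithmetic (not the kernel's code): only the low 32 bits of the two cross products can land inside a 64-bit result, so they may be truncated, while the low x low product must be exact; the assembly builds that exact product from 16x16 MULW partial products, which is why the comment splits the operands at 2^16.

#include <stdint.h>

uint64_t muldi3_model(uint64_t a, uint64_t b)
{
        uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
        uint32_t bl = (uint32_t)b, bh = (uint32_t)(b >> 32);

        uint64_t lo = (uint64_t)al * bl;        /* exact, from MULW pieces  */
        uint32_t hi = ah * bl + al * bh;        /* truncated cross products */

        return ((uint64_t)hi << 32) + lo;
}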
diff --git a/arch/metag/lib/ucmpdi2.S b/arch/metag/lib/ucmpdi2.S new file mode 100644 index 000000000000..6f3347f7daeb --- /dev/null +++ b/arch/metag/lib/ucmpdi2.S | |||
@@ -0,0 +1,27 @@ | |||
1 | ! Copyright (C) 2012 by Imagination Technologies Ltd. | ||
2 | ! | ||
3 | ! 64-bit unsigned compare routine. | ||
4 | ! | ||
5 | |||
6 | .text | ||
7 | .global ___ucmpdi2 | ||
8 | .type ___ucmpdi2,function | ||
9 | |||
10 | ! low high | ||
11 | ! u64 a (D0Ar2, D1Ar1) | ||
12 | ! u64 b (D0Ar4, D1Ar3) | ||
13 | ___ucmpdi2: | ||
14 | ! start at 1 (equal) and conditionally increment or decrement | ||
15 | MOV D0Re0,#1 | ||
16 | |||
17 | ! high words | ||
18 | CMP D1Ar1,D1Ar3 | ||
19 | ! or if equal, low words | ||
20 | CMPEQ D0Ar2,D0Ar4 | ||
21 | |||
22 | ! unsigned compare | ||
23 | SUBLO D0Re0,D0Re0,#1 | ||
24 | ADDHI D0Re0,D0Re0,#1 | ||
25 | |||
26 | MOV PC,D1RtP | ||
27 | .size ___ucmpdi2,.-___ucmpdi2 | ||
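Equivalently in C (libgcc's __ucmpdi2 contract: 0 for a < b, 1 for equal, 2 for a > b), a minimal model of the conditional increment/decrement above:

int ucmpdi2_model(unsigned long long a, unsigned long long b)
{
        int res = 1;            /* start at "equal" */

        if (a < b)
                res -= 1;       /* the SUBLO */
        else if (a > b)
                res += 1;       /* the ADDHI */
        return res;
}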
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c new file mode 100644 index 000000000000..b3ebfe9c8e88 --- /dev/null +++ b/arch/metag/lib/usercopy.c | |||
@@ -0,0 +1,1354 @@ | |||
1 | /* | ||
2 | * User address space access functions. | ||
3 | * The non-inlined parts of asm-metag/uaccess.h are here. | ||
4 | * | ||
5 | * Copyright (C) 2006, Imagination Technologies. | ||
6 | * Copyright (C) 2000, Axis Communications AB. | ||
7 | * | ||
8 | * Written by Hans-Peter Nilsson. | ||
9 | * Pieces used from memcpy, originally by Kenny Ranerup long time ago. | ||
10 | * Modified for Meta by Will Newton. | ||
11 | */ | ||
12 | |||
13 | #include <linux/export.h> | ||
14 | #include <linux/uaccess.h> | ||
15 | #include <asm/cache.h> /* def of L1_CACHE_BYTES */ | ||
16 | |||
17 | #define USE_RAPF | ||
18 | #define RAPF_MIN_BUF_SIZE (3*L1_CACHE_BYTES) | ||
19 | |||
20 | |||
21 | /* The "double write" in this code is because the Meta will not fault | ||
22 | * immediately unless the memory pipe is forced to by e.g. a data stall or | ||
23 | * another memory op. The second write should be discarded by the write | ||
24 | * combiner so should have virtually no cost. | ||
25 | */ | ||
26 | |||
27 | #define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
28 | asm volatile ( \ | ||
29 | COPY \ | ||
30 | "1:\n" \ | ||
31 | " .section .fixup,\"ax\"\n" \ | ||
32 | " MOV D1Ar1,#0\n" \ | ||
33 | FIXUP \ | ||
34 | " MOVT D1Ar1,#HI(1b)\n" \ | ||
35 | " JUMP D1Ar1,#LO(1b)\n" \ | ||
36 | " .previous\n" \ | ||
37 | " .section __ex_table,\"a\"\n" \ | ||
38 | TENTRY \ | ||
39 | " .previous\n" \ | ||
40 | : "=r" (to), "=r" (from), "=r" (ret) \ | ||
41 | : "0" (to), "1" (from), "2" (ret) \ | ||
42 | : "D1Ar1", "memory") | ||
43 | |||
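The FIXUP/TENTRY plumbing follows the usual kernel exception-table pattern: each ".long 2b,3b" entry pairs a possibly-faulting instruction address with a fixup address, and the page-fault handler diverts execution there instead of killing the copy. A hedged sketch of that consumer side (search_exception_tables() is the generic kernel helper; exactly how metag's fault handler rewrites the PC is assumed here):

static int fixup_exception_sketch(unsigned long faulting_pc,
                                  unsigned long *new_pc)
{
        const struct exception_table_entry *fixup;

        fixup = search_exception_tables(faulting_pc);
        if (!fixup)
                return 0;               /* genuine fault, no fixup registered */
        *new_pc = fixup->fixup;         /* resume in the .fixup stub */
        return 1;
}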
44 | |||
45 | #define __asm_copy_to_user_1(to, from, ret) \ | ||
46 | __asm_copy_user_cont(to, from, ret, \ | ||
47 | " GETB D1Ar1,[%1++]\n" \ | ||
48 | " SETB [%0],D1Ar1\n" \ | ||
49 | "2: SETB [%0++],D1Ar1\n", \ | ||
50 | "3: ADD %2,%2,#1\n", \ | ||
51 | " .long 2b,3b\n") | ||
52 | |||
53 | #define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
54 | __asm_copy_user_cont(to, from, ret, \ | ||
55 | " GETW D1Ar1,[%1++]\n" \ | ||
56 | " SETW [%0],D1Ar1\n" \ | ||
57 | "2: SETW [%0++],D1Ar1\n" COPY, \ | ||
58 | "3: ADD %2,%2,#2\n" FIXUP, \ | ||
59 | " .long 2b,3b\n" TENTRY) | ||
60 | |||
61 | #define __asm_copy_to_user_2(to, from, ret) \ | ||
62 | __asm_copy_to_user_2x_cont(to, from, ret, "", "", "") | ||
63 | |||
64 | #define __asm_copy_to_user_3(to, from, ret) \ | ||
65 | __asm_copy_to_user_2x_cont(to, from, ret, \ | ||
66 | " GETB D1Ar1,[%1++]\n" \ | ||
67 | " SETB [%0],D1Ar1\n" \ | ||
68 | "4: SETB [%0++],D1Ar1\n", \ | ||
69 | "5: ADD %2,%2,#1\n", \ | ||
70 | " .long 4b,5b\n") | ||
71 | |||
72 | #define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
73 | __asm_copy_user_cont(to, from, ret, \ | ||
74 | " GETD D1Ar1,[%1++]\n" \ | ||
75 | " SETD [%0],D1Ar1\n" \ | ||
76 | "2: SETD [%0++],D1Ar1\n" COPY, \ | ||
77 | "3: ADD %2,%2,#4\n" FIXUP, \ | ||
78 | " .long 2b,3b\n" TENTRY) | ||
79 | |||
80 | #define __asm_copy_to_user_4(to, from, ret) \ | ||
81 | __asm_copy_to_user_4x_cont(to, from, ret, "", "", "") | ||
82 | |||
83 | #define __asm_copy_to_user_5(to, from, ret) \ | ||
84 | __asm_copy_to_user_4x_cont(to, from, ret, \ | ||
85 | " GETB D1Ar1,[%1++]\n" \ | ||
86 | " SETB [%0],D1Ar1\n" \ | ||
87 | "4: SETB [%0++],D1Ar1\n", \ | ||
88 | "5: ADD %2,%2,#1\n", \ | ||
89 | " .long 4b,5b\n") | ||
90 | |||
91 | #define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
92 | __asm_copy_to_user_4x_cont(to, from, ret, \ | ||
93 | " GETW D1Ar1,[%1++]\n" \ | ||
94 | " SETW [%0],D1Ar1\n" \ | ||
95 | "4: SETW [%0++],D1Ar1\n" COPY, \ | ||
96 | "5: ADD %2,%2,#2\n" FIXUP, \ | ||
97 | " .long 4b,5b\n" TENTRY) | ||
98 | |||
99 | #define __asm_copy_to_user_6(to, from, ret) \ | ||
100 | __asm_copy_to_user_6x_cont(to, from, ret, "", "", "") | ||
101 | |||
102 | #define __asm_copy_to_user_7(to, from, ret) \ | ||
103 | __asm_copy_to_user_6x_cont(to, from, ret, \ | ||
104 | " GETB D1Ar1,[%1++]\n" \ | ||
105 | " SETB [%0],D1Ar1\n" \ | ||
106 | "6: SETB [%0++],D1Ar1\n", \ | ||
107 | "7: ADD %2,%2,#1\n", \ | ||
108 | " .long 6b,7b\n") | ||
109 | |||
110 | #define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
111 | __asm_copy_to_user_4x_cont(to, from, ret, \ | ||
112 | " GETD D1Ar1,[%1++]\n" \ | ||
113 | " SETD [%0],D1Ar1\n" \ | ||
114 | "4: SETD [%0++],D1Ar1\n" COPY, \ | ||
115 | "5: ADD %2,%2,#4\n" FIXUP, \ | ||
116 | " .long 4b,5b\n" TENTRY) | ||
117 | |||
118 | #define __asm_copy_to_user_8(to, from, ret) \ | ||
119 | __asm_copy_to_user_8x_cont(to, from, ret, "", "", "") | ||
120 | |||
121 | #define __asm_copy_to_user_9(to, from, ret) \ | ||
122 | __asm_copy_to_user_8x_cont(to, from, ret, \ | ||
123 | " GETB D1Ar1,[%1++]\n" \ | ||
124 | " SETB [%0],D1Ar1\n" \ | ||
125 | "6: SETB [%0++],D1Ar1\n", \ | ||
126 | "7: ADD %2,%2,#1\n", \ | ||
127 | " .long 6b,7b\n") | ||
128 | |||
129 | #define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
130 | __asm_copy_to_user_8x_cont(to, from, ret, \ | ||
131 | " GETW D1Ar1,[%1++]\n" \ | ||
132 | " SETW [%0],D1Ar1\n" \ | ||
133 | "6: SETW [%0++],D1Ar1\n" COPY, \ | ||
134 | "7: ADD %2,%2,#2\n" FIXUP, \ | ||
135 | " .long 6b,7b\n" TENTRY) | ||
136 | |||
137 | #define __asm_copy_to_user_10(to, from, ret) \ | ||
138 | __asm_copy_to_user_10x_cont(to, from, ret, "", "", "") | ||
139 | |||
140 | #define __asm_copy_to_user_11(to, from, ret) \ | ||
141 | __asm_copy_to_user_10x_cont(to, from, ret, \ | ||
142 | " GETB D1Ar1,[%1++]\n" \ | ||
143 | " SETB [%0],D1Ar1\n" \ | ||
144 | "8: SETB [%0++],D1Ar1\n", \ | ||
145 | "9: ADD %2,%2,#1\n", \ | ||
146 | " .long 8b,9b\n") | ||
147 | |||
148 | #define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
149 | __asm_copy_to_user_8x_cont(to, from, ret, \ | ||
150 | " GETD D1Ar1,[%1++]\n" \ | ||
151 | " SETD [%0],D1Ar1\n" \ | ||
152 | "6: SETD [%0++],D1Ar1\n" COPY, \ | ||
153 | "7: ADD %2,%2,#4\n" FIXUP, \ | ||
154 | " .long 6b,7b\n" TENTRY) | ||
155 | #define __asm_copy_to_user_12(to, from, ret) \ | ||
156 | __asm_copy_to_user_12x_cont(to, from, ret, "", "", "") | ||
157 | |||
158 | #define __asm_copy_to_user_13(to, from, ret) \ | ||
159 | __asm_copy_to_user_12x_cont(to, from, ret, \ | ||
160 | " GETB D1Ar1,[%1++]\n" \ | ||
161 | " SETB [%0],D1Ar1\n" \ | ||
162 | "8: SETB [%0++],D1Ar1\n", \ | ||
163 | "9: ADD %2,%2,#1\n", \ | ||
164 | " .long 8b,9b\n") | ||
165 | |||
166 | #define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
167 | __asm_copy_to_user_12x_cont(to, from, ret, \ | ||
168 | " GETW D1Ar1,[%1++]\n" \ | ||
169 | " SETW [%0],D1Ar1\n" \ | ||
170 | "8: SETW [%0++],D1Ar1\n" COPY, \ | ||
171 | "9: ADD %2,%2,#2\n" FIXUP, \ | ||
172 | " .long 8b,9b\n" TENTRY) | ||
173 | |||
174 | #define __asm_copy_to_user_14(to, from, ret) \ | ||
175 | __asm_copy_to_user_14x_cont(to, from, ret, "", "", "") | ||
176 | |||
177 | #define __asm_copy_to_user_15(to, from, ret) \ | ||
178 | __asm_copy_to_user_14x_cont(to, from, ret, \ | ||
179 | " GETB D1Ar1,[%1++]\n" \ | ||
180 | " SETB [%0],D1Ar1\n" \ | ||
181 | "10: SETB [%0++],D1Ar1\n", \ | ||
182 | "11: ADD %2,%2,#1\n", \ | ||
183 | " .long 10b,11b\n") | ||
184 | |||
185 | #define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
186 | __asm_copy_to_user_12x_cont(to, from, ret, \ | ||
187 | " GETD D1Ar1,[%1++]\n" \ | ||
188 | " SETD [%0],D1Ar1\n" \ | ||
189 | "8: SETD [%0++],D1Ar1\n" COPY, \ | ||
190 | "9: ADD %2,%2,#4\n" FIXUP, \ | ||
191 | " .long 8b,9b\n" TENTRY) | ||
192 | |||
193 | #define __asm_copy_to_user_16(to, from, ret) \ | ||
194 | __asm_copy_to_user_16x_cont(to, from, ret, "", "", "") | ||
195 | |||
196 | #define __asm_copy_to_user_8x64(to, from, ret) \ | ||
197 | asm volatile ( \ | ||
198 | " GETL D0Ar2,D1Ar1,[%1++]\n" \ | ||
199 | " SETL [%0],D0Ar2,D1Ar1\n" \ | ||
200 | "2: SETL [%0++],D0Ar2,D1Ar1\n" \ | ||
201 | "1:\n" \ | ||
202 | " .section .fixup,\"ax\"\n" \ | ||
203 | "3: ADD %2,%2,#8\n" \ | ||
204 | " MOVT D0Ar2,#HI(1b)\n" \ | ||
205 | " JUMP D0Ar2,#LO(1b)\n" \ | ||
206 | " .previous\n" \ | ||
207 | " .section __ex_table,\"a\"\n" \ | ||
208 | " .long 2b,3b\n" \ | ||
209 | " .previous\n" \ | ||
210 | : "=r" (to), "=r" (from), "=r" (ret) \ | ||
211 | : "0" (to), "1" (from), "2" (ret) \ | ||
212 | : "D1Ar1", "D0Ar2", "memory") | ||
213 | |||
214 | /* | ||
215 | * optimized copying loop using RAPF when 64 bit aligned | ||
216 | * | ||
217 | * n will be automatically decremented inside the loop | ||
218 | * ret will be left intact. If an error occurs we will rewind | ||
219 | * so that the original non-optimized code will fill up | ||
220 | * this value correctly. | ||
221 | * | ||
222 | * on fault: | ||
223 | * > n will hold total number of uncopied bytes | ||
224 | * | ||
225 | * > {'to','from'} will be rewound so that | ||
226 | * the non-optimized code will do the proper fix up | ||
227 | * | ||
228 | * DCACHE drops the cacheline which helps in reducing cache | ||
229 | * pollution. | ||
230 | * | ||
231 | * We introduce an extra SETL at the end of the loop to | ||
232 | * ensure we don't fall off the loop before we catch all | ||
233 | * errors. | ||
234 | * | ||
235 | * NOTICE: | ||
236 | * LSM_STEP in TXSTATUS must be cleared in fix up code. | ||
237 | * since we're using M{S,G}ETL, a fault might happen at | ||
238 | * any address in the middle of M{S,G}ETL causing | ||
239 | * the value of LSM_STEP to be incorrect which can | ||
240 | * cause subsequent use of M{S,G}ET{L,D} to go wrong. | ||
241 | * i.e. if LSM_STEP was 1 when a fault occurs, the | ||
242 | * next call to M{S,G}ET{L,D} will skip the first | ||
243 | * copy/get as it thinks that the first one has already | ||
244 | * been done. | ||
245 | * | ||
246 | */ | ||
247 | #define __asm_copy_user_64bit_rapf_loop( \ | ||
248 | to, from, ret, n, id, FIXUP) \ | ||
249 | asm volatile ( \ | ||
250 | ".balign 8\n" \ | ||
251 | "MOV RAPF, %1\n" \ | ||
252 | "MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \ | ||
253 | "MOV D0Ar6, #0\n" \ | ||
254 | "LSR D1Ar5, %3, #6\n" \ | ||
255 | "SUB TXRPT, D1Ar5, #2\n" \ | ||
256 | "MOV RAPF, %1\n" \ | ||
257 | "$Lloop"id":\n" \ | ||
258 | "ADD RAPF, %1, #64\n" \ | ||
259 | "21:\n" \ | ||
260 | "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
261 | "22:\n" \ | ||
262 | "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
263 | "SUB %3, %3, #32\n" \ | ||
264 | "23:\n" \ | ||
265 | "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
266 | "24:\n" \ | ||
267 | "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
268 | "SUB %3, %3, #32\n" \ | ||
269 | "DCACHE [%1+#-64], D0Ar6\n" \ | ||
270 | "BR $Lloop"id"\n" \ | ||
271 | \ | ||
272 | "MOV RAPF, %1\n" \ | ||
273 | "25:\n" \ | ||
274 | "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
275 | "26:\n" \ | ||
276 | "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
277 | "SUB %3, %3, #32\n" \ | ||
278 | "27:\n" \ | ||
279 | "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
280 | "28:\n" \ | ||
281 | "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
282 | "SUB %0, %0, #8\n" \ | ||
283 | "29:\n" \ | ||
284 | "SETL [%0++], D0.7, D1.7\n" \ | ||
285 | "SUB %3, %3, #32\n" \ | ||
286 | "1:" \ | ||
287 | "DCACHE [%1+#-64], D0Ar6\n" \ | ||
288 | "GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \ | ||
289 | "GETL D0FrT, D1RtP, [A0StP+#-32]\n" \ | ||
290 | "GETL D0.5, D1.5, [A0StP+#-24]\n" \ | ||
291 | "GETL D0.6, D1.6, [A0StP+#-16]\n" \ | ||
292 | "GETL D0.7, D1.7, [A0StP+#-8]\n" \ | ||
293 | "SUB A0StP, A0StP, #40\n" \ | ||
294 | " .section .fixup,\"ax\"\n" \ | ||
295 | "4:\n" \ | ||
296 | " ADD %0, %0, #8\n" \ | ||
297 | "3:\n" \ | ||
298 | " MOV D0Ar2, TXSTATUS\n" \ | ||
299 | " MOV D1Ar1, TXSTATUS\n" \ | ||
300 | " AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \ | ||
301 | " MOV TXSTATUS, D1Ar1\n" \ | ||
302 | FIXUP \ | ||
303 | " MOVT D0Ar2,#HI(1b)\n" \ | ||
304 | " JUMP D0Ar2,#LO(1b)\n" \ | ||
305 | " .previous\n" \ | ||
306 | " .section __ex_table,\"a\"\n" \ | ||
307 | " .long 21b,3b\n" \ | ||
308 | " .long 22b,3b\n" \ | ||
309 | " .long 23b,3b\n" \ | ||
310 | " .long 24b,3b\n" \ | ||
311 | " .long 25b,3b\n" \ | ||
312 | " .long 26b,3b\n" \ | ||
313 | " .long 27b,3b\n" \ | ||
314 | " .long 28b,3b\n" \ | ||
315 | " .long 29b,4b\n" \ | ||
316 | " .previous\n" \ | ||
317 | : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ | ||
318 | : "0" (to), "1" (from), "2" (ret), "3" (n) \ | ||
319 | : "D1Ar1", "D0Ar2", "memory") | ||
320 | |||
321 | /* rewind 'to' and 'from' pointers when a fault occurs | ||
322 | * | ||
323 | * Rationale: | ||
324 | * A fault always occurs on writing to user buffer. A fault | ||
325 | * is at a single address, so we need to rewind by only 8 | ||
326 | * bytes. | ||
327 | * Since we do a complete read from kernel buffer before | ||
328 | * writing, we need to rewind it also. The amount to be | ||
329 | * rewound equals the number of faulty writes in MSETL | ||
330 | * which is: [4 - (LSM_STEP-1)]*8 | ||
331 | * LSM_STEP is bits 10:8 in TXSTATUS which is already read | ||
332 | * and stored in D0Ar2 | ||
333 | * | ||
334 | * NOTE: If a fault occurs at the last operation in M{G,S}ETL | ||
335 | * LSM_STEP will be 0. ie: we do 4 writes in our case, if | ||
336 | * a fault happens at the 4th write, LSM_STEP will be 0 | ||
337 | * instead of 4. The code copes with that. | ||
338 | * | ||
339 | * n is updated by the number of successful writes, which is: | ||
340 | * n = n - (LSM_STEP-1)*8 | ||
341 | */ | ||
342 | #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\ | ||
343 | __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ | ||
344 | "LSR D0Ar2, D0Ar2, #8\n" \ | ||
345 | "AND D0Ar2, D0Ar2, #0x7\n" \ | ||
346 | "ADDZ D0Ar2, D0Ar2, #4\n" \ | ||
347 | "SUB D0Ar2, D0Ar2, #1\n" \ | ||
348 | "MOV D1Ar1, #4\n" \ | ||
349 | "SUB D0Ar2, D1Ar1, D0Ar2\n" \ | ||
350 | "LSL D0Ar2, D0Ar2, #3\n" \ | ||
351 | "LSL D1Ar1, D1Ar1, #3\n" \ | ||
352 | "SUB D1Ar1, D1Ar1, D0Ar2\n" \ | ||
353 | "SUB %0, %0, #8\n" \ | ||
354 | "SUB %1, %1,D0Ar2\n" \ | ||
355 | "SUB %3, %3, D1Ar1\n") | ||
356 | |||
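The rewind arithmetic is dense, so here is a hedged C model of what the FIXUP body above computes (invented names, not kernel code); the assembly additionally rewinds 'to' by 8 bytes for the faulting write itself:

static void rapf64_rewind_model(unsigned int lsm_step,
                                unsigned long *from, unsigned long *n)
{
        unsigned int done, faulty;

        if (lsm_step == 0)                      /* the ADDZ: a fault on the */
                lsm_step = 4;                   /* 4th write reports 0      */
        done   = (lsm_step - 1) * 8;            /* bytes already written    */
        faulty = (4 - (lsm_step - 1)) * 8;      /* [4 - (LSM_STEP-1)]*8     */
        *from -= faulty;                        /* re-read the unwritten part */
        *n    -= done;                          /* n = n - (LSM_STEP-1)*8   */
}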
357 | /* | ||
358 | * optimized copying loop using RAPF when 32 bit aligned | ||
359 | * | ||
360 | * n will be automatically decremented inside the loop | ||
361 | * ret will be left intact. If an error occurs we will rewind | ||
362 | * so that the original non-optimized code will fill up | ||
363 | * this value correctly. | ||
364 | * | ||
365 | * on fault: | ||
366 | * > n will hold total number of uncopied bytes | ||
367 | * | ||
368 | * > {'to','from'} will be rewound so that | ||
369 | * the non-optimized code will do the proper fix up | ||
370 | * | ||
371 | * DCACHE drops the cacheline which helps in reducing cache | ||
372 | * pollution. | ||
373 | * | ||
374 | * We introduce an extra SETD at the end of the loop to | ||
375 | * ensure we don't fall off the loop before we catch all | ||
376 | * errors. | ||
377 | * | ||
378 | * NOTICE: | ||
379 | * LSM_STEP in TXSTATUS must be cleared in fix up code. | ||
380 | * since we're using M{S,G}ETL, a fault might happen at | ||
381 | * any address in the middle of M{S,G}ETL causing | ||
382 | * the value of LSM_STEP to be incorrect which can | ||
383 | * cause subsequent use of M{S,G}ET{L,D} to go wrong. | ||
384 | * i.e. if LSM_STEP was 1 when a fault occurs, the | ||
385 | * next call to M{S,G}ET{L,D} will skip the first | ||
386 | * copy/get as it thinks that the first one has already | ||
387 | * been done. | ||
388 | * | ||
389 | */ | ||
390 | #define __asm_copy_user_32bit_rapf_loop( \ | ||
391 | to, from, ret, n, id, FIXUP) \ | ||
392 | asm volatile ( \ | ||
393 | ".balign 8\n" \ | ||
394 | "MOV RAPF, %1\n" \ | ||
395 | "MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \ | ||
396 | "MOV D0Ar6, #0\n" \ | ||
397 | "LSR D1Ar5, %3, #6\n" \ | ||
398 | "SUB TXRPT, D1Ar5, #2\n" \ | ||
399 | "MOV RAPF, %1\n" \ | ||
400 | "$Lloop"id":\n" \ | ||
401 | "ADD RAPF, %1, #64\n" \ | ||
402 | "21:\n" \ | ||
403 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
404 | "22:\n" \ | ||
405 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
406 | "SUB %3, %3, #16\n" \ | ||
407 | "23:\n" \ | ||
408 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
409 | "24:\n" \ | ||
410 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
411 | "SUB %3, %3, #16\n" \ | ||
412 | "25:\n" \ | ||
413 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
414 | "26:\n" \ | ||
415 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
416 | "SUB %3, %3, #16\n" \ | ||
417 | "27:\n" \ | ||
418 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
419 | "28:\n" \ | ||
420 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
421 | "SUB %3, %3, #16\n" \ | ||
422 | "DCACHE [%1+#-64], D0Ar6\n" \ | ||
423 | "BR $Lloop"id"\n" \ | ||
424 | \ | ||
425 | "MOV RAPF, %1\n" \ | ||
426 | "29:\n" \ | ||
427 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
428 | "30:\n" \ | ||
429 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
430 | "SUB %3, %3, #16\n" \ | ||
431 | "31:\n" \ | ||
432 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
433 | "32:\n" \ | ||
434 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
435 | "SUB %3, %3, #16\n" \ | ||
436 | "33:\n" \ | ||
437 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
438 | "34:\n" \ | ||
439 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
440 | "SUB %3, %3, #16\n" \ | ||
441 | "35:\n" \ | ||
442 | "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ | ||
443 | "36:\n" \ | ||
444 | "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ | ||
445 | "SUB %0, %0, #4\n" \ | ||
446 | "37:\n" \ | ||
447 | "SETD [%0++], D0.7\n" \ | ||
448 | "SUB %3, %3, #16\n" \ | ||
449 | "1:" \ | ||
450 | "DCACHE [%1+#-64], D0Ar6\n" \ | ||
451 | "GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \ | ||
452 | "GETL D0FrT, D1RtP, [A0StP+#-32]\n" \ | ||
453 | "GETL D0.5, D1.5, [A0StP+#-24]\n" \ | ||
454 | "GETL D0.6, D1.6, [A0StP+#-16]\n" \ | ||
455 | "GETL D0.7, D1.7, [A0StP+#-8]\n" \ | ||
456 | "SUB A0StP, A0StP, #40\n" \ | ||
457 | " .section .fixup,\"ax\"\n" \ | ||
458 | "4:\n" \ | ||
459 | " ADD %0, %0, #4\n" \ | ||
460 | "3:\n" \ | ||
461 | " MOV D0Ar2, TXSTATUS\n" \ | ||
462 | " MOV D1Ar1, TXSTATUS\n" \ | ||
463 | " AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \ | ||
464 | " MOV TXSTATUS, D1Ar1\n" \ | ||
465 | FIXUP \ | ||
466 | " MOVT D0Ar2,#HI(1b)\n" \ | ||
467 | " JUMP D0Ar2,#LO(1b)\n" \ | ||
468 | " .previous\n" \ | ||
469 | " .section __ex_table,\"a\"\n" \ | ||
470 | " .long 21b,3b\n" \ | ||
471 | " .long 22b,3b\n" \ | ||
472 | " .long 23b,3b\n" \ | ||
473 | " .long 24b,3b\n" \ | ||
474 | " .long 25b,3b\n" \ | ||
475 | " .long 26b,3b\n" \ | ||
476 | " .long 27b,3b\n" \ | ||
477 | " .long 28b,3b\n" \ | ||
478 | " .long 29b,3b\n" \ | ||
479 | " .long 30b,3b\n" \ | ||
480 | " .long 31b,3b\n" \ | ||
481 | " .long 32b,3b\n" \ | ||
482 | " .long 33b,3b\n" \ | ||
483 | " .long 34b,3b\n" \ | ||
484 | " .long 35b,3b\n" \ | ||
485 | " .long 36b,3b\n" \ | ||
486 | " .long 37b,4b\n" \ | ||
487 | " .previous\n" \ | ||
488 | : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ | ||
489 | : "0" (to), "1" (from), "2" (ret), "3" (n) \ | ||
490 | : "D1Ar1", "D0Ar2", "memory") | ||
491 | |||
492 | /* rewind 'to' and 'from' pointers when a fault occurs | ||
493 | * | ||
494 | * Rationale: | ||
495 | * A fault always occurs on writing to user buffer. A fault | ||
496 | * is at a single address, so we need to rewind by only 4 | ||
497 | * bytes. | ||
498 | * Since we do a complete read from kernel buffer before | ||
499 | * writing, we need to rewind it also. The amount to be | ||
500 | * rewound equals the number of faulty writes in MSETD | ||
501 | * which is: [4 - (LSM_STEP-1)]*4 | ||
502 | * LSM_STEP is bits 10:8 in TXSTATUS which is already read | ||
503 | * and stored in D0Ar2 | ||
504 | * | ||
505 | * NOTE: If a fault occurs at the last operation in M{G,S}ETL | ||
506 | * LSM_STEP will be 0. ie: we do 4 writes in our case, if | ||
507 | * a fault happens at the 4th write, LSM_STEP will be 0 | ||
508 | * instead of 4. The code copes with that. | ||
509 | * | ||
510 | * n is updated by the number of successful writes, which is: | ||
511 | * n = n - (LSM_STEP-1)*4 | ||
512 | */ | ||
513 | #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\ | ||
514 | __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ | ||
515 | "LSR D0Ar2, D0Ar2, #8\n" \ | ||
516 | "AND D0Ar2, D0Ar2, #0x7\n" \ | ||
517 | "ADDZ D0Ar2, D0Ar2, #4\n" \ | ||
518 | "SUB D0Ar2, D0Ar2, #1\n" \ | ||
519 | "MOV D1Ar1, #4\n" \ | ||
520 | "SUB D0Ar2, D1Ar1, D0Ar2\n" \ | ||
521 | "LSL D0Ar2, D0Ar2, #2\n" \ | ||
522 | "LSL D1Ar1, D1Ar1, #2\n" \ | ||
523 | "SUB D1Ar1, D1Ar1, D0Ar2\n" \ | ||
524 | "SUB %0, %0, #4\n" \ | ||
525 | "SUB %1, %1, D0Ar2\n" \ | ||
526 | "SUB %3, %3, D1Ar1\n") | ||
527 | |||
528 | unsigned long __copy_user(void __user *pdst, const void *psrc, | ||
529 | unsigned long n) | ||
530 | { | ||
531 | register char __user *dst asm ("A0.2") = pdst; | ||
532 | register const char *src asm ("A1.2") = psrc; | ||
533 | unsigned long retn = 0; | ||
534 | |||
535 | if (n == 0) | ||
536 | return 0; | ||
537 | |||
538 | if ((unsigned long) src & 1) { | ||
539 | __asm_copy_to_user_1(dst, src, retn); | ||
540 | n--; | ||
541 | } | ||
542 | if ((unsigned long) dst & 1) { | ||
543 | /* Worst case - byte copy */ | ||
544 | while (n > 0) { | ||
545 | __asm_copy_to_user_1(dst, src, retn); | ||
546 | n--; | ||
547 | } | ||
548 | } | ||
549 | if (((unsigned long) src & 2) && n >= 2) { | ||
550 | __asm_copy_to_user_2(dst, src, retn); | ||
551 | n -= 2; | ||
552 | } | ||
553 | if ((unsigned long) dst & 2) { | ||
554 | /* Second worst case - word copy */ | ||
555 | while (n >= 2) { | ||
556 | __asm_copy_to_user_2(dst, src, retn); | ||
557 | n -= 2; | ||
558 | } | ||
559 | } | ||
560 | |||
561 | #ifdef USE_RAPF | ||
562 | /* 64 bit copy loop */ | ||
563 | if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) { | ||
564 | if (n >= RAPF_MIN_BUF_SIZE) { | ||
565 | /* copy user using 64 bit rapf copy */ | ||
566 | __asm_copy_to_user_64bit_rapf_loop(dst, src, retn, | ||
567 | n, "64cu"); | ||
568 | } | ||
569 | while (n >= 8) { | ||
570 | __asm_copy_to_user_8x64(dst, src, retn); | ||
571 | n -= 8; | ||
572 | } | ||
573 | } | ||
574 | if (n >= RAPF_MIN_BUF_SIZE) { | ||
575 | /* copy user using 32 bit rapf copy */ | ||
576 | __asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu"); | ||
577 | } | ||
578 | #else | ||
579 | /* 64 bit copy loop */ | ||
580 | if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) { | ||
581 | while (n >= 8) { | ||
582 | __asm_copy_to_user_8x64(dst, src, retn); | ||
583 | n -= 8; | ||
584 | } | ||
585 | } | ||
586 | #endif | ||
587 | |||
588 | while (n >= 16) { | ||
589 | __asm_copy_to_user_16(dst, src, retn); | ||
590 | n -= 16; | ||
591 | } | ||
592 | |||
593 | while (n >= 4) { | ||
594 | __asm_copy_to_user_4(dst, src, retn); | ||
595 | n -= 4; | ||
596 | } | ||
597 | |||
598 | switch (n) { | ||
599 | case 0: | ||
600 | break; | ||
601 | case 1: | ||
602 | __asm_copy_to_user_1(dst, src, retn); | ||
603 | break; | ||
604 | case 2: | ||
605 | __asm_copy_to_user_2(dst, src, retn); | ||
606 | break; | ||
607 | case 3: | ||
608 | __asm_copy_to_user_3(dst, src, retn); | ||
609 | break; | ||
610 | } | ||
611 | |||
612 | return retn; | ||
613 | } | ||
614 | EXPORT_SYMBOL(__copy_user); | ||
615 | |||
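A note on the contract, with a hypothetical caller for illustration: like the raw copy routines on other architectures, __copy_user returns the number of bytes that could not be copied, so zero means complete success (callers normally reach it via the uaccess.h wrappers, which add the access_ok() check):

static long example_put(void __user *uptr, const void *kbuf, size_t len)
{
        return __copy_user(uptr, kbuf, len) ? -EFAULT : 0;
}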
616 | #define __asm_copy_from_user_1(to, from, ret) \ | ||
617 | __asm_copy_user_cont(to, from, ret, \ | ||
618 | " GETB D1Ar1,[%1++]\n" \ | ||
619 | "2: SETB [%0++],D1Ar1\n", \ | ||
620 | "3: ADD %2,%2,#1\n" \ | ||
621 | " SETB [%0++],D1Ar1\n", \ | ||
622 | " .long 2b,3b\n") | ||
623 | |||
624 | #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
625 | __asm_copy_user_cont(to, from, ret, \ | ||
626 | " GETW D1Ar1,[%1++]\n" \ | ||
627 | "2: SETW [%0++],D1Ar1\n" COPY, \ | ||
628 | "3: ADD %2,%2,#2\n" \ | ||
629 | " SETW [%0++],D1Ar1\n" FIXUP, \ | ||
630 | " .long 2b,3b\n" TENTRY) | ||
631 | |||
632 | #define __asm_copy_from_user_2(to, from, ret) \ | ||
633 | __asm_copy_from_user_2x_cont(to, from, ret, "", "", "") | ||
634 | |||
635 | #define __asm_copy_from_user_3(to, from, ret) \ | ||
636 | __asm_copy_from_user_2x_cont(to, from, ret, \ | ||
637 | " GETB D1Ar1,[%1++]\n" \ | ||
638 | "4: SETB [%0++],D1Ar1\n", \ | ||
639 | "5: ADD %2,%2,#1\n" \ | ||
640 | " SETB [%0++],D1Ar1\n", \ | ||
641 | " .long 4b,5b\n") | ||
642 | |||
643 | #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
644 | __asm_copy_user_cont(to, from, ret, \ | ||
645 | " GETD D1Ar1,[%1++]\n" \ | ||
646 | "2: SETD [%0++],D1Ar1\n" COPY, \ | ||
647 | "3: ADD %2,%2,#4\n" \ | ||
648 | " SETD [%0++],D1Ar1\n" FIXUP, \ | ||
649 | " .long 2b,3b\n" TENTRY) | ||
650 | |||
651 | #define __asm_copy_from_user_4(to, from, ret) \ | ||
652 | __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") | ||
653 | |||
654 | #define __asm_copy_from_user_5(to, from, ret) \ | ||
655 | __asm_copy_from_user_4x_cont(to, from, ret, \ | ||
656 | " GETB D1Ar1,[%1++]\n" \ | ||
657 | "4: SETB [%0++],D1Ar1\n", \ | ||
658 | "5: ADD %2,%2,#1\n" \ | ||
659 | " SETB [%0++],D1Ar1\n", \ | ||
660 | " .long 4b,5b\n") | ||
661 | |||
662 | #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
663 | __asm_copy_from_user_4x_cont(to, from, ret, \ | ||
664 | " GETW D1Ar1,[%1++]\n" \ | ||
665 | "4: SETW [%0++],D1Ar1\n" COPY, \ | ||
666 | "5: ADD %2,%2,#2\n" \ | ||
667 | " SETW [%0++],D1Ar1\n" FIXUP, \ | ||
668 | " .long 4b,5b\n" TENTRY) | ||
669 | |||
670 | #define __asm_copy_from_user_6(to, from, ret) \ | ||
671 | __asm_copy_from_user_6x_cont(to, from, ret, "", "", "") | ||
672 | |||
673 | #define __asm_copy_from_user_7(to, from, ret) \ | ||
674 | __asm_copy_from_user_6x_cont(to, from, ret, \ | ||
675 | " GETB D1Ar1,[%1++]\n" \ | ||
676 | "6: SETB [%0++],D1Ar1\n", \ | ||
677 | "7: ADD %2,%2,#1\n" \ | ||
678 | " SETB [%0++],D1Ar1\n", \ | ||
679 | " .long 6b,7b\n") | ||
680 | |||
681 | #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
682 | __asm_copy_from_user_4x_cont(to, from, ret, \ | ||
683 | " GETD D1Ar1,[%1++]\n" \ | ||
684 | "4: SETD [%0++],D1Ar1\n" COPY, \ | ||
685 | "5: ADD %2,%2,#4\n" \ | ||
686 | " SETD [%0++],D1Ar1\n" FIXUP, \ | ||
687 | " .long 4b,5b\n" TENTRY) | ||
688 | |||
689 | #define __asm_copy_from_user_8(to, from, ret) \ | ||
690 | __asm_copy_from_user_8x_cont(to, from, ret, "", "", "") | ||
691 | |||
692 | #define __asm_copy_from_user_9(to, from, ret) \ | ||
693 | __asm_copy_from_user_8x_cont(to, from, ret, \ | ||
694 | " GETB D1Ar1,[%1++]\n" \ | ||
695 | "6: SETB [%0++],D1Ar1\n", \ | ||
696 | "7: ADD %2,%2,#1\n" \ | ||
697 | " SETB [%0++],D1Ar1\n", \ | ||
698 | " .long 6b,7b\n") | ||
699 | |||
700 | #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
701 | __asm_copy_from_user_8x_cont(to, from, ret, \ | ||
702 | " GETW D1Ar1,[%1++]\n" \ | ||
703 | "6: SETW [%0++],D1Ar1\n" COPY, \ | ||
704 | "7: ADD %2,%2,#2\n" \ | ||
705 | " SETW [%0++],D1Ar1\n" FIXUP, \ | ||
706 | " .long 6b,7b\n" TENTRY) | ||
707 | |||
708 | #define __asm_copy_from_user_10(to, from, ret) \ | ||
709 | __asm_copy_from_user_10x_cont(to, from, ret, "", "", "") | ||
710 | |||
711 | #define __asm_copy_from_user_11(to, from, ret) \ | ||
712 | __asm_copy_from_user_10x_cont(to, from, ret, \ | ||
713 | " GETB D1Ar1,[%1++]\n" \ | ||
714 | "8: SETB [%0++],D1Ar1\n", \ | ||
715 | "9: ADD %2,%2,#1\n" \ | ||
716 | " SETB [%0++],D1Ar1\n", \ | ||
717 | " .long 8b,9b\n") | ||
718 | |||
719 | #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
720 | __asm_copy_from_user_8x_cont(to, from, ret, \ | ||
721 | " GETD D1Ar1,[%1++]\n" \ | ||
722 | "6: SETD [%0++],D1Ar1\n" COPY, \ | ||
723 | "7: ADD %2,%2,#4\n" \ | ||
724 | " SETD [%0++],D1Ar1\n" FIXUP, \ | ||
725 | " .long 6b,7b\n" TENTRY) | ||
726 | |||
727 | #define __asm_copy_from_user_12(to, from, ret) \ | ||
728 | __asm_copy_from_user_12x_cont(to, from, ret, "", "", "") | ||
729 | |||
730 | #define __asm_copy_from_user_13(to, from, ret) \ | ||
731 | __asm_copy_from_user_12x_cont(to, from, ret, \ | ||
732 | " GETB D1Ar1,[%1++]\n" \ | ||
733 | "8: SETB [%0++],D1Ar1\n", \ | ||
734 | "9: ADD %2,%2,#1\n" \ | ||
735 | " SETB [%0++],D1Ar1\n", \ | ||
736 | " .long 8b,9b\n") | ||
737 | |||
738 | #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
739 | __asm_copy_from_user_12x_cont(to, from, ret, \ | ||
740 | " GETW D1Ar1,[%1++]\n" \ | ||
741 | "8: SETW [%0++],D1Ar1\n" COPY, \ | ||
742 | "9: ADD %2,%2,#2\n" \ | ||
743 | " SETW [%0++],D1Ar1\n" FIXUP, \ | ||
744 | " .long 8b,9b\n" TENTRY) | ||
745 | |||
746 | #define __asm_copy_from_user_14(to, from, ret) \ | ||
747 | __asm_copy_from_user_14x_cont(to, from, ret, "", "", "") | ||
748 | |||
749 | #define __asm_copy_from_user_15(to, from, ret) \ | ||
750 | __asm_copy_from_user_14x_cont(to, from, ret, \ | ||
751 | " GETB D1Ar1,[%1++]\n" \ | ||
752 | "10: SETB [%0++],D1Ar1\n", \ | ||
753 | "11: ADD %2,%2,#1\n" \ | ||
754 | " SETB [%0++],D1Ar1\n", \ | ||
755 | " .long 10b,11b\n") | ||
756 | |||
757 | #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ | ||
758 | __asm_copy_from_user_12x_cont(to, from, ret, \ | ||
759 | " GETD D1Ar1,[%1++]\n" \ | ||
760 | "8: SETD [%0++],D1Ar1\n" COPY, \ | ||
761 | "9: ADD %2,%2,#4\n" \ | ||
762 | " SETD [%0++],D1Ar1\n" FIXUP, \ | ||
763 | " .long 8b,9b\n" TENTRY) | ||
764 | |||
765 | #define __asm_copy_from_user_16(to, from, ret) \ | ||
766 | __asm_copy_from_user_16x_cont(to, from, ret, "", "", "") | ||
767 | |||
768 | #define __asm_copy_from_user_8x64(to, from, ret) \ | ||
769 | asm volatile ( \ | ||
770 | " GETL D0Ar2,D1Ar1,[%1++]\n" \ | ||
771 | "2: SETL [%0++],D0Ar2,D1Ar1\n" \ | ||
772 | "1:\n" \ | ||
773 | " .section .fixup,\"ax\"\n" \ | ||
774 | " MOV D1Ar1,#0\n" \ | ||
775 | " MOV D0Ar2,#0\n" \ | ||
776 | "3: ADD %2,%2,#8\n" \ | ||
777 | " SETL [%0++],D0Ar2,D1Ar1\n" \ | ||
778 | " MOVT D0Ar2,#HI(1b)\n" \ | ||
779 | " JUMP D0Ar2,#LO(1b)\n" \ | ||
780 | " .previous\n" \ | ||
781 | " .section __ex_table,\"a\"\n" \ | ||
782 | " .long 2b,3b\n" \ | ||
783 | " .previous\n" \ | ||
784 | : "=a" (to), "=r" (from), "=r" (ret) \ | ||
785 | : "0" (to), "1" (from), "2" (ret) \ | ||
786 | : "D1Ar1", "D0Ar2", "memory") | ||
787 | |||
788 | /* rewind 'from' pointer when a fault occurs | ||
789 | * | ||
790 | * Rationale: | ||
791 | * A fault occurs while reading from user buffer, which is the | ||
792 | * source. Since the fault is at a single address, we only | ||
793 | * need to rewind by 8 bytes. | ||
794 | * Since we don't write to kernel buffer until we read first, | ||
795 | * the kernel buffer is at the right state and needn't be | ||
796 | * corrected. | ||
797 | */ | ||
798 | #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ | ||
799 | __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ | ||
800 | "SUB %1, %1, #8\n") | ||
801 | |||
802 | /* rewind 'from' pointer when a fault occurs | ||
803 | * | ||
804 | * Rationale: | ||
805 | * A fault occurs while reading from user buffer, which is the | ||
806 | * source. Since the fault is at a single address, we only | ||
807 | * need to rewind by 4 bytes. | ||
808 | * Since we don't write to kernel buffer until we read first, | ||
809 | * the kernel buffer is at the right state and needn't be | ||
810 | * corrected. | ||
811 | */ | ||
812 | #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ | ||
813 | __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ | ||
814 | "SUB %1, %1, #4\n") | ||
815 | |||
816 | |||
817 | /* Copy from user to kernel, zeroing the bytes that were inaccessible in | ||
818 | userland. The return-value is the number of bytes that were | ||
819 | inaccessible. */ | ||
820 | unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, | ||
821 | unsigned long n) | ||
822 | { | ||
823 | register char *dst asm ("A0.2") = pdst; | ||
824 | register const char __user *src asm ("A1.2") = psrc; | ||
825 | unsigned long retn = 0; | ||
826 | |||
827 | if (n == 0) | ||
828 | return 0; | ||
829 | |||
830 | if ((unsigned long) src & 1) { | ||
831 | __asm_copy_from_user_1(dst, src, retn); | ||
832 | n--; | ||
833 | } | ||
834 | if ((unsigned long) dst & 1) { | ||
835 | /* Worst case - byte copy */ | ||
836 | while (n > 0) { | ||
837 | __asm_copy_from_user_1(dst, src, retn); | ||
838 | n--; | ||
839 | if (retn) | ||
840 | goto copy_exception_bytes; | ||
841 | } | ||
842 | } | ||
843 | if (((unsigned long) src & 2) && n >= 2) { | ||
844 | __asm_copy_from_user_2(dst, src, retn); | ||
845 | n -= 2; | ||
846 | } | ||
847 | if ((unsigned long) dst & 2) { | ||
848 | /* Second worst case - word copy */ | ||
849 | while (n >= 2) { | ||
850 | __asm_copy_from_user_2(dst, src, retn); | ||
851 | n -= 2; | ||
852 | if (retn) | ||
853 | goto copy_exception_bytes; | ||
854 | } | ||
855 | } | ||
856 | |||
857 | /* We only need one check after the misalignment adjustments, | ||
858 | because if both adjustments were done, either both or | ||
859 | neither reference had an exception. */ | ||
860 | if (retn != 0) | ||
861 | goto copy_exception_bytes; | ||
862 | |||
863 | #ifdef USE_RAPF | ||
864 | /* 64 bit copy loop */ | ||
865 | if (!(((unsigned long) src | (unsigned long) dst) & 7)) { | ||
866 | if (n >= RAPF_MIN_BUF_SIZE) { | ||
867 | /* Copy using fast 64bit rapf */ | ||
868 | __asm_copy_from_user_64bit_rapf_loop(dst, src, retn, | ||
869 | n, "64cuz"); | ||
870 | } | ||
871 | while (n >= 8) { | ||
872 | __asm_copy_from_user_8x64(dst, src, retn); | ||
873 | n -= 8; | ||
874 | if (retn) | ||
875 | goto copy_exception_bytes; | ||
876 | } | ||
877 | } | ||
878 | |||
879 | if (n >= RAPF_MIN_BUF_SIZE) { | ||
880 | /* Copy using fast 32bit rapf */ | ||
881 | __asm_copy_from_user_32bit_rapf_loop(dst, src, retn, | ||
882 | n, "32cuz"); | ||
883 | } | ||
884 | #else | ||
885 | /* 64 bit copy loop */ | ||
886 | if (!(((unsigned long) src | (unsigned long) dst) & 7)) { | ||
887 | while (n >= 8) { | ||
888 | __asm_copy_from_user_8x64(dst, src, retn); | ||
889 | n -= 8; | ||
890 | if (retn) | ||
891 | goto copy_exception_bytes; | ||
892 | } | ||
893 | } | ||
894 | #endif | ||
895 | |||
896 | while (n >= 4) { | ||
897 | __asm_copy_from_user_4(dst, src, retn); | ||
898 | n -= 4; | ||
899 | |||
900 | if (retn) | ||
901 | goto copy_exception_bytes; | ||
902 | } | ||
903 | |||
904 | /* If we get here, there were no memory read faults. */ | ||
905 | switch (n) { | ||
906 | /* These copies are at least "naturally aligned" (so we don't | ||
907 | have to check each byte), due to the src alignment code. | ||
908 | The *_3 case *will* get the correct count for retn. */ | ||
909 | case 0: | ||
910 | /* This case deliberately left in (if you have doubts check the | ||
911 | generated assembly code). */ | ||
912 | break; | ||
913 | case 1: | ||
914 | __asm_copy_from_user_1(dst, src, retn); | ||
915 | break; | ||
916 | case 2: | ||
917 | __asm_copy_from_user_2(dst, src, retn); | ||
918 | break; | ||
919 | case 3: | ||
920 | __asm_copy_from_user_3(dst, src, retn); | ||
921 | break; | ||
922 | } | ||
923 | |||
924 | /* If we get here, retn correctly reflects the number of failing | ||
925 | bytes. */ | ||
926 | return retn; | ||
927 | |||
928 | copy_exception_bytes: | ||
929 | /* We already have "retn" bytes cleared, and need to clear the | ||
930 | remaining "n" bytes. A non-optimized simple byte-for-byte in-line | ||
931 | memset is preferred here, since this isn't speed-critical code and | ||
932 | we'd rather have this a leaf-function than calling memset. */ | ||
933 | { | ||
934 | char *endp; | ||
935 | for (endp = dst + n; dst < endp; dst++) | ||
936 | *dst = 0; | ||
937 | } | ||
938 | |||
939 | return retn + n; | ||
940 | } | ||
941 | EXPORT_SYMBOL(__copy_user_zeroing); | ||
942 | |||
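The zeroing variant's contract differs in that the kernel destination is always fully written: on a fault the remaining tail is cleared, and the return value counts those zeroed bytes. A hypothetical caller, for illustration only:

static long example_get(void *kbuf, const void __user *uptr, size_t len)
{
        unsigned long uncopied = __copy_user_zeroing(kbuf, uptr, len);

        /* the last 'uncopied' bytes of kbuf are now zero */
        return uncopied ? -EFAULT : 0;
}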
943 | #define __asm_clear_8x64(to, ret) \ | ||
944 | asm volatile ( \ | ||
945 | " MOV D0Ar2,#0\n" \ | ||
946 | " MOV D1Ar1,#0\n" \ | ||
947 | " SETL [%0],D0Ar2,D1Ar1\n" \ | ||
948 | "2: SETL [%0++],D0Ar2,D1Ar1\n" \ | ||
949 | "1:\n" \ | ||
950 | " .section .fixup,\"ax\"\n" \ | ||
951 | "3: ADD %1,%1,#8\n" \ | ||
952 | " MOVT D0Ar2,#HI(1b)\n" \ | ||
953 | " JUMP D0Ar2,#LO(1b)\n" \ | ||
954 | " .previous\n" \ | ||
955 | " .section __ex_table,\"a\"\n" \ | ||
956 | " .long 2b,3b\n" \ | ||
957 | " .previous\n" \ | ||
958 | : "=r" (to), "=r" (ret) \ | ||
959 | : "0" (to), "1" (ret) \ | ||
960 | : "D1Ar1", "D0Ar2", "memory") | ||
961 | |||
962 | /* Zero userspace. */ | ||
963 | |||
964 | #define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \ | ||
965 | asm volatile ( \ | ||
966 | " MOV D1Ar1,#0\n" \ | ||
967 | CLEAR \ | ||
968 | "1:\n" \ | ||
969 | " .section .fixup,\"ax\"\n" \ | ||
970 | FIXUP \ | ||
971 | " MOVT D1Ar1,#HI(1b)\n" \ | ||
972 | " JUMP D1Ar1,#LO(1b)\n" \ | ||
973 | " .previous\n" \ | ||
974 | " .section __ex_table,\"a\"\n" \ | ||
975 | TENTRY \ | ||
976 | " .previous" \ | ||
977 | : "=r" (to), "=r" (ret) \ | ||
978 | : "0" (to), "1" (ret) \ | ||
979 | : "D1Ar1", "memory") | ||
980 | |||
981 | #define __asm_clear_1(to, ret) \ | ||
982 | __asm_clear(to, ret, \ | ||
983 | " SETB [%0],D1Ar1\n" \ | ||
984 | "2: SETB [%0++],D1Ar1\n", \ | ||
985 | "3: ADD %1,%1,#1\n", \ | ||
986 | " .long 2b,3b\n") | ||
987 | |||
988 | #define __asm_clear_2(to, ret) \ | ||
989 | __asm_clear(to, ret, \ | ||
990 | " SETW [%0],D1Ar1\n" \ | ||
991 | "2: SETW [%0++],D1Ar1\n", \ | ||
992 | "3: ADD %1,%1,#2\n", \ | ||
993 | " .long 2b,3b\n") | ||
994 | |||
995 | #define __asm_clear_3(to, ret) \ | ||
996 | __asm_clear(to, ret, \ | ||
997 | "2: SETW [%0++],D1Ar1\n" \ | ||
998 | " SETB [%0],D1Ar1\n" \ | ||
999 | "3: SETB [%0++],D1Ar1\n", \ | ||
1000 | "4: ADD %1,%1,#2\n" \ | ||
1001 | "5: ADD %1,%1,#1\n", \ | ||
1002 | " .long 2b,4b\n" \ | ||
1003 | " .long 3b,5b\n") | ||
1004 | |||
1005 | #define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \ | ||
1006 | __asm_clear(to, ret, \ | ||
1007 | " SETD [%0],D1Ar1\n" \ | ||
1008 | "2: SETD [%0++],D1Ar1\n" CLEAR, \ | ||
1009 | "3: ADD %1,%1,#4\n" FIXUP, \ | ||
1010 | " .long 2b,3b\n" TENTRY) | ||
1011 | |||
1012 | #define __asm_clear_4(to, ret) \ | ||
1013 | __asm_clear_4x_cont(to, ret, "", "", "") | ||
1014 | |||
1015 | #define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \ | ||
1016 | __asm_clear_4x_cont(to, ret, \ | ||
1017 | " SETD [%0],D1Ar1\n" \ | ||
1018 | "4: SETD [%0++],D1Ar1\n" CLEAR, \ | ||
1019 | "5: ADD %1,%1,#4\n" FIXUP, \ | ||
1020 | " .long 4b,5b\n" TENTRY) | ||
1021 | |||
1022 | #define __asm_clear_8(to, ret) \ | ||
1023 | __asm_clear_8x_cont(to, ret, "", "", "") | ||
1024 | |||
1025 | #define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \ | ||
1026 | __asm_clear_8x_cont(to, ret, \ | ||
1027 | " SETD [%0],D1Ar1\n" \ | ||
1028 | "6: SETD [%0++],D1Ar1\n" CLEAR, \ | ||
1029 | "7: ADD %1,%1,#4\n" FIXUP, \ | ||
1030 | " .long 6b,7b\n" TENTRY) | ||
1031 | |||
1032 | #define __asm_clear_12(to, ret) \ | ||
1033 | __asm_clear_12x_cont(to, ret, "", "", "") | ||
1034 | |||
1035 | #define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \ | ||
1036 | __asm_clear_12x_cont(to, ret, \ | ||
1037 | " SETD [%0],D1Ar1\n" \ | ||
1038 | "8: SETD [%0++],D1Ar1\n" CLEAR, \ | ||
1039 | "9: ADD %1,%1,#4\n" FIXUP, \ | ||
1040 | " .long 8b,9b\n" TENTRY) | ||
1041 | |||
1042 | #define __asm_clear_16(to, ret) \ | ||
1043 | __asm_clear_16x_cont(to, ret, "", "", "") | ||
1044 | |||
1045 | unsigned long __do_clear_user(void __user *pto, unsigned long pn) | ||
1046 | { | ||
1047 | register char __user *dst asm ("D0Re0") = pto; | ||
1048 | register unsigned long n asm ("D1Re0") = pn; | ||
1049 | register unsigned long retn asm ("D0Ar6") = 0; | ||
1050 | |||
1051 | if ((unsigned long) dst & 1) { | ||
1052 | __asm_clear_1(dst, retn); | ||
1053 | n--; | ||
1054 | } | ||
1055 | |||
1056 | if ((unsigned long) dst & 2) { | ||
1057 | __asm_clear_2(dst, retn); | ||
1058 | n -= 2; | ||
1059 | } | ||
1060 | |||
1061 | /* 64 bit copy loop */ | ||
1062 | if (!((__force unsigned long) dst & 7)) { | ||
1063 | while (n >= 8) { | ||
1064 | __asm_clear_8x64(dst, retn); | ||
1065 | n -= 8; | ||
1066 | } | ||
1067 | } | ||
1068 | |||
1069 | while (n >= 16) { | ||
1070 | __asm_clear_16(dst, retn); | ||
1071 | n -= 16; | ||
1072 | } | ||
1073 | |||
1074 | while (n >= 4) { | ||
1075 | __asm_clear_4(dst, retn); | ||
1076 | n -= 4; | ||
1077 | } | ||
1078 | |||
1079 | switch (n) { | ||
1080 | case 0: | ||
1081 | break; | ||
1082 | case 1: | ||
1083 | __asm_clear_1(dst, retn); | ||
1084 | break; | ||
1085 | case 2: | ||
1086 | __asm_clear_2(dst, retn); | ||
1087 | break; | ||
1088 | case 3: | ||
1089 | __asm_clear_3(dst, retn); | ||
1090 | break; | ||
1091 | } | ||
1092 | |||
1093 | return retn; | ||
1094 | } | ||
1095 | EXPORT_SYMBOL(__do_clear_user); | ||
1096 | |||
1097 | unsigned char __get_user_asm_b(const void __user *addr, long *err) | ||
1098 | { | ||
1099 | register unsigned char x asm ("D0Re0") = 0; | ||
1100 | asm volatile ( | ||
1101 | " GETB %0,[%2]\n" | ||
1102 | "1:\n" | ||
1103 | " GETB %0,[%2]\n" | ||
1104 | "2:\n" | ||
1105 | " .section .fixup,\"ax\"\n" | ||
1106 | "3: MOV D0FrT,%3\n" | ||
1107 | " SETD [%1],D0FrT\n" | ||
1108 | " MOVT D0FrT,#HI(2b)\n" | ||
1109 | " JUMP D0FrT,#LO(2b)\n" | ||
1110 | " .previous\n" | ||
1111 | " .section __ex_table,\"a\"\n" | ||
1112 | " .long 1b,3b\n" | ||
1113 | " .previous\n" | ||
1114 | : "=r" (x) | ||
1115 | : "r" (err), "r" (addr), "P" (-EFAULT) | ||
1116 | : "D0FrT"); | ||
1117 | return x; | ||
1118 | } | ||
1119 | EXPORT_SYMBOL(__get_user_asm_b); | ||
1120 | |||
1121 | unsigned short __get_user_asm_w(const void __user *addr, long *err) | ||
1122 | { | ||
1123 | register unsigned short x asm ("D0Re0") = 0; | ||
1124 | asm volatile ( | ||
1125 | " GETW %0,[%2]\n" | ||
1126 | "1:\n" | ||
1127 | " GETW %0,[%2]\n" | ||
1128 | "2:\n" | ||
1129 | " .section .fixup,\"ax\"\n" | ||
1130 | "3: MOV D0FrT,%3\n" | ||
1131 | " SETD [%1],D0FrT\n" | ||
1132 | " MOVT D0FrT,#HI(2b)\n" | ||
1133 | " JUMP D0FrT,#LO(2b)\n" | ||
1134 | " .previous\n" | ||
1135 | " .section __ex_table,\"a\"\n" | ||
1136 | " .long 1b,3b\n" | ||
1137 | " .previous\n" | ||
1138 | : "=r" (x) | ||
1139 | : "r" (err), "r" (addr), "P" (-EFAULT) | ||
1140 | : "D0FrT"); | ||
1141 | return x; | ||
1142 | } | ||
1143 | EXPORT_SYMBOL(__get_user_asm_w); | ||
1144 | |||
1145 | unsigned int __get_user_asm_d(const void __user *addr, long *err) | ||
1146 | { | ||
1147 | register unsigned int x asm ("D0Re0") = 0; | ||
1148 | asm volatile ( | ||
1149 | " GETD %0,[%2]\n" | ||
1150 | "1:\n" | ||
1151 | " GETD %0,[%2]\n" | ||
1152 | "2:\n" | ||
1153 | " .section .fixup,\"ax\"\n" | ||
1154 | "3: MOV D0FrT,%3\n" | ||
1155 | " SETD [%1],D0FrT\n" | ||
1156 | " MOVT D0FrT,#HI(2b)\n" | ||
1157 | " JUMP D0FrT,#LO(2b)\n" | ||
1158 | " .previous\n" | ||
1159 | " .section __ex_table,\"a\"\n" | ||
1160 | " .long 1b,3b\n" | ||
1161 | " .previous\n" | ||
1162 | : "=r" (x) | ||
1163 | : "r" (err), "r" (addr), "P" (-EFAULT) | ||
1164 | : "D0FrT"); | ||
1165 | return x; | ||
1166 | } | ||
1167 | EXPORT_SYMBOL(__get_user_asm_d); | ||
1168 | |||
1169 | long __put_user_asm_b(unsigned int x, void __user *addr) | ||
1170 | { | ||
1171 | register unsigned int err asm ("D0Re0") = 0; | ||
1172 | asm volatile ( | ||
1173 | " MOV %0,#0\n" | ||
1174 | " SETB [%2],%1\n" | ||
1175 | "1:\n" | ||
1176 | " SETB [%2],%1\n" | ||
1177 | "2:\n" | ||
1178 | ".section .fixup,\"ax\"\n" | ||
1179 | "3: MOV %0,%3\n" | ||
1180 | " MOVT D0FrT,#HI(2b)\n" | ||
1181 | " JUMP D0FrT,#LO(2b)\n" | ||
1182 | ".previous\n" | ||
1183 | ".section __ex_table,\"a\"\n" | ||
1184 | " .long 1b,3b\n" | ||
1185 | ".previous" | ||
1186 | : "=r"(err) | ||
1187 | : "d" (x), "a" (addr), "P"(-EFAULT) | ||
1188 | : "D0FrT"); | ||
1189 | return err; | ||
1190 | } | ||
1191 | EXPORT_SYMBOL(__put_user_asm_b); | ||
1192 | |||
1193 | long __put_user_asm_w(unsigned int x, void __user *addr) | ||
1194 | { | ||
1195 | register unsigned int err asm ("D0Re0") = 0; | ||
1196 | asm volatile ( | ||
1197 | " MOV %0,#0\n" | ||
1198 | " SETW [%2],%1\n" | ||
1199 | "1:\n" | ||
1200 | " SETW [%2],%1\n" | ||
1201 | "2:\n" | ||
1202 | ".section .fixup,\"ax\"\n" | ||
1203 | "3: MOV %0,%3\n" | ||
1204 | " MOVT D0FrT,#HI(2b)\n" | ||
1205 | " JUMP D0FrT,#LO(2b)\n" | ||
1206 | ".previous\n" | ||
1207 | ".section __ex_table,\"a\"\n" | ||
1208 | " .long 1b,3b\n" | ||
1209 | ".previous" | ||
1210 | : "=r"(err) | ||
1211 | : "d" (x), "a" (addr), "P"(-EFAULT) | ||
1212 | : "D0FrT"); | ||
1213 | return err; | ||
1214 | } | ||
1215 | EXPORT_SYMBOL(__put_user_asm_w); | ||
1216 | |||
1217 | long __put_user_asm_d(unsigned int x, void __user *addr) | ||
1218 | { | ||
1219 | register unsigned int err asm ("D0Re0") = 0; | ||
1220 | asm volatile ( | ||
1221 | " MOV %0,#0\n" | ||
1222 | " SETD [%2],%1\n" | ||
1223 | "1:\n" | ||
1224 | " SETD [%2],%1\n" | ||
1225 | "2:\n" | ||
1226 | ".section .fixup,\"ax\"\n" | ||
1227 | "3: MOV %0,%3\n" | ||
1228 | " MOVT D0FrT,#HI(2b)\n" | ||
1229 | " JUMP D0FrT,#LO(2b)\n" | ||
1230 | ".previous\n" | ||
1231 | ".section __ex_table,\"a\"\n" | ||
1232 | " .long 1b,3b\n" | ||
1233 | ".previous" | ||
1234 | : "=r"(err) | ||
1235 | : "d" (x), "a" (addr), "P"(-EFAULT) | ||
1236 | : "D0FrT"); | ||
1237 | return err; | ||
1238 | } | ||
1239 | EXPORT_SYMBOL(__put_user_asm_d); | ||
1240 | |||
1241 | long __put_user_asm_l(unsigned long long x, void __user *addr) | ||
1242 | { | ||
1243 | register unsigned int err asm ("D0Re0") = 0; | ||
1244 | asm volatile ( | ||
1245 | " MOV %0,#0\n" | ||
1246 | " SETL [%2],%1,%t1\n" | ||
1247 | "1:\n" | ||
1248 | " SETL [%2],%1,%t1\n" | ||
1249 | "2:\n" | ||
1250 | ".section .fixup,\"ax\"\n" | ||
1251 | "3: MOV %0,%3\n" | ||
1252 | " MOVT D0FrT,#HI(2b)\n" | ||
1253 | " JUMP D0FrT,#LO(2b)\n" | ||
1254 | ".previous\n" | ||
1255 | ".section __ex_table,\"a\"\n" | ||
1256 | " .long 1b,3b\n" | ||
1257 | ".previous" | ||
1258 | : "=r"(err) | ||
1259 | : "d" (x), "a" (addr), "P"(-EFAULT) | ||
1260 | : "D0FrT"); | ||
1261 | return err; | ||
1262 | } | ||
1263 | EXPORT_SYMBOL(__put_user_asm_l); | ||
1264 | |||
1265 | long strnlen_user(const char __user *src, long count) | ||
1266 | { | ||
1267 | long res; | ||
1268 | |||
1269 | if (!access_ok(VERIFY_READ, src, 0)) | ||
1270 | return 0; | ||
1271 | |||
1272 | asm volatile (" MOV D0Ar4, %1\n" | ||
1273 | " MOV D0Ar6, %2\n" | ||
1274 | "0:\n" | ||
1275 | " SUBS D0FrT, D0Ar6, #0\n" | ||
1276 | " SUB D0Ar6, D0Ar6, #1\n" | ||
1277 | " BLE 2f\n" | ||
1278 | " GETB D0FrT, [D0Ar4+#1++]\n" | ||
1279 | "1:\n" | ||
1280 | " TST D0FrT, #255\n" | ||
1281 | " BNE 0b\n" | ||
1282 | "2:\n" | ||
1283 | " SUB %0, %2, D0Ar6\n" | ||
1284 | "3:\n" | ||
1285 | " .section .fixup,\"ax\"\n" | ||
1286 | "4:\n" | ||
1287 | " MOV %0, #0\n" | ||
1288 | " MOVT D0FrT,#HI(3b)\n" | ||
1289 | " JUMP D0FrT,#LO(3b)\n" | ||
1290 | " .previous\n" | ||
1291 | " .section __ex_table,\"a\"\n" | ||
1292 | " .long 1b,4b\n" | ||
1293 | " .previous\n" | ||
1294 | : "=r" (res) | ||
1295 | : "r" (src), "r" (count) | ||
1296 | : "D0FrT", "D0Ar4", "D0Ar6", "cc"); | ||
1297 | |||
1298 | return res; | ||
1299 | } | ||
1300 | EXPORT_SYMBOL(strnlen_user); | ||
1301 | |||
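The loop's exit conditions are easiest to verify against a C model (hedged, fault handling omitted): a NUL found within count bytes yields the string length including the NUL, while running out of bytes exits through the BLE with D0Ar6 at -1, yielding count + 1; the fixup path additionally returns 0 on a faulting GETB.

static long strnlen_user_model(const char *src, long count)
{
        long left = count;

        while (left-- > 0)
                if (*src++ == '\0')
                        break;          /* length includes the NUL */
        return count - left;            /* left == -1 if no NUL: count + 1 */
}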
1302 | long __strncpy_from_user(char *dst, const char __user *src, long count) | ||
1303 | { | ||
1304 | long res; | ||
1305 | |||
1306 | if (count == 0) | ||
1307 | return 0; | ||
1308 | |||
1309 | /* | ||
1310 | * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop. | ||
1311 | * So do we. | ||
1312 | * | ||
1313 | * This code is deduced from: | ||
1314 | * | ||
1315 | * char tmp2; | ||
1316 | * long tmp1, tmp3; | ||
1317 | * tmp1 = count; | ||
1318 | * while ((*dst++ = (tmp2 = *src++)) != 0 | ||
1319 | * && --tmp1) | ||
1320 | * ; | ||
1321 | * | ||
1322 | * res = count - tmp1; | ||
1323 | * | ||
1324 | * with tweaks. | ||
1325 | */ | ||
1326 | |||
1327 | asm volatile (" MOV %0,%3\n" | ||
1328 | "1:\n" | ||
1329 | " GETB D0FrT,[%2++]\n" | ||
1330 | "2:\n" | ||
1331 | " CMP D0FrT,#0\n" | ||
1332 | " SETB [%1++],D0FrT\n" | ||
1333 | " BEQ 3f\n" | ||
1334 | " SUBS %0,%0,#1\n" | ||
1335 | " BNZ 1b\n" | ||
1336 | "3:\n" | ||
1337 | " SUB %0,%3,%0\n" | ||
1338 | "4:\n" | ||
1339 | " .section .fixup,\"ax\"\n" | ||
1340 | "5:\n" | ||
1341 | " MOV %0,%7\n" | ||
1342 | " MOVT D0FrT,#HI(4b)\n" | ||
1343 | " JUMP D0FrT,#LO(4b)\n" | ||
1344 | " .previous\n" | ||
1345 | " .section __ex_table,\"a\"\n" | ||
1346 | " .long 2b,5b\n" | ||
1347 | " .previous" | ||
1348 | : "=r" (res), "=r" (dst), "=r" (src), "=r" (count) | ||
1349 | : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT) | ||
1350 | : "D0FrT", "memory", "cc"); | ||
1351 | |||
1352 | return res; | ||
1353 | } | ||
1354 | EXPORT_SYMBOL(__strncpy_from_user); | ||
diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig new file mode 100644 index 000000000000..cd7f2f2ad416 --- /dev/null +++ b/arch/metag/mm/Kconfig | |||
@@ -0,0 +1,153 @@ | |||
1 | menu "Memory management options" | ||
2 | |||
3 | config PAGE_OFFSET | ||
4 | hex "Kernel page offset address" | ||
5 | default "0x40000000" | ||
6 | help | ||
7 | This option allows you to set the virtual address at which the | ||
8 | kernel will be mapped. | ||
9 | endmenu | ||
10 | |||
11 | config KERNEL_4M_PAGES | ||
12 | bool "Map kernel with 4MB pages" | ||
13 | depends on METAG_META21_MMU | ||
14 | default y | ||
15 | help | ||
16 | Map the kernel with large pages to reduce TLB pressure. | ||
17 | |||
18 | choice | ||
19 | prompt "User page size" | ||
20 | default PAGE_SIZE_4K | ||
21 | |||
22 | config PAGE_SIZE_4K | ||
23 | bool "4kB" | ||
24 | help | ||
25 | This is the default page size used by all Meta cores. | ||
26 | |||
27 | config PAGE_SIZE_8K | ||
28 | bool "8kB" | ||
29 | depends on METAG_META21_MMU | ||
30 | help | ||
31 | This enables 8kB pages as supported by Meta 2.x and later MMUs. | ||
32 | |||
33 | config PAGE_SIZE_16K | ||
34 | bool "16kB" | ||
35 | depends on METAG_META21_MMU | ||
36 | help | ||
37 | This enables 16kB pages as supported by Meta 2.x and later MMUs. | ||
38 | |||
39 | endchoice | ||
40 | |||
41 | config NUMA | ||
42 | bool "Non Uniform Memory Access (NUMA) Support" | ||
43 | help | ||
44 | Some Meta systems have MMU-mappable on-chip memories with | ||
45 | lower latencies than main memory. This enables support for | ||
46 | these blocks by binding them to nodes and allowing | ||
47 | memory policies to be used for prioritizing and controlling | ||
48 | allocation behaviour. | ||
49 | |||
50 | config FORCE_MAX_ZONEORDER | ||
51 | int "Maximum zone order" | ||
52 | range 10 32 | ||
53 | default "10" | ||
54 | help | ||
55 | The kernel memory allocator divides physically contiguous memory | ||
56 | blocks into "zones", where each zone is a power of two number of | ||
57 | pages. This option selects the largest power of two that the kernel | ||
58 | keeps in the memory allocator. If you need to allocate very large | ||
59 | blocks of physically contiguous memory, then you may need to | ||
60 | increase this value. | ||
61 | |||
62 | This config option is actually maximum order plus one. For example, | ||
63 | a value of 11 means that the largest free memory block is 2^10 pages. | ||
64 | |||
65 | The page size is not necessarily 4KB. Keep this in mind | ||
66 | when choosing a value for this option. | ||
67 | |||
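To make the off-by-one concrete, a hypothetical helper (illustration only): with the default value of 10 and 4kB pages, the largest buddy allocation is 2^9 pages, i.e. 2MB.

static unsigned long max_buddy_bytes(unsigned int zoneorder,
                                     unsigned long page_size)
{
        /* the Kconfig value is maximum order + 1 */
        return page_size << (zoneorder - 1);    /* 10 -> 512 * 4kB = 2MB */
}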
68 | config METAG_L2C | ||
69 | bool "Level 2 Cache Support" | ||
70 | depends on METAG_META21 | ||
71 | help | ||
72 | Say y here to enable support for the Meta Level 2 (L2) cache. This | ||
73 | will enable the cache at start up if it hasn't already been enabled | ||
74 | by the bootloader. | ||
75 | |||
76 | If the bootloader enables the L2 you must say y here to ensure the | ||
77 | kernel takes the appropriate actions to keep the cache coherent. | ||
78 | |||
79 | config NODES_SHIFT | ||
80 | int | ||
81 | default "1" | ||
82 | depends on NEED_MULTIPLE_NODES | ||
83 | |||
84 | config ARCH_FLATMEM_ENABLE | ||
85 | def_bool y | ||
86 | depends on !NUMA | ||
87 | |||
88 | config ARCH_SPARSEMEM_ENABLE | ||
89 | def_bool y | ||
90 | select SPARSEMEM_STATIC | ||
91 | |||
92 | config ARCH_SPARSEMEM_DEFAULT | ||
93 | def_bool y | ||
94 | |||
95 | config MAX_ACTIVE_REGIONS | ||
96 | int | ||
97 | default "2" if SPARSEMEM | ||
98 | default "1" | ||
99 | |||
100 | config ARCH_POPULATES_NODE_MAP | ||
101 | def_bool y | ||
102 | |||
103 | config ARCH_SELECT_MEMORY_MODEL | ||
104 | def_bool y | ||
105 | |||
106 | config SYS_SUPPORTS_HUGETLBFS | ||
107 | def_bool y | ||
108 | depends on METAG_META21_MMU | ||
109 | |||
110 | choice | ||
111 | prompt "HugeTLB page size" | ||
112 | depends on METAG_META21_MMU && HUGETLB_PAGE | ||
113 | default HUGETLB_PAGE_SIZE_1M | ||
114 | |||
115 | config HUGETLB_PAGE_SIZE_8K | ||
116 | bool "8kB" | ||
117 | depends on PAGE_SIZE_4K | ||
118 | |||
119 | config HUGETLB_PAGE_SIZE_16K | ||
120 | bool "16kB" | ||
121 | depends on PAGE_SIZE_4K || PAGE_SIZE_8K | ||
122 | |||
123 | config HUGETLB_PAGE_SIZE_32K | ||
124 | bool "32kB" | ||
125 | |||
126 | config HUGETLB_PAGE_SIZE_64K | ||
127 | bool "64kB" | ||
128 | |||
129 | config HUGETLB_PAGE_SIZE_128K | ||
130 | bool "128kB" | ||
131 | |||
132 | config HUGETLB_PAGE_SIZE_256K | ||
133 | bool "256kB" | ||
134 | |||
135 | config HUGETLB_PAGE_SIZE_512K | ||
136 | bool "512kB" | ||
137 | |||
138 | config HUGETLB_PAGE_SIZE_1M | ||
139 | bool "1MB" | ||
140 | |||
141 | config HUGETLB_PAGE_SIZE_2M | ||
142 | bool "2MB" | ||
143 | |||
144 | config HUGETLB_PAGE_SIZE_4M | ||
145 | bool "4MB" | ||
146 | |||
147 | endchoice | ||
148 | |||
149 | config METAG_COREMEM | ||
150 | bool | ||
151 | default y if SUSPEND | ||
152 | |||
153 | source "mm/Kconfig" | ||
diff --git a/arch/metag/mm/Makefile b/arch/metag/mm/Makefile new file mode 100644 index 000000000000..994331164125 --- /dev/null +++ b/arch/metag/mm/Makefile | |||
@@ -0,0 +1,19 @@ | |||
1 | # | ||
2 | # Makefile for the Linux Meta-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y += cache.o | ||
6 | obj-y += extable.o | ||
7 | obj-y += fault.o | ||
8 | obj-y += init.o | ||
9 | obj-y += ioremap.o | ||
10 | obj-y += maccess.o | ||
11 | |||
12 | mmu-y := mmu-meta1.o | ||
13 | mmu-$(CONFIG_METAG_META21_MMU) := mmu-meta2.o | ||
14 | obj-y += $(mmu-y) | ||
15 | |||
16 | obj-$(CONFIG_HIGHMEM) += highmem.o | ||
17 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
18 | obj-$(CONFIG_METAG_L2C) += l2cache.o | ||
19 | obj-$(CONFIG_NUMA) += numa.o | ||
diff --git a/arch/metag/mm/cache.c b/arch/metag/mm/cache.c new file mode 100644 index 000000000000..b5d3b2e7c160 --- /dev/null +++ b/arch/metag/mm/cache.c | |||
@@ -0,0 +1,521 @@ | |||
1 | /* | ||
2 | * arch/metag/mm/cache.c | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002, 2005, 2007, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Cache control code | ||
11 | */ | ||
12 | |||
13 | #include <linux/export.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <asm/cacheflush.h> | ||
16 | #include <asm/core_reg.h> | ||
17 | #include <asm/global_lock.h> | ||
18 | #include <asm/metag_isa.h> | ||
19 | #include <asm/metag_mem.h> | ||
20 | #include <asm/metag_regs.h> | ||
21 | |||
22 | #define DEFAULT_CACHE_WAYS_LOG2 2 | ||
23 | |||
24 | /* | ||
25 | * Size of a set in the caches. Initialised for default 16K stride, adjusted | ||
26 | * according to values passed through TBI global heap segment via LDLK (on ATP) | ||
27 | * or config registers (on HTP/MTP) | ||
28 | */ | ||
29 | static int dcache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2 | ||
30 | - DEFAULT_CACHE_WAYS_LOG2; | ||
31 | static int icache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2 | ||
32 | - DEFAULT_CACHE_WAYS_LOG2; | ||
33 | /* | ||
34 | * The number of sets in the caches. Initialised for HTP/ATP, adjusted | ||
35 | * according to NOMMU setting in config registers | ||
36 | */ | ||
37 | static unsigned char dcache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2; | ||
38 | static unsigned char icache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2; | ||
39 | |||
40 | #ifndef CONFIG_METAG_META12 | ||
41 | /** | ||
42 | * metag_lnkget_probe() - Probe whether lnkget/lnkset go around the cache | ||
43 | */ | ||
44 | static volatile u32 lnkget_testdata[16] __initdata __aligned(64); | ||
45 | |||
46 | #define LNKGET_CONSTANT 0xdeadbeef | ||
47 | |||
48 | void __init metag_lnkget_probe(void) | ||
49 | { | ||
50 | int temp; | ||
51 | long flags; | ||
52 | |||
53 | /* | ||
54 | * It's conceivable the user has configured a globally coherent cache | ||
55 | * shared with non-Linux hardware threads, so use LOCK2 to prevent them | ||
56 | * from executing and causing cache eviction during the test. | ||
57 | */ | ||
58 | __global_lock2(flags); | ||
59 | |||
60 | /* read a value to bring it into the cache */ | ||
61 | (void)lnkget_testdata[0]; | ||
62 | lnkget_testdata[0] = 0; | ||
63 | |||
64 | /* lnkget/lnkset it to modify it */ | ||
65 | asm volatile( | ||
66 | "1: LNKGETD %0, [%1]\n" | ||
67 | " LNKSETD [%1], %2\n" | ||
68 | " DEFR %0, TXSTAT\n" | ||
69 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
70 | " CMPT %0, #HI(0x02000000)\n" | ||
71 | " BNZ 1b\n" | ||
72 | : "=&d" (temp) | ||
73 | : "da" (&lnkget_testdata[0]), "bd" (LNKGET_CONSTANT) | ||
74 | : "cc"); | ||
75 | |||
76 | /* re-read it to see if the cached value changed */ | ||
77 | temp = lnkget_testdata[0]; | ||
78 | |||
79 | __global_unlock2(flags); | ||
80 | |||
81 | /* flush the cache line to fix any incoherency */ | ||
82 | __builtin_dcache_flush((void *)&lnkget_testdata[0]); | ||
83 | |||
84 | #if defined(CONFIG_METAG_LNKGET_AROUND_CACHE) | ||
85 | /* if the cache is right, LNKGET_AROUND_CACHE is unnecessary */ | ||
86 | if (temp == LNKGET_CONSTANT) | ||
87 | pr_info("LNKGET/SET go through cache but CONFIG_METAG_LNKGET_AROUND_CACHE=y\n"); | ||
88 | #elif defined(CONFIG_METAG_ATOMICITY_LNKGET) | ||
89 | /* | ||
90 | * if the cache is wrong, LNKGET_AROUND_CACHE is really necessary | ||
91 | * because the kernel is configured to use LNKGET/SET for atomicity | ||
92 | */ | ||
93 | WARN(temp != LNKGET_CONSTANT, | ||
94 | "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n" | ||
95 | "Expect kernel failure as it's used for atomicity primitives\n"); | ||
96 | #elif defined(CONFIG_SMP) | ||
97 | /* | ||
98 | * if the cache is wrong, LNKGET_AROUND_CACHE should be used or the | ||
99 | * gateway page won't flush and userland could break. | ||
100 | */ | ||
101 | WARN(temp != LNKGET_CONSTANT, | ||
102 | "LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n" | ||
103 | "Expect userland failure as it's used for user gateway page\n"); | ||
104 | #else | ||
105 | /* | ||
106 | * if the cache is wrong, LNKGET_AROUND_CACHE is set wrong, but it | ||
107 | * doesn't actually matter as it doesn't have any effect on !SMP && | ||
108 | * !ATOMICITY_LNKGET. | ||
109 | */ | ||
110 | if (temp != LNKGET_CONSTANT) | ||
111 | pr_warn("LNKGET/SET go around cache but CONFIG_METAG_LNKGET_AROUND_CACHE=n\n"); | ||
112 | #endif | ||
113 | } | ||
114 | #endif /* !CONFIG_METAG_META12 */ | ||
115 | |||
116 | /** | ||
117 | * metag_cache_probe() - Probe L1 cache configuration. | ||
118 | * | ||
119 | * Probe the L1 cache configuration to aid the L1 physical cache flushing | ||
120 | * functions. | ||
121 | */ | ||
122 | void __init metag_cache_probe(void) | ||
123 | { | ||
124 | #ifndef CONFIG_METAG_META12 | ||
125 | int coreid = metag_in32(METAC_CORE_ID); | ||
126 | int config = metag_in32(METAC_CORE_CONFIG2); | ||
127 | int cfgcache = coreid & METAC_COREID_CFGCACHE_BITS; | ||
128 | |||
129 | if (cfgcache == METAC_COREID_CFGCACHE_TYPE0 || | ||
130 | cfgcache == METAC_COREID_CFGCACHE_PRIVNOMMU) { | ||
131 | icache_sets_log2 = 1; | ||
132 | dcache_sets_log2 = 1; | ||
133 | } | ||
134 | |||
135 | /* For normal size caches, the smallest size is 4KB. | ||
136 | For small caches, the smallest size is 64 bytes */ | ||
137 | icache_set_shift = (config & METAC_CORECFG2_ICSMALL_BIT) | ||
138 | ? 6 : 12; | ||
139 | icache_set_shift += (config & METAC_CORE_C2ICSZ_BITS) | ||
140 | >> METAC_CORE_C2ICSZ_S; | ||
141 | icache_set_shift -= icache_sets_log2; | ||
142 | |||
143 | dcache_set_shift = (config & METAC_CORECFG2_DCSMALL_BIT) | ||
144 | ? 6 : 12; | ||
145 | dcache_set_shift += (config & METAC_CORECFG2_DCSZ_BITS) | ||
146 | >> METAC_CORECFG2_DCSZ_S; | ||
147 | dcache_set_shift -= dcache_sets_log2; | ||
148 | |||
149 | metag_lnkget_probe(); | ||
150 | #else | ||
151 | /* Extract cache sizes from global heap segment */ | ||
152 | unsigned long val, u; | ||
153 | int width, shift, addend; | ||
154 | PTBISEG seg; | ||
155 | |||
156 | seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL, | ||
157 | TBID_SEGSCOPE_GLOBAL, | ||
158 | TBID_SEGTYPE_HEAP)); | ||
159 | if (seg != NULL) { | ||
160 | val = seg->Data[1]; | ||
161 | |||
162 | /* Work out width of I-cache size bit-field */ | ||
163 | u = ((unsigned long) METAG_TBI_ICACHE_SIZE_BITS) | ||
164 | >> METAG_TBI_ICACHE_SIZE_S; | ||
165 | width = 0; | ||
166 | while (u & 1) { | ||
167 | width++; | ||
168 | u >>= 1; | ||
169 | } | ||
170 | /* Extract sign-extended size addend value */ | ||
171 | shift = 32 - (METAG_TBI_ICACHE_SIZE_S + width); | ||
172 | addend = (long) ((val & METAG_TBI_ICACHE_SIZE_BITS) | ||
173 | << shift) | ||
174 | >> (shift + METAG_TBI_ICACHE_SIZE_S); | ||
175 | /* Now calculate I-cache set size */ | ||
176 | icache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2 | ||
177 | - DEFAULT_CACHE_WAYS_LOG2) | ||
178 | + addend; | ||
179 | |||
180 | /* Similarly for D-cache */ | ||
181 | u = ((unsigned long) METAG_TBI_DCACHE_SIZE_BITS) | ||
182 | >> METAG_TBI_DCACHE_SIZE_S; | ||
183 | width = 0; | ||
184 | while (u & 1) { | ||
185 | width++; | ||
186 | u >>= 1; | ||
187 | } | ||
188 | shift = 32 - (METAG_TBI_DCACHE_SIZE_S + width); | ||
189 | addend = (long) ((val & METAG_TBI_DCACHE_SIZE_BITS) | ||
190 | << shift) | ||
191 | >> (shift + METAG_TBI_DCACHE_SIZE_S); | ||
192 | dcache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2 | ||
193 | - DEFAULT_CACHE_WAYS_LOG2) | ||
194 | + addend; | ||
195 | } | ||
196 | #endif | ||
197 | } | ||
198 | |||
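
The addend extraction above uses a standard sign-extension idiom: shift the
bitfield to the top of a 32-bit word, then arithmetic-shift it back down. A
self-contained sketch of the same trick (the field position and width are
invented here; the kernel code derives them from the METAG_TBI_* masks):

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_S    4                   /* invented bit offset */
    #define FIELD_BITS (0xfU << FIELD_S)   /* invented 4-bit field mask */

    static int32_t extract_signed(uint32_t val)
    {
            int width = 4;
            int shift = 32 - (FIELD_S + width);

            /* After the left shift the field's top bit is the word's sign
             * bit, so the arithmetic right shift sign-extends the field
             * (arithmetic shift of negative values is the behaviour of
             * mainstream compilers, as the kernel code also assumes). */
            return (int32_t)((val & FIELD_BITS) << shift)
                            >> (shift + FIELD_S);
    }

    int main(void)
    {
            printf("%d\n", extract_signed(0xf0)); /* field 0b1111 -> -1 */
            printf("%d\n", extract_signed(0x70)); /* field 0b0111 ->  7 */
            return 0;
    }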
199 | static void metag_phys_data_cache_flush(const void *start) | ||
200 | { | ||
201 | unsigned long flush0, flush1, flush2, flush3; | ||
202 | int loops, step; | ||
203 | int thread; | ||
204 | int part, offset; | ||
205 | int set_shift; | ||
206 | |||
207 | /* Use a sequence of writes to flush the cache region requested */ | ||
208 | thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS) | ||
209 | >> TXENABLE_THREAD_S; | ||
210 | |||
211 | /* Cache is broken into sets which lie in contiguous RAMs */ | ||
212 | set_shift = dcache_set_shift; | ||
213 | |||
214 | /* Move to the base of the physical cache flush region */ | ||
215 | flush0 = LINSYSCFLUSH_DCACHE_LINE; | ||
216 | step = 64; | ||
217 | |||
218 | /* Get partition data for this thread */ | ||
219 | part = metag_in32(SYSC_DCPART0 + | ||
220 | (SYSC_xCPARTn_STRIDE * thread)); | ||
221 | |||
222 | if ((int)start < 0) | ||
223 | /* Access Global vs Local partition */ | ||
224 | part >>= SYSC_xCPARTG_AND_S | ||
225 | - SYSC_xCPARTL_AND_S; | ||
226 | |||
227 | /* Extract offset and move SetOff */ | ||
228 | offset = (part & SYSC_xCPARTL_OR_BITS) | ||
229 | >> SYSC_xCPARTL_OR_S; | ||
230 | flush0 += (offset << (set_shift - 4)); | ||
231 | |||
232 | /* Shrink size */ | ||
233 | part = (part & SYSC_xCPARTL_AND_BITS) | ||
234 | >> SYSC_xCPARTL_AND_S; | ||
235 | loops = ((part + 1) << (set_shift - 4)); | ||
236 | |||
237 | /* Reduce loops by step of cache line size */ | ||
238 | loops /= step; | ||
239 | |||
240 | flush1 = flush0 + (1 << set_shift); | ||
241 | flush2 = flush0 + (2 << set_shift); | ||
242 | flush3 = flush0 + (3 << set_shift); | ||
243 | |||
244 | if (dcache_sets_log2 == 1) { | ||
245 | flush2 = flush1; | ||
246 | flush3 = flush1 + step; | ||
247 | flush1 = flush0 + step; | ||
248 | step <<= 1; | ||
249 | loops >>= 1; | ||
250 | } | ||
251 | |||
252 | /* Clear loops ways in cache */ | ||
253 | while (loops-- != 0) { | ||
254 | /* Clear the ways. */ | ||
255 | #if 0 | ||
256 | /* | ||
257 | * GCC doesn't generate very good code for this so we | ||
258 | * provide inline assembly instead. | ||
259 | */ | ||
260 | metag_out8(0, flush0); | ||
261 | metag_out8(0, flush1); | ||
262 | metag_out8(0, flush2); | ||
263 | metag_out8(0, flush3); | ||
264 | |||
265 | flush0 += step; | ||
266 | flush1 += step; | ||
267 | flush2 += step; | ||
268 | flush3 += step; | ||
269 | #else | ||
270 | asm volatile ( | ||
271 | "SETB\t[%0+%4++],%5\n" | ||
272 | "SETB\t[%1+%4++],%5\n" | ||
273 | "SETB\t[%2+%4++],%5\n" | ||
274 | "SETB\t[%3+%4++],%5\n" | ||
275 | : "+e" (flush0), | ||
276 | "+e" (flush1), | ||
277 | "+e" (flush2), | ||
278 | "+e" (flush3) | ||
279 | : "e" (step), "a" (0)); | ||
280 | #endif | ||
281 | } | ||
282 | } | ||
283 | |||
284 | void metag_data_cache_flush_all(const void *start) | ||
285 | { | ||
286 | if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0) | ||
287 | /* No need to flush the data cache; it's not actually enabled */ | ||
288 | return; | ||
289 | |||
290 | metag_phys_data_cache_flush(start); | ||
291 | } | ||
292 | |||
293 | void metag_data_cache_flush(const void *start, int bytes) | ||
294 | { | ||
295 | unsigned long flush0; | ||
296 | int loops, step; | ||
297 | |||
298 | if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0) | ||
299 | /* No need to flush the data cache; it's not actually enabled */ | ||
300 | return; | ||
301 | |||
302 | if (bytes >= 4096) { | ||
303 | metag_phys_data_cache_flush(start); | ||
304 | return; | ||
305 | } | ||
306 | |||
307 | /* Use linear cache flush mechanism on META IP */ | ||
308 | flush0 = (int)start; | ||
309 | loops = ((int)start & (DCACHE_LINE_BYTES - 1)) + bytes + | ||
310 | (DCACHE_LINE_BYTES - 1); | ||
311 | loops >>= DCACHE_LINE_S; | ||
312 | |||
313 | #define PRIM_FLUSH(addr, offset) do { \ | ||
314 | int __addr = ((int) (addr)) + ((offset) * 64); \ | ||
315 | __builtin_dcache_flush((void *)(__addr)); \ | ||
316 | } while (0) | ||
317 | |||
318 | #define LOOP_INC (4*64) | ||
319 | |||
320 | do { | ||
321 | /* By default stop */ | ||
322 | step = 0; | ||
323 | |||
324 | switch (loops) { | ||
325 | /* Deliberate fall-through cases */ | ||
326 | default: | ||
327 | PRIM_FLUSH(flush0, 3); | ||
328 | loops -= 4; | ||
329 | step = 1; | ||
330 | case 3: | ||
331 | PRIM_FLUSH(flush0, 2); | ||
332 | case 2: | ||
333 | PRIM_FLUSH(flush0, 1); | ||
334 | case 1: | ||
335 | PRIM_FLUSH(flush0, 0); | ||
336 | flush0 += LOOP_INC; | ||
337 | case 0: | ||
338 | break; | ||
339 | } | ||
340 | } while (step); | ||
341 | } | ||
342 | EXPORT_SYMBOL(metag_data_cache_flush); | ||
343 | |||
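The flush loop above (and the matching one in metag_code_cache_flush further
down) unrolls by four with a deliberate fall-through switch, so the remainder
lines are handled without a separate tail loop. The same control flow in
isolation, with op() standing in for the per-cache-line flush primitive:

    #include <stdio.h>

    static void op(int line) { printf("flush line %d\n", line); }

    static void unrolled_flush(int loops)
    {
            int base = 0, step;

            do {
                    step = 0;                /* by default stop */
                    switch (loops) {
                    default:                 /* four or more lines left */
                            op(base + 3);
                            loops -= 4;
                            step = 1;        /* keep iterating */
                            /* fall through */
                    case 3:
                            op(base + 2);
                            /* fall through */
                    case 2:
                            op(base + 1);
                            /* fall through */
                    case 1:
                            op(base + 0);
                            base += 4;
                            /* fall through */
                    case 0:
                            break;
                    }
            } while (step);
    }

    int main(void)
    {
            unrolled_flush(7);  /* one full group of 4, then a tail of 3 */
            return 0;
    }
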
344 | static void metag_phys_code_cache_flush(const void *start, int bytes) | ||
345 | { | ||
346 | unsigned long flush0, flush1, flush2, flush3, end_set; | ||
347 | int loops, step; | ||
348 | int thread; | ||
349 | int set_shift, set_size; | ||
350 | int part, offset; | ||
351 | |||
352 | /* Use a sequence of writes to flush the cache region requested */ | ||
353 | thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS) | ||
354 | >> TXENABLE_THREAD_S; | ||
355 | set_shift = icache_set_shift; | ||
356 | |||
357 | /* Move to the base of the physical cache flush region */ | ||
358 | flush0 = LINSYSCFLUSH_ICACHE_LINE; | ||
359 | step = 64; | ||
360 | |||
361 | /* Get partition code for this thread */ | ||
362 | part = metag_in32(SYSC_ICPART0 + | ||
363 | (SYSC_xCPARTn_STRIDE * thread)); | ||
364 | |||
365 | if ((int)start < 0) | ||
366 | /* Access Global vs Local partition */ | ||
367 | part >>= SYSC_xCPARTG_AND_S-SYSC_xCPARTL_AND_S; | ||
368 | |||
369 | /* Extract offset and move SetOff */ | ||
370 | offset = (part & SYSC_xCPARTL_OR_BITS) | ||
371 | >> SYSC_xCPARTL_OR_S; | ||
372 | flush0 += (offset << (set_shift - 4)); | ||
373 | |||
374 | /* Shrink size */ | ||
375 | part = (part & SYSC_xCPARTL_AND_BITS) | ||
376 | >> SYSC_xCPARTL_AND_S; | ||
377 | loops = ((part + 1) << (set_shift - 4)); | ||
378 | |||
379 | /* Where does the Set end? */ | ||
380 | end_set = flush0 + loops; | ||
381 | set_size = loops; | ||
382 | |||
383 | #ifdef CONFIG_METAG_META12 | ||
384 | if ((bytes < 4096) && (bytes < loops)) { | ||
385 | /* Unreachable on HTP/MTP */ | ||
386 | /* Only target the sets that could be relevant */ | ||
387 | flush0 += (loops - step) & ((int) start); | ||
388 | loops = (((int) start) & (step-1)) + bytes + step - 1; | ||
389 | } | ||
390 | #endif | ||
391 | |||
392 | /* Reduce loops by step of cache line size */ | ||
393 | loops /= step; | ||
394 | |||
395 | flush1 = flush0 + (1<<set_shift); | ||
396 | flush2 = flush0 + (2<<set_shift); | ||
397 | flush3 = flush0 + (3<<set_shift); | ||
398 | |||
399 | if (icache_sets_log2 == 1) { | ||
400 | flush2 = flush1; | ||
401 | flush3 = flush1 + step; | ||
402 | flush1 = flush0 + step; | ||
403 | #if 0 | ||
404 | /* flush0 will stop one line early in this case | ||
405 | * (flush1 will do the final line). | ||
406 | * However we don't correct end_set here at the moment | ||
407 | * because it will never wrap on HTP/MTP | ||
408 | */ | ||
409 | end_set -= step; | ||
410 | #endif | ||
411 | step <<= 1; | ||
412 | loops >>= 1; | ||
413 | } | ||
414 | |||
415 | /* Clear loops ways in cache */ | ||
416 | while (loops-- != 0) { | ||
417 | #if 0 | ||
418 | /* | ||
419 | * GCC doesn't generate very good code for this so we | ||
420 | * provide inline assembly instead. | ||
421 | */ | ||
422 | /* Clear the ways */ | ||
423 | metag_out8(0, flush0); | ||
424 | metag_out8(0, flush1); | ||
425 | metag_out8(0, flush2); | ||
426 | metag_out8(0, flush3); | ||
427 | |||
428 | flush0 += step; | ||
429 | flush1 += step; | ||
430 | flush2 += step; | ||
431 | flush3 += step; | ||
432 | #else | ||
433 | asm volatile ( | ||
434 | "SETB\t[%0+%4++],%5\n" | ||
435 | "SETB\t[%1+%4++],%5\n" | ||
436 | "SETB\t[%2+%4++],%5\n" | ||
437 | "SETB\t[%3+%4++],%5\n" | ||
438 | : "+e" (flush0), | ||
439 | "+e" (flush1), | ||
440 | "+e" (flush2), | ||
441 | "+e" (flush3) | ||
442 | : "e" (step), "a" (0)); | ||
443 | #endif | ||
444 | |||
445 | if (flush0 == end_set) { | ||
446 | /* Wrap within Set 0 */ | ||
447 | flush0 -= set_size; | ||
448 | flush1 -= set_size; | ||
449 | flush2 -= set_size; | ||
450 | flush3 -= set_size; | ||
451 | } | ||
452 | } | ||
453 | } | ||
454 | |||
455 | void metag_code_cache_flush_all(const void *start) | ||
456 | { | ||
457 | if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0) | ||
458 | /* No need to flush the code cache; it's not actually enabled */ | ||
459 | return; | ||
460 | |||
461 | metag_phys_code_cache_flush(start, 4096); | ||
462 | } | ||
463 | EXPORT_SYMBOL(metag_code_cache_flush_all); | ||
464 | |||
465 | void metag_code_cache_flush(const void *start, int bytes) | ||
466 | { | ||
467 | #ifndef CONFIG_METAG_META12 | ||
468 | void *flush; | ||
469 | int loops, step; | ||
470 | #endif /* !CONFIG_METAG_META12 */ | ||
471 | |||
472 | if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0) | ||
473 | /* No need to flush the code cache; it's not actually enabled */ | ||
474 | return; | ||
475 | |||
476 | #ifdef CONFIG_METAG_META12 | ||
477 | /* CACHEWD isn't available on Meta1, so always do full cache flush */ | ||
478 | metag_phys_code_cache_flush(start, bytes); | ||
479 | |||
480 | #else /* CONFIG_METAG_META12 */ | ||
481 | /* If large size do full physical cache flush */ | ||
482 | if (bytes >= 4096) { | ||
483 | metag_phys_code_cache_flush(start, bytes); | ||
484 | return; | ||
485 | } | ||
486 | |||
487 | /* Use linear cache flush mechanism on META IP */ | ||
488 | flush = (void *)((int)start & ~(ICACHE_LINE_BYTES-1)); | ||
489 | loops = ((int)start & (ICACHE_LINE_BYTES-1)) + bytes + | ||
490 | (ICACHE_LINE_BYTES-1); | ||
491 | loops >>= ICACHE_LINE_S; | ||
492 | |||
493 | #define PRIM_IFLUSH(addr, offset) \ | ||
494 | __builtin_meta2_cachewd(((addr) + ((offset) * 64)), CACHEW_ICACHE_BIT) | ||
495 | |||
496 | #define LOOP_INC (4*64) | ||
497 | |||
498 | do { | ||
499 | /* By default stop */ | ||
500 | step = 0; | ||
501 | |||
502 | switch (loops) { | ||
503 | /* Deliberate fall-through cases */ | ||
504 | default: | ||
505 | PRIM_IFLUSH(flush, 3); | ||
506 | loops -= 4; | ||
507 | step = 1; | ||
508 | case 3: | ||
509 | PRIM_IFLUSH(flush, 2); | ||
510 | case 2: | ||
511 | PRIM_IFLUSH(flush, 1); | ||
512 | case 1: | ||
513 | PRIM_IFLUSH(flush, 0); | ||
514 | flush += LOOP_INC; | ||
515 | case 0: | ||
516 | break; | ||
517 | } | ||
518 | } while (step); | ||
519 | #endif /* !CONFIG_METAG_META12 */ | ||
520 | } | ||
521 | EXPORT_SYMBOL(metag_code_cache_flush); | ||
diff --git a/arch/metag/mm/extable.c b/arch/metag/mm/extable.c new file mode 100644 index 000000000000..2a21eaebe84d --- /dev/null +++ b/arch/metag/mm/extable.c | |||
@@ -0,0 +1,15 @@ | |||
1 | |||
2 | #include <linux/module.h> | ||
3 | #include <linux/uaccess.h> | ||
4 | |||
5 | int fixup_exception(struct pt_regs *regs) | ||
6 | { | ||
7 | const struct exception_table_entry *fixup; | ||
8 | unsigned long pc = instruction_pointer(regs); | ||
9 | |||
10 | fixup = search_exception_tables(pc); | ||
11 | if (fixup) | ||
12 | regs->ctx.CurrPC = fixup->fixup; | ||
13 | |||
14 | return fixup != NULL; | ||
15 | } | ||
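
fixup_exception() drives the kernel's usual exception-table mechanism: each
entry pairs the address of an instruction that may fault with an address to
resume at, and search_exception_tables() finds the entry matching a faulting
PC. For reference, the classic two-word entry layout (a sketch; metag's real
definition lives in its uaccess headers):

    struct exception_table_entry {
            unsigned long insn;   /* address of the potentially faulting insn */
            unsigned long fixup;  /* address to resume at after the fault */
    };
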
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c new file mode 100644 index 000000000000..2c75bf7357c5 --- /dev/null +++ b/arch/metag/mm/fault.c | |||
@@ -0,0 +1,239 @@ | |||
1 | /* | ||
2 | * Meta page fault handling. | ||
3 | * | ||
4 | * Copyright (C) 2005-2012 Imagination Technologies Ltd. | ||
5 | */ | ||
6 | |||
7 | #include <linux/mman.h> | ||
8 | #include <linux/mm.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/ptrace.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/uaccess.h> | ||
13 | |||
14 | #include <asm/tlbflush.h> | ||
15 | #include <asm/mmu.h> | ||
16 | #include <asm/traps.h> | ||
17 | |||
18 | /* Clear any pending catch buffer state. */ | ||
19 | static void clear_cbuf_entry(struct pt_regs *regs, unsigned long addr, | ||
20 | unsigned int trapno) | ||
21 | { | ||
22 | PTBICTXEXTCB0 cbuf = regs->extcb0; | ||
23 | |||
24 | switch (trapno) { | ||
25 | /* Instruction fetch faults leave no catch buffer state. */ | ||
26 | case TBIXXF_SIGNUM_IGF: | ||
27 | case TBIXXF_SIGNUM_IPF: | ||
28 | return; | ||
29 | default: | ||
30 | if (cbuf[0].CBAddr == addr) { | ||
31 | cbuf[0].CBAddr = 0; | ||
32 | cbuf[0].CBFlags &= ~TXCATCH0_FAULT_BITS; | ||
33 | |||
34 | /* And, as this is the ONLY catch entry, we | ||
35 | * need to clear the cbuf bit from the context! | ||
36 | */ | ||
37 | regs->ctx.SaveMask &= ~(TBICTX_CBUF_BIT | | ||
38 | TBICTX_XCBF_BIT); | ||
39 | |||
40 | return; | ||
41 | } | ||
42 | pr_err("Failed to clear cbuf entry!\n"); | ||
43 | } | ||
44 | } | ||
45 | |||
46 | int show_unhandled_signals = 1; | ||
47 | |||
48 | int do_page_fault(struct pt_regs *regs, unsigned long address, | ||
49 | unsigned int write_access, unsigned int trapno) | ||
50 | { | ||
51 | struct task_struct *tsk; | ||
52 | struct mm_struct *mm; | ||
53 | struct vm_area_struct *vma, *prev_vma; | ||
54 | siginfo_t info; | ||
55 | int fault; | ||
56 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | | ||
57 | (write_access ? FAULT_FLAG_WRITE : 0); | ||
58 | |||
59 | tsk = current; | ||
60 | |||
61 | if ((address >= VMALLOC_START) && (address < VMALLOC_END)) { | ||
62 | /* | ||
63 | * Synchronize this task's top level page-table | ||
64 | * with the 'reference' page table. | ||
65 | * | ||
66 | * Do _not_ use "tsk" here. We might be inside | ||
67 | * an interrupt in the middle of a task switch.. | ||
68 | */ | ||
69 | int offset = pgd_index(address); | ||
70 | pgd_t *pgd, *pgd_k; | ||
71 | pud_t *pud, *pud_k; | ||
72 | pmd_t *pmd, *pmd_k; | ||
73 | pte_t *pte_k; | ||
74 | |||
75 | pgd = ((pgd_t *)mmu_get_base()) + offset; | ||
76 | pgd_k = swapper_pg_dir + offset; | ||
77 | |||
78 | /* This will never happen with the folded page table. */ | ||
79 | if (!pgd_present(*pgd)) { | ||
80 | if (!pgd_present(*pgd_k)) | ||
81 | goto bad_area_nosemaphore; | ||
82 | set_pgd(pgd, *pgd_k); | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | pud = pud_offset(pgd, address); | ||
87 | pud_k = pud_offset(pgd_k, address); | ||
88 | if (!pud_present(*pud_k)) | ||
89 | goto bad_area_nosemaphore; | ||
90 | set_pud(pud, *pud_k); | ||
91 | |||
92 | pmd = pmd_offset(pud, address); | ||
93 | pmd_k = pmd_offset(pud_k, address); | ||
94 | if (!pmd_present(*pmd_k)) | ||
95 | goto bad_area_nosemaphore; | ||
96 | set_pmd(pmd, *pmd_k); | ||
97 | |||
98 | pte_k = pte_offset_kernel(pmd_k, address); | ||
99 | if (!pte_present(*pte_k)) | ||
100 | goto bad_area_nosemaphore; | ||
101 | |||
102 | /* May only be needed on Chorus2 */ | ||
103 | flush_tlb_all(); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | mm = tsk->mm; | ||
108 | |||
109 | if (in_atomic() || !mm) | ||
110 | goto no_context; | ||
111 | |||
112 | retry: | ||
113 | down_read(&mm->mmap_sem); | ||
114 | |||
115 | vma = find_vma_prev(mm, address, &prev_vma); | ||
116 | |||
117 | if (!vma || address < vma->vm_start) | ||
118 | goto check_expansion; | ||
119 | |||
120 | good_area: | ||
121 | if (write_access) { | ||
122 | if (!(vma->vm_flags & VM_WRITE)) | ||
123 | goto bad_area; | ||
124 | } else { | ||
125 | if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) | ||
126 | goto bad_area; | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * If for any reason at all we couldn't handle the fault, | ||
131 | * make sure we exit gracefully rather than endlessly redo | ||
132 | * the fault. | ||
133 | */ | ||
134 | fault = handle_mm_fault(mm, vma, address, flags); | ||
135 | |||
136 | if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) | ||
137 | return 0; | ||
138 | |||
139 | if (unlikely(fault & VM_FAULT_ERROR)) { | ||
140 | if (fault & VM_FAULT_OOM) | ||
141 | goto out_of_memory; | ||
142 | else if (fault & VM_FAULT_SIGBUS) | ||
143 | goto do_sigbus; | ||
144 | BUG(); | ||
145 | } | ||
146 | if (flags & FAULT_FLAG_ALLOW_RETRY) { | ||
147 | if (fault & VM_FAULT_MAJOR) | ||
148 | tsk->maj_flt++; | ||
149 | else | ||
150 | tsk->min_flt++; | ||
151 | if (fault & VM_FAULT_RETRY) { | ||
152 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | ||
153 | flags |= FAULT_FLAG_TRIED; | ||
154 | |||
155 | /* | ||
156 | * No need to up_read(&mm->mmap_sem) as we would | ||
157 | * have already released it in __lock_page_or_retry | ||
158 | * in mm/filemap.c. | ||
159 | */ | ||
160 | |||
161 | goto retry; | ||
162 | } | ||
163 | } | ||
164 | |||
165 | up_read(&mm->mmap_sem); | ||
166 | return 0; | ||
167 | |||
168 | check_expansion: | ||
169 | vma = prev_vma; | ||
170 | if (vma && (expand_stack(vma, address) == 0)) | ||
171 | goto good_area; | ||
172 | |||
173 | bad_area: | ||
174 | up_read(&mm->mmap_sem); | ||
175 | |||
176 | bad_area_nosemaphore: | ||
177 | if (user_mode(regs)) { | ||
178 | info.si_signo = SIGSEGV; | ||
179 | info.si_errno = 0; | ||
180 | info.si_code = SEGV_MAPERR; | ||
181 | info.si_addr = (__force void __user *)address; | ||
182 | info.si_trapno = trapno; | ||
183 | |||
184 | if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && | ||
185 | printk_ratelimit()) { | ||
186 | pr_info("%s%s[%d]: segfault at %lx pc %08x sp %08x write %d trap %#x (%s)", | ||
187 | task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, | ||
188 | tsk->comm, task_pid_nr(tsk), address, | ||
189 | regs->ctx.CurrPC, regs->ctx.AX[0].U0, | ||
190 | write_access, trapno, trap_name(trapno)); | ||
191 | print_vma_addr(" in ", regs->ctx.CurrPC); | ||
192 | print_vma_addr(" rtp in ", regs->ctx.DX[4].U1); | ||
193 | printk("\n"); | ||
194 | show_regs(regs); | ||
195 | } | ||
196 | force_sig_info(SIGSEGV, &info, tsk); | ||
197 | return 1; | ||
198 | } | ||
199 | goto no_context; | ||
200 | |||
201 | do_sigbus: | ||
202 | up_read(&mm->mmap_sem); | ||
203 | |||
204 | /* | ||
205 | * Send a sigbus, regardless of whether we were in kernel | ||
206 | * or user mode. | ||
207 | */ | ||
208 | info.si_signo = SIGBUS; | ||
209 | info.si_errno = 0; | ||
210 | info.si_code = BUS_ADRERR; | ||
211 | info.si_addr = (__force void __user *)address; | ||
212 | info.si_trapno = trapno; | ||
213 | force_sig_info(SIGBUS, &info, tsk); | ||
214 | |||
215 | /* Kernel mode? Handle exceptions or die */ | ||
216 | if (!user_mode(regs)) | ||
217 | goto no_context; | ||
218 | |||
219 | return 1; | ||
220 | |||
221 | /* | ||
222 | * We ran out of memory, or some other thing happened to us that made | ||
223 | * us unable to handle the page fault gracefully. | ||
224 | */ | ||
225 | out_of_memory: | ||
226 | up_read(&mm->mmap_sem); | ||
227 | if (user_mode(regs)) | ||
228 | do_group_exit(SIGKILL); | ||
229 | |||
230 | no_context: | ||
231 | /* Are we prepared to handle this kernel fault? */ | ||
232 | if (fixup_exception(regs)) { | ||
233 | clear_cbuf_entry(regs, address, trapno); | ||
234 | return 1; | ||
235 | } | ||
236 | |||
237 | die("Oops", regs, (write_access << 15) | trapno, address); | ||
238 | do_exit(SIGKILL); | ||
239 | } | ||
diff --git a/arch/metag/mm/highmem.c b/arch/metag/mm/highmem.c new file mode 100644 index 000000000000..d71f621a2c0b --- /dev/null +++ b/arch/metag/mm/highmem.c | |||
@@ -0,0 +1,133 @@ | |||
1 | #include <linux/export.h> | ||
2 | #include <linux/highmem.h> | ||
3 | #include <linux/sched.h> | ||
4 | #include <linux/smp.h> | ||
5 | #include <linux/interrupt.h> | ||
6 | #include <asm/fixmap.h> | ||
7 | #include <asm/tlbflush.h> | ||
8 | |||
9 | static pte_t *kmap_pte; | ||
10 | |||
11 | unsigned long highstart_pfn, highend_pfn; | ||
12 | |||
13 | void *kmap(struct page *page) | ||
14 | { | ||
15 | might_sleep(); | ||
16 | if (!PageHighMem(page)) | ||
17 | return page_address(page); | ||
18 | return kmap_high(page); | ||
19 | } | ||
20 | EXPORT_SYMBOL(kmap); | ||
21 | |||
22 | void kunmap(struct page *page) | ||
23 | { | ||
24 | BUG_ON(in_interrupt()); | ||
25 | if (!PageHighMem(page)) | ||
26 | return; | ||
27 | kunmap_high(page); | ||
28 | } | ||
29 | EXPORT_SYMBOL(kunmap); | ||
30 | |||
31 | /* | ||
32 | * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because | ||
33 | * no global lock is needed and because the kmap code must perform a global TLB | ||
34 | * invalidation when the kmap pool wraps. | ||
35 | * | ||
36 | * However, when holding an atomic kmap it is not legal to sleep, so atomic | ||
37 | * kmaps are appropriate for short, tight code paths only. | ||
38 | */ | ||
39 | |||
40 | void *kmap_atomic(struct page *page) | ||
41 | { | ||
42 | enum fixed_addresses idx; | ||
43 | unsigned long vaddr; | ||
44 | int type; | ||
45 | |||
46 | /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ | ||
47 | pagefault_disable(); | ||
48 | if (!PageHighMem(page)) | ||
49 | return page_address(page); | ||
50 | |||
51 | type = kmap_atomic_idx_push(); | ||
52 | idx = type + KM_TYPE_NR * smp_processor_id(); | ||
53 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
54 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
55 | BUG_ON(!pte_none(*(kmap_pte - idx))); | ||
56 | #endif | ||
57 | set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL)); | ||
58 | |||
59 | return (void *)vaddr; | ||
60 | } | ||
61 | EXPORT_SYMBOL(kmap_atomic); | ||
62 | |||
63 | void __kunmap_atomic(void *kvaddr) | ||
64 | { | ||
65 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | ||
66 | int idx, type; | ||
67 | |||
68 | if (kvaddr >= (void *)FIXADDR_START) { | ||
69 | type = kmap_atomic_idx(); | ||
70 | idx = type + KM_TYPE_NR * smp_processor_id(); | ||
71 | |||
72 | /* | ||
73 | * Force other mappings to Oops if they try to access this | ||
74 | * pte without first remapping it. Keeping stale mappings around | ||
75 | * is also a bad idea, in case the page changes cacheability | ||
76 | * attributes or becomes a protected page in a hypervisor. | ||
77 | */ | ||
78 | pte_clear(&init_mm, vaddr, kmap_pte-idx); | ||
79 | flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); | ||
80 | |||
81 | kmap_atomic_idx_pop(); | ||
82 | } | ||
83 | |||
84 | pagefault_enable(); | ||
85 | } | ||
86 | EXPORT_SYMBOL(__kunmap_atomic); | ||
87 | |||
88 | /* | ||
89 | * This is the same as kmap_atomic() but can map memory that doesn't | ||
90 | * have a struct page associated with it. | ||
91 | */ | ||
92 | void *kmap_atomic_pfn(unsigned long pfn) | ||
93 | { | ||
94 | enum fixed_addresses idx; | ||
95 | unsigned long vaddr; | ||
96 | int type; | ||
97 | |||
98 | pagefault_disable(); | ||
99 | |||
100 | type = kmap_atomic_idx_push(); | ||
101 | idx = type + KM_TYPE_NR * smp_processor_id(); | ||
102 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
103 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
104 | BUG_ON(!pte_none(*(kmap_pte - idx))); | ||
105 | #endif | ||
106 | set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL)); | ||
107 | flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); | ||
108 | |||
109 | return (void *)vaddr; | ||
110 | } | ||
111 | |||
112 | struct page *kmap_atomic_to_page(void *ptr) | ||
113 | { | ||
114 | unsigned long vaddr = (unsigned long)ptr; | ||
115 | int idx; | ||
116 | pte_t *pte; | ||
117 | |||
118 | if (vaddr < FIXADDR_START) | ||
119 | return virt_to_page(ptr); | ||
120 | |||
121 | idx = virt_to_fix(vaddr); | ||
122 | pte = kmap_pte - (idx - FIX_KMAP_BEGIN); | ||
123 | return pte_page(*pte); | ||
124 | } | ||
125 | |||
126 | void __init kmap_init(void) | ||
127 | { | ||
128 | unsigned long kmap_vstart; | ||
129 | |||
130 | /* cache the first kmap pte */ | ||
131 | kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); | ||
132 | kmap_pte = kmap_get_fixmap_pte(kmap_vstart); | ||
133 | } | ||
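
A hedged usage sketch of the atomic kmap API above: map a highmem page, touch
it without sleeping, then unmap. kunmap_atomic() is assumed to be the generic
wrapper around __kunmap_atomic() as in other architectures of this era:

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void zero_highpage_example(struct page *page)
    {
            void *vaddr = kmap_atomic(page);

            /* No sleeping allowed between map and unmap */
            memset(vaddr, 0, PAGE_SIZE);
            kunmap_atomic(vaddr);
    }
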
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c new file mode 100644 index 000000000000..3c52fa6d0f8e --- /dev/null +++ b/arch/metag/mm/hugetlbpage.c | |||
@@ -0,0 +1,259 @@ | |||
1 | /* | ||
2 | * arch/metag/mm/hugetlbpage.c | ||
3 | * | ||
4 | * METAG HugeTLB page support. | ||
5 | * | ||
6 | * Cloned from SuperH | ||
7 | * | ||
8 | * Cloned from sparc64 by Paul Mundt. | ||
9 | * | ||
10 | * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com) | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/fs.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/hugetlb.h> | ||
17 | #include <linux/pagemap.h> | ||
18 | #include <linux/sysctl.h> | ||
19 | |||
20 | #include <asm/mman.h> | ||
21 | #include <asm/pgalloc.h> | ||
22 | #include <asm/tlb.h> | ||
23 | #include <asm/tlbflush.h> | ||
24 | #include <asm/cacheflush.h> | ||
25 | |||
26 | /* | ||
27 | * If the arch doesn't supply something else, assume that hugepage | ||
28 | * size aligned regions are ok without further preparation. | ||
29 | */ | ||
30 | int prepare_hugepage_range(struct file *file, unsigned long addr, | ||
31 | unsigned long len) | ||
32 | { | ||
33 | struct mm_struct *mm = current->mm; | ||
34 | struct hstate *h = hstate_file(file); | ||
35 | struct vm_area_struct *vma; | ||
36 | |||
37 | if (len & ~huge_page_mask(h)) | ||
38 | return -EINVAL; | ||
39 | if (addr & ~huge_page_mask(h)) | ||
40 | return -EINVAL; | ||
41 | if (TASK_SIZE - len < addr) | ||
42 | return -EINVAL; | ||
43 | |||
44 | vma = find_vma(mm, ALIGN_HUGEPT(addr)); | ||
45 | if (vma && !(vma->vm_flags & MAP_HUGETLB)) | ||
46 | return -EINVAL; | ||
47 | |||
48 | vma = find_vma(mm, addr); | ||
49 | if (vma) { | ||
50 | if (addr + len > vma->vm_start) | ||
51 | return -EINVAL; | ||
52 | if (!(vma->vm_flags & MAP_HUGETLB) && | ||
53 | (ALIGN_HUGEPT(addr + len) > vma->vm_start)) | ||
54 | return -EINVAL; | ||
55 | } | ||
56 | return 0; | ||
57 | } | ||
58 | |||
59 | pte_t *huge_pte_alloc(struct mm_struct *mm, | ||
60 | unsigned long addr, unsigned long sz) | ||
61 | { | ||
62 | pgd_t *pgd; | ||
63 | pud_t *pud; | ||
64 | pmd_t *pmd; | ||
65 | pte_t *pte; | ||
66 | |||
67 | pgd = pgd_offset(mm, addr); | ||
68 | pud = pud_offset(pgd, addr); | ||
69 | pmd = pmd_offset(pud, addr); | ||
70 | pte = pte_alloc_map(mm, NULL, pmd, addr); | ||
71 | pgd->pgd &= ~_PAGE_SZ_MASK; | ||
72 | pgd->pgd |= _PAGE_SZHUGE; | ||
73 | |||
74 | return pte; | ||
75 | } | ||
76 | |||
77 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | ||
78 | { | ||
79 | pgd_t *pgd; | ||
80 | pud_t *pud; | ||
81 | pmd_t *pmd; | ||
82 | pte_t *pte = NULL; | ||
83 | |||
84 | pgd = pgd_offset(mm, addr); | ||
85 | pud = pud_offset(pgd, addr); | ||
86 | pmd = pmd_offset(pud, addr); | ||
87 | pte = pte_offset_kernel(pmd, addr); | ||
88 | |||
89 | return pte; | ||
90 | } | ||
91 | |||
92 | int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) | ||
93 | { | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | struct page *follow_huge_addr(struct mm_struct *mm, | ||
98 | unsigned long address, int write) | ||
99 | { | ||
100 | return ERR_PTR(-EINVAL); | ||
101 | } | ||
102 | |||
103 | int pmd_huge(pmd_t pmd) | ||
104 | { | ||
105 | return pmd_page_shift(pmd) > PAGE_SHIFT; | ||
106 | } | ||
107 | |||
108 | int pud_huge(pud_t pud) | ||
109 | { | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, | ||
114 | pmd_t *pmd, int write) | ||
115 | { | ||
116 | return NULL; | ||
117 | } | ||
118 | |||
119 | #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
120 | |||
121 | /* | ||
122 | * Look for an unmapped area starting after another hugetlb vma. | ||
123 | * There are guaranteed to be no huge pte's spare if all the huge pages are | ||
124 | * full size (4MB), so in that case compile out this search. | ||
125 | */ | ||
126 | #if HPAGE_SHIFT == HUGEPT_SHIFT | ||
127 | static inline unsigned long | ||
128 | hugetlb_get_unmapped_area_existing(unsigned long len) | ||
129 | { | ||
130 | return 0; | ||
131 | } | ||
132 | #else | ||
133 | static unsigned long | ||
134 | hugetlb_get_unmapped_area_existing(unsigned long len) | ||
135 | { | ||
136 | struct mm_struct *mm = current->mm; | ||
137 | struct vm_area_struct *vma; | ||
138 | unsigned long start_addr, addr; | ||
139 | int after_huge; | ||
140 | |||
141 | if (mm->context.part_huge) { | ||
142 | start_addr = mm->context.part_huge; | ||
143 | after_huge = 1; | ||
144 | } else { | ||
145 | start_addr = TASK_UNMAPPED_BASE; | ||
146 | after_huge = 0; | ||
147 | } | ||
148 | new_search: | ||
149 | addr = start_addr; | ||
150 | |||
151 | for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { | ||
152 | if ((!vma && !after_huge) || TASK_SIZE - len < addr) { | ||
153 | /* | ||
154 | * Start a new search - just in case we missed | ||
155 | * some holes. | ||
156 | */ | ||
157 | if (start_addr != TASK_UNMAPPED_BASE) { | ||
158 | start_addr = TASK_UNMAPPED_BASE; | ||
159 | goto new_search; | ||
160 | } | ||
161 | return 0; | ||
162 | } | ||
163 | /* skip ahead if we've aligned right over some vmas */ | ||
164 | if (vma && vma->vm_end <= addr) | ||
165 | continue; | ||
166 | /* space before the next vma? */ | ||
167 | if (after_huge && (!vma || ALIGN_HUGEPT(addr + len) | ||
168 | <= vma->vm_start)) { | ||
169 | unsigned long end = addr + len; | ||
170 | if (end & HUGEPT_MASK) | ||
171 | mm->context.part_huge = end; | ||
172 | else if (addr == mm->context.part_huge) | ||
173 | mm->context.part_huge = 0; | ||
174 | return addr; | ||
175 | } | ||
176 | if (vma && (vma->vm_flags & MAP_HUGETLB)) { | ||
177 | /* space after a huge vma in 2nd level page table? */ | ||
178 | if (vma->vm_end & HUGEPT_MASK) { | ||
179 | after_huge = 1; | ||
180 | /* no need to align to the next PT block */ | ||
181 | addr = vma->vm_end; | ||
182 | continue; | ||
183 | } | ||
184 | } | ||
185 | after_huge = 0; | ||
186 | addr = ALIGN_HUGEPT(vma->vm_end); | ||
187 | } | ||
188 | } | ||
189 | #endif | ||
190 | |||
191 | /* Do a full search to find an area without any nearby normal pages. */ | ||
192 | static unsigned long | ||
193 | hugetlb_get_unmapped_area_new_pmd(unsigned long len) | ||
194 | { | ||
195 | struct vm_unmapped_area_info info; | ||
196 | |||
197 | info.flags = 0; | ||
198 | info.length = len; | ||
199 | info.low_limit = TASK_UNMAPPED_BASE; | ||
200 | info.high_limit = TASK_SIZE; | ||
201 | info.align_mask = PAGE_MASK & HUGEPT_MASK; | ||
202 | info.align_offset = 0; | ||
203 | return vm_unmapped_area(&info); | ||
204 | } | ||
205 | |||
206 | unsigned long | ||
207 | hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | ||
208 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
209 | { | ||
210 | struct hstate *h = hstate_file(file); | ||
211 | |||
212 | if (len & ~huge_page_mask(h)) | ||
213 | return -EINVAL; | ||
214 | if (len > TASK_SIZE) | ||
215 | return -ENOMEM; | ||
216 | |||
217 | if (flags & MAP_FIXED) { | ||
218 | if (prepare_hugepage_range(file, addr, len)) | ||
219 | return -EINVAL; | ||
220 | return addr; | ||
221 | } | ||
222 | |||
223 | if (addr) { | ||
224 | addr = ALIGN(addr, huge_page_size(h)); | ||
225 | if (!prepare_hugepage_range(file, addr, len)) | ||
226 | return addr; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Look for an existing hugetlb vma with space after it (this is to | ||
231 | * minimise fragmentation caused by huge pages). | ||
232 | */ | ||
233 | addr = hugetlb_get_unmapped_area_existing(len); | ||
234 | if (addr) | ||
235 | return addr; | ||
236 | |||
237 | /* | ||
238 | * Find an unmapped naturally aligned set of 4MB blocks that we can use | ||
239 | * for huge pages. | ||
240 | */ | ||
241 | return hugetlb_get_unmapped_area_new_pmd(len); | ||
242 | } | ||
243 | |||
244 | #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/ | ||
245 | |||
246 | /* necessary for boot time 4MB huge page allocation */ | ||
247 | static __init int setup_hugepagesz(char *opt) | ||
248 | { | ||
249 | unsigned long ps = memparse(opt, &opt); | ||
250 | if (ps == (1 << HPAGE_SHIFT)) { | ||
251 | hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT); | ||
252 | } else { | ||
253 | pr_err("hugepagesz: Unsupported page size %lu M\n", | ||
254 | ps >> 20); | ||
255 | return 0; | ||
256 | } | ||
257 | return 1; | ||
258 | } | ||
259 | __setup("hugepagesz=", setup_hugepagesz); | ||
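
Usage note: setup_hugepagesz() only accepts the single compiled-in size, so
with the default Kconfig choice above (1MB) one would boot with hugepagesz=1M
on the kernel command line, or hugepagesz=4M if the 4MB size were selected;
any other value hits the pr_err() path and is ignored.
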
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c new file mode 100644 index 000000000000..504a398d5f8b --- /dev/null +++ b/arch/metag/mm/init.c | |||
@@ -0,0 +1,451 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005,2006,2007,2008,2009,2010 Imagination Technologies | ||
3 | * | ||
4 | */ | ||
5 | |||
6 | #include <linux/export.h> | ||
7 | #include <linux/mm.h> | ||
8 | #include <linux/swap.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/bootmem.h> | ||
11 | #include <linux/pagemap.h> | ||
12 | #include <linux/percpu.h> | ||
13 | #include <linux/memblock.h> | ||
14 | #include <linux/initrd.h> | ||
15 | #include <linux/of_fdt.h> | ||
16 | |||
17 | #include <asm/setup.h> | ||
18 | #include <asm/page.h> | ||
19 | #include <asm/pgalloc.h> | ||
20 | #include <asm/mmu.h> | ||
21 | #include <asm/mmu_context.h> | ||
22 | #include <asm/sections.h> | ||
23 | #include <asm/tlb.h> | ||
24 | #include <asm/user_gateway.h> | ||
25 | #include <asm/mmzone.h> | ||
26 | #include <asm/fixmap.h> | ||
27 | |||
28 | unsigned long pfn_base; | ||
29 | EXPORT_SYMBOL(pfn_base); | ||
30 | |||
31 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data; | ||
32 | |||
33 | unsigned long empty_zero_page; | ||
34 | EXPORT_SYMBOL(empty_zero_page); | ||
35 | |||
36 | extern char __user_gateway_start; | ||
37 | extern char __user_gateway_end; | ||
38 | |||
39 | void *gateway_page; | ||
40 | |||
41 | /* | ||
42 | * Insert the gateway page into a set of page tables, creating the | ||
43 | * page tables if necessary. | ||
44 | */ | ||
45 | static void insert_gateway_page(pgd_t *pgd, unsigned long address) | ||
46 | { | ||
47 | pud_t *pud; | ||
48 | pmd_t *pmd; | ||
49 | pte_t *pte; | ||
50 | |||
51 | BUG_ON(!pgd_present(*pgd)); | ||
52 | |||
53 | pud = pud_offset(pgd, address); | ||
54 | BUG_ON(!pud_present(*pud)); | ||
55 | |||
56 | pmd = pmd_offset(pud, address); | ||
57 | if (!pmd_present(*pmd)) { | ||
58 | pte = alloc_bootmem_pages(PAGE_SIZE); | ||
59 | set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte))); | ||
60 | } | ||
61 | |||
62 | pte = pte_offset_kernel(pmd, address); | ||
63 | set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY)); | ||
64 | } | ||
65 | |||
66 | /* Alloc and map a page in a known location accessible to userspace. */ | ||
67 | static void __init user_gateway_init(void) | ||
68 | { | ||
69 | unsigned long address = USER_GATEWAY_PAGE; | ||
70 | int offset = pgd_index(address); | ||
71 | pgd_t *pgd; | ||
72 | |||
73 | gateway_page = alloc_bootmem_pages(PAGE_SIZE); | ||
74 | |||
75 | pgd = swapper_pg_dir + offset; | ||
76 | insert_gateway_page(pgd, address); | ||
77 | |||
78 | #ifdef CONFIG_METAG_META12 | ||
79 | /* | ||
80 | * Insert the gateway page into our current page tables even | ||
81 | * though we've already inserted it into our reference page | ||
82 | * table (swapper_pg_dir). This is because with a META1 mmu we | ||
83 | * copy just the user address range and not the gateway page | ||
84 | * entry on context switch, see switch_mmu(). | ||
85 | */ | ||
86 | pgd = (pgd_t *)mmu_get_base() + offset; | ||
87 | insert_gateway_page(pgd, address); | ||
88 | #endif /* CONFIG_METAG_META12 */ | ||
89 | |||
90 | BUG_ON((&__user_gateway_end - &__user_gateway_start) > PAGE_SIZE); | ||
91 | |||
92 | gateway_page += (address & ~PAGE_MASK); | ||
93 | |||
94 | memcpy(gateway_page, &__user_gateway_start, | ||
95 | &__user_gateway_end - &__user_gateway_start); | ||
96 | |||
97 | /* | ||
98 | * We don't need to flush the TLB here, there should be no mapping | ||
99 | * present at boot for this address and only valid mappings are in | ||
100 | * the TLB (apart from on Meta 1.x, but those cached invalid | ||
101 | * mappings should be impossible to hit here). | ||
102 | * | ||
103 | * We don't flush the code cache here even though we have written | ||
104 | * code through the data cache and they may not be coherent. At | ||
105 | * this point we assume there is no stale data in the code cache | ||
106 | * for this address so there is no need to flush. | ||
107 | */ | ||
108 | } | ||
109 | |||
110 | static void __init allocate_pgdat(unsigned int nid) | ||
111 | { | ||
112 | unsigned long start_pfn, end_pfn; | ||
113 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
114 | unsigned long phys; | ||
115 | #endif | ||
116 | |||
117 | get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); | ||
118 | |||
119 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
120 | phys = __memblock_alloc_base(sizeof(struct pglist_data), | ||
121 | SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT); | ||
122 | /* Retry with all of system memory */ | ||
123 | if (!phys) | ||
124 | phys = __memblock_alloc_base(sizeof(struct pglist_data), | ||
125 | SMP_CACHE_BYTES, | ||
126 | memblock_end_of_DRAM()); | ||
127 | if (!phys) | ||
128 | panic("Can't allocate pgdat for node %d\n", nid); | ||
129 | |||
130 | NODE_DATA(nid) = __va(phys); | ||
131 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); | ||
132 | |||
133 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; | ||
134 | #endif | ||
135 | |||
136 | NODE_DATA(nid)->node_start_pfn = start_pfn; | ||
137 | NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; | ||
138 | } | ||
139 | |||
140 | static void __init bootmem_init_one_node(unsigned int nid) | ||
141 | { | ||
142 | unsigned long total_pages, paddr; | ||
143 | unsigned long end_pfn; | ||
144 | struct pglist_data *p; | ||
145 | |||
146 | p = NODE_DATA(nid); | ||
147 | |||
148 | /* Nothing to do.. */ | ||
149 | if (!p->node_spanned_pages) | ||
150 | return; | ||
151 | |||
152 | end_pfn = p->node_start_pfn + p->node_spanned_pages; | ||
153 | #ifdef CONFIG_HIGHMEM | ||
154 | if (end_pfn > max_low_pfn) | ||
155 | end_pfn = max_low_pfn; | ||
156 | #endif | ||
157 | |||
158 | total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn); | ||
159 | |||
160 | paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE); | ||
161 | if (!paddr) | ||
162 | panic("Can't allocate bootmap for nid[%d]\n", nid); | ||
163 | |||
164 | init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn); | ||
165 | |||
166 | free_bootmem_with_active_regions(nid, end_pfn); | ||
167 | |||
168 | /* | ||
169 | * XXX Handle initial reservations for the system memory node | ||
170 | * only for the moment, we'll refactor this later for handling | ||
171 | * reservations in other nodes. | ||
172 | */ | ||
173 | if (nid == 0) { | ||
174 | struct memblock_region *reg; | ||
175 | |||
176 | /* Reserve the sections we're already using. */ | ||
177 | for_each_memblock(reserved, reg) { | ||
178 | unsigned long size = reg->size; | ||
179 | |||
180 | #ifdef CONFIG_HIGHMEM | ||
181 | /* ...but not highmem */ | ||
182 | if (PFN_DOWN(reg->base) >= highstart_pfn) | ||
183 | continue; | ||
184 | |||
185 | if (PFN_UP(reg->base + size) > highstart_pfn) | ||
186 | size = (highstart_pfn - PFN_DOWN(reg->base)) | ||
187 | << PAGE_SHIFT; | ||
188 | #endif | ||
189 | |||
190 | reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT); | ||
191 | } | ||
192 | } | ||
193 | |||
194 | sparse_memory_present_with_active_regions(nid); | ||
195 | } | ||
196 | |||
197 | static void __init do_init_bootmem(void) | ||
198 | { | ||
199 | struct memblock_region *reg; | ||
200 | int i; | ||
201 | |||
202 | /* Add active regions with valid PFNs. */ | ||
203 | for_each_memblock(memory, reg) { | ||
204 | unsigned long start_pfn, end_pfn; | ||
205 | start_pfn = memblock_region_memory_base_pfn(reg); | ||
206 | end_pfn = memblock_region_memory_end_pfn(reg); | ||
207 | memblock_set_node(PFN_PHYS(start_pfn), | ||
208 | PFN_PHYS(end_pfn - start_pfn), 0); | ||
209 | } | ||
210 | |||
211 | /* All of system RAM sits in node 0 for the non-NUMA case */ | ||
212 | allocate_pgdat(0); | ||
213 | node_set_online(0); | ||
214 | |||
215 | soc_mem_setup(); | ||
216 | |||
217 | for_each_online_node(i) | ||
218 | bootmem_init_one_node(i); | ||
219 | |||
220 | sparse_init(); | ||
221 | } | ||
222 | |||
223 | extern char _heap_start[]; | ||
224 | |||
225 | static void __init init_and_reserve_mem(void) | ||
226 | { | ||
227 | unsigned long start_pfn, heap_start; | ||
228 | u64 base = min_low_pfn << PAGE_SHIFT; | ||
229 | u64 size = (max_low_pfn << PAGE_SHIFT) - base; | ||
230 | |||
231 | heap_start = (unsigned long) &_heap_start; | ||
232 | |||
233 | memblock_add(base, size); | ||
234 | |||
235 | /* | ||
236 | * Partially used pages are not usable - thus | ||
237 | * we are rounding upwards: | ||
238 | */ | ||
239 | start_pfn = PFN_UP(__pa(heap_start)); | ||
240 | |||
241 | /* | ||
242 | * Reserve the kernel text. | ||
243 | */ | ||
244 | memblock_reserve(base, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - base); | ||
245 | |||
246 | #ifdef CONFIG_HIGHMEM | ||
247 | /* | ||
248 | * Add & reserve highmem, so page structures are initialised. | ||
249 | */ | ||
250 | base = highstart_pfn << PAGE_SHIFT; | ||
251 | size = (highend_pfn << PAGE_SHIFT) - base; | ||
252 | if (size) { | ||
253 | memblock_add(base, size); | ||
254 | memblock_reserve(base, size); | ||
255 | } | ||
256 | #endif | ||
257 | } | ||
258 | |||
259 | #ifdef CONFIG_HIGHMEM | ||
260 | /* | ||
261 | * Ensure we have allocated page tables in swapper_pg_dir for the | ||
262 | * fixed mappings range from 'start' to 'end'. | ||
263 | */ | ||
264 | static void __init allocate_pgtables(unsigned long start, unsigned long end) | ||
265 | { | ||
266 | pgd_t *pgd; | ||
267 | pmd_t *pmd; | ||
268 | pte_t *pte; | ||
269 | int i, j; | ||
270 | unsigned long vaddr; | ||
271 | |||
272 | vaddr = start; | ||
273 | i = pgd_index(vaddr); | ||
274 | j = pmd_index(vaddr); | ||
275 | pgd = swapper_pg_dir + i; | ||
276 | |||
277 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { | ||
278 | pmd = (pmd_t *)pgd; | ||
279 | for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) { | ||
280 | vaddr += PMD_SIZE; | ||
281 | |||
282 | if (!pmd_none(*pmd)) | ||
283 | continue; | ||
284 | |||
285 | pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE); | ||
286 | pmd_populate_kernel(&init_mm, pmd, pte); | ||
287 | } | ||
288 | j = 0; | ||
289 | } | ||
290 | } | ||
291 | |||
292 | static void __init fixedrange_init(void) | ||
293 | { | ||
294 | unsigned long vaddr, end; | ||
295 | pgd_t *pgd; | ||
296 | pud_t *pud; | ||
297 | pmd_t *pmd; | ||
298 | pte_t *pte; | ||
299 | |||
300 | /* | ||
301 | * Fixed mappings: | ||
302 | */ | ||
303 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; | ||
304 | end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; | ||
305 | allocate_pgtables(vaddr, end); | ||
306 | |||
307 | /* | ||
308 | * Permanent kmaps: | ||
309 | */ | ||
310 | vaddr = PKMAP_BASE; | ||
311 | allocate_pgtables(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP); | ||
312 | |||
313 | pgd = swapper_pg_dir + pgd_index(vaddr); | ||
314 | pud = pud_offset(pgd, vaddr); | ||
315 | pmd = pmd_offset(pud, vaddr); | ||
316 | pte = pte_offset_kernel(pmd, vaddr); | ||
317 | pkmap_page_table = pte; | ||
318 | } | ||
319 | #endif /* CONFIG_HIGHMEM */ | ||
320 | |||
321 | /* | ||
322 | * paging_init() continues the virtual memory environment setup which | ||
323 | * was begun by the code in arch/metag/kernel/setup.c. | ||
324 | */ | ||
325 | void __init paging_init(unsigned long mem_end) | ||
326 | { | ||
327 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | ||
328 | int nid; | ||
329 | |||
330 | init_and_reserve_mem(); | ||
331 | |||
332 | memblock_allow_resize(); | ||
333 | |||
334 | memblock_dump_all(); | ||
335 | |||
336 | nodes_clear(node_online_map); | ||
337 | |||
338 | init_new_context(&init_task, &init_mm); | ||
339 | |||
340 | memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); | ||
341 | |||
342 | do_init_bootmem(); | ||
343 | mmu_init(mem_end); | ||
344 | |||
345 | #ifdef CONFIG_HIGHMEM | ||
346 | fixedrange_init(); | ||
347 | kmap_init(); | ||
348 | #endif | ||
349 | |||
350 | /* Initialize the zero page to a bootmem page, already zeroed. */ | ||
351 | empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); | ||
352 | |||
353 | user_gateway_init(); | ||
354 | |||
355 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | ||
356 | |||
357 | for_each_online_node(nid) { | ||
358 | pg_data_t *pgdat = NODE_DATA(nid); | ||
359 | unsigned long low, start_pfn; | ||
360 | |||
361 | start_pfn = pgdat->bdata->node_min_pfn; | ||
362 | low = pgdat->bdata->node_low_pfn; | ||
363 | |||
364 | if (max_zone_pfns[ZONE_NORMAL] < low) | ||
365 | max_zone_pfns[ZONE_NORMAL] = low; | ||
366 | |||
367 | #ifdef CONFIG_HIGHMEM | ||
368 | max_zone_pfns[ZONE_HIGHMEM] = highend_pfn; | ||
369 | #endif | ||
370 | pr_info("Node %u: start_pfn = 0x%lx, low = 0x%lx\n", | ||
371 | nid, start_pfn, low); | ||
372 | } | ||
373 | |||
374 | free_area_init_nodes(max_zone_pfns); | ||
375 | } | ||
376 | |||
377 | void __init mem_init(void) | ||
378 | { | ||
379 | int nid; | ||
380 | |||
381 | #ifdef CONFIG_HIGHMEM | ||
382 | unsigned long tmp; | ||
383 | for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { | ||
384 | struct page *page = pfn_to_page(tmp); | ||
385 | ClearPageReserved(page); | ||
386 | init_page_count(page); | ||
387 | __free_page(page); | ||
388 | totalhigh_pages++; | ||
389 | } | ||
390 | totalram_pages += totalhigh_pages; | ||
391 | num_physpages += totalhigh_pages; | ||
392 | #endif /* CONFIG_HIGHMEM */ | ||
393 | |||
394 | for_each_online_node(nid) { | ||
395 | pg_data_t *pgdat = NODE_DATA(nid); | ||
396 | unsigned long node_pages = 0; | ||
397 | |||
398 | num_physpages += pgdat->node_present_pages; | ||
399 | |||
400 | if (pgdat->node_spanned_pages) | ||
401 | node_pages = free_all_bootmem_node(pgdat); | ||
402 | |||
403 | totalram_pages += node_pages; | ||
404 | } | ||
405 | |||
406 | pr_info("Memory: %luk/%luk available\n", | ||
407 | (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10), | ||
408 | num_physpages << (PAGE_SHIFT - 10)); | ||
409 | |||
410 | show_mem(0); | ||
411 | |||
412 | return; | ||
413 | } | ||
414 | |||
415 | static void free_init_pages(char *what, unsigned long begin, unsigned long end) | ||
416 | { | ||
417 | unsigned long addr; | ||
418 | |||
419 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | ||
420 | ClearPageReserved(virt_to_page(addr)); | ||
421 | init_page_count(virt_to_page(addr)); | ||
422 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); | ||
423 | free_page(addr); | ||
424 | totalram_pages++; | ||
425 | } | ||
426 | pr_info("Freeing %s: %luk freed\n", what, (end - begin) >> 10); | ||
427 | } | ||
428 | |||
429 | void free_initmem(void) | ||
430 | { | ||
431 | free_init_pages("unused kernel memory", | ||
432 | (unsigned long)(&__init_begin), | ||
433 | (unsigned long)(&__init_end)); | ||
434 | } | ||
435 | |||
436 | #ifdef CONFIG_BLK_DEV_INITRD | ||
437 | void free_initrd_mem(unsigned long start, unsigned long end) | ||
438 | { | ||
439 | end = end & PAGE_MASK; | ||
440 | free_init_pages("initrd memory", start, end); | ||
441 | } | ||
442 | #endif | ||
443 | |||
444 | #ifdef CONFIG_OF_FLATTREE | ||
445 | void __init early_init_dt_setup_initrd_arch(unsigned long start, | ||
446 | unsigned long end) | ||
447 | { | ||
448 | pr_err("%s(%lx, %lx)\n", | ||
449 | __func__, start, end); | ||
450 | } | ||
451 | #endif /* CONFIG_OF_FLATTREE */ | ||
diff --git a/arch/metag/mm/ioremap.c b/arch/metag/mm/ioremap.c new file mode 100644 index 000000000000..a136a435fdaa --- /dev/null +++ b/arch/metag/mm/ioremap.c | |||
@@ -0,0 +1,89 @@ | |||
1 | /* | ||
2 | * Re-map IO memory to kernel address space so that we can access it. | ||
3 | * Needed for memory-mapped I/O devices mapped outside our normal DRAM | ||
4 | * window (that is, all memory-mapped I/O devices). | ||
5 | * | ||
6 | * Copyright (C) 1995,1996 Linus Torvalds | ||
7 | * | ||
8 | * Meta port based on CRIS-port by Axis Communications AB | ||
9 | */ | ||
10 | |||
11 | #include <linux/vmalloc.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <linux/export.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/mm.h> | ||
16 | |||
17 | #include <asm/pgtable.h> | ||
18 | |||
19 | /* | ||
20 | * Remap an arbitrary physical address space into the kernel virtual | ||
21 | * address space. Needed when the kernel wants to access high addresses | ||
22 | * directly. | ||
23 | * | ||
24 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
25 | * have to convert them into an offset in a page-aligned mapping, but the | ||
26 | * caller shouldn't need to know that small detail. | ||
27 | */ | ||
28 | void __iomem *__ioremap(unsigned long phys_addr, size_t size, | ||
29 | unsigned long flags) | ||
30 | { | ||
31 | unsigned long addr; | ||
32 | struct vm_struct *area; | ||
33 | unsigned long offset, last_addr; | ||
34 | pgprot_t prot; | ||
35 | |||
36 | /* Don't allow wraparound or zero size */ | ||
37 | last_addr = phys_addr + size - 1; | ||
38 | if (!size || last_addr < phys_addr) | ||
39 | return NULL; | ||
40 | |||
41 | /* Custom region addresses are accessible and uncached by default. */ | ||
42 | if (phys_addr >= LINSYSCUSTOM_BASE && | ||
43 | phys_addr < (LINSYSCUSTOM_BASE + LINSYSCUSTOM_LIMIT)) | ||
44 | return (__force void __iomem *) phys_addr; | ||
45 | |||
46 | /* | ||
47 | * Mappings have to be page-aligned | ||
48 | */ | ||
49 | offset = phys_addr & ~PAGE_MASK; | ||
50 | phys_addr &= PAGE_MASK; | ||
51 | size = PAGE_ALIGN(last_addr+1) - phys_addr; | ||
52 | prot = __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY | | ||
53 | _PAGE_ACCESSED | _PAGE_KERNEL | _PAGE_CACHE_WIN0 | | ||
54 | flags); | ||
55 | |||
56 | /* | ||
57 | * Ok, go for it.. | ||
58 | */ | ||
59 | area = get_vm_area(size, VM_IOREMAP); | ||
60 | if (!area) | ||
61 | return NULL; | ||
62 | area->phys_addr = phys_addr; | ||
63 | addr = (unsigned long) area->addr; | ||
64 | if (ioremap_page_range(addr, addr + size, phys_addr, prot)) { | ||
65 | vunmap((void *) addr); | ||
66 | return NULL; | ||
67 | } | ||
68 | return (__force void __iomem *) (offset + (char *)addr); | ||
69 | } | ||
70 | EXPORT_SYMBOL(__ioremap); | ||
71 | |||
72 | void __iounmap(void __iomem *addr) | ||
73 | { | ||
74 | struct vm_struct *p; | ||
75 | |||
76 | if ((__force unsigned long)addr >= LINSYSCUSTOM_BASE && | ||
77 | (__force unsigned long)addr < (LINSYSCUSTOM_BASE + | ||
78 | LINSYSCUSTOM_LIMIT)) | ||
79 | return; | ||
80 | |||
81 | p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr)); | ||
82 | if (unlikely(!p)) { | ||
83 | pr_err("iounmap: bad address %p\n", addr); | ||
84 | return; | ||
85 | } | ||
86 | |||
87 | kfree(p); | ||
88 | } | ||
89 | EXPORT_SYMBOL(__iounmap); | ||
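
A minimal usage sketch for the routines above, assuming the port's <asm/io.h> exposes them through the usual ioremap()/iounmap() wrappers; the device base address, region size and register offset below are purely hypothetical:

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/types.h>

	#define DEV_PHYS_BASE	0x02004000	/* hypothetical device base */
	#define DEV_REG_SIZE	0x100		/* hypothetical region size */
	#define DEV_STATUS	0x10		/* hypothetical register offset */

	static int dev_read_status(u32 *status)
	{
		void __iomem *regs = ioremap(DEV_PHYS_BASE, DEV_REG_SIZE);

		if (!regs)
			return -ENOMEM;
		*status = readl(regs + DEV_STATUS);	/* uncached MMIO read */
		iounmap(regs);
		return 0;
	}

Note that if the physical address fell inside the LINSYSCUSTOM region, __ioremap() would hand it back directly without creating a vmalloc mapping, and __iounmap() would correspondingly ignore it.
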
diff --git a/arch/metag/mm/l2cache.c b/arch/metag/mm/l2cache.c new file mode 100644 index 000000000000..c64ee615cf90 --- /dev/null +++ b/arch/metag/mm/l2cache.c | |||
@@ -0,0 +1,192 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/delay.h> | ||
4 | |||
5 | #include <asm/l2cache.h> | ||
6 | #include <asm/metag_isa.h> | ||
7 | |||
8 | /* If non-0, then initialise the L2 cache */ | ||
9 | static int l2cache_init = 1; | ||
10 | /* If non-0, then initialise the L2 cache prefetch */ | ||
11 | static int l2cache_init_pf = 1; | ||
12 | |||
13 | int l2c_pfenable; | ||
14 | |||
15 | static volatile u32 l2c_testdata[16] __initdata __aligned(64); | ||
16 | |||
17 | static int __init parse_l2cache(char *p) | ||
18 | { | ||
19 | char *cp = p; | ||
20 | |||
21 | if (get_option(&cp, &l2cache_init) != 1) { | ||
22 | pr_err("Bad l2cache parameter (%s)\n", p); | ||
23 | return 1; | ||
24 | } | ||
25 | return 0; | ||
26 | } | ||
27 | early_param("l2cache", parse_l2cache); | ||
28 | |||
29 | static int __init parse_l2cache_pf(char *p) | ||
30 | { | ||
31 | char *cp = p; | ||
32 | |||
33 | if (get_option(&cp, &l2cache_init_pf) != 1) { | ||
34 | pr_err("Bad l2cache_pf parameter (%s)\n", p); | ||
35 | return 1; | ||
36 | } | ||
37 | return 0; | ||
38 | } | ||
39 | early_param("l2cache_pf", parse_l2cache_pf); | ||
40 | |||
41 | static int __init meta_l2c_setup(void) | ||
42 | { | ||
43 | /* | ||
44 | * If the L2 cache isn't even present, don't do anything, but say so in | ||
45 | * the log. | ||
46 | */ | ||
47 | if (!meta_l2c_is_present()) { | ||
48 | pr_info("L2 Cache: Not present\n"); | ||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * Check whether the line size is recognised. | ||
54 | */ | ||
55 | if (!meta_l2c_linesize()) { | ||
56 | pr_warn_once("L2 Cache: unknown line size id (config=0x%08x)\n", | ||
57 | meta_l2c_config()); | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * Initialise state. | ||
62 | */ | ||
63 | l2c_pfenable = _meta_l2c_pf_is_enabled(); | ||
64 | |||
65 | /* | ||
66 | * Enable the L2 cache and print to log whether it was already enabled | ||
67 | * by the bootloader. | ||
68 | */ | ||
69 | if (l2cache_init) { | ||
70 | pr_info("L2 Cache: Enabling... "); | ||
71 | if (meta_l2c_enable()) | ||
72 | pr_cont("already enabled\n"); | ||
73 | else | ||
74 | pr_cont("done\n"); | ||
75 | } else { | ||
76 | pr_info("L2 Cache: Not enabling\n"); | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * Enable L2 cache prefetch. | ||
81 | */ | ||
82 | if (l2cache_init_pf) { | ||
83 | pr_info("L2 Cache: Enabling prefetch... "); | ||
84 | if (meta_l2c_pf_enable(1)) | ||
85 | pr_cont("already enabled\n"); | ||
86 | else | ||
87 | pr_cont("done\n"); | ||
88 | } else { | ||
89 | pr_info("L2 Cache: Not enabling prefetch\n"); | ||
90 | } | ||
91 | |||
92 | return 0; | ||
93 | } | ||
94 | core_initcall(meta_l2c_setup); | ||
95 | |||
96 | int meta_l2c_disable(void) | ||
97 | { | ||
98 | unsigned long flags; | ||
99 | int en; | ||
100 | |||
101 | if (!meta_l2c_is_present()) | ||
102 | return 1; | ||
103 | |||
104 | /* | ||
105 | * Prevent other threads writing during the writeback, otherwise the | ||
106 | * writes will get "lost" when the L2 is disabled. | ||
107 | */ | ||
108 | __global_lock2(flags); | ||
109 | en = meta_l2c_is_enabled(); | ||
110 | if (likely(en)) { | ||
111 | _meta_l2c_pf_enable(0); | ||
112 | wr_fence(); | ||
113 | _meta_l2c_purge(); | ||
114 | _meta_l2c_enable(0); | ||
115 | } | ||
116 | __global_unlock2(flags); | ||
117 | |||
118 | return !en; | ||
119 | } | ||
120 | |||
121 | int meta_l2c_enable(void) | ||
122 | { | ||
123 | unsigned long flags; | ||
124 | int en; | ||
125 | |||
126 | if (!meta_l2c_is_present()) | ||
127 | return 0; | ||
128 | |||
129 | /* | ||
130 | * Init (clearing the L2) can happen while the L2 is disabled, so other | ||
131 | * threads are safe to continue executing, however we must not init the | ||
132 | * cache if it's already enabled (dirty lines would be discarded), so | ||
133 | * this operation should still be atomic with other threads. | ||
134 | */ | ||
135 | __global_lock1(flags); | ||
136 | en = meta_l2c_is_enabled(); | ||
137 | if (likely(!en)) { | ||
138 | _meta_l2c_init(); | ||
139 | _meta_l2c_enable(1); | ||
140 | _meta_l2c_pf_enable(l2c_pfenable); | ||
141 | } | ||
142 | __global_unlock1(flags); | ||
143 | |||
144 | return en; | ||
145 | } | ||
146 | |||
147 | int meta_l2c_pf_enable(int pfenable) | ||
148 | { | ||
149 | unsigned long flags; | ||
150 | int en = l2c_pfenable; | ||
151 | |||
152 | if (!meta_l2c_is_present()) | ||
153 | return 0; | ||
154 | |||
155 | /* | ||
156 | * We read-modify-write the enable register, so this operation must be | ||
157 | * atomic with other threads. | ||
158 | */ | ||
159 | __global_lock1(flags); | ||
160 | en = l2c_pfenable; | ||
161 | l2c_pfenable = pfenable; | ||
162 | if (meta_l2c_is_enabled()) | ||
163 | _meta_l2c_pf_enable(pfenable); | ||
164 | __global_unlock1(flags); | ||
165 | |||
166 | return en; | ||
167 | } | ||
168 | |||
169 | int meta_l2c_flush(void) | ||
170 | { | ||
171 | unsigned long flags; | ||
172 | int en; | ||
173 | |||
174 | /* | ||
175 | * Prevent other threads writing during the writeback. This also | ||
176 | * involves read modify writes. | ||
177 | */ | ||
178 | __global_lock2(flags); | ||
179 | en = meta_l2c_is_enabled(); | ||
180 | if (likely(en)) { | ||
181 | _meta_l2c_pf_enable(0); | ||
182 | wr_fence(); | ||
183 | _meta_l2c_purge(); | ||
184 | _meta_l2c_enable(0); | ||
185 | _meta_l2c_init(); | ||
186 | _meta_l2c_enable(1); | ||
187 | _meta_l2c_pf_enable(l2c_pfenable); | ||
188 | } | ||
189 | __global_unlock2(flags); | ||
190 | |||
191 | return !en; | ||
192 | } | ||
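
The two early_param() hooks above make both behaviours controllable from the kernel command line, for example (illustrative values):

	l2cache=0                # leave the L2 cache disabled at boot
	l2cache=1 l2cache_pf=0   # enable the L2 but not its prefetcher
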
diff --git a/arch/metag/mm/maccess.c b/arch/metag/mm/maccess.c new file mode 100644 index 000000000000..eba2cfc935b1 --- /dev/null +++ b/arch/metag/mm/maccess.c | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * safe read and write memory routines callable while atomic | ||
3 | * | ||
4 | * Copyright 2012 Imagination Technologies | ||
5 | */ | ||
6 | |||
7 | #include <linux/uaccess.h> | ||
8 | #include <asm/io.h> | ||
9 | |||
10 | /* | ||
11 | * The generic probe_kernel_write() uses the user copy code which can split the | ||
12 | * writes if the source is unaligned, and repeats writes to make exceptions | ||
13 | * precise. We override it here to avoid these things happening to memory- | ||
14 | * mapped I/O, where they could have undesired effects. | ||
15 | * Due to the use of the CACHERD instruction this only works on Meta2 onwards. | ||
16 | */ | ||
17 | #ifdef CONFIG_METAG_META21 | ||
18 | long probe_kernel_write(void *dst, const void *src, size_t size) | ||
19 | { | ||
20 | unsigned long ldst = (unsigned long)dst; | ||
21 | void __iomem *iodst = (void __iomem *)dst; | ||
22 | unsigned long lsrc = (unsigned long)src; | ||
23 | const u8 *psrc = (u8 *)src; | ||
24 | unsigned int pte, i; | ||
25 | u8 bounce[8] __aligned(8); | ||
26 | |||
27 | if (!size) | ||
28 | return 0; | ||
29 | |||
30 | /* Use the write-combine bit to decide if the destination is MMIO. */ | ||
31 | pte = __builtin_meta2_cacherd(dst); | ||
32 | |||
33 | /* Check the mapping is valid and writeable. */ | ||
34 | if ((pte & (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT)) | ||
35 | != (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT)) | ||
36 | return -EFAULT; | ||
37 | |||
38 | /* Fall back to generic version for cases we're not interested in. */ | ||
39 | if (pte & MMCU_ENTRY_WRC_BIT || /* write combined memory */ | ||
40 | (ldst & (size - 1)) || /* destination unaligned */ | ||
41 | size > 8 || /* more than max write size */ | ||
42 | (size & (size - 1))) /* non power of 2 size */ | ||
43 | return __probe_kernel_write(dst, src, size); | ||
44 | |||
45 | /* If src is unaligned, copy to the aligned bounce buffer first. */ | ||
46 | if (lsrc & (size - 1)) { | ||
47 | for (i = 0; i < size; ++i) | ||
48 | bounce[i] = psrc[i]; | ||
49 | psrc = bounce; | ||
50 | } | ||
51 | |||
52 | switch (size) { | ||
53 | case 1: | ||
54 | writeb(*psrc, iodst); | ||
55 | break; | ||
56 | case 2: | ||
57 | writew(*(const u16 *)psrc, iodst); | ||
58 | break; | ||
59 | case 4: | ||
60 | writel(*(const u32 *)psrc, iodst); | ||
61 | break; | ||
62 | case 8: | ||
63 | writeq(*(const u64 *)psrc, iodst); | ||
64 | break; | ||
65 | } | ||
66 | return 0; | ||
67 | } | ||
68 | #endif | ||
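
Callers see the same interface as the generic probe_kernel_write(); a hedged sketch of typical use, with the target address supplied by the caller:

	#include <linux/types.h>
	#include <linux/uaccess.h>

	/* Write a 32-bit value to a kernel or MMIO address that may fault. */
	static int poke_word(void *addr, u32 val)
	{
		/* Aligned power-of-2 size <= 8, so the single-write path
		 * above is taken when addr maps device memory. */
		return probe_kernel_write(addr, &val, sizeof(val)) ? -EFAULT : 0;
	}
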
diff --git a/arch/metag/mm/mmu-meta1.c b/arch/metag/mm/mmu-meta1.c new file mode 100644 index 000000000000..91f4255bcb5c --- /dev/null +++ b/arch/metag/mm/mmu-meta1.c | |||
@@ -0,0 +1,157 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005,2006,2007,2008,2009 Imagination Technologies | ||
3 | * | ||
4 | * Meta 1 MMU handling code. | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/sched.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/io.h> | ||
11 | |||
12 | #include <asm/mmu.h> | ||
13 | |||
14 | #define DM3_BASE (LINSYSDIRECT_BASE + (MMCU_DIRECTMAPn_ADDR_SCALE * 3)) | ||
15 | |||
16 | /* | ||
17 | * This contains the physical address of the top level 2k pgd table. | ||
18 | */ | ||
19 | static unsigned long mmu_base_phys; | ||
20 | |||
21 | /* | ||
22 | * Given a physical address, return a mapped virtual address that can be used | ||
23 | * to access that location. | ||
24 | * In practice, we use the DirectMap region to make this happen. | ||
25 | */ | ||
26 | static unsigned long map_addr(unsigned long phys) | ||
27 | { | ||
28 | static unsigned long dm_base = 0xFFFFFFFF; | ||
29 | int offset; | ||
30 | |||
31 | offset = phys - dm_base; | ||
32 | |||
33 | /* Are we in the current map range? */ | ||
34 | if ((offset < 0) || (offset >= MMCU_DIRECTMAPn_ADDR_SCALE)) { | ||
35 | /* Calculate new DM area */ | ||
36 | dm_base = phys & ~(MMCU_DIRECTMAPn_ADDR_SCALE - 1); | ||
37 | |||
38 | /* Actually map it in! */ | ||
39 | metag_out32(dm_base, MMCU_DIRECTMAP3_ADDR); | ||
40 | |||
41 | /* And calculate how far into that area our reference is */ | ||
42 | offset = phys - dm_base; | ||
43 | } | ||
44 | |||
45 | return DM3_BASE + offset; | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * Return the physical address of the base of our pgd table. | ||
50 | */ | ||
51 | static inline unsigned long __get_mmu_base(void) | ||
52 | { | ||
53 | unsigned long base_phys; | ||
54 | unsigned int stride; | ||
55 | |||
56 | if (is_global_space(PAGE_OFFSET)) | ||
57 | stride = 4; | ||
58 | else | ||
59 | stride = hard_processor_id(); /* [0..3] */ | ||
60 | |||
61 | base_phys = metag_in32(MMCU_TABLE_PHYS_ADDR); | ||
62 | base_phys += (0x800 * stride); | ||
63 | |||
64 | return base_phys; | ||
65 | } | ||
66 | |||
67 | /* Given a virtual address, return the virtual address of the relevant pgd */ | ||
68 | static unsigned long pgd_entry_addr(unsigned long virt) | ||
69 | { | ||
70 | unsigned long pgd_phys; | ||
71 | unsigned long pgd_virt; | ||
72 | |||
73 | if (!mmu_base_phys) | ||
74 | mmu_base_phys = __get_mmu_base(); | ||
75 | |||
76 | /* | ||
77 | * Are we trying to map a global address? If so, then index | ||
78 | * the global pgd table instead of our local one. | ||
79 | */ | ||
80 | if (is_global_space(virt)) { | ||
81 | /* Scale into 2gig map */ | ||
82 | virt &= ~0x80000000; | ||
83 | } | ||
84 | |||
85 | /* Base of the pgd table plus our 4Meg entry, 4 bytes each */ | ||
86 | pgd_phys = mmu_base_phys + ((virt >> PGDIR_SHIFT) * 4); | ||
87 | |||
88 | pgd_virt = map_addr(pgd_phys); | ||
89 | |||
90 | return pgd_virt; | ||
91 | } | ||
92 | |||
93 | /* Given a virtual address, return the virtual address of the relevant pte */ | ||
94 | static unsigned long pgtable_entry_addr(unsigned long virt) | ||
95 | { | ||
96 | unsigned long pgtable_phys; | ||
97 | unsigned long pgtable_virt, pte_virt; | ||
98 | |||
99 | /* Find the physical address of the 4MB page table */ | ||
100 | pgtable_phys = metag_in32(pgd_entry_addr(virt)) & MMCU_ENTRY_ADDR_BITS; | ||
101 | |||
102 | /* Map it to a virtual address */ | ||
103 | pgtable_virt = map_addr(pgtable_phys); | ||
104 | |||
105 | /* And index into it for our pte */ | ||
106 | pte_virt = pgtable_virt + ((virt >> PAGE_SHIFT) & 0x3FF) * 4; | ||
107 | |||
108 | return pte_virt; | ||
109 | } | ||
110 | |||
111 | unsigned long mmu_read_first_level_page(unsigned long vaddr) | ||
112 | { | ||
113 | return metag_in32(pgd_entry_addr(vaddr)); | ||
114 | } | ||
115 | |||
116 | unsigned long mmu_read_second_level_page(unsigned long vaddr) | ||
117 | { | ||
118 | return metag_in32(pgtable_entry_addr(vaddr)); | ||
119 | } | ||
120 | |||
121 | unsigned long mmu_get_base(void) | ||
122 | { | ||
123 | static unsigned long __base; | ||
124 | |||
125 | /* Find the base of our MMU pgd table */ | ||
126 | if (!__base) | ||
127 | __base = pgd_entry_addr(0); | ||
128 | |||
129 | return __base; | ||
130 | } | ||
131 | |||
132 | void __init mmu_init(unsigned long mem_end) | ||
133 | { | ||
134 | unsigned long entry, addr; | ||
135 | pgd_t *p_swapper_pg_dir; | ||
136 | |||
137 | /* | ||
138 | * Now copy any MMU pgd entries already in the MMU page tables over | ||
139 | * to our root init process (swapper_pg_dir) map. This map is | ||
140 | * then inherited by all other processes, which means all processes | ||
141 | * inherit a map of the kernel space. | ||
142 | */ | ||
143 | addr = PAGE_OFFSET; | ||
144 | entry = pgd_index(PAGE_OFFSET); | ||
145 | p_swapper_pg_dir = pgd_offset_k(0) + entry; | ||
146 | |||
147 | while (addr <= META_MEMORY_LIMIT) { | ||
148 | unsigned long pgd_entry; | ||
149 | /* copy over the current MMU value */ | ||
150 | pgd_entry = mmu_read_first_level_page(addr); | ||
151 | pgd_val(*p_swapper_pg_dir) = pgd_entry; | ||
152 | |||
153 | p_swapper_pg_dir++; | ||
154 | addr += PGDIR_SIZE; | ||
155 | entry++; | ||
156 | } | ||
157 | } | ||
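
To make the map_addr() windowing concrete: taking a window size (MMCU_DIRECTMAPn_ADDR_SCALE) of 4MB purely for illustration, a lookup of physical address 0x00C12345 would compute dm_base = 0x00C00000, program that base into MMCU_DIRECTMAP3_ADDR, and return DM3_BASE + 0x12345; any later lookup inside the same 4MB window then reuses the mapping without touching the hardware register again.
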
diff --git a/arch/metag/mm/mmu-meta2.c b/arch/metag/mm/mmu-meta2.c new file mode 100644 index 000000000000..81dcbb0bba34 --- /dev/null +++ b/arch/metag/mm/mmu-meta2.c | |||
@@ -0,0 +1,207 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008,2009,2010,2011 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * Meta 2 enhanced mode MMU handling code. | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/mm.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/io.h> | ||
12 | #include <linux/bootmem.h> | ||
13 | #include <linux/syscore_ops.h> | ||
14 | |||
15 | #include <asm/mmu.h> | ||
16 | #include <asm/mmu_context.h> | ||
17 | |||
18 | unsigned long mmu_read_first_level_page(unsigned long vaddr) | ||
19 | { | ||
20 | unsigned int cpu = hard_processor_id(); | ||
21 | unsigned long offset, linear_base, linear_limit; | ||
22 | unsigned int phys0; | ||
23 | pgd_t *pgd, entry; | ||
24 | |||
25 | if (is_global_space(vaddr)) | ||
26 | vaddr &= ~0x80000000; | ||
27 | |||
28 | offset = vaddr >> PGDIR_SHIFT; | ||
29 | |||
30 | phys0 = metag_in32(mmu_phys0_addr(cpu)); | ||
31 | |||
32 | /* Top bit of linear base is always zero. */ | ||
33 | linear_base = (phys0 >> PGDIR_SHIFT) & 0x1ff; | ||
34 | |||
35 | /* Limit in the range 0 (4MB) to 9 (2GB). */ | ||
36 | linear_limit = 1 << ((phys0 >> 8) & 0xf); | ||
37 | linear_limit += linear_base; | ||
38 | |||
39 | /* | ||
40 | * If offset is below linear base or above the limit then no | ||
41 | * mapping exists. | ||
42 | */ | ||
43 | if (offset < linear_base || offset > linear_limit) | ||
44 | return 0; | ||
45 | |||
46 | offset -= linear_base; | ||
47 | pgd = (pgd_t *)mmu_get_base(); | ||
48 | entry = pgd[offset]; | ||
49 | |||
50 | return pgd_val(entry); | ||
51 | } | ||
52 | |||
53 | unsigned long mmu_read_second_level_page(unsigned long vaddr) | ||
54 | { | ||
55 | return __builtin_meta2_cacherd((void *)(vaddr & PAGE_MASK)); | ||
56 | } | ||
57 | |||
58 | unsigned long mmu_get_base(void) | ||
59 | { | ||
60 | unsigned int cpu = hard_processor_id(); | ||
61 | unsigned long stride; | ||
62 | |||
63 | stride = cpu * LINSYSMEMTnX_STRIDE; | ||
64 | |||
65 | /* | ||
66 | * Bits 18:2 of the MMCU_TnLocal_TABLE_PHYS1 register should be | ||
67 | * used as an offset to the start of the top-level pgd table. | ||
68 | */ | ||
69 | stride += (metag_in32(mmu_phys1_addr(cpu)) & 0x7fffc); | ||
70 | |||
71 | if (is_global_space(PAGE_OFFSET)) | ||
72 | stride += LINSYSMEMTXG_OFFSET; | ||
73 | |||
74 | return LINSYSMEMT0L_BASE + stride; | ||
75 | } | ||
76 | |||
77 | #define FIRST_LEVEL_MASK 0xffffffc0 | ||
78 | #define SECOND_LEVEL_MASK 0xfffff000 | ||
79 | #define SECOND_LEVEL_ALIGN 64 | ||
80 | |||
81 | static void repriv_mmu_tables(void) | ||
82 | { | ||
83 | unsigned long phys0_addr; | ||
84 | unsigned int g; | ||
85 | |||
86 | /* | ||
87 | * Check that all the mmu table regions are priv protected, and if not | ||
88 | * fix them and emit a warning. If we left them without priv protection | ||
89 | * then userland processes would have access to a 2M window into | ||
90 | * physical memory near where the page tables are. | ||
91 | */ | ||
92 | phys0_addr = MMCU_T0LOCAL_TABLE_PHYS0; | ||
93 | for (g = 0; g < 2; ++g) { | ||
94 | unsigned int t, phys0; | ||
95 | unsigned long flags; | ||
96 | for (t = 0; t < 4; ++t) { | ||
97 | __global_lock2(flags); | ||
98 | phys0 = metag_in32(phys0_addr); | ||
99 | if ((phys0 & _PAGE_PRESENT) && !(phys0 & _PAGE_PRIV)) { | ||
100 | pr_warn("Fixing priv protection on T%d %s MMU table region\n", | ||
101 | t, | ||
102 | g ? "global" : "local"); | ||
103 | phys0 |= _PAGE_PRIV; | ||
104 | metag_out32(phys0, phys0_addr); | ||
105 | } | ||
106 | __global_unlock2(flags); | ||
107 | |||
108 | phys0_addr += MMCU_TnX_TABLE_PHYSX_STRIDE; | ||
109 | } | ||
110 | |||
111 | phys0_addr += MMCU_TXG_TABLE_PHYSX_OFFSET | ||
112 | - 4*MMCU_TnX_TABLE_PHYSX_STRIDE; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | #ifdef CONFIG_METAG_SUSPEND_MEM | ||
117 | static void mmu_resume(void) | ||
118 | { | ||
119 | /* | ||
120 | * If a full suspend to RAM has happened then the original bad MMU table | ||
121 | * priv may have been restored, so repriv them again. | ||
122 | */ | ||
123 | repriv_mmu_tables(); | ||
124 | } | ||
125 | #else | ||
126 | #define mmu_resume NULL | ||
127 | #endif /* CONFIG_METAG_SUSPEND_MEM */ | ||
128 | |||
129 | static struct syscore_ops mmu_syscore_ops = { | ||
130 | .resume = mmu_resume, | ||
131 | }; | ||
132 | |||
133 | void __init mmu_init(unsigned long mem_end) | ||
134 | { | ||
135 | unsigned long entry, addr; | ||
136 | pgd_t *p_swapper_pg_dir; | ||
137 | #ifdef CONFIG_KERNEL_4M_PAGES | ||
138 | unsigned long mem_size = mem_end - PAGE_OFFSET; | ||
139 | unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22); | ||
140 | unsigned int second_level_entry = 0; | ||
141 | unsigned long *second_level_table; | ||
142 | #endif | ||
143 | |||
144 | /* | ||
145 | * Now copy any MMU pgd entries already in the MMU page tables over | ||
146 | * to our root init process (swapper_pg_dir) map. This map is | ||
147 | * then inherited by all other processes, which means all processes | ||
148 | * inherit a map of the kernel space. | ||
149 | */ | ||
150 | addr = META_MEMORY_BASE; | ||
151 | entry = pgd_index(META_MEMORY_BASE); | ||
152 | p_swapper_pg_dir = pgd_offset_k(0) + entry; | ||
153 | |||
154 | while (entry < (PTRS_PER_PGD - pgd_index(META_MEMORY_BASE))) { | ||
155 | unsigned long pgd_entry; | ||
156 | /* copy over the current MMU value */ | ||
157 | pgd_entry = mmu_read_first_level_page(addr); | ||
158 | pgd_val(*p_swapper_pg_dir) = pgd_entry; | ||
159 | |||
160 | p_swapper_pg_dir++; | ||
161 | addr += PGDIR_SIZE; | ||
162 | entry++; | ||
163 | } | ||
164 | |||
165 | #ifdef CONFIG_KERNEL_4M_PAGES | ||
166 | /* | ||
167 | * At this point we can also map the kernel with 4MB pages to | ||
168 | * reduce TLB pressure. | ||
169 | */ | ||
170 | second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages); | ||
171 | |||
172 | addr = PAGE_OFFSET; | ||
173 | entry = pgd_index(PAGE_OFFSET); | ||
174 | p_swapper_pg_dir = pgd_offset_k(0) + entry; | ||
175 | |||
176 | while (pages > 0) { | ||
177 | unsigned long phys_addr, second_level_phys; | ||
178 | pte_t *pte = (pte_t *)&second_level_table[second_level_entry]; | ||
179 | |||
180 | phys_addr = __pa(addr); | ||
181 | |||
182 | second_level_phys = __pa(pte); | ||
183 | |||
184 | pgd_val(*p_swapper_pg_dir) = ((second_level_phys & | ||
185 | FIRST_LEVEL_MASK) | | ||
186 | _PAGE_SZ_4M | | ||
187 | _PAGE_PRESENT); | ||
188 | |||
189 | pte_val(*pte) = ((phys_addr & SECOND_LEVEL_MASK) | | ||
190 | _PAGE_PRESENT | _PAGE_DIRTY | | ||
191 | _PAGE_ACCESSED | _PAGE_WRITE | | ||
192 | _PAGE_CACHEABLE | _PAGE_KERNEL); | ||
193 | |||
194 | p_swapper_pg_dir++; | ||
195 | addr += PGDIR_SIZE; | ||
196 | /* Second level pages must be 64-byte aligned. */ | ||
197 | second_level_entry += (SECOND_LEVEL_ALIGN / | ||
198 | sizeof(unsigned long)); | ||
199 | pages--; | ||
200 | } | ||
201 | load_pgd(swapper_pg_dir, hard_processor_id()); | ||
202 | flush_tlb_all(); | ||
203 | #endif | ||
204 | |||
205 | repriv_mmu_tables(); | ||
206 | register_syscore_ops(&mmu_syscore_ops); | ||
207 | } | ||
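
Decoding the PHYS0 fields in mmu_read_first_level_page() with assumed values (taking PGDIR_SHIFT as 22, i.e. 4MB pgd entries): phys0 = 0x40000400 gives linear_base = (phys0 >> 22) & 0x1ff = 0x100 and linear_limit = 0x100 + (1 << ((phys0 >> 8) & 0xf)) = 0x110, describing a 64MB window of 4MB pgd slots starting 1GB into the local address space; a vaddr whose pgd index falls outside that window reads back as 0 (no mapping).
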
diff --git a/arch/metag/mm/numa.c b/arch/metag/mm/numa.c new file mode 100644 index 000000000000..9ae578c9b620 --- /dev/null +++ b/arch/metag/mm/numa.c | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * Multiple memory node support for Meta machines | ||
3 | * | ||
4 | * Copyright (C) 2007 Paul Mundt | ||
5 | * Copyright (C) 2010 Imagination Technologies Ltd. | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | ||
11 | #include <linux/export.h> | ||
12 | #include <linux/bootmem.h> | ||
13 | #include <linux/memblock.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/numa.h> | ||
16 | #include <linux/pfn.h> | ||
17 | #include <asm/sections.h> | ||
18 | |||
19 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; | ||
20 | EXPORT_SYMBOL_GPL(node_data); | ||
21 | |||
22 | extern char _heap_start[]; | ||
23 | |||
24 | /* | ||
25 | * On Meta machines the conventional approach is to stash system RAM | ||
26 | * in node 0, and other memory blocks into node 1 and up, ordered by | ||
27 | * latency. Each node's pgdat is node-local at the beginning of the node, | ||
28 | * immediately followed by the node mem map. | ||
29 | */ | ||
30 | void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) | ||
31 | { | ||
32 | unsigned long bootmap_pages, bootmem_paddr; | ||
33 | unsigned long start_pfn, end_pfn; | ||
34 | unsigned long pgdat_paddr; | ||
35 | |||
36 | /* Don't allow bogus node assignment */ | ||
37 | BUG_ON(nid >= MAX_NUMNODES || nid <= 0); | ||
38 | |||
39 | start_pfn = start >> PAGE_SHIFT; | ||
40 | end_pfn = end >> PAGE_SHIFT; | ||
41 | |||
42 | memblock_add(start, end - start); | ||
43 | |||
44 | memblock_set_node(PFN_PHYS(start_pfn), | ||
45 | PFN_PHYS(end_pfn - start_pfn), nid); | ||
46 | |||
47 | /* Node-local pgdat */ | ||
48 | pgdat_paddr = memblock_alloc_base(sizeof(struct pglist_data), | ||
49 | SMP_CACHE_BYTES, end); | ||
50 | NODE_DATA(nid) = __va(pgdat_paddr); | ||
51 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); | ||
52 | |||
53 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; | ||
54 | NODE_DATA(nid)->node_start_pfn = start_pfn; | ||
55 | NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; | ||
56 | |||
57 | /* Node-local bootmap */ | ||
58 | bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); | ||
59 | bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT, | ||
60 | PAGE_SIZE, end); | ||
61 | init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, | ||
62 | start_pfn, end_pfn); | ||
63 | |||
64 | free_bootmem_with_active_regions(nid, end_pfn); | ||
65 | |||
66 | /* Reserve the pgdat and bootmap space with the bootmem allocator */ | ||
67 | reserve_bootmem_node(NODE_DATA(nid), pgdat_paddr & PAGE_MASK, | ||
68 | sizeof(struct pglist_data), BOOTMEM_DEFAULT); | ||
69 | reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr, | ||
70 | bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT); | ||
71 | |||
72 | /* It's up */ | ||
73 | node_set_online(nid); | ||
74 | |||
75 | /* Kick sparsemem */ | ||
76 | sparse_memory_present_with_active_regions(nid); | ||
77 | } | ||
78 | |||
79 | void __init __weak soc_mem_setup(void) | ||
80 | { | ||
81 | } | ||
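
SoC code can override the weak soc_mem_setup() above to register extra memory blocks; a hedged sketch, with a hypothetical node-1 address range:

	#include <linux/init.h>

	extern void __init setup_bootmem_node(int nid, unsigned long start,
					      unsigned long end);

	/* Example override: register a block of higher-latency on-chip
	 * memory as node 1. The address range is illustrative only. */
	void __init soc_mem_setup(void)
	{
		setup_bootmem_node(1, 0xe0000000, 0xe0800000);
	}
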
diff --git a/arch/metag/tbx/Makefile b/arch/metag/tbx/Makefile new file mode 100644 index 000000000000..e994239e518c --- /dev/null +++ b/arch/metag/tbx/Makefile | |||
@@ -0,0 +1,21 @@ | |||
1 | # | ||
2 | # Makefile for TBX library files. | ||
3 | # | ||
4 | |||
5 | asflags-y += -mmetac=2.1 -Wa,-mfpu=metac21 -mdsp | ||
6 | asflags-$(CONFIG_SMP) += -DTBX_PERCPU_SP_SAVE | ||
7 | |||
8 | ccflags-y += -mmetac=2.1 | ||
9 | |||
10 | lib-y += tbicore.o | ||
11 | lib-y += tbictx.o | ||
12 | lib-y += tbidefr.o | ||
13 | lib-y += tbilogf.o | ||
14 | lib-y += tbipcx.o | ||
15 | lib-y += tbiroot.o | ||
16 | lib-y += tbisoft.o | ||
17 | lib-y += tbistring.o | ||
18 | lib-y += tbitimer.o | ||
19 | |||
20 | lib-$(CONFIG_METAG_DSP) += tbidspram.o | ||
21 | lib-$(CONFIG_METAG_FPU) += tbictxfpu.o | ||
diff --git a/arch/metag/tbx/tbicore.S b/arch/metag/tbx/tbicore.S new file mode 100644 index 000000000000..a0838ebcb433 --- /dev/null +++ b/arch/metag/tbx/tbicore.S | |||
@@ -0,0 +1,136 @@ | |||
1 | /* | ||
2 | * tbicore.S | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Core functions needed to support use of the thread binary interface for META | ||
11 | * processors | ||
12 | */ | ||
13 | |||
14 | .file "tbicore.S" | ||
15 | /* Get data structures and defines from the TBI C header */ | ||
16 | #include <asm/metag_mem.h> | ||
17 | #include <asm/metag_regs.h> | ||
18 | #include <asm/tbx.h> | ||
19 | |||
20 | .data | ||
21 | .balign 8 | ||
22 | .global ___pTBISegs | ||
23 | .type ___pTBISegs,object | ||
24 | ___pTBISegs: | ||
25 | .quad 0 /* Segment list pointer with its */ | ||
26 | .size ___pTBISegs,.-___pTBISegs | ||
27 | /* own id or spin-lock location */ | ||
28 | /* | ||
29 | * Return ___pTBISegs value specific to privilege level - not very complicated | ||
30 | * at the moment | ||
31 | * | ||
32 | * Register Usage: D0Re0 is the result, D1Re0 is used as a scratch | ||
33 | */ | ||
34 | .text | ||
35 | .balign 4 | ||
36 | .global ___TBISegList | ||
37 | .type ___TBISegList,function | ||
38 | ___TBISegList: | ||
39 | MOVT A1LbP,#HI(___pTBISegs) | ||
40 | ADD A1LbP,A1LbP,#LO(___pTBISegs) | ||
41 | GETL D0Re0,D1Re0,[A1LbP] | ||
42 | MOV PC,D1RtP | ||
43 | .size ___TBISegList,.-___TBISegList | ||
44 | |||
45 | /* | ||
46 | * Search the segment list for a match given Id, pStart can be NULL | ||
47 | * | ||
48 | * Register Usage: D1Ar1 is pSeg, D0Ar2 is Id, D0Re0 is the result | ||
49 | * D0Ar4, D1Ar3 are used as a scratch | ||
50 | * NB: The PSTAT bit of Id in D0Ar2 may be toggled | ||
51 | */ | ||
52 | .text | ||
53 | .balign 4 | ||
54 | .global ___TBIFindSeg | ||
55 | .type ___TBIFindSeg,function | ||
56 | ___TBIFindSeg: | ||
57 | MOVT A1LbP,#HI(___pTBISegs) | ||
58 | ADD A1LbP,A1LbP,#LO(___pTBISegs) | ||
59 | GETL D1Ar3,D0Ar4,[A1LbP] /* Read segment list head */ | ||
60 | MOV D0Re0,TXSTATUS /* What priv level are we at? */ | ||
61 | CMP D1Ar1,#0 /* Is pStart provided? */ | ||
62 | /* Disable privilege adaption for now */ | ||
63 | ANDT D0Re0,D0Re0,#0 /*HI(TXSTATUS_PSTAT_BIT) ; Is PSTAT set? Zero if not */ | ||
64 | LSL D0Re0,D0Re0,#(TBID_PSTAT_S-TXSTATUS_PSTAT_S) | ||
65 | XOR D0Ar2,D0Ar2,D0Re0 /* Toggle Id PSTAT if privileged */ | ||
66 | MOVNZ D1Ar3,D1Ar1 /* Use pStart if provided */ | ||
67 | $LFindSegLoop: | ||
68 | ADDS D0Re0,D1Ar3,#0 /* End of list? Load result into D0Re0 */ | ||
69 | MOVZ PC,D1RtP /* If result is NULL we leave */ | ||
70 | GETL D1Ar3,D0Ar4,[D1Ar3] /* Read pLink and Id */ | ||
71 | CMP D0Ar4,D0Ar2 /* Does it match? */ | ||
72 | BNZ $LFindSegLoop /* Loop if there is no match */ | ||
73 | TST D0Re0,D0Re0 /* Clear zero flag - we found it! */ | ||
74 | MOV PC,D1RtP /* Return */ | ||
75 | .size ___TBIFindSeg,.-___TBIFindSeg | ||
76 | |||
77 | /* Useful offsets to encode the lower bits of the lock/unlock addresses */ | ||
78 | #define UON (LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFF8) | ||
79 | #define UOFF (LINSYSEVENT_WR_ATOMIC_UNLOCK & 0xFFF8) | ||
80 | |||
81 | /* | ||
82 | * Perform a whole spin-lock sequence as used by the TBISignal routine | ||
83 | * | ||
84 | * Register Usage: D1Ar1 is pLock, D0Ar2 is Mask, D0Re0 is the result | ||
85 | * (All other usage due to ___TBIPoll - D0Ar6, D1Re0) | ||
86 | */ | ||
87 | .text | ||
88 | .balign 4 | ||
89 | .global ___TBISpin | ||
90 | .type ___TBISpin,function | ||
91 | ___TBISpin: | ||
92 | SETL [A0StP++],D0FrT,D1RtP /* Save our return address */ | ||
93 | ORS D0Re0,D0Re0,#1 /* Clear zero flag */ | ||
94 | MOV D1RtP,PC /* Setup return address to form loop */ | ||
95 | $LSpinLoop: | ||
96 | BNZ ___TBIPoll /* Keep repeating if fail to set */ | ||
97 | GETL D0FrT,D1RtP,[--A0StP] /* Restore return address */ | ||
98 | MOV PC,D1RtP /* Return */ | ||
99 | .size ___TBISpin,.-___TBISpin | ||
100 | |||
101 | /* | ||
102 | * Perform an attempt to gain access to a spin-lock and set some bits | ||
103 | * | ||
104 | * Register Usage: D1Ar1 is pLock, D0Ar2 is Mask, D0Re0 is the result | ||
105 | * !!On return Zero flag is SET if we are successful!! | ||
106 | * A0.3 is used to hold base address of system event region | ||
107 | * D1Re0 used to hold TXMASKI while interrupts are off | ||
108 | */ | ||
109 | .text | ||
110 | .balign 4 | ||
111 | .global ___TBIPoll | ||
112 | .type ___TBIPoll,function | ||
113 | ___TBIPoll: | ||
114 | MOV D1Re0,#0 /* Prepare to disable ints */ | ||
115 | MOVT A0.3,#HI(LINSYSEVENT_WR_ATOMIC_LOCK) | ||
116 | SWAP D1Re0,TXMASKI /* Really stop ints */ | ||
117 | LOCK2 /* Gain all locks */ | ||
118 | SET [A0.3+#UON],D1RtP /* Stop shared memory access too */ | ||
119 | DCACHE [D1Ar1],A0.3 /* Flush Cache line */ | ||
120 | GETD D0Re0,[D1Ar1] /* Get new state from memory or hit */ | ||
121 | DCACHE [D1Ar1],A0.3 /* Flush Cache line */ | ||
122 | GETD D0Re0,[D1Ar1] /* Get current state */ | ||
123 | TST D0Re0,D0Ar2 /* Are we clear to send? */ | ||
124 | ORZ D0Re0,D0Re0,D0Ar2 /* Yes: So set bits and */ | ||
125 | SETDZ [D1Ar1],D0Re0 /* transmit new state */ | ||
126 | SET [A0.3+#UOFF],D1RtP /* Allow shared memory access */ | ||
127 | LOCK0 /* Release all locks */ | ||
128 | MOV TXMASKI,D1Re0 /* Allow ints */ | ||
129 | $LPollEnd: | ||
130 | XORNZ D0Re0,D0Re0,D0Re0 /* No: Generate zero result */ | ||
131 | MOV PC,D1RtP /* Return (NZ indicates failure) */ | ||
132 | .size ___TBIPoll,.-___TBIPoll | ||
133 | |||
134 | /* | ||
135 | * End of tbicore.S | ||
136 | */ | ||
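
In C terms the poll step amounts to a test-and-set of the mask bits; a rough model, ignoring the interrupt masking, hardware LOCK2 locking and cache-line flushing that the real routine needs for correctness:

	/* Rough C model of ___TBIPoll: claim the lock bits if currently clear. */
	static int tbi_poll_model(volatile unsigned int *pLock, unsigned int mask)
	{
		unsigned int state = *pLock;

		if (state & mask)
			return 0;		/* bits already set: poll fails */
		*pLock = state | mask;		/* set the bits to claim the lock */
		return 1;			/* success */
	}

___TBISpin then simply repeats the poll until it succeeds.
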
diff --git a/arch/metag/tbx/tbictx.S b/arch/metag/tbx/tbictx.S new file mode 100644 index 000000000000..19af983a13ae --- /dev/null +++ b/arch/metag/tbx/tbictx.S | |||
@@ -0,0 +1,366 @@ | |||
1 | /* | ||
2 | * tbictx.S | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Explicit state save and restore routines forming part of the thread binary | ||
11 | * interface for META processors | ||
12 | */ | ||
13 | |||
14 | .file "tbictx.S" | ||
15 | #include <asm/metag_regs.h> | ||
16 | #include <asm/tbx.h> | ||
17 | |||
18 | #ifdef METAC_1_0 | ||
19 | /* Ax.4 is NOT saved in XAX3 */ | ||
20 | #define A0_4 | ||
21 | #else | ||
22 | /* Ax.4 is saved in XAX4 */ | ||
23 | #define A0_4 A0.4, | ||
24 | #endif | ||
25 | |||
26 | |||
27 | /* Size of the TBICTX structure */ | ||
28 | #define TBICTX_BYTES ((TBICTX_AX_REGS*8)+TBICTX_AX) | ||
29 | |||
30 | /* | ||
31 | * TBIRES __TBINestInts( TBIRES State, void *pExt, int NoNestMask ) | ||
32 | */ | ||
33 | .text | ||
34 | .balign 4 | ||
35 | .global ___TBINestInts | ||
36 | .type ___TBINestInts,function | ||
37 | ___TBINestInts: | ||
38 | XOR D0Ar4,D0Ar4,#-1 /* D0Ar4 = ~TrigBit */ | ||
39 | AND D0Ar4,D0Ar4,#0xFFFF /* D0Ar4 &= 0xFFFF */ | ||
40 | MOV D0Ar6,TXMASKI /* BGNDHALT currently enabled? */ | ||
41 | TSTT D0Ar2,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XCBF_BIT | ||
42 | AND D0Ar4,D0Ar2,D0Ar4 /* D0Ar4 = Ints to allow */ | ||
43 | XOR D0Ar2,D0Ar2,D0Ar4 /* Less Ints in TrigMask */ | ||
44 | BNZ ___TBINestInts2 /* Jump if ctx save required! */ | ||
45 | TSTT D0Ar2,#TBICTX_CBUF_BIT+TBICTX_CBRP_BIT /* Is catch state dirty? */ | ||
46 | OR D0Ar4,D0Ar4,D0Ar6 /* Or in TXMASKI BGNDHALT if set */ | ||
47 | TSTNZ D0Ar4,D0Ar4 /* Yes: AND triggers enabled */ | ||
48 | MOV D0Re0,D0Ar2 /* Update State argument */ | ||
49 | MOV D1Re0,D1Ar1 /* with less Ints in TrigMask */ | ||
50 | MOVZ TXMASKI,D0Ar4 /* Early return: Enable Ints */ | ||
51 | MOVZ PC,D1RtP /* Early return */ | ||
52 | .size ___TBINestInts,.-___TBINestInts | ||
53 | /* | ||
54 | * Drop through into sub-function - | ||
55 | */ | ||
56 | .global ___TBINestInts2 | ||
57 | .type ___TBINestInts2,function | ||
58 | ___TBINestInts2: | ||
59 | MOV D0FrT,A0FrP /* Full entry sequence so we */ | ||
60 | ADD A0FrP,A0StP,#0 /* can make sub-calls */ | ||
61 | MSETL [A0StP],D0FrT,D0.5,D0.6 /* and preserve our result */ | ||
62 | ORT D0Ar2,D0Ar2,#TBICTX_XCBF_BIT /* Add in XCBF save request */ | ||
63 | MOV D0.5,D0Ar2 /* Save State in DX.5 */ | ||
64 | MOV D1.5,D1Ar1 | ||
65 | OR D0.6,D0Ar4,D0Ar6 /* Save TrigMask in D0.6 */ | ||
66 | MOVT D1RtP,#HI(___TBICtxSave) /* Save catch buffer */ | ||
67 | CALL D1RtP,#LO(___TBICtxSave) | ||
68 | MOV TXMASKI,D0.6 /* Allow Ints */ | ||
69 | MOV D0Re0,D0.5 /* Return State */ | ||
70 | MOV D1Re0,D1.5 | ||
71 | MGETL D0FrT,D0.5,D0.6,[A0FrP] /* Full exit sequence */ | ||
72 | SUB A0StP,A0FrP,#(8*3) | ||
73 | MOV A0FrP,D0FrT | ||
74 | MOV PC,D1RtP | ||
75 | .size ___TBINestInts2,.-___TBINestInts2 | ||
76 | |||
77 | /* | ||
78 | * void *__TBICtxSave( TBIRES State, void *pExt ) | ||
79 | * | ||
80 | * D0Ar2 contains TBICTX_*_BIT values that control what | ||
81 | * extended data is to be saved beyond the end of D1Ar1. | ||
82 | * These bits must be ored into the SaveMask of this structure. | ||
83 | * | ||
84 | * Virtually all possible scratch registers are used. | ||
85 | * | ||
86 | * The D1Ar1 parameter is only used as the basis for saving | ||
87 | * CBUF state. | ||
88 | */ | ||
89 | /* | ||
90 | * If TBICTX_XEXT_BIT is specified in State, then State.pCtx->Ext is | ||
91 | * utilised to save the base address of the context save area and | ||
92 | * the extended states saved. The XEXT flag then indicates that the | ||
93 | * original state of the A0.2 and A1.2 registers from TBICTX.Ext.AX2 | ||
94 | * are stored as the first part of the extended state structure. | ||
95 | */ | ||
96 | .balign 4 | ||
97 | .global ___TBICtxSave | ||
98 | .type ___TBICtxSave,function | ||
99 | ___TBICtxSave: | ||
100 | GETD D0Re0,[D1Ar1+#TBICTX_SaveMask-2] /* Get SaveMask */ | ||
101 | TSTT D0Ar2,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT | ||
102 | /* Just XCBF to save? */ | ||
103 | MOV A0.2,D1Ar3 /* Save pointer into A0.2 */ | ||
104 | MOV A1.2,D1RtP /* Free off D0FrT:D1RtP pair */ | ||
105 | BZ $LCtxSaveCBUF /* Yes: Only XCBF may be saved */ | ||
106 | TSTT D0Ar2,#TBICTX_XEXT_BIT /* Extended base-state model? */ | ||
107 | BZ $LCtxSaveXDX8 | ||
108 | GETL D0Ar6,D1Ar5,[D1Ar1+#TBICTX_Ext_AX2] /* Get A0.2, A1.2 state */ | ||
109 | MOV D0Ar4,D0Ar2 /* Extract Ctx.SaveFlags value */ | ||
110 | ANDMT D0Ar4,D0Ar4,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT | ||
111 | SETD [D1Ar1+#TBICTX_Ext_Ctx_pExt],A0.2 | ||
112 | SETD [D1Ar1+#TBICTX_Ext_Ctx_SaveMask-2],D0Ar4 | ||
113 | SETL [A0.2++],D0Ar6,D1Ar5 /* Save A0.2, A1.2 state */ | ||
114 | $LCtxSaveXDX8: | ||
115 | TSTT D0Ar2,#TBICTX_XDX8_BIT /* Save extended DX regs? */ | ||
116 | BZ $LCtxSaveXAXX | ||
117 | /* | ||
118 | * Save 8 extra DX registers | ||
119 | */ | ||
120 | MSETL [A0.2],D0.8,D0.9,D0.10,D0.11,D0.12,D0.13,D0.14,D0.15 | ||
121 | $LCtxSaveXAXX: | ||
122 | TSTT D0Ar2,#TBICTX_XAXX_BIT /* Save extended AX regs? */ | ||
123 | SWAP D0Re0,A0.2 /* pDst into D0Re0 */ | ||
124 | BZ $LCtxSaveXHL2 | ||
125 | /* | ||
126 | * Save 4 extra AX registers | ||
127 | */ | ||
128 | MSETL [D0Re0], A0_4 A0.5,A0.6,A0.7 /* Save 8*3 bytes */ | ||
129 | $LCtxSaveXHL2: | ||
130 | TSTT D0Ar2,#TBICTX_XHL2_BIT /* Save hardware-loop regs? */ | ||
131 | SWAP D0Re0,A0.2 /* pDst back into A0.2 */ | ||
132 | MOV D0Ar6,TXL1START | ||
133 | MOV D1Ar5,TXL2START | ||
134 | BZ $LCtxSaveXTDP | ||
135 | /* | ||
136 | * Save hardware loop registers | ||
137 | */ | ||
138 | SETL [A0.2++],D0Ar6,D1Ar5 /* Save 8*1 bytes */ | ||
139 | MOV D0Ar6,TXL1END | ||
140 | MOV D1Ar5,TXL2END | ||
141 | MOV D0FrT,TXL1COUNT | ||
142 | MOV D1RtP,TXL2COUNT | ||
143 | MSETL [A0.2],D0Ar6,D0FrT /* Save 8*2 bytes */ | ||
144 | /* | ||
145 | * Clear loop counters to disable any current loops | ||
146 | */ | ||
147 | XOR TXL1COUNT,D0FrT,D0FrT | ||
148 | XOR TXL2COUNT,D1RtP,D1RtP | ||
149 | $LCtxSaveXTDP: | ||
150 | TSTT D0Ar2,#TBICTX_XTDP_BIT /* Save per-thread DSP regs? */ | ||
151 | BZ $LCtxSaveCBUF | ||
152 | /* | ||
153 | * Save per-thread DSP registers; ACC.0, PR.0, PI.1-3 (PI.0 is zero) | ||
154 | */ | ||
155 | #ifndef CTX_NO_DSP | ||
156 | D SETL [A0.2++],AC0.0,AC1.0 /* Save ACx.0 lower 32-bits */ | ||
157 | DH SETL [A0.2++],AC0.0,AC1.0 /* Save ACx.0 upper 32-bits */ | ||
158 | D SETL [A0.2++],D0AR.0,D1AR.0 /* Save DSP RAM registers */ | ||
159 | D SETL [A0.2++],D0AR.1,D1AR.1 | ||
160 | D SETL [A0.2++],D0AW.0,D1AW.0 | ||
161 | D SETL [A0.2++],D0AW.1,D1AW.1 | ||
162 | D SETL [A0.2++],D0BR.0,D1BR.0 | ||
163 | D SETL [A0.2++],D0BR.1,D1BR.1 | ||
164 | D SETL [A0.2++],D0BW.0,D1BW.0 | ||
165 | D SETL [A0.2++],D0BW.1,D1BW.1 | ||
166 | D SETL [A0.2++],D0ARI.0,D1ARI.0 | ||
167 | D SETL [A0.2++],D0ARI.1,D1ARI.1 | ||
168 | D SETL [A0.2++],D0AWI.0,D1AWI.0 | ||
169 | D SETL [A0.2++],D0AWI.1,D1AWI.1 | ||
170 | D SETL [A0.2++],D0BRI.0,D1BRI.0 | ||
171 | D SETL [A0.2++],D0BRI.1,D1BRI.1 | ||
172 | D SETL [A0.2++],D0BWI.0,D1BWI.0 | ||
173 | D SETL [A0.2++],D0BWI.1,D1BWI.1 | ||
174 | D SETD [A0.2++],T0 | ||
175 | D SETD [A0.2++],T1 | ||
176 | D SETD [A0.2++],T2 | ||
177 | D SETD [A0.2++],T3 | ||
178 | D SETD [A0.2++],T4 | ||
179 | D SETD [A0.2++],T5 | ||
180 | D SETD [A0.2++],T6 | ||
181 | D SETD [A0.2++],T7 | ||
182 | D SETD [A0.2++],T8 | ||
183 | D SETD [A0.2++],T9 | ||
184 | D SETD [A0.2++],TA | ||
185 | D SETD [A0.2++],TB | ||
186 | D SETD [A0.2++],TC | ||
187 | D SETD [A0.2++],TD | ||
188 | D SETD [A0.2++],TE | ||
189 | D SETD [A0.2++],TF | ||
190 | #else | ||
191 | ADD A0.2,A0.2,#(8*18+4*16) | ||
192 | #endif | ||
193 | MOV D0Ar6,TXMRSIZE | ||
194 | MOV D1Ar5,TXDRSIZE | ||
195 | SETL [A0.2++],D0Ar6,D1Ar5 /* Save 8*1 bytes */ | ||
196 | |||
197 | $LCtxSaveCBUF: | ||
198 | #ifdef TBI_1_3 | ||
199 | MOV D0Ar4,D0Re0 /* Copy Ctx Flags */ | ||
200 | ANDT D0Ar4,D0Ar4,#TBICTX_XCBF_BIT /* mask XCBF if already set */ | ||
201 | XOR D0Ar4,D0Ar4,#-1 | ||
202 | AND D0Ar2,D0Ar2,D0Ar4 /* remove XCBF if already set */ | ||
203 | #endif | ||
204 | TSTT D0Ar2,#TBICTX_XCBF_BIT /* Want to save CBUF? */ | ||
205 | ANDT D0Ar2,D0Ar2,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT | ||
206 | OR D0Ar2,D0Ar2,D0Re0 /* Generate new SaveMask */ | ||
207 | SETD [D1Ar1+#TBICTX_SaveMask-2],D0Ar2/* Add in bits saved to TBICTX */ | ||
208 | MOV D0Re0,A0.2 /* Return end of save area */ | ||
209 | MOV D0Ar4,TXDIVTIME /* Get TXDIVTIME */ | ||
210 | MOVZ PC,A1.2 /* No: Early return */ | ||
211 | TSTT D0Ar2,#TBICTX_CBUF_BIT+TBICTX_CBRP_BIT /* Need to save CBUF? */ | ||
212 | MOVZ PC,A1.2 /* No: Early return */ | ||
213 | ORT D0Ar2,D0Ar2,#TBICTX_XCBF_BIT | ||
214 | SETD [D1Ar1+#TBICTX_SaveMask-2],D0Ar2/* Add in XCBF bit to TBICTX */ | ||
215 | ADD A0.2,D1Ar1,#TBICTX_BYTES /* Dump CBUF state after TBICTX */ | ||
216 | /* | ||
217 | * Save CBUF | ||
218 | */ | ||
219 | SETD [A0.2+# 0],TXCATCH0 /* Save TXCATCHn */ | ||
220 | SETD [A0.2+# 4],TXCATCH1 | ||
221 | TSTT D0Ar2,#TBICTX_CBRP_BIT /* ... RDDIRTY was/is set */ | ||
222 | SETD [A0.2+# 8],TXCATCH2 | ||
223 | SETD [A0.2+#12],TXCATCH3 | ||
224 | BZ $LCtxSaveComplete | ||
225 | SETL [A0.2+#(2*8)],RD /* Save read pipeline */ | ||
226 | SETL [A0.2+#(3*8)],RD /* Save read pipeline */ | ||
227 | SETL [A0.2+#(4*8)],RD /* Save read pipeline */ | ||
228 | SETL [A0.2+#(5*8)],RD /* Save read pipeline */ | ||
229 | SETL [A0.2+#(6*8)],RD /* Save read pipeline */ | ||
230 | SETL [A0.2+#(7*8)],RD /* Save read pipeline */ | ||
231 | AND TXDIVTIME,D0Ar4,#TXDIVTIME_DIV_BITS /* Clear RPDIRTY */ | ||
232 | $LCtxSaveComplete: | ||
233 | MOV PC,A1.2 /* Return */ | ||
234 | .size ___TBICtxSave,.-___TBICtxSave | ||
235 | |||
236 | /* | ||
237 | * void *__TBICtxRestore( TBIRES State, void *pExt ) | ||
238 | * | ||
239 | * D0Ar2 contains TBICTX_*_BIT values that control what | ||
240 | * extended data is to be recovered from D1Ar3 (pExt). | ||
241 | * | ||
242 | * Virtually all possible scratch registers are used. | ||
243 | */ | ||
244 | /* | ||
245 | * If TBICTX_XEXT_BIT is specified in State, then the saved state of | ||
246 | * the original A0.2 and A1.2 is restored from pExt and the XEXT | ||
247 | * related flags are removed from State.pCtx->SaveMask. | ||
248 | * | ||
249 | */ | ||
250 | .balign 4 | ||
251 | .global ___TBICtxRestore | ||
252 | .type ___TBICtxRestore,function | ||
253 | ___TBICtxRestore: | ||
254 | GETD D0Ar6,[D1Ar1+#TBICTX_CurrMODE] /* Get TXMODE Value */ | ||
255 | ANDST D0Ar2,D0Ar2,#TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT | ||
256 | MOV D1Re0,D0Ar2 /* Keep flags in D1Re0 */ | ||
257 | MOV D0Re0,D1Ar3 /* D1Ar3 is default result */ | ||
258 | MOVZ PC,D1RtP /* Early return, nothing to do */ | ||
259 | ANDT D0Ar6,D0Ar6,#0xE000 /* Top bits of TXMODE required */ | ||
260 | MOV A0.3,D0Ar6 /* Save TXMODE for later */ | ||
261 | TSTT D1Re0,#TBICTX_XEXT_BIT /* Check for XEXT bit */ | ||
262 | BZ $LCtxRestXDX8 | ||
263 | GETD D0Ar4,[D1Ar1+#TBICTX_SaveMask-2]/* Get current SaveMask */ | ||
264 | GETL D0Ar6,D1Ar5,[D0Re0++] /* Restore A0.2, A1.2 state */ | ||
265 | ANDMT D0Ar4,D0Ar4,#(0xFFFF-(TBICTX_XDX8_BIT+TBICTX_XAXX_BIT+TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+TBICTX_XEXT_BIT)) | ||
266 | SETD [D1Ar1+#TBICTX_SaveMask-2],D0Ar4/* New SaveMask */ | ||
267 | #ifdef METAC_1_0 | ||
268 | SETD [D1Ar1+#TBICTX_Ext_AX2_U0],D0Ar6 | ||
269 | MOV D0Ar6,D1Ar1 | ||
270 | SETD [D0Ar6+#TBICTX_Ext_AX2_U1],D1Ar5 | ||
271 | #else | ||
272 | SETL [D1Ar1+#TBICTX_Ext_AX2],D0Ar6,D1Ar5 | ||
273 | #endif | ||
274 | $LCtxRestXDX8: | ||
275 | TSTT D1Re0,#TBICTX_XDX8_BIT /* Get extended DX regs? */ | ||
276 | MOV A1.2,D1RtP /* Free off D1RtP register */ | ||
277 | BZ $LCtxRestXAXX | ||
278 | /* | ||
279 | * Restore 8 extra DX registers | ||
280 | */ | ||
281 | MGETL D0.8,D0.9,D0.10,D0.11,D0.12,D0.13,D0.14,D0.15,[D0Re0] | ||
282 | $LCtxRestXAXX: | ||
283 | TSTT D1Re0,#TBICTX_XAXX_BIT /* Get extended AX regs? */ | ||
284 | BZ $LCtxRestXHL2 | ||
285 | /* | ||
286 | * Restore 3 extra AX registers | ||
287 | */ | ||
288 | MGETL A0_4 A0.5,A0.6,A0.7,[D0Re0] /* Get 8*3 bytes */ | ||
289 | $LCtxRestXHL2: | ||
290 | TSTT D1Re0,#TBICTX_XHL2_BIT /* Get hardware-loop regs? */ | ||
291 | BZ $LCtxRestXTDP | ||
292 | /* | ||
293 | * Get hardware loop registers | ||
294 | */ | ||
295 | MGETL D0Ar6,D0Ar4,D0Ar2,[D0Re0] /* Get 8*3 bytes */ | ||
296 | MOV TXL1START,D0Ar6 | ||
297 | MOV TXL2START,D1Ar5 | ||
298 | MOV TXL1END,D0Ar4 | ||
299 | MOV TXL2END,D1Ar3 | ||
300 | MOV TXL1COUNT,D0Ar2 | ||
301 | MOV TXL2COUNT,D1Ar1 | ||
302 | $LCtxRestXTDP: | ||
303 | TSTT D1Re0,#TBICTX_XTDP_BIT /* Get per-thread DSP regs? */ | ||
304 | MOVZ PC,A1.2 /* No: Early return */ | ||
305 | /* | ||
306 | * Get per-thread DSP registers; ACC.0, PR.0, PI.1-3 (PI.0 is zero) | ||
307 | */ | ||
308 | MOV A0.2,D0Re0 | ||
309 | GETL D0Ar6,D1Ar5,[D0Re0++#((16*4)+(18*8))] | ||
310 | #ifndef CTX_NO_DSP | ||
311 | D GETL AC0.0,AC1.0,[A0.2++] /* Restore ACx.0 lower 32-bits */ | ||
312 | DH GETL AC0.0,AC1.0,[A0.2++] /* Restore ACx.0 upper 32-bits */ | ||
313 | #else | ||
314 | ADD A0.2,A0.2,#(2*8) | ||
315 | #endif | ||
316 | ADD D0Re0,D0Re0,#(2*4) | ||
317 | MOV TXMODE,A0.3 /* Some TXMODE bits needed */ | ||
318 | MOV TXMRSIZE,D0Ar6 | ||
319 | MOV TXDRSIZE,D1Ar5 | ||
320 | #ifndef CTX_NO_DSP | ||
321 | D GETL D0AR.0,D1AR.0,[A0.2++] /* Restore DSP RAM registers */ | ||
322 | D GETL D0AR.1,D1AR.1,[A0.2++] | ||
323 | D GETL D0AW.0,D1AW.0,[A0.2++] | ||
324 | D GETL D0AW.1,D1AW.1,[A0.2++] | ||
325 | D GETL D0BR.0,D1BR.0,[A0.2++] | ||
326 | D GETL D0BR.1,D1BR.1,[A0.2++] | ||
327 | D GETL D0BW.0,D1BW.0,[A0.2++] | ||
328 | D GETL D0BW.1,D1BW.1,[A0.2++] | ||
329 | #else | ||
330 | ADD A0.2,A0.2,#(8*8) | ||
331 | #endif | ||
332 | MOV TXMODE,#0 /* Restore TXMODE */ | ||
333 | #ifndef CTX_NO_DSP | ||
334 | D GETL D0ARI.0,D1ARI.0,[A0.2++] | ||
335 | D GETL D0ARI.1,D1ARI.1,[A0.2++] | ||
336 | D GETL D0AWI.0,D1AWI.0,[A0.2++] | ||
337 | D GETL D0AWI.1,D1AWI.1,[A0.2++] | ||
338 | D GETL D0BRI.0,D1BRI.0,[A0.2++] | ||
339 | D GETL D0BRI.1,D1BRI.1,[A0.2++] | ||
340 | D GETL D0BWI.0,D1BWI.0,[A0.2++] | ||
341 | D GETL D0BWI.1,D1BWI.1,[A0.2++] | ||
342 | D GETD T0,[A0.2++] | ||
343 | D GETD T1,[A0.2++] | ||
344 | D GETD T2,[A0.2++] | ||
345 | D GETD T3,[A0.2++] | ||
346 | D GETD T4,[A0.2++] | ||
347 | D GETD T5,[A0.2++] | ||
348 | D GETD T6,[A0.2++] | ||
349 | D GETD T7,[A0.2++] | ||
350 | D GETD T8,[A0.2++] | ||
351 | D GETD T9,[A0.2++] | ||
352 | D GETD TA,[A0.2++] | ||
353 | D GETD TB,[A0.2++] | ||
354 | D GETD TC,[A0.2++] | ||
355 | D GETD TD,[A0.2++] | ||
356 | D GETD TE,[A0.2++] | ||
357 | D GETD TF,[A0.2++] | ||
358 | #else | ||
359 | ADD A0.2,A0.2,#(8*8+4*16) | ||
360 | #endif | ||
361 | MOV PC,A1.2 /* Return */ | ||
362 | .size ___TBICtxRestore,.-___TBICtxRestore | ||
363 | |||
364 | /* | ||
365 | * End of tbictx.S | ||
366 | */ | ||
diff --git a/arch/metag/tbx/tbictxfpu.S b/arch/metag/tbx/tbictxfpu.S new file mode 100644 index 000000000000..e773bea3e7bd --- /dev/null +++ b/arch/metag/tbx/tbictxfpu.S | |||
@@ -0,0 +1,190 @@ | |||
1 | /* | ||
2 | * tbictxfpu.S | ||
3 | * | ||
4 | * Copyright (C) 2009, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Explicit state save and restore routines forming part of the thread binary | ||
11 | * interface for META processors | ||
12 | */ | ||
13 | |||
14 | .file "tbictxfpu.S" | ||
15 | |||
16 | #include <asm/metag_regs.h> | ||
17 | #include <asm/tbx.h> | ||
18 | |||
19 | #ifdef TBI_1_4 | ||
20 | /* | ||
21 | * void *__TBICtxFPUSave( TBIRES State, void *pExt ) | ||
22 | * | ||
23 | * D0Ar2 contains TBICTX_*_BIT values that control what | ||
24 | * extended data is to be saved. | ||
25 | * These bits must be ored into the SaveMask of this structure. | ||
26 | * | ||
27 | * Virtually all possible scratch registers are used. | ||
28 | */ | ||
29 | .text | ||
30 | .balign 4 | ||
31 | .global ___TBICtxFPUSave | ||
32 | .type ___TBICtxFPUSave,function | ||
33 | ___TBICtxFPUSave: | ||
34 | |||
35 | /* D1Ar1:D0Ar2 - State | ||
36 | * D1Ar3 - pExt | ||
37 | * D0Ar4 - Value of METAC_CORE_ID | ||
38 | * D1Ar5 - Scratch | ||
39 | * D0Ar6 - Scratch | ||
40 | */ | ||
41 | |||
42 | /* If the FPAC bit isn't set then there is nothing to do */ | ||
43 | TSTT D0Ar2,#TBICTX_FPAC_BIT | ||
44 | MOVZ PC, D1RtP | ||
45 | |||
46 | /* Obtain the Core config */ | ||
47 | MOVT D0Ar4, #HI(METAC_CORE_ID) | ||
48 | ADD D0Ar4, D0Ar4, #LO(METAC_CORE_ID) | ||
49 | GETD D0Ar4, [D0Ar4] | ||
50 | |||
51 | /* Detect FX.8 - FX.15 and add to core config */ | ||
52 | MOV D0Ar6, TXENABLE | ||
53 | AND D0Ar6, D0Ar6, #(TXENABLE_CLASSALT_FPUR8 << TXENABLE_CLASS_S) | ||
54 | AND D0Ar4, D0Ar4, #LO(0x0000FFFF) | ||
55 | ORT D0Ar4, D0Ar4, #HI(TBICTX_CFGFPU_FX16_BIT) | ||
56 | XOR D0Ar4, D0Ar4, D0Ar6 | ||
57 | |||
58 | /* Save the relevant bits to the buffer */ | ||
59 | SETD [D1Ar3++], D0Ar4 | ||
60 | |||
61 | /* Save the relevant bits of TXDEFR (Assumes TXDEFR is coherent) ... */ | ||
62 | MOV D0Ar6, TXDEFR | ||
63 | LSR D0Re0, D0Ar6, #8 | ||
64 | AND D0Re0, D0Re0, #LO(TXDEFR_FPE_FE_BITS>>8) | ||
65 | AND D0Ar6, D0Ar6, #LO(TXDEFR_FPE_ICTRL_BITS) | ||
66 | OR D0Re0, D0Re0, D0Ar6 | ||
67 | |||
68 | /* ... along with relevant bits of TXMODE to buffer */ | ||
69 | MOV D0Ar6, TXMODE | ||
70 | ANDT D0Ar6, D0Ar6, #HI(TXMODE_FPURMODE_BITS) | ||
71 | ORT D0Ar6, D0Ar6, #HI(TXMODE_FPURMODEWRITE_BIT) | ||
72 | OR D0Ar6, D0Ar6, D0Re0 | ||
73 | SETD [D1Ar3++], D0Ar6 | ||
74 | |||
75 | GETD D0Ar6,[D1Ar1+#TBICTX_SaveMask-2] /* Get the current SaveMask */ | ||
76 | /* D0Ar6 - pCtx->SaveMask */ | ||
77 | |||
78 | TSTT D0Ar4, #HI(TBICTX_CFGFPU_FX16_BIT) /* Perform test here for extended FPU registers | ||
79 | * to avoid stalls | ||
80 | */ | ||
81 | /* Save the standard FPU registers */ | ||
82 | F MSETL [D1Ar3++], FX.0, FX.2, FX.4, FX.6 | ||
83 | |||
84 | /* Save the extended FPU registers if they are present */ | ||
85 | BZ $Lskip_save_fx8_fx16 | ||
86 | F MSETL [D1Ar3++], FX.8, FX.10, FX.12, FX.14 | ||
87 | $Lskip_save_fx8_fx16: | ||
88 | |||
89 | /* Save the FPU Accumulator if it is present */ | ||
90 | TST D0Ar4, #METAC_COREID_NOFPACC_BIT | ||
91 | BNZ $Lskip_save_fpacc | ||
92 | F SETL [D1Ar3++], ACF.0 | ||
93 | F SETL [D1Ar3++], ACF.1 | ||
94 | F SETL [D1Ar3++], ACF.2 | ||
95 | $Lskip_save_fpacc: | ||
96 | |||
97 | /* Update pCtx->SaveMask */ | ||
98 | ANDT D0Ar2, D0Ar2, #TBICTX_FPAC_BIT | ||
99 | OR D0Ar6, D0Ar6, D0Ar2 | ||
100 | SETD [D1Ar1+#TBICTX_SaveMask-2],D0Ar6 /* Add in FPAC bit to TBICTX */ | ||
101 | |||
102 | MOV D0Re0, D1Ar3 /* Return end of save area */ | ||
103 | MOV PC, D1RtP | ||
104 | |||
105 | .size ___TBICtxFPUSave,.-___TBICtxFPUSave | ||
106 | |||
107 | /* | ||
108 | * void *__TBICtxFPURestore( TBIRES State, void *pExt ) | ||
109 | * | ||
110 | * D0Ar2 contains TBICTX_*_BIT values that control what | ||
111 | * extended data is to be recovered from D1Ar3 (pExt). | ||
112 | * | ||
113 | * Virtually all possible scratch registers are used. | ||
114 | */ | ||
115 | /* | ||
116 | * If TBICTX_XEXT_BIT is specified in State, then the saved state of | ||
117 | * the original A0.2 and A1.2 is restored from pExt and the XEXT | ||
118 | * related flags are removed from State.pCtx->SaveMask. | ||
119 | * | ||
120 | */ | ||
121 | .balign 4 | ||
122 | .global ___TBICtxFPURestore | ||
123 | .type ___TBICtxFPURestore,function | ||
124 | ___TBICtxFPURestore: | ||
125 | |||
126 | /* D1Ar1:D0Ar2 - State | ||
127 | * D1Ar3 - pExt | ||
128 | * D0Ar4 - Value of METAC_CORE_ID | ||
129 | * D1Ar5 - Scratch | ||
130 | * D0Ar6 - Scratch | ||
131 | * D1Re0 - Scratch | ||
132 | */ | ||
133 | |||
134 | /* If the FPAC bit isn't set then there is nothing to do */ | ||
135 | TSTT D0Ar2,#TBICTX_FPAC_BIT | ||
136 | MOVZ PC, D1RtP | ||
137 | |||
138 | /* Obtain the relevant bits of the Core config */ | ||
139 | GETD D0Ar4, [D1Ar3++] | ||
140 | |||
141 | /* Restore FPU related parts of TXDEFR. Assumes TXDEFR is coherent */ | ||
142 | GETD D1Ar5, [D1Ar3++] | ||
143 | MOV D0Ar6, D1Ar5 | ||
144 | LSL D1Re0, D1Ar5, #8 | ||
145 | ANDT D1Re0, D1Re0, #HI(TXDEFR_FPE_FE_BITS|TXDEFR_FPE_ICTRL_BITS) | ||
146 | AND D1Ar5, D1Ar5, #LO(TXDEFR_FPE_FE_BITS|TXDEFR_FPE_ICTRL_BITS) | ||
147 | OR D1Re0, D1Re0, D1Ar5 | ||
148 | |||
149 | MOV D1Ar5, TXDEFR | ||
150 | ANDMT D1Ar5, D1Ar5, #HI(~(TXDEFR_FPE_FE_BITS|TXDEFR_FPE_ICTRL_BITS)) | ||
151 | ANDMB D1Ar5, D1Ar5, #LO(~(TXDEFR_FPE_FE_BITS|TXDEFR_FPE_ICTRL_BITS)) | ||
152 | OR D1Re0, D1Re0, D1Ar5 | ||
153 | MOV TXDEFR, D1Re0 | ||
154 | |||
155 | /* Restore relevant bits of TXMODE */ | ||
156 | MOV D1Ar5, TXMODE | ||
157 | ANDMT D1Ar5, D1Ar5, #HI(~TXMODE_FPURMODE_BITS) | ||
158 | ANDT D0Ar6, D0Ar6, #HI(TXMODE_FPURMODE_BITS|TXMODE_FPURMODEWRITE_BIT) | ||
159 | OR D0Ar6, D0Ar6, D1Ar5 | ||
160 | MOV TXMODE, D0Ar6 | ||
161 | |||
162 | TSTT D0Ar4, #HI(TBICTX_CFGFPU_FX16_BIT) /* Perform test here for extended FPU registers | ||
163 | * to avoid stalls | ||
164 | */ | ||
165 | /* Restore the standard FPU registers */ | ||
166 | F MGETL FX.0, FX.2, FX.4, FX.6, [D1Ar3++] | ||
167 | |||
168 | /* Restore the extended FPU registers if they are present */ | ||
169 | BZ $Lskip_restore_fx8_fx16 | ||
170 | F MGETL FX.8, FX.10, FX.12, FX.14, [D1Ar3++] | ||
171 | $Lskip_restore_fx8_fx16: | ||
172 | |||
173 | /* Restore the FPU Accumulator if it is present */ | ||
174 | TST D0Ar4, #METAC_COREID_NOFPACC_BIT | ||
175 | BNZ $Lskip_restore_fpacc | ||
176 | F GETL ACF.0, [D1Ar3++] | ||
177 | F GETL ACF.1, [D1Ar3++] | ||
178 | F GETL ACF.2, [D1Ar3++] | ||
179 | $Lskip_restore_fpacc: | ||
180 | |||
181 | MOV D0Re0, D1Ar3 /* Return end of save area */ | ||
182 | MOV PC, D1RtP | ||
183 | |||
184 | .size ___TBICtxFPURestore,.-___TBICtxFPURestore | ||
185 | |||
186 | #endif /* TBI_1_4 */ | ||
187 | |||
188 | /* | ||
189 | * End of tbictxfpu.S | ||
190 | */ | ||
diff --git a/arch/metag/tbx/tbidefr.S b/arch/metag/tbx/tbidefr.S new file mode 100644 index 000000000000..3eb165ebf540 --- /dev/null +++ b/arch/metag/tbx/tbidefr.S | |||
@@ -0,0 +1,175 @@ | |||
1 | /* | ||
2 | * tbidefr.S | ||
3 | * | ||
4 | * Copyright (C) 2009, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Routing deferred exceptions | ||
11 | */ | ||
12 | |||
13 | #include <asm/metag_regs.h> | ||
14 | #include <asm/tbx.h> | ||
15 | |||
16 | .text | ||
17 | .balign 4 | ||
18 | .global ___TBIHandleDFR | ||
19 | .type ___TBIHandleDFR,function | ||
20 | /* D1Ar1:D0Ar2 -- State | ||
21 | * D0Ar3 -- SigNum | ||
22 | * D0Ar4 -- Triggers | ||
23 | * D1Ar5 -- InstOrSWSId | ||
24 | * D0Ar6 -- pTBI (volatile) | ||
25 | */ | ||
26 | ___TBIHandleDFR: | ||
27 | #ifdef META_BUG_MBN100212 | ||
28 | MSETL [A0StP++], D0FrT, D0.5 | ||
29 | |||
30 | /* D1Ar1,D0Ar2,D1Ar5,D0Ar6 -- Arguments to handler, must be preserved | ||
31 | * D0Ar4 -- The deferred exceptions | ||
32 | * D1Ar3 -- As per D0Ar4 but just the trigger bits | ||
33 | * D0.5 -- The bgnd deferred exceptions | ||
34 | * D1.5 -- TXDEFR with bgnd re-added | ||
35 | */ | ||
36 | |||
37 | /* - Collect the pending deferred exceptions using TXSTAT, | ||
38 | * (acks the bgnd exceptions as a side-effect) | ||
39 | * - Manually collect remaining (interrupt) deferred exceptions | ||
40 | * using TXDEFR | ||
41 | * - Replace the triggers (from TXSTATI) with the int deferred | ||
42 | * exceptions that DEFR ..., TXSTATI would have returned if it were | ||
43 | * valid from bgnd code | ||
44 | * - Reconstruct TXDEFR by or'ing bgnd deferred exceptions (except | ||
45 | * the DEFER bit) and the int deferred exceptions. This will be | ||
46 | * restored later | ||
47 | */ | ||
48 | DEFR D0.5, TXSTAT | ||
49 | MOV D1.5, TXDEFR | ||
50 | ANDT D0.5, D0.5, #HI(0xFFFF0000) | ||
51 | MOV D1Ar3, D1.5 | ||
52 | ANDT D1Ar3, D1Ar3, #HI(0xFFFF0000) | ||
53 | OR D0Ar4, D1Ar3, #TXSTAT_DEFER_BIT | ||
54 | OR D1.5, D1.5, D0.5 | ||
55 | |||
56 | /* Mask off anything unrelated to the deferred exception triggers */ | ||
57 | ANDT D1Ar3, D1Ar3, #HI(TXSTAT_BUSERR_BIT | TXSTAT_FPE_BITS) | ||
58 | |||
59 | /* Can assume that at least one exception happened since this | ||
60 | * handler wouldn't have been called otherwise. | ||
61 | * | ||
62 | * Replace the signal number and at the same time, prepare | ||
63 | * the mask to acknowledge the exception | ||
64 | * | ||
65 | * D1Re0 -- The bits to acknowledge | ||
66 | * D1Ar3 -- The signal number | ||
67 | * D1RtP -- Scratch to deal with non-conditional insns | ||
68 | */ | ||
69 | MOVT D1Re0, #HI(TXSTAT_FPE_BITS & ~TXSTAT_FPE_DENORMAL_BIT) | ||
70 | MOV D1RtP, #TXSTAT_FPE_INVALID_S | ||
71 | FFB D1Ar3, D1Ar3 | ||
72 | CMP D1Ar3, #TXSTAT_FPE_INVALID_S | ||
73 | MOVLE D1Ar3, D1RtP /* Collapse FPE triggers to a single signal */ | ||
74 | MOV D1RtP, #1 | ||
75 | LSLGT D1Re0, D1RtP, D1Ar3 | ||
76 | |||
77 | /* Get the handler using the signal number | ||
78 | * | ||
79 | * D1Ar3 -- The signal number | ||
80 | * D0Re0 -- Offset into TBI struct containing handler address | ||
81 | * D1Re0 -- Mask of triggers to keep | ||
82 | * D1RtP -- Address of handler | ||
83 | */ | ||
84 | SUB D1Ar3, D1Ar3, #(TXSTAT_FPE_INVALID_S - TBID_SIGNUM_FPE) | ||
85 | LSL D0Re0, D1Ar3, #2 | ||
86 | XOR D1Re0, D1Re0, #-1 /* Prepare mask for acknowledge (avoids stall) */ | ||
87 | ADD D0Re0,D0Re0,#TBI_fnSigs | ||
88 | GETD D1RtP, [D0Ar6+D0Re0] | ||
89 | |||
90 | /* Acknowledge triggers */ | ||
91 | AND D1.5, D1.5, D1Re0 | ||
92 | |||
93 | /* Restore remaining exceptions | ||
94 | * Do this here in case the handler enables nested interrupts | ||
95 | * | ||
96 | * D1.5 -- TXDEFR with this exception ack'd | ||
97 | */ | ||
98 | MOV TXDEFR, D1.5 | ||
99 | |||
100 | /* Call the handler */ | ||
101 | SWAP D1RtP, PC | ||
102 | |||
103 | GETL D0.5, D1.5, [--A0StP] | ||
104 | GETL D0FrT, D1RtP, [--A0StP] | ||
105 | MOV PC,D1RtP | ||
106 | #else /* META_BUG_MBN100212 */ | ||
107 | |||
108 | /* D1Ar1,D0Ar2,D1Ar5,D0Ar6 -- Arguments to handler, must be preserved | ||
109 | * D0Ar4 -- The deferred exceptions | ||
110 | * D1Ar3 -- As per D0Ar4 but just the trigger bits | ||
111 | */ | ||
112 | |||
113 | /* - Collect the pending deferred exceptions using TXSTAT, | ||
114 | * (acks the interrupt exceptions as a side-effect) | ||
115 | */ | ||
116 | DEFR D0Ar4, TXSTATI | ||
117 | |||
118 | /* Mask off anything unrelated to the deferred exception triggers */ | ||
119 | MOV D1Ar3, D0Ar4 | ||
120 | ANDT D1Ar3, D1Ar3, #HI(TXSTAT_BUSERR_BIT | TXSTAT_FPE_BITS) | ||
121 | |||
122 | /* Can assume that at least one exception happened since this | ||
123 | * handler wouldn't have been called otherwise. | ||
124 | * | ||
125 | * Replace the signal number and at the same time, prepare | ||
126 | * the mask to acknowledge the exception | ||
127 | * | ||
128 | * The unusual code for 1<<D1Ar3 may need explanation. | ||
129 | * Normally this would be done using 'MOV rs,#1' and 'LSL rd,rs,D1Ar3' | ||
130 | * but only D1Re0 is available in D1 and no cross-unit insns are available. | ||
131 | * Even worse, there is no conditional 'MOV r,#uimm8'. | ||
132 | * Since the CMP proves that D1Ar3 >= 20, we can reuse the bottom 12-bits | ||
133 | * of D1Re0 (using 'ORGT r,#1') in the knowledge that the top 20-bits will | ||
134 | * be discarded without affecting the result. | ||
135 | * | ||
136 | * D1Re0 -- The bits to acknowledge | ||
137 | * D1Ar3 -- The signal number | ||
138 | */ | ||
139 | MOVT D1Re0, #HI(TXSTAT_FPE_BITS & ~TXSTAT_FPE_DENORMAL_BIT) | ||
140 | MOV D0Re0, #TXSTAT_FPE_INVALID_S | ||
141 | FFB D1Ar3, D1Ar3 | ||
142 | CMP D1Ar3, #TXSTAT_FPE_INVALID_S | ||
143 | MOVLE D1Ar3, D0Re0 /* Collapse FPE triggers to a single signal */ | ||
144 | ORGT D1Re0, D1Re0, #1 | ||
145 | LSLGT D1Re0, D1Re0, D1Ar3 | ||
146 | |||
147 | SUB D1Ar3, D1Ar3, #(TXSTAT_FPE_INVALID_S - TBID_SIGNUM_FPE) | ||
148 | |||
149 | /* Acknowledge triggers and restore remaining exceptions | ||
150 | * Do this here in case the handler enables nested interrupts | ||
151 | * | ||
152 | * (x | y) ^ y == x & ~y. It avoids the restrictive XOR ...,#-1 insn | ||
153 | * and is the same length | ||
154 | */ | ||
155 | MOV D0Re0, TXDEFR | ||
156 | OR D0Re0, D0Re0, D1Re0 | ||
157 | XOR TXDEFR, D0Re0, D1Re0 | ||
158 | |||
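Both bit tricks used above -- synthesising 1 << n without a conditional MOV, and the (x | y) ^ y == x & ~y acknowledge -- are plain arithmetic and can be sanity-checked in throwaway C (values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t movt_val = 0xABCD0000;  /* stale top 16 bits, as after MOVT */
            uint32_t x = 0xDEADBEEF, y = 0x00F0F00F;
            unsigned n;

            /* ORGT/LSLGT trick: for n >= 20 the stale top bits shift out,
             * leaving exactly 1 << n */
            for (n = 20; n < 32; n++)
                    assert(((movt_val | 1u) << n) == (1u << n));

            /* OR/XOR acknowledge: (x | y) ^ y clears y's bits in x */
            assert(((x | y) ^ y) == (x & ~y));
            return 0;
    }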
159 | /* Get the handler using the signal number | ||
160 | * | ||
161 | * D1Ar3 -- The signal number | ||
162 | * D0Re0 -- Address of handler | ||
163 | */ | ||
164 | LSL D0Re0, D1Ar3, #2 | ||
165 | ADD D0Re0,D0Re0,#TBI_fnSigs | ||
166 | GETD D0Re0, [D0Ar6+D0Re0] | ||
167 | |||
168 | /* Tailcall the handler */ | ||
169 | MOV PC,D0Re0 | ||
170 | |||
171 | #endif /* META_BUG_MBN100212 */ | ||
172 | .size ___TBIHandleDFR,.-___TBIHandleDFR | ||
173 | /* | ||
174 | * End of tbidefr.S | ||
175 | */ | ||
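Stripped of the register juggling, both variants of ___TBIHandleDFR dispatch the same way: find the most significant trigger bit, collapse all FPE triggers at or below the 'invalid' position onto a single FPE signal number, then index the handler table in the TBI root block. A rough C sketch, assuming FFB returns the index of the most significant set bit and using illustrative values for TXSTAT_FPE_INVALID_S and TBID_SIGNUM_FPE:

    #include <stdint.h>

    #define FPE_INVALID_S 20  /* illustrative TXSTAT_FPE_INVALID_S */
    #define SIGNUM_FPE     8  /* illustrative TBID_SIGNUM_FPE */

    typedef void (*sig_handler)(void);

    struct tbi_root {
            sig_handler fn_sigs[32];  /* the TBI_fnSigs handler table */
    };

    /* 'triggers' must be non-zero: the handler is only entered when at
     * least one deferred exception is pending. */
    static sig_handler lookup_handler(const struct tbi_root *tbi,
                                      uint32_t triggers)
    {
            int bit = 31 - __builtin_clz(triggers);   /* FFB equivalent */
            if (bit <= FPE_INVALID_S)                 /* collapse FPE bits */
                    bit = FPE_INVALID_S;
            return tbi->fn_sigs[bit - (FPE_INVALID_S - SIGNUM_FPE)];
    }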
diff --git a/arch/metag/tbx/tbidspram.S b/arch/metag/tbx/tbidspram.S new file mode 100644 index 000000000000..2f27c0372212 --- /dev/null +++ b/arch/metag/tbx/tbidspram.S | |||
@@ -0,0 +1,161 @@ | |||
1 | /* | ||
2 | * tbidspram.S | ||
3 | * | ||
4 | * Copyright (C) 2009, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Explicit state save and restore routines forming part of the thread binary | ||
11 | * interface for META processors | ||
12 | */ | ||
13 | |||
14 | .file "tbidspram.S" | ||
15 | |||
16 | /* These aren't generally useful to a user, so for now they aren't publicly available */ | ||
17 | #define _TBIECH_DSPRAM_DUA_S 8 | ||
18 | #define _TBIECH_DSPRAM_DUA_BITS 0x7f00 | ||
19 | #define _TBIECH_DSPRAM_DUB_S 0 | ||
20 | #define _TBIECH_DSPRAM_DUB_BITS 0x007f | ||
21 | |||
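The save/restore routines below each extract their unit's size field from the packed DspramSizes halfword using the shift/mask pairs defined above; the equivalent C decode is:

    #include <stdint.h>

    #define _TBIECH_DSPRAM_DUA_S    8
    #define _TBIECH_DSPRAM_DUA_BITS 0x7f00
    #define _TBIECH_DSPRAM_DUB_S    0
    #define _TBIECH_DSPRAM_DUB_BITS 0x007f

    /* Loop counts for the copy loops below (each pass moves four
     * 64-bit values) */
    static unsigned int dua_size(uint16_t sizes)
    {
            return (sizes & _TBIECH_DSPRAM_DUA_BITS) >> _TBIECH_DSPRAM_DUA_S;
    }

    static unsigned int dub_size(uint16_t sizes)
    {
            return (sizes & _TBIECH_DSPRAM_DUB_BITS) >> _TBIECH_DSPRAM_DUB_S;
    }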
22 | /* | ||
23 | * void *__TBIDspramSaveA( short DspramSizes, void *pExt ) | ||
24 | */ | ||
25 | .text | ||
26 | .balign 4 | ||
27 | .global ___TBIDspramSaveA | ||
28 | .type ___TBIDspramSaveA,function | ||
29 | ___TBIDspramSaveA: | ||
30 | |||
31 | SETL [A0StP++], D0.5, D1.5 | ||
32 | MOV A0.3, D0Ar2 | ||
33 | |||
34 | /* D1Ar1 - Dspram Sizes | ||
35 | * A0.3 - Pointer to buffer | ||
36 | */ | ||
37 | |||
38 | /* Save the specified amount of dspram DUA */ | ||
39 | DL MOV D0AR.0, #0 | ||
40 | LSR D1Ar1, D1Ar1, #_TBIECH_DSPRAM_DUA_S | ||
41 | AND D1Ar1, D1Ar1, #(_TBIECH_DSPRAM_DUA_BITS >> _TBIECH_DSPRAM_DUA_S) | ||
42 | SUB TXRPT, D1Ar1, #1 | ||
43 | $L1: | ||
44 | DL MOV D0Re0, [D0AR.0++] | ||
45 | DL MOV D0Ar6, [D0AR.0++] | ||
46 | DL MOV D0Ar4, [D0AR.0++] | ||
47 | DL MOV D0.5, [D0AR.0++] | ||
48 | MSETL [A0.3++], D0Re0, D0Ar6, D0Ar4, D0.5 | ||
49 | |||
50 | BR $L1 | ||
51 | |||
52 | GETL D0.5, D1.5, [--A0StP] | ||
53 | MOV PC, D1RtP | ||
54 | |||
55 | .size ___TBIDspramSaveA,.-___TBIDspramSaveA | ||
56 | |||
57 | /* | ||
58 | * void *__TBIDspramSaveB( short DspramSizes, void *pExt ) | ||
59 | */ | ||
60 | .balign 4 | ||
61 | .global ___TBIDspramSaveB | ||
62 | .type ___TBIDspramSaveB,function | ||
63 | ___TBIDspramSaveB: | ||
64 | |||
65 | SETL [A0StP++], D0.5, D1.5 | ||
66 | MOV A0.3, D0Ar2 | ||
67 | |||
68 | /* D1Ar1 - Dspram Sizes | ||
69 | * A0.3 - Pointer to buffer | ||
70 | */ | ||
71 | |||
72 | /* Save the specified amount of dspram DUB */ | ||
73 | DL MOV D0BR.0, #0 | ||
74 | LSR D1Ar1, D1Ar1, #_TBIECH_DSPRAM_DUB_S | ||
75 | AND D1Ar1, D1Ar1, #(_TBIECH_DSPRAM_DUB_BITS >> _TBIECH_DSPRAM_DUB_S) | ||
76 | SUB TXRPT, D1Ar1, #1 | ||
77 | $L2: | ||
78 | DL MOV D0Re0, [D0BR.0++] | ||
79 | DL MOV D0Ar6, [D0BR.0++] | ||
80 | DL MOV D0Ar4, [D0BR.0++] | ||
81 | DL MOV D0.5, [D0BR.0++] | ||
82 | MSETL [A0.3++], D0Re0, D0Ar6, D0Ar4, D0.5 | ||
83 | |||
84 | BR $L2 | ||
85 | |||
86 | GETL D0.5, D1.5, [--A0StP] | ||
87 | MOV PC, D1RtP | ||
88 | |||
89 | .size ___TBIDspramSaveB,.-___TBIDspramSaveB | ||
90 | |||
91 | /* | ||
92 | * void *__TBIDspramRestoreA( short DspramSizes, void *pExt ) | ||
93 | */ | ||
94 | .balign 4 | ||
95 | .global ___TBIDspramRestoreA | ||
96 | .type ___TBIDspramRestoreA,function | ||
97 | ___TBIDspramRestoreA: | ||
98 | |||
99 | SETL [A0StP++], D0.5, D1.5 | ||
100 | MOV A0.3, D0Ar2 | ||
101 | |||
102 | /* D1Ar1 - Dspram Sizes | ||
103 | * A0.3 - Pointer to buffer | ||
104 | */ | ||
105 | |||
106 | /* Restore the specified amount of dspram DUA */ | ||
107 | DL MOV D0AW.0, #0 | ||
108 | LSR D1Ar1, D1Ar1, #_TBIECH_DSPRAM_DUA_S | ||
109 | AND D1Ar1, D1Ar1, #(_TBIECH_DSPRAM_DUA_BITS >> _TBIECH_DSPRAM_DUA_S) | ||
110 | SUB TXRPT, D1Ar1, #1 | ||
111 | $L3: | ||
112 | MGETL D0Re0, D0Ar6, D0Ar4, D0.5, [A0.3++] | ||
113 | DL MOV [D0AW.0++], D0Re0 | ||
114 | DL MOV [D0AW.0++], D0Ar6 | ||
115 | DL MOV [D0AW.0++], D0Ar4 | ||
116 | DL MOV [D0AW.0++], D0.5 | ||
117 | |||
118 | BR $L3 | ||
119 | |||
120 | GETL D0.5, D1.5, [--A0StP] | ||
121 | MOV PC, D1RtP | ||
122 | |||
123 | .size ___TBIDspramRestoreA,.-___TBIDspramRestoreA | ||
124 | |||
125 | /* | ||
126 | * void *__TBIDspramRestoreB( short DspramSizes, void *pExt ) | ||
127 | */ | ||
128 | .balign 4 | ||
129 | .global ___TBIDspramRestoreB | ||
130 | .type ___TBIDspramRestoreB,function | ||
131 | ___TBIDspramRestoreB: | ||
132 | |||
133 | SETL [A0StP++], D0.5, D1.5 | ||
134 | MOV A0.3, D0Ar2 | ||
135 | |||
136 | /* D1Ar1 - Dspram Sizes | ||
137 | * A0.3 - Pointer to buffer | ||
138 | */ | ||
139 | |||
140 | /* Restore the specified amount of dspram DUB */ | ||
141 | DL MOV D0BW.0, #0 | ||
142 | LSR D1Ar1, D1Ar1, #_TBIECH_DSPRAM_DUB_S | ||
143 | AND D1Ar1, D1Ar1, #(_TBIECH_DSPRAM_DUB_BITS >> _TBIECH_DSPRAM_DUB_S) | ||
144 | SUB TXRPT, D1Ar1, #1 | ||
145 | $L4: | ||
146 | MGETL D0Re0, D0Ar6, D0Ar4, D0.5, [A0.3++] | ||
147 | DL MOV [D0BW.0++], D0Re0 | ||
148 | DL MOV [D0BW.0++], D0Ar6 | ||
149 | DL MOV [D0BW.0++], D0Ar4 | ||
150 | DL MOV [D0BW.0++], D0.5 | ||
151 | |||
152 | BR $L4 | ||
153 | |||
154 | GETL D0.5, D1.5, [--A0StP] | ||
155 | MOV PC, D1RtP | ||
156 | |||
157 | .size ___TBIDspramRestoreB,.-___TBIDspramRestoreB | ||
158 | |||
159 | /* | ||
160 | * End of tbidspram.S | ||
161 | */ | ||
diff --git a/arch/metag/tbx/tbilogf.S b/arch/metag/tbx/tbilogf.S new file mode 100644 index 000000000000..4a34d80657db --- /dev/null +++ b/arch/metag/tbx/tbilogf.S | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * tbilogf.S | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Defines the __TBILogF trap code for debugging messages and __TBICont, on | ||
11 | * which debug asserts can be implemented. | ||
12 | */ | ||
13 | |||
14 | .file "tbilogf.S" | ||
15 | |||
16 | /* | ||
17 | * Perform console printf using external debugger or host support | ||
18 | */ | ||
19 | .text | ||
20 | .balign 4 | ||
21 | .global ___TBILogF | ||
22 | .type ___TBILogF,function | ||
23 | ___TBILogF: | ||
24 | MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2 | ||
25 | SWITCH #0xC10020 | ||
26 | MOV D0Re0,#0 | ||
27 | SUB A0StP,A0StP,#24 | ||
28 | MOV PC,D1RtP | ||
29 | .size ___TBILogF,.-___TBILogF | ||
30 | |||
31 | /* | ||
32 | * Perform wait for continue under control of the debugger | ||
33 | */ | ||
34 | .text | ||
35 | .balign 4 | ||
36 | .global ___TBICont | ||
37 | .type ___TBICont,function | ||
38 | ___TBICont: | ||
39 | MOV D0Ar6,#1 | ||
40 | MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2 | ||
41 | SWITCH #0xC30006 /* Returns if we are to continue */ | ||
42 | SUB A0StP,A0StP,#(8*3) | ||
43 | MOV PC,D1RtP /* Return */ | ||
44 | .size ___TBICont,.-___TBICont | ||
45 | |||
46 | /* | ||
47 | * End of tbilogf.S | ||
48 | */ | ||
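___TBILogF pushes its first six argument registers (three 64-bit pairs, hence the 24-byte stack adjustment) and issues a SWITCH trap that an attached debugger interprets as a printf-style logging request. Assuming the printf-like prototype the comment implies (the real declaration lives in the TBX headers, not shown in this hunk), a call would look like:

    /* Hypothetical prototype -- the real declaration is in the TBX headers */
    extern int __TBILogF(const char *fmt, ...);

    static void report_boot(int thread)
    {
            __TBILogF("boot: thread %d ready\n", thread);
    }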
diff --git a/arch/metag/tbx/tbipcx.S b/arch/metag/tbx/tbipcx.S new file mode 100644 index 000000000000..de0626fdad25 --- /dev/null +++ b/arch/metag/tbx/tbipcx.S | |||
@@ -0,0 +1,451 @@ | |||
1 | /* | ||
2 | * tbipcx.S | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002, 2007, 2009, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Asynchronous trigger handling including exceptions | ||
11 | */ | ||
12 | |||
13 | .file "tbipcx.S" | ||
14 | #include <asm/metag_regs.h> | ||
15 | #include <asm/tbx.h> | ||
16 | |||
17 | /* BEGIN HACK */ | ||
18 | /* Define these for now while doing the initial conversion to GAS; | ||
19 | will fix properly later */ | ||
20 | |||
21 | /* Signal identifiers always have the TBID_SIGNAL_BIT set and contain the | ||
22 | following related bit-fields */ | ||
23 | #define TBID_SIGNUM_S 2 | ||
24 | |||
25 | /* END HACK */ | ||
26 | |||
27 | #ifdef METAC_1_0 | ||
28 | /* Ax.4 is saved in TBICTX */ | ||
29 | #define A0_4 ,A0.4 | ||
30 | #else | ||
31 | /* Ax.4 is NOT saved in TBICTX */ | ||
32 | #define A0_4 | ||
33 | #endif | ||
34 | |||
35 | /* Size of the TBICTX structure */ | ||
36 | #define TBICTX_BYTES ((TBICTX_AX_REGS*8)+TBICTX_AX) | ||
37 | |||
38 | #ifdef METAC_1_1 | ||
39 | #ifndef BOOTROM | ||
40 | #ifndef SPECIAL_BUILD | ||
41 | /* Jump straight into the boot ROM version of this code */ | ||
42 | #define CODE_USES_BOOTROM | ||
43 | #endif | ||
44 | #endif | ||
45 | #endif | ||
46 | |||
47 | /* Define space needed for CATCH buffer state in traditional units */ | ||
48 | #define CATCH_ENTRIES 5 | ||
49 | #define CATCH_ENTRY_BYTES 16 | ||
50 | |||
51 | #ifndef CODE_USES_BOOTROM | ||
52 | #define A0GblIStP A0.15 /* PTBICTX for current thread in PRIV system */ | ||
53 | #define A1GblIGbP A1.15 /* Interrupt A1GbP value in PRIV system */ | ||
54 | #endif | ||
55 | |||
56 | /* | ||
57 | * TBIRES __TBIASyncTrigger( TBIRES State ) | ||
58 | */ | ||
59 | .text | ||
60 | .balign 4 | ||
61 | .global ___TBIASyncTrigger | ||
62 | .type ___TBIASyncTrigger,function | ||
63 | ___TBIASyncTrigger: | ||
64 | #ifdef CODE_USES_BOOTROM | ||
65 | MOVT D0Re0,#HI(LINCORE_BASE) | ||
66 | JUMP D0Re0,#0xA0 | ||
67 | #else | ||
68 | MOV D0FrT,A0FrP /* Boing entry sequence */ | ||
69 | ADD A0FrP,A0StP,#0 | ||
70 | SETL [A0StP++],D0FrT,D1RtP | ||
71 | MOV D0Re0,PCX /* Check for repeat call */ | ||
72 | MOVT D0FrT,#HI(___TBIBoingRTI+4) | ||
73 | ADD D0FrT,D0FrT,#LO(___TBIBoingRTI+4) | ||
74 | CMP D0Re0,D0FrT | ||
75 | BEQ ___TBIBoingExit /* Already set up - come out */ | ||
76 | ADD D1Ar1,D1Ar1,#7 /* PRIV system stack here */ | ||
77 | MOV A0.2,A0StP /* else push context here */ | ||
78 | MOVS D0Re0,D0Ar2 /* Return in user mode? */ | ||
79 | ANDMB D1Ar1,D1Ar1,#0xfff8 /* align priv stack to 64-bit */ | ||
80 | MOV D1Re0,D1Ar1 /* and set result to arg */ | ||
81 | MOVMI A0.2,D1Ar1 /* use priv stack if PRIV set */ | ||
82 | /* | ||
83 | * Generate an initial TBICTX to return to our own current call context | ||
84 | */ | ||
85 | MOVT D1Ar5,#HI(___TBIBoingExit) /* Go here to return */ | ||
86 | ADD D1Ar5,D1Ar5,#LO(___TBIBoingExit) | ||
87 | ADD A0.3,A0.2,#TBICTX_DX /* DX Save area */ | ||
88 | ANDT D0Ar2,D0Ar2,#TBICTX_PRIV_BIT /* Extract PRIV bit */ | ||
89 | MOVT D0Ar6,#TBICTX_SOFT_BIT /* Only soft thread state */ | ||
90 | ADD D0Ar6,D0Ar6,D0Ar2 /* Add in PRIV bit if requested */ | ||
91 | SETL [A0.2],D0Ar6,D1Ar5 /* Push header fields */ | ||
92 | ADD D0FrT,A0.2,#TBICTX_AX /* Address AX save area */ | ||
93 | MSETL [A0.3],D0Re0,D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7 | ||
94 | MOV D0Ar6,#0 | ||
95 | MOV D1Ar5,#0 | ||
96 | SETL [A0.3++],D0Ar6,D1Ar5 /* Zero CT register states */ | ||
97 | SETL [A0.3++],D0Ar6,D1Ar5 | ||
98 | MSETL [D0FrT],A0StP,A0FrP,A0.2,A0.3 A0_4 /* Save AX regs */ | ||
99 | MOV A0FrP,A0.2 /* Restore me! */ | ||
100 | B ___TBIResume | ||
101 | .size ___TBIASyncTrigger,.-___TBIASyncTrigger | ||
102 | |||
103 | /* | ||
104 | * Optimised return to handler for META Core | ||
105 | */ | ||
106 | ___TBIBoingRTH: | ||
107 | RTH /* Go to background level */ | ||
108 | MOVT A0.2, #HI($Lpcx_target) | ||
109 | ADD A0.2,A0.2,#LO($Lpcx_target) | ||
110 | MOV PCX,A0.2 /* Setup PCX for interrupts */ | ||
111 | MOV PC,D1Re0 /* Jump to handler */ | ||
112 | /* | ||
113 | * This is where the code below needs to jump to wait for outermost interrupt | ||
114 | * event in a non-privilege mode system (single shared interrupt stack). | ||
115 | */ | ||
116 | ___TBIBoingPCX: | ||
117 | MGETL A0StP,A0FrP,A0.2,A0.3 A0_4,[D1Re0] /* Restore AX regs */ | ||
118 | MOV TXSTATUS,D0Re0 /* Restore flags */ | ||
119 | GETL D0Re0,D1Re0,[D1Re0+#TBICTX_DX-TBICTX_BYTES] | ||
120 | ___TBIBoingRTI: | ||
121 | RTI /* Wait for interrupt */ | ||
122 | $Lpcx_target: | ||
123 | /* | ||
124 | * Save initial interrupt state on current stack | ||
125 | */ | ||
126 | SETL [A0StP+#TBICTX_DX],D0Re0,D1Re0 /* Save key registers */ | ||
127 | ADD D1Re0,A0StP,#TBICTX_AX /* Address AX save area */ | ||
128 | MOV D0Re0,TXSTATUS /* Read TXSTATUS into D0Re0 */ | ||
129 | MOV TXSTATUS,#0 /* Clear TXSTATUS */ | ||
130 | MSETL [D1Re0],A0StP,A0FrP,A0.2,A0.3 A0_4 /* Save AX critical regs */ | ||
131 | /* | ||
132 | * Register state at this point is- | ||
133 | * | ||
134 | * D0Re0 - Old TXSTATUS with PRIV and CBUF bits set if appropriate | ||
135 | * A0StP - Is call stack frame and base of TBICTX being generated | ||
136 | * A1GbP - Is valid static access link | ||
137 | */ | ||
138 | ___TBIBoing: | ||
139 | LOCK0 /* Make sure we have no locks! */ | ||
140 | ADD A1.2,A0StP,#TBICTX_DX+(8*1) /* Address DX.1 save area */ | ||
141 | MOV A0FrP,A0StP /* Setup frame pointer */ | ||
142 | MSETL [A1.2],D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7 | ||
143 | MOV D0Ar4,TXRPT /* Save critical CT regs */ | ||
144 | MOV D1Ar3,TXBPOBITS | ||
145 | MOV D1Ar1,TXDIVTIME /* Calc catch buffer pSrc */ | ||
146 | MOV D0Ar2,TXMODE | ||
147 | MOV TXMODE,#0 /* Clear TXMODE */ | ||
148 | #ifdef TXDIVTIME_RPDIRTY_BIT | ||
149 | TSTT D1Ar1,#HI(TXDIVTIME_RPDIRTY_BIT)/* NZ = RPDIRTY */ | ||
150 | MOVT D0Ar6,#TBICTX_CBRP_BIT | ||
151 | ORNZ D0Re0,D0Re0,D0Ar6 /* Set CBRP if RPDIRTY set */ | ||
152 | #endif | ||
153 | MSETL [A1.2],D0Ar4,D0Ar2 /* Save CT regs state */ | ||
154 | MOV D0Ar2,D0Re0 /* Copy TXSTATUS */ | ||
155 | ANDMT D0Ar2,D0Ar2,#TBICTX_CBUF_BIT+TBICTX_CBRP_BIT | ||
156 | #ifdef TBI_1_4 | ||
157 | MOVT D1Ar1,#TBICTX_FPAC_BIT /* Copy FPActive into FPAC */ | ||
158 | TSTT D0Re0,#HI(TXSTATUS_FPACTIVE_BIT) | ||
159 | ORNZ D0Ar2,D0Ar2,D1Ar1 | ||
160 | #endif | ||
161 | MOV D1Ar1,PCX /* Read CurrPC */ | ||
162 | ORT D0Ar2,D0Ar2,#TBICTX_CRIT_BIT /* SaveMask + CRIT bit */ | ||
163 | SETL [A0FrP+#TBICTX_Flags],D0Ar2,D1Ar1 /* Set pCtx header fields */ | ||
164 | /* | ||
165 | * Completed context save, now we need to make a call to an interrupt handler | ||
166 | * | ||
167 | * D0Re0 - holds PRIV, WAIT, CBUF flags, HALT reason if appropriate | ||
168 | * A0FrP - interrupt stack frame and base of TBICTX being generated | ||
169 | * A0StP - same as A0FrP | ||
170 | */ | ||
171 | ___TBIBoingWait: | ||
172 | /* Reserve space for TBICTX and CBUF */ | ||
173 | ADD A0StP,A0StP,#TBICTX_BYTES+(CATCH_ENTRY_BYTES*CATCH_ENTRIES) | ||
174 | MOV D0Ar4,TXSTATI /* Read the Triggers data */ | ||
175 | MOV D1Ar3,TXDIVTIME /* Read IRQEnc bits */ | ||
176 | MOV D0Ar2,D0Re0 /* Copy PRIV and WAIT flags */ | ||
177 | ANDT D0Ar2,D0Ar2,#TBICTX_PRIV_BIT+TBICTX_WAIT_BIT+TBICTX_CBUF_BIT | ||
178 | #ifdef TBI_1_4 | ||
179 | MOVT D1Ar5,#TBICTX_FPAC_BIT /* Copy FPActive into FPAC */ | ||
180 | TSTT D0Re0,#HI(TXSTATUS_FPACTIVE_BIT) | ||
181 | ORNZ D0Ar2,D0Ar2,D1Ar5 | ||
182 | #endif | ||
183 | ANDT D1Ar3,D1Ar3,#HI(TXDIVTIME_IRQENC_BITS) | ||
184 | LSR D1Ar3,D1Ar3,#TXDIVTIME_IRQENC_S | ||
185 | AND TXSTATI,D0Ar4,#TXSTATI_BGNDHALT_BIT/* Ack any HALT seen */ | ||
186 | ANDS D0Ar4,D0Ar4,#0xFFFF-TXSTATI_BGNDHALT_BIT /* Only seen HALT? */ | ||
187 | ORT D0Ar2,D0Ar2,#TBICTX_CRIT_BIT /* Set CRIT */ | ||
188 | #ifndef BOOTROM | ||
189 | MOVT A1LbP,#HI(___pTBIs) | ||
190 | ADD A1LbP,A1LbP,#LO(___pTBIs) | ||
191 | GETL D1Ar5,D0Ar6,[A1LbP] /* D0Ar6 = ___pTBIs[1] */ | ||
192 | #else | ||
193 | /* | ||
194 | * For BOOTROM support ___pTBIs must be allocated at offset 0 vs A1GbP | ||
195 | */ | ||
196 | GETL D1Ar5,D0Ar6,[A1GbP] /* D0Ar6 = ___pTBIs[1] */ | ||
197 | #endif | ||
198 | BZ ___TBIBoingHalt /* Yes: Service HALT */ | ||
199 | /* | ||
200 | * Encode interrupt as signal vector, strip away same/lower TXMASKI bits | ||
201 | */ | ||
202 | MOV D1Ar1,#1 /* Generate mask for this bit */ | ||
203 | MOV D0Re0,TXMASKI /* Get interrupt mask */ | ||
204 | LSL TXSTATI,D1Ar1,D1Ar3 /* Acknowledge trigger */ | ||
205 | AND TXMASKI,D0Re0,#TXSTATI_BGNDHALT_BIT /* Only allow HALTs */ | ||
206 | OR D0Ar2,D0Ar2,D0Re0 /* Set TBIRES.Sig.TrigMask */ | ||
207 | ADD D1Ar3,D1Ar3,#TBID_SIGNUM_TRT /* Offset into interrupt sigs */ | ||
208 | LSL D0Re0,D1Ar3,#TBID_SIGNUM_S /* Generate offset from SigNum */ | ||
209 | /* | ||
210 | * This is a key moment: we are about to call the handler. Register state is | ||
211 | * as follows- | ||
212 | * | ||
213 | * D0Re0 - Handler vector (SigNum<<TBID_SIGNUM_S) | ||
214 | * D0Ar2 - TXMASKI:TBICTX_CRIT_BIT with optional CBUF and PRIV bits | ||
215 | * D1Ar3 - SigNum | ||
216 | * D0Ar4 - State read from TXSTATI | ||
217 | * D1Ar5 - Inst for SWITCH trigger case only, otherwise undefined | ||
218 | * D0Ar6 - pTBI | ||
219 | */ | ||
220 | ___TBIBoingVec: | ||
221 | ADD D0Re0,D0Re0,#TBI_fnSigs /* Offset into signal table */ | ||
222 | GETD D1Re0,[D0Ar6+D0Re0] /* Get address for Handler */ | ||
223 | /* | ||
224 | * Call handler at interrupt level, when it returns simply resume execution | ||
225 | * of state indicated by D1Re0. | ||
226 | */ | ||
227 | MOV D1Ar1,A0FrP /* Pass in pCtx */ | ||
228 | CALLR D1RtP,___TBIBoingRTH /* Use RTH to invoke handler */ | ||
229 | |||
230 | /* | ||
231 | * Perform critical state restore and execute background thread. | ||
232 | * | ||
233 | * A0FrP - is pointer to TBICTX structure to resume | ||
234 | * D0Re0 - contains additional TXMASKI triggers | ||
235 | */ | ||
236 | .text | ||
237 | .balign 4 | ||
238 | #ifdef BOOTROM | ||
239 | .global ___TBIResume | ||
240 | #endif | ||
241 | ___TBIResume: | ||
242 | /* | ||
243 | * New META IP method | ||
244 | */ | ||
245 | RTH /* Go to interrupt level */ | ||
246 | MOV D0Ar4,TXMASKI /* Read TXMASKI */ | ||
247 | OR TXMASKI,D0Ar4,D0Re0 /* -Write-Modify TXMASKI */ | ||
248 | GETL D0Re0,D1Re0,[A0FrP+#TBICTX_Flags]/* Get Flags:SaveMask, CurrPC */ | ||
249 | MOV A0StP,A0FrP /* Position stack pointer */ | ||
250 | MOV D0Ar2,TXPOLLI /* Read pending triggers */ | ||
251 | MOV PCX,D1Re0 /* Set resumption PC */ | ||
252 | TST D0Ar2,#0xFFFF /* Any pending triggers? */ | ||
253 | BNZ ___TBIBoingWait /* Yes: Go for triggers */ | ||
254 | TSTT D0Re0,#TBICTX_WAIT_BIT /* Do we WAIT anyway? */ | ||
255 | BNZ ___TBIBoingWait /* Yes: Go for triggers */ | ||
256 | LSLS D1Ar5,D0Re0,#1 /* Test XCBF (MI) & PRIV (CS)? */ | ||
257 | ADD D1Re0,A0FrP,#TBICTX_CurrRPT /* Address CT save area */ | ||
258 | ADD A0StP,A0FrP,#TBICTX_DX+(8*1) /* Address DX.1 save area */ | ||
259 | MGETL A0.2,A0.3,[D1Re0] /* Get CT reg states */ | ||
260 | MOV D1Ar3,A1.3 /* Copy old TXDIVTIME */ | ||
261 | BPL ___TBIResCrit /* No: Skip logic */ | ||
262 | ADD D0Ar4,A0FrP,#TBICTX_BYTES /* Source is after TBICTX */ | ||
263 | ANDST D1Ar3,D1Ar3,#HI(TXDIVTIME_RPMASK_BITS)/* !Z if RPDIRTY */ | ||
264 | MGETL D0.5,D0.6,[D0Ar4] /* Read Catch state */ | ||
265 | MOV TXCATCH0,D0.5 /* Restore TXCATCHn */ | ||
266 | MOV TXCATCH1,D1.5 | ||
267 | MOV TXCATCH2,D0.6 | ||
268 | MOV TXCATCH3,D1.6 | ||
269 | BZ ___TBIResCrit | ||
270 | MOV D0Ar2,#(1*8) | ||
271 | LSRS D1Ar3,D1Ar3,#TXDIVTIME_RPMASK_S+1 /* 2nd RPMASK bit -> bit 0 */ | ||
272 | ADD RA,D0Ar4,#(0*8) /* Re-read read pipeline */ | ||
273 | ADDNZ RA,D0Ar4,D0Ar2 /* If Bit 0 set issue RA */ | ||
274 | LSRS D1Ar3,D1Ar3,#2 /* Bit 1 -> C, Bit 2 -> Bit 0 */ | ||
275 | ADD D0Ar2,D0Ar2,#8 | ||
276 | ADDCS RA,D0Ar4,D0Ar2 /* If C issue RA */ | ||
277 | ADD D0Ar2,D0Ar2,#8 | ||
278 | ADDNZ RA,D0Ar4,D0Ar2 /* If Bit 0 set issue RA */ | ||
279 | LSRS D1Ar3,D1Ar3,#2 /* Bit 1 -> C, Bit 2 -> Bit 0 */ | ||
280 | ADD D0Ar2,D0Ar2,#8 | ||
281 | ADDCS RA,D0Ar4,D0Ar2 /* If C issue RA */ | ||
282 | ADD D0Ar2,D0Ar2,#8 | ||
283 | ADDNZ RA,D0Ar4,D0Ar2 /* If Bit 0 set issue RA */ | ||
284 | MOV TXDIVTIME,A1.3 /* Set RPDIRTY again */ | ||
285 | ___TBIResCrit: | ||
286 | LSLS D1Ar5,D0Re0,#1 /* Test XCBF (MI) & PRIV (CS)? */ | ||
287 | #ifdef TBI_1_4 | ||
288 | ANDT D1Ar5,D1Ar5,#(TBICTX_FPAC_BIT*2) | ||
289 | LSL D0Ar6,D1Ar5,#3 /* Convert FPAC into FPACTIVE */ | ||
290 | #endif | ||
291 | ANDMT D0Re0,D0Re0,#TBICTX_CBUF_BIT /* Keep CBUF bit from SaveMask */ | ||
292 | #ifdef TBI_1_4 | ||
293 | OR D0Re0,D0Re0,D0Ar6 /* Combine FPACTIVE with others */ | ||
294 | #endif | ||
295 | MGETL D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7,[A0StP] /* Restore DX */ | ||
296 | MOV TXRPT,A0.2 /* Restore CT regs */ | ||
297 | MOV TXBPOBITS,A1.2 | ||
298 | MOV TXMODE,A0.3 | ||
299 | BCC ___TBIBoingPCX /* Do non-PRIV wait! */ | ||
300 | MOV A1GblIGbP,A1GbP /* Save A1GbP too */ | ||
301 | MGETL A0StP,A0FrP,A0.2,A0.3 A0_4,[D1Re0] /* Restore AX regs */ | ||
302 | /* | ||
303 | * Wait for the first interrupt/exception trigger in a privilege mode system | ||
304 | * (interrupt stack area for current TASK to be pointed to by A0GblIStP | ||
305 | * or per_cpu__stack_save[hwthread_id]). | ||
306 | */ | ||
307 | MOV TXSTATUS,D0Re0 /* Restore flags */ | ||
308 | MOV D0Re0,TXPRIVEXT /* Set TXPRIVEXT_TXTOGGLEI_BIT */ | ||
309 | SUB D1Re0,D1Re0,#TBICTX_BYTES /* TBICTX is top of int stack */ | ||
310 | #ifdef TBX_PERCPU_SP_SAVE | ||
311 | SWAP D1Ar3,A1GbP | ||
312 | MOV D1Ar3,TXENABLE /* Which thread are we? */ | ||
313 | AND D1Ar3,D1Ar3,#TXENABLE_THREAD_BITS | ||
314 | LSR D1Ar3,D1Ar3,#TXENABLE_THREAD_S-2 | ||
315 | ADDT D1Ar3,D1Ar3,#HI(_per_cpu__stack_save) | ||
316 | ADD D1Ar3,D1Ar3,#LO(_per_cpu__stack_save) | ||
317 | SETD [D1Ar3],D1Re0 | ||
318 | SWAP D1Ar3,A1GbP | ||
319 | #else | ||
320 | MOV A0GblIStP, D1Re0 | ||
321 | #endif | ||
322 | OR D0Re0,D0Re0,#TXPRIVEXT_TXTOGGLEI_BIT | ||
323 | MOV TXPRIVEXT,D0Re0 /* Cannot set TXPRIVEXT if !priv */ | ||
324 | GETL D0Re0,D1Re0,[D1Re0+#TBICTX_DX] | ||
325 | RTI /* Wait for interrupt */ | ||
326 | /* | ||
327 | * Save initial interrupt state on A0GblIStP, switch to A0GblIStP if | ||
328 | * BOOTROM code, save and switch to [A1GbP] otherwise. | ||
329 | */ | ||
330 | ___TBIBoingPCXP: | ||
331 | #ifdef TBX_PERCPU_SP_SAVE | ||
332 | SWAP D1Ar3,A1GbP /* Get PRIV stack base */ | ||
333 | MOV D1Ar3,TXENABLE /* Which thread are we? */ | ||
334 | AND D1Ar3,D1Ar3,#TXENABLE_THREAD_BITS | ||
335 | LSR D1Ar3,D1Ar3,#TXENABLE_THREAD_S-2 | ||
336 | ADDT D1Ar3,D1Ar3,#HI(_per_cpu__stack_save) | ||
337 | ADD D1Ar3,D1Ar3,#LO(_per_cpu__stack_save) | ||
338 | GETD D1Ar3,[D1Ar3] | ||
339 | #else | ||
340 | SWAP D1Ar3,A0GblIStP /* Get PRIV stack base */ | ||
341 | #endif | ||
342 | SETL [D1Ar3+#TBICTX_DX],D0Re0,D1Re0 /* Save key registers */ | ||
343 | MOV D0Re0,TXPRIVEXT /* Clear TXPRIVEXT_TXTOGGLEI_BIT */ | ||
344 | ADD D1Re0,D1Ar3,#TBICTX_AX /* Address AX save area */ | ||
345 | ANDMB D0Re0,D0Re0,#0xFFFF-TXPRIVEXT_TXTOGGLEI_BIT | ||
346 | MOV TXPRIVEXT,D0Re0 /* Cannot set TXPRIVEXT if !priv */ | ||
347 | MOV D0Re0,TXSTATUS /* Read TXSTATUS into D0Re0 */ | ||
348 | MOV TXSTATUS,#0 /* Clear TXSTATUS */ | ||
349 | MSETL [D1Re0],A0StP,A0FrP,A0.2,A0.3 A0_4 /* Save AX critical regs */ | ||
350 | MOV A0StP,D1Ar3 /* Switch stacks */ | ||
351 | #ifdef TBX_PERCPU_SP_SAVE | ||
352 | MOV D1Ar3,A1GbP /* Get D1Ar2 back */ | ||
353 | #else | ||
354 | MOV D1Ar3,A0GblIStP /* Get D1Ar2 back */ | ||
355 | #endif | ||
356 | ORT D0Re0,D0Re0,#TBICTX_PRIV_BIT /* Add PRIV to TXSTATUS */ | ||
357 | MOV A1GbP,A1GblIGbP /* Restore A1GbP */ | ||
358 | B ___TBIBoing /* Enter common handler code */ | ||
359 | /* | ||
360 | * At this point we know it's a background HALT case we are handling. | ||
361 | * The restored TXSTATUS always needs to have zero in the reason bits. | ||
362 | */ | ||
363 | ___TBIBoingHalt: | ||
364 | MOV D0Ar4,TXMASKI /* Get interrupt mask */ | ||
365 | ANDST D0Re0,D0Re0,#HI(TXSTATUS_MAJOR_HALT_BITS+TXSTATUS_MEM_FAULT_BITS) | ||
366 | AND TXMASKI,D0Ar4,#TXSTATI_BGNDHALT_BIT /* Only allow HALTs */ | ||
367 | AND D0Ar4,D0Ar4,#0xFFFF-TXSTATI_BGNDHALT_BIT /* What ints are off? */ | ||
368 | OR D0Ar2,D0Ar2,D0Ar4 /* Set TBIRES.Sig.TrigMask */ | ||
369 | MOV D0Ar4,#TXSTATI_BGNDHALT_BIT /* This was the trigger state */ | ||
370 | LSR D1Ar3,D0Re0,#TXSTATUS_MAJOR_HALT_S | ||
371 | MOV D0Re0,#TBID_SIGNUM_XXF<<TBID_SIGNUM_S | ||
372 | BNZ ___TBIBoingVec /* Jump to XXF exception handler */ | ||
373 | /* | ||
374 | * Only the SWITCH cases are left, PCX must be valid | ||
375 | */ | ||
376 | #ifdef TBI_1_4 | ||
377 | MOV D1Ar5,TXPRIVEXT | ||
378 | TST D1Ar5,#TXPRIVEXT_MINIMON_BIT | ||
379 | LSR D1Ar3,D1Ar1,#1 /* Shift needed for MINIM paths (fill stall) */ | ||
380 | BZ $Lmeta /* If META only, skip */ | ||
381 | TSTT D1Ar1,#HI(0x00800000) | ||
382 | ANDMT D1Ar3,D1Ar3,#HI(0x007FFFFF >> 1)/* Shifted mask for large MINIM */ | ||
383 | ANDT D1Ar1,D1Ar1,#HI(0xFFE00000) /* Static mask for small MINIM */ | ||
384 | BZ $Llarge_minim /* If large MINIM */ | ||
385 | $Lsmall_minim: | ||
386 | TSTT D1Ar3,#HI(0x00100000 >> 1) | ||
387 | ANDMT D1Ar3,D1Ar3,#HI(0x001FFFFF >> 1)/* Correct shifted mask for large MINIM */ | ||
388 | ADDZ D1Ar1,D1Ar1,D1Ar3 /* If META rgn, add twice to undo LSR #1 */ | ||
389 | B $Lrecombine | ||
390 | $Llarge_minim: | ||
391 | ANDST D1Ar1,D1Ar1,#HI(0xFF800000) /* Correct static mask for small MINIM */ | ||
392 | /* Z=0 (Cannot place code at NULL) */ | ||
393 | $Lrecombine: | ||
394 | ADD D1Ar1,D1Ar1,D1Ar3 /* Combine static and shifted parts */ | ||
395 | $Lmeta: | ||
396 | GETW D1Ar5,[D1Ar1++] /* META: lo-16, MINIM: lo-16 (all-16 if short) */ | ||
397 | GETW D1Ar3,[D1Ar1] /* META: hi-16, MINIM: hi-16 (only if long) */ | ||
398 | MOV D1Re0,D1Ar5 | ||
399 | XOR D1Re0,D1Re0,#0x4000 | ||
400 | LSLSNZ D1Re0,D1Re0,#(32-14) /* MINIM: If long C=0, if short C=1 */ | ||
401 | LSLCC D1Ar3,D1Ar3,#16 /* META/MINIM long: Move hi-16 up */ | ||
402 | LSLCS D1Ar3,D1Ar5,#16 /* MINIM short: Dup all-16 */ | ||
403 | ADD D1Ar5,D1Ar5,D1Ar3 /* ALL: Combine both 16-bit parts */ | ||
404 | #else | ||
405 | GETD D1Ar5,[D1Ar1] /* Read instruction for switch */ | ||
406 | #endif | ||
407 | LSR D1Ar3,D1Ar5,#22 /* Convert into signal number */ | ||
408 | AND D1Ar3,D1Ar3,#TBID_SIGNUM_SW3-TBID_SIGNUM_SW0 | ||
409 | LSL D0Re0,D1Ar3,#TBID_SIGNUM_S /* Generate offset from SigNum */ | ||
410 | B ___TBIBoingVec /* Jump to switch handler */ | ||
411 | /* | ||
412 | * Exit from TBIASyncTrigger call | ||
413 | */ | ||
414 | ___TBIBoingExit: | ||
415 | GETL D0FrT,D1RtP,[A0FrP++] /* Restore state from frame */ | ||
416 | SUB A0StP,A0FrP,#8 /* Unwind stack */ | ||
417 | MOV A0FrP,D0FrT /* Last memory read completes */ | ||
418 | MOV PC,D1RtP /* Return to caller */ | ||
419 | #endif /* ifdef CODE_USES_BOOTROM */ | ||
420 | .size ___TBIResume,.-___TBIResume | ||
421 | |||
422 | #ifndef BOOTROM | ||
423 | /* | ||
424 | * void __TBIASyncResume( TBIRES State ) | ||
425 | */ | ||
426 | .text | ||
427 | .balign 4 | ||
428 | .global ___TBIASyncResume | ||
429 | .type ___TBIASyncResume,function | ||
430 | ___TBIASyncResume: | ||
431 | /* | ||
432 | * Perform CRIT|SOFT state restore and execute background thread. | ||
433 | */ | ||
434 | MOV D1Ar3,D1Ar1 /* Restore this context */ | ||
435 | MOV D0Re0,D0Ar2 /* Carry in additional triggers */ | ||
436 | /* Reserve space for TBICTX */ | ||
437 | ADD D1Ar3,D1Ar3,#TBICTX_BYTES+(CATCH_ENTRY_BYTES*CATCH_ENTRIES) | ||
438 | MOV A0StP,D1Ar3 /* Enter with protection of */ | ||
439 | MOV A0FrP,D1Ar1 /* TBICTX on our stack */ | ||
440 | #ifdef CODE_USES_BOOTROM | ||
441 | MOVT D1Ar1,#HI(LINCORE_BASE) | ||
442 | JUMP D1Ar1,#0xA4 | ||
443 | #else | ||
444 | B ___TBIResume | ||
445 | #endif | ||
446 | .size ___TBIASyncResume,.-___TBIASyncResume | ||
447 | #endif /* ifndef BOOTROM */ | ||
448 | |||
449 | /* | ||
450 | * End of tbipcx.S | ||
451 | */ | ||
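In the TBX_PERCPU_SP_SAVE paths above, the interrupt stack pointer is parked in a per-hardware-thread slot of _per_cpu__stack_save; shifting the TXENABLE thread field right by TXENABLE_THREAD_S-2 turns it directly into a 32-bit-word offset. The same indexing in C, with an illustrative field position:

    #include <stdint.h>

    #define TXENABLE_THREAD_S    8                        /* illustrative */
    #define TXENABLE_THREAD_BITS (0x3u << TXENABLE_THREAD_S)

    extern uint32_t per_cpu_stack_save[];  /* one saved SP per hw thread */

    static void save_int_stack(uint32_t txenable, uint32_t sp)
    {
            unsigned int thread =
                    (txenable & TXENABLE_THREAD_BITS) >> TXENABLE_THREAD_S;
            per_cpu_stack_save[thread] = sp;
    }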
diff --git a/arch/metag/tbx/tbiroot.S b/arch/metag/tbx/tbiroot.S new file mode 100644 index 000000000000..7d84daf1340b --- /dev/null +++ b/arch/metag/tbx/tbiroot.S | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | * tbiroot.S | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Module that creates a TBI Root Block, returned via the ___TBI function, for | ||
11 | * interrupt and background processing on the current thread. | ||
12 | */ | ||
13 | |||
14 | .file "tbiroot.S" | ||
15 | #include <asm/metag_regs.h> | ||
16 | |||
17 | /* | ||
18 | * Get data structures and defines from the TBI C header | ||
19 | */ | ||
20 | #include <asm/tbx.h> | ||
21 | |||
22 | |||
23 | /* If signals need to be exchanged we must create a TBI Root Block */ | ||
24 | |||
25 | .data | ||
26 | .balign 8 | ||
27 | .global ___pTBIs | ||
28 | .type ___pTBIs,object | ||
29 | ___pTBIs: | ||
30 | .long 0 /* Bgnd+Int root block ptrs */ | ||
31 | .long 0 | ||
32 | .size ___pTBIs,.-___pTBIs | ||
33 | |||
34 | |||
35 | /* | ||
36 | * Return ___pTBIs value specific to execution level with promotion/demotion | ||
37 | * | ||
38 | * Register Usage: D1Ar1 is Id, D0Re0 is the primary result | ||
39 | * D1Re0 is secondary result (___pTBIs for other exec level) | ||
40 | */ | ||
41 | .text | ||
42 | .balign 4 | ||
43 | .global ___TBI | ||
44 | .type ___TBI,function | ||
45 | ___TBI: | ||
46 | TSTT D1Ar1,#HI(TBID_ISTAT_BIT) /* Bgnd or Int level? */ | ||
47 | MOVT A1LbP,#HI(___pTBIs) | ||
48 | ADD A1LbP,A1LbP,#LO(___pTBIs) | ||
49 | GETL D0Re0,D1Re0,[A1LbP] /* Base of root block table */ | ||
50 | SWAPNZ D0Re0,D1Re0 /* Swap if asked */ | ||
51 | MOV PC,D1RtP | ||
52 | .size ___TBI,.-___TBI | ||
53 | |||
54 | |||
55 | /* | ||
56 | * Return identifier of the current thread in TBI segment or signal format with | ||
57 | * secondary mask to indicate privilege and interrupt level of thread | ||
58 | */ | ||
59 | .text | ||
60 | .balign 4 | ||
61 | .global ___TBIThrdPrivId | ||
62 | .type ___TBIThrdPrivId,function | ||
63 | ___TBIThrdPrivId: | ||
64 | .global ___TBIThreadId | ||
65 | .type ___TBIThreadId,function | ||
66 | ___TBIThreadId: | ||
67 | #ifndef METAC_0_1 | ||
68 | MOV D1Re0,TXSTATUS /* Are we privileged or int? */ | ||
69 | MOV D0Re0,TXENABLE /* Which thread are we? */ | ||
70 | /* Disable privilege adaptation for now */ | ||
71 | ANDT D1Re0,D1Re0,#HI(TXSTATUS_ISTAT_BIT) /* +TXSTATUS_PSTAT_BIT) */ | ||
72 | LSL D1Re0,D1Re0,#TBID_ISTAT_S-TXSTATUS_ISTAT_S | ||
73 | AND D0Re0,D0Re0,#TXENABLE_THREAD_BITS | ||
74 | LSL D0Re0,D0Re0,#TBID_THREAD_S-TXENABLE_THREAD_S | ||
75 | #else | ||
76 | /* Thread 0 only */ | ||
77 | XOR D0Re0,D0Re0,D0Re0 | ||
78 | XOR D1Re0,D1Re0,D1Re0 | ||
79 | #endif | ||
80 | MOV PC,D1RtP /* Return */ | ||
81 | .size ___TBIThrdPrivId,.-___TBIThrdPrivId | ||
82 | .size ___TBIThreadId,.-___TBIThreadId | ||
83 | |||
84 | |||
85 | /* | ||
86 | * End of tbiroot.S | ||
87 | */ | ||
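___TBI above is, functionally, a two-element table lookup with a conditional swap: the GETL fetches both root block pointers and SWAPNZ puts the one matching the caller's execution level in the primary result register. As a C sketch (the TBID_ISTAT_BIT position is illustrative):

    #include <stdint.h>

    #define TBID_ISTAT_BIT (1u << 24)      /* illustrative bit position */

    struct tbi;                            /* opaque root block */
    extern struct tbi *pTBIs[2];           /* [0] = bgnd, [1] = int */

    struct tbi_pair { struct tbi *primary, *secondary; };

    static struct tbi_pair tbi_lookup(uint32_t id)
    {
            struct tbi_pair res = { pTBIs[0], pTBIs[1] };
            if (id & TBID_ISTAT_BIT) {     /* interrupt-level caller */
                    struct tbi *tmp = res.primary;
                    res.primary = res.secondary;
                    res.secondary = tmp;
            }
            return res;
    }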
diff --git a/arch/metag/tbx/tbisoft.S b/arch/metag/tbx/tbisoft.S new file mode 100644 index 000000000000..0346fe8a53b1 --- /dev/null +++ b/arch/metag/tbx/tbisoft.S | |||
@@ -0,0 +1,237 @@ | |||
1 | /* | ||
2 | * tbisoft.S | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * Support for soft threads and soft context switches | ||
11 | */ | ||
12 | |||
13 | .file "tbisoft.S" | ||
14 | |||
15 | #include <asm/tbx.h> | ||
16 | |||
17 | #ifdef METAC_1_0 | ||
18 | /* Ax.4 is saved in TBICTX */ | ||
19 | #define A0_4 ,A0.4 | ||
20 | #define D0_5 ,D0.5 | ||
21 | #else | ||
22 | /* Ax.4 is NOT saved in TBICTX */ | ||
23 | #define A0_4 | ||
24 | #define D0_5 | ||
25 | #endif | ||
26 | |||
27 | /* Size of the TBICTX structure */ | ||
28 | #define TBICTX_BYTES ((TBICTX_AX_REGS*8)+TBICTX_AX) | ||
29 | |||
30 | .text | ||
31 | .balign 4 | ||
32 | .global ___TBISwitchTail | ||
33 | .type ___TBISwitchTail,function | ||
34 | ___TBISwitchTail: | ||
35 | B $LSwitchTail | ||
36 | .size ___TBISwitchTail,.-___TBISwitchTail | ||
37 | |||
38 | /* | ||
39 | * TBIRES __TBIJumpX( TBIX64 ArgsA, PTBICTX *rpSaveCtx, int TrigsMask, | ||
40 | * void (*fnMain)(), void *pStack ); | ||
41 | * | ||
42 | * This is a combination of __TBISwitch and __TBIJump with the context of | ||
43 | * the calling thread being saved in the rpSaveCtx location with a drop-thru | ||
44 | * effect into the __TBIJump logic. The ArgsB value passed via __TBIJump to | ||
45 | * the routine eventually invoked will reflect the rpSaveCtx value specified. | ||
46 | */ | ||
47 | .text | ||
48 | .balign 4 | ||
49 | .global ___TBIJumpX | ||
50 | .type ___TBIJumpX,function | ||
51 | ___TBIJumpX: | ||
52 | CMP D1RtP,#-1 | ||
53 | B $LSwitchStart | ||
54 | .size ___TBIJumpX,.-___TBIJumpX | ||
55 | |||
56 | /* | ||
57 | * TBIRES __TBISwitch( TBIRES Switch, PTBICTX *rpSaveCtx ) | ||
58 | * | ||
59 | * Software synchronous context switch between soft threads, saving only the | ||
60 | * registers which are actually valid on call entry. | ||
61 | * | ||
62 | * A0FrP, D0RtP, D0.5, D0.6, D0.7 - Saved on stack | ||
63 | * A1GbP is global to all soft threads so not virtualised | ||
64 | * A0StP is then saved as the base of the TBICTX of the thread | ||
65 | * | ||
66 | */ | ||
67 | .text | ||
68 | .balign 4 | ||
69 | .global ___TBISwitch | ||
70 | .type ___TBISwitch,function | ||
71 | ___TBISwitch: | ||
72 | XORS D0Re0,D0Re0,D0Re0 /* Set ZERO flag */ | ||
73 | $LSwitchStart: | ||
74 | MOV D0FrT,A0FrP /* Boing entry sequence */ | ||
75 | ADD A0FrP,A0StP,#0 | ||
76 | SETL [A0StP+#8++],D0FrT,D1RtP | ||
77 | /* | ||
78 | * Save current frame state - we save all regs because we don't want | ||
79 | * uninitialised crap in the TBICTX structure that the asynchronous resumption | ||
80 | * of a thread will restore. | ||
81 | */ | ||
82 | MOVT D1Re0,#HI($LSwitchExit) /* ASync resume point here */ | ||
83 | ADD D1Re0,D1Re0,#LO($LSwitchExit) | ||
84 | SETD [D1Ar3],A0StP /* Record pCtx of this thread */ | ||
85 | MOVT D0Re0,#TBICTX_SOFT_BIT /* Only soft thread state */ | ||
86 | SETL [A0StP++],D0Re0,D1Re0 /* Push header fields */ | ||
87 | ADD D0FrT,A0StP,#TBICTX_AX-TBICTX_DX /* Address AX save area */ | ||
88 | MOV D0Re0,#0 /* Setup 0:0 result for ASync */ | ||
89 | MOV D1Re0,#0 /* resume of the thread */ | ||
90 | MSETL [A0StP],D0Re0,D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7 | ||
91 | SETL [A0StP++],D0Re0,D1Re0 /* Zero CurrRPT, CurrBPOBITS, */ | ||
92 | SETL [A0StP++],D0Re0,D1Re0 /* Zero CurrMODE, CurrDIVTIME */ | ||
93 | ADD A0StP,A0StP,#(TBICTX_AX_REGS*8) /* Reserve AX save space */ | ||
94 | MSETL [D0FrT],A0StP,A0FrP,A0.2,A0.3 A0_4 /* Save AX regs */ | ||
95 | BNZ ___TBIJump | ||
96 | /* | ||
97 | * NextThread MUST be in TBICTX_SOFT_BIT state! | ||
98 | */ | ||
99 | $LSwitchTail: | ||
100 | MOV D0Re0,D0Ar2 /* Result from args */ | ||
101 | MOV D1Re0,D1Ar1 | ||
102 | ADD D1RtP,D1Ar1,#TBICTX_AX | ||
103 | MGETL A0StP,A0FrP,[D1RtP] /* Get frame values */ | ||
104 | $LSwitchCmn: | ||
105 | ADD A0.2,D1Ar1,#TBICTX_DX+(8*5) | ||
106 | MGETL D0.5,D0.6,D0.7,[A0.2] /* Get caller-saved DX regs */ | ||
107 | $LSwitchExit: | ||
108 | GETL D0FrT,D1RtP,[A0FrP++] /* Restore state from frame */ | ||
109 | SUB A0StP,A0FrP,#8 /* Unwind stack */ | ||
110 | MOV A0FrP,D0FrT /* Last memory read completes */ | ||
111 | MOV PC,D1RtP /* Return to caller */ | ||
112 | .size ___TBISwitch,.-___TBISwitch | ||
113 | |||
114 | /* | ||
115 | * void __TBISyncResume( TBIRES State, int TrigMask ); | ||
116 | * | ||
117 | * This routine causes the TBICTX structure specified in State.Sig.pCtx to | ||
118 | * be restored. This implies that execution will not return to the caller. | ||
119 | * The State.Sig.TrigMask field will be ORed into TXMASKI during the | ||
120 | * context switch such that any immediately occurring interrupts occur in | ||
121 | * the context of the newly specified task. The State.Sig.SaveMask parameter | ||
122 | * is ignored. | ||
123 | */ | ||
124 | .text | ||
125 | .balign 4 | ||
126 | .global ___TBISyncResume | ||
127 | .type ___TBISyncResume,function | ||
128 | ___TBISyncResume: | ||
129 | MOV D0Re0,D0Ar2 /* Result from args */ | ||
130 | MOV D1Re0,D1Ar1 | ||
131 | XOR D1Ar5,D1Ar5,D1Ar5 /* D1Ar5 = 0 */ | ||
132 | ADD D1RtP,D1Ar1,#TBICTX_AX | ||
133 | SWAP D1Ar5,TXMASKI /* D1Ar5 <-> TXMASKI */ | ||
134 | MGETL A0StP,A0FrP,[D1RtP] /* Get frame values */ | ||
135 | OR TXMASKI,D1Ar5,D1Ar3 /* New TXMASKI */ | ||
136 | B $LSwitchCmn | ||
137 | .size ___TBISyncResume,.-___TBISyncResume | ||
138 | |||
139 | /* | ||
140 | * void __TBIJump( TBIX64 ArgsA, TBIX32 ArgsB, int TrigsMask, | ||
141 | * void (*fnMain)(), void *pStack ); | ||
142 | * | ||
143 | * Jump directly to a new routine on an arbitrary stack with arbitrary args, | ||
144 | * ORing bits back into TXMASKI en route. | ||
145 | */ | ||
146 | .text | ||
147 | .balign 4 | ||
148 | .global ___TBIJump | ||
149 | .type ___TBIJump,function | ||
150 | ___TBIJump: | ||
151 | XOR D0Re0,D0Re0,D0Re0 /* D0Re0 = 0 */ | ||
152 | MOV A0StP,D0Ar6 /* Stack = Frame */ | ||
153 | SWAP D0Re0,TXMASKI /* D0Re0 <-> TXMASKI */ | ||
154 | MOV A0FrP,D0Ar6 | ||
155 | MOVT A1LbP,#HI(__exit) | ||
156 | ADD A1LbP,A1LbP,#LO(__exit) | ||
157 | MOV D1RtP,A1LbP /* D1RtP = __exit */ | ||
158 | OR TXMASKI,D0Re0,D0Ar4 /* New TXMASKI */ | ||
159 | MOV PC,D1Ar5 /* Jump to fnMain */ | ||
160 | .size ___TBIJump,.-___TBIJump | ||
161 | |||
162 | /* | ||
163 | * PTBICTX __TBISwitchInit( void *pStack, int (*fnMain)(), | ||
164 | * .... 4 extra 32-bit args .... ); | ||
165 | * | ||
166 | * Generate a new soft thread context ready for its first outing. | ||
167 | * | ||
168 | * D1Ar1 - Region of memory to be used as the new soft thread stack | ||
169 | * D0Ar2 - Main line routine for new soft thread | ||
170 | * D1Ar3, D0Ar4, D1Ar5, D0Ar6 - arguments to be passed on stack | ||
171 | * The routine returns the initial PTBICTX value for the new thread | ||
172 | */ | ||
173 | .text | ||
174 | .balign 4 | ||
175 | .global ___TBISwitchInit | ||
176 | .type ___TBISwitchInit,function | ||
177 | ___TBISwitchInit: | ||
178 | MOV D0FrT,A0FrP /* Need save return point */ | ||
179 | ADD A0FrP,A0StP,#0 | ||
180 | SETL [A0StP++],D0FrT,D1RtP /* Save return to caller */ | ||
181 | MOVT A1LbP,#HI(__exit) | ||
182 | ADD A1LbP,A1LbP,#LO(__exit) | ||
183 | MOV D1RtP,A1LbP /* Get address of __exit */ | ||
184 | ADD D1Ar1,D1Ar1,#7 /* Align stack to 64-bits */ | ||
185 | ANDMB D1Ar1,D1Ar1,#0xfff8 /* by rounding base up */ | ||
186 | MOV A0.2,D1Ar1 /* A0.2 is new stack */ | ||
187 | MOV D0FrT,D1Ar1 /* Initial pseudo-frame pointer */ | ||
188 | SETL [A0.2++],D0FrT,D1RtP /* Save return to __exit */ | ||
189 | MOV D1RtP,D0Ar2 | ||
190 | SETL [A0.2++],D0FrT,D1RtP /* Save return to fnMain */ | ||
191 | ADD D0FrT,D0FrT,#8 /* Advance pseudo-frame pointer */ | ||
192 | MSETL [A0.2],D0Ar6,D0Ar4 /* Save extra initial args */ | ||
193 | MOVT D1RtP,#HI(___TBIStart) /* Start up code for new stack */ | ||
194 | ADD D1RtP,D1RtP,#LO(___TBIStart) | ||
195 | SETL [A0.2++],D0FrT,D1RtP /* Save return to ___TBIStart */ | ||
196 | ADD D0FrT,D0FrT,#(8*3) /* Advance pseudo-frame pointer */ | ||
197 | MOV D0Re0,A0.2 /* Return pCtx for new thread */ | ||
198 | MOV D1Re0,#0 /* pCtx:0 is default Arg1:Arg2 */ | ||
199 | /* | ||
200 | * Generate initial TBICTX state | ||
201 | */ | ||
202 | MOVT D1Ar1,#HI($LSwitchExit) /* Async restore code */ | ||
203 | ADD D1Ar1,D1Ar1,#LO($LSwitchExit) | ||
204 | MOVT D0Ar2,#TBICTX_SOFT_BIT /* Only soft thread state */ | ||
205 | ADD D0Ar6,A0.2,#TBICTX_BYTES /* New A0StP */ | ||
206 | MOV D1Ar5,A1GbP /* Same A1GbP */ | ||
207 | MOV D0Ar4,D0FrT /* Initial A0FrP */ | ||
208 | MOV D1Ar3,A1LbP /* Same A1LbP */ | ||
209 | SETL [A0.2++],D0Ar2,D1Ar1 /* Set header fields */ | ||
210 | MSETL [A0.2],D0Re0,D0Ar6,D0Ar4,D0Ar2,D0FrT,D0.5,D0.6,D0.7 | ||
211 | MOV D0Ar2,#0 /* Zero values */ | ||
212 | MOV D1Ar1,#0 | ||
213 | SETL [A0.2++],D0Ar2,D1Ar1 /* Zero CurrRPT, CurrBPOBITS, */ | ||
214 | SETL [A0.2++],D0Ar2,D1Ar1 /* CurrMODE, and pCurrCBuf */ | ||
215 | MSETL [A0.2],D0Ar6,D0Ar4,D0Ar2,D0FrT D0_5 /* Set DX and then AX regs */ | ||
216 | B $LSwitchExit /* All done! */ | ||
217 | .size ___TBISwitchInit,.-___TBISwitchInit | ||
218 | |||
219 | .text | ||
220 | .balign 4 | ||
221 | .global ___TBIStart | ||
222 | .type ___TBIStart,function | ||
223 | ___TBIStart: | ||
224 | MOV D1Ar1,D1Re0 /* Pass TBIRES args to call */ | ||
225 | MOV D0Ar2,D0Re0 | ||
226 | MGETL D0Re0,D0Ar6,D0Ar4,[A0FrP] /* Get hidden args */ | ||
227 | SUB A0StP,A0FrP,#(8*3) /* Entry stack pointer */ | ||
228 | MOV A0FrP,D0Re0 /* Entry frame pointer */ | ||
229 | MOVT A1LbP,#HI(__exit) | ||
230 | ADD A1LbP,A1LbP,#LO(__exit) | ||
231 | MOV D1RtP,A1LbP /* D1RtP = __exit */ | ||
232 | MOV PC,D1Re0 /* Jump into fnMain */ | ||
233 | .size ___TBIStart,.-___TBIStart | ||
234 | |||
235 | /* | ||
236 | * End of tbisoft.S | ||
237 | */ | ||
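Both ___TBIASyncTrigger (in tbipcx.S) and ___TBISwitchInit above round the supplied stack base up to a 64-bit boundary with an ADD #7 / ANDMB #0xfff8 pair; ANDMB masks only the bottom 16 bits, so the net effect is the familiar round-up idiom:

    #include <stdint.h>

    /* Round an address up to the next 64-bit boundary */
    static uintptr_t align_up_8(uintptr_t addr)
    {
            return (addr + 7) & ~(uintptr_t)7;
    }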
diff --git a/arch/metag/tbx/tbistring.c b/arch/metag/tbx/tbistring.c new file mode 100644 index 000000000000..f90cd0822065 --- /dev/null +++ b/arch/metag/tbx/tbistring.c | |||
@@ -0,0 +1,114 @@ | |||
1 | /* | ||
2 | * tbistring.c | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002, 2003, 2005, 2007, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * String table functions provided as part of the thread binary interface for | ||
11 | * Meta processors | ||
12 | */ | ||
13 | |||
14 | #include <linux/export.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <asm/tbx.h> | ||
17 | |||
18 | /* | ||
19 | * There are currently no functions to modify the string table; if these | ||
20 | * are required at some later point I suggest having a separate module and | ||
21 | * ensuring that creating new entries does not interfere with reading old | ||
22 | * entries in any way. | ||
23 | */ | ||
24 | |||
25 | const TBISTR *__TBIFindStr(const TBISTR *start, | ||
26 | const char *str, int match_len) | ||
27 | { | ||
28 | const TBISTR *search = start; | ||
29 | bool exact = true; | ||
30 | const TBISEG *seg; | ||
31 | |||
32 | if (match_len < 0) { | ||
33 | /* Make match_len always positive for the inner loop */ | ||
34 | match_len = -match_len; | ||
35 | exact = false; | ||
36 | } else { | ||
37 | /* | ||
38 | * Also support historic behaviour, which expected match_len to | ||
39 | * include null terminator | ||
40 | */ | ||
41 | if (match_len && str[match_len-1] == '\0') | ||
42 | match_len--; | ||
43 | } | ||
44 | |||
45 | if (!search) { | ||
46 | /* Find global string table segment */ | ||
47 | seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL, | ||
48 | TBID_SEGSCOPE_GLOBAL, | ||
49 | TBID_SEGTYPE_STRING)); | ||
50 | |||
51 | if (!seg || seg->Bytes < sizeof(TBISTR)) | ||
52 | /* No string table! */ | ||
53 | return NULL; | ||
54 | |||
55 | /* Start of string table */ | ||
56 | search = seg->pGAddr; | ||
57 | } | ||
58 | |||
59 | for (;;) { | ||
60 | while (!search->Tag) | ||
61 | /* Allow simple gaps which are just zero initialised */ | ||
62 | search = (const TBISTR *)((const char *)search + 8); | ||
63 | |||
64 | if (search->Tag == METAG_TBI_STRE) { | ||
65 | /* Reached the end of the table */ | ||
66 | search = NULL; | ||
67 | break; | ||
68 | } | ||
69 | |||
70 | if ((search->Len >= match_len) && | ||
71 | (!exact || (search->Len == match_len + 1)) && | ||
72 | (search->Tag != METAG_TBI_STRG)) { | ||
73 | /* Worth searching */ | ||
74 | if (!strncmp(str, (const char *)search->String, | ||
75 | match_len)) | ||
76 | break; | ||
77 | } | ||
78 | |||
79 | /* Next entry */ | ||
80 | search = (const TBISTR *)((const char *)search + search->Bytes); | ||
81 | } | ||
82 | |||
83 | return search; | ||
84 | } | ||
85 | |||
86 | const void *__TBITransStr(const char *str, int len) | ||
87 | { | ||
88 | const TBISTR *search = NULL; | ||
89 | const void *res = NULL; | ||
90 | |||
91 | for (;;) { | ||
92 | /* Search onwards */ | ||
93 | search = __TBIFindStr(search, str, len); | ||
94 | |||
95 | /* No translation returns NULL */ | ||
96 | if (!search) | ||
97 | break; | ||
98 | |||
99 | /* Skip matching entries with no translation data */ | ||
100 | if (search->TransLen != METAG_TBI_STRX) { | ||
101 | /* Calculate base of translation string */ | ||
102 | res = (const char *)search->String + | ||
103 | ((search->Len + 7) & ~7); | ||
104 | break; | ||
105 | } | ||
106 | |||
107 | /* Next entry */ | ||
108 | search = (const TBISTR *)((const char *)search + search->Bytes); | ||
109 | } | ||
110 | |||
111 | /* Return base address of translation data or NULL */ | ||
112 | return res; | ||
113 | } | ||
114 | EXPORT_SYMBOL(__TBITransStr); | ||
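For callers, the common pattern is an exact-match lookup of a message's translation. An entry's Len includes the NUL terminator, which is why the exact test in __TBIFindStr compares against match_len + 1, and the translation data sits after the search string rounded up to the next 8-byte boundary -- the ((search->Len + 7) & ~7) adjustment above. A minimal usage sketch:

    #include <linux/string.h>
    #include <asm/tbx.h>

    /* Return the translation data attached to an exactly-matching string
     * table entry, or NULL if the string has no translation. */
    static const void *translate(const char *msg)
    {
            return __TBITransStr(msg, strlen(msg));
    }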
diff --git a/arch/metag/tbx/tbitimer.S b/arch/metag/tbx/tbitimer.S new file mode 100644 index 000000000000..5dbeddeee7ba --- /dev/null +++ b/arch/metag/tbx/tbitimer.S | |||
@@ -0,0 +1,207 @@ | |||
1 | /* | ||
2 | * tbitimer.S | ||
3 | * | ||
4 | * Copyright (C) 2001, 2002, 2007, 2012 Imagination Technologies. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it under | ||
7 | * the terms of the GNU General Public License version 2 as published by the | ||
8 | * Free Software Foundation. | ||
9 | * | ||
10 | * TBI timer support routines and data values | ||
11 | */ | ||
12 | |||
13 | .file "tbitimer.S" | ||
14 | /* | ||
15 | * Get data structures and defines from the main C header | ||
16 | */ | ||
17 | #include <asm/tbx.h> | ||
18 | |||
19 | .data | ||
20 | .balign 8 | ||
21 | .global ___TBITimeB | ||
22 | .type ___TBITimeB,object | ||
23 | ___TBITimeB: | ||
24 | .quad 0 /* Background 'lost' ticks */ | ||
25 | .size ___TBITimeB,.-___TBITimeB | ||
26 | |||
27 | .data | ||
28 | .balign 8 | ||
29 | .global ___TBITimeI | ||
30 | .type ___TBITimeI,object | ||
31 | ___TBITimeI: | ||
32 | .quad 0 /* Interrupt 'lost' ticks */ | ||
33 | .size ___TBITimeI,.-___TBITimeI | ||
34 | |||
35 | .data | ||
36 | .balign 8 | ||
37 | .global ___TBITimes | ||
38 | .type ___TBITimes,object | ||
39 | ___TBITimes: | ||
40 | .long ___TBITimeB /* Table of 'lost' tick values */ | ||
41 | .long ___TBITimeI | ||
42 | .size ___TBITimes,.-___TBITimes | ||
43 | |||
44 | /* | ||
45 | * Flag bits for control of ___TBITimeCore | ||
46 | */ | ||
47 | #define TIMER_SET_BIT 1 | ||
48 | #define TIMER_ADD_BIT 2 | ||
49 | |||
50 | /* | ||
51 | * Initialise or stop timer support | ||
52 | * | ||
53 | * Register Usage: D1Ar1 holds Id, D1Ar2 is initial delay or 0 | ||
54 | * D0FrT is used to call ___TBITimeCore | ||
55 | * D0Re0 is used for the result which is TXSTAT_TIMER_BIT | ||
56 | * D0Ar4, D1Ar5, D0Ar6 are all used as scratch | ||
57 | * Other registers are those set by ___TBITimeCore | ||
58 | * A0.3 is assumed to point at ___TBITime(I/B) | ||
59 | */ | ||
60 | .text | ||
61 | .balign 4 | ||
62 | .global ___TBITimerCtrl | ||
63 | .type ___TBITimerCtrl,function | ||
64 | ___TBITimerCtrl: | ||
65 | MOV D1Ar5,#TIMER_SET_BIT /* Timer SET request */ | ||
66 | MOVT D0FrT,#HI(___TBITimeCore) /* Get timer core reg values */ | ||
67 | CALL D0FrT,#LO(___TBITimeCore) /* and perform register update */ | ||
68 | NEGS D0Ar6,D0Ar2 /* Set flags from time-stamp */ | ||
69 | ASR D1Ar5,D0Ar6,#31 /* Sign extend D0Ar6 into D1Ar5 */ | ||
70 | SETLNZ [A0.3],D0Ar6,D1Ar5 /* ___TBITime(B/I)=-Start if enable */ | ||
71 | MOV PC,D1RtP /* Return */ | ||
72 | .size ___TBITimerCtrl,.-___TBITimerCtrl | ||
73 | |||
74 | /* | ||
75 | * Return ___TBITimeStamp value | ||
76 | * | ||
77 | * Register Usage: D1Ar1 holds Id | ||
78 | * D0FrT is used to call ___TBITimeCore | ||
79 | * D0Re0, D1Re0 is used for the result | ||
80 | * D1Ar3, D0Ar4, D1Ar5 | ||
81 | * Other registers are those set by ___TBITimeCore | ||
82 | * D0Ar6 is assumed to be the timer value read | ||
83 | * A0.3 is assumed to point at ___TBITime(I/B) | ||
84 | */ | ||
85 | .text | ||
86 | .balign 4 | ||
87 | .global ___TBITimeStamp | ||
88 | .type ___TBITimeStamp,function | ||
89 | ___TBITimeStamp: | ||
90 | MOV D1Ar5,#0 /* Timer GET request */ | ||
91 | MOVT D0FrT,#HI(___TBITimeCore) /* Get timer core reg values */ | ||
92 | CALL D0FrT,#LO(___TBITimeCore) /* with no register update */ | ||
93 | ADDS D0Re0,D0Ar4,D0Ar6 /* Add current time value */ | ||
94 | ADD D1Re0,D1Ar3,D1Ar5 /* to 64-bit signed extend time */ | ||
95 | ADDCS D1Re0,D1Re0,#1 /* Support borrow too */ | ||
96 | MOV PC,D1RtP /* Return */ | ||
97 | .size ___TBITimeStamp,.-___TBITimeStamp | ||
98 | |||
99 | /* | ||
100 | * Perform ___TBITimerAdd logic | ||
101 | * | ||
102 | * Register Usage: D1Ar1 holds Id, D0Ar2 holds value to be added to the timer | ||
103 | * D0Re0 is used for the result - new TIMER value | ||
104 | * D1Ar5, D0Ar6 are used as scratch | ||
105 | * Other registers are those set by ___TBITimeCore | ||
106 | * D0Ar6 is assumed to be the timer value read | ||
107 | * D0Ar4, D1Ar3 is the current value of ___TBITime(B/I) | ||
108 | */ | ||
109 | .text | ||
110 | .balign 4 | ||
111 | .global ___TBITimerAdd | ||
112 | .type ___TBITimerAdd,function | ||
113 | ___TBITimerAdd: | ||
114 | MOV D1Ar5,#TIMER_ADD_BIT /* Timer ADD request */ | ||
115 | MOVT D0FrT,#HI(___TBITimeCore) /* Get timer core reg values */ | ||
116 | CALL D0FrT,#LO(___TBITimeCore) /* with no register update */ | ||
117 | ADD D0Re0,D0Ar2,D0Ar6 /* Regenerate new value = result */ | ||
118 | NEG D0Ar2,D0Ar2 /* Negate delta */ | ||
119 | ASR D1Re0,D0Ar2,#31 /* Sign extend negated delta */ | ||
120 | ADDS D0Ar4,D0Ar4,D0Ar2 /* Add time added to ... */ | ||
121 | ADD D1Ar3,D1Ar3,D1Re0 /* ... real timer ... */ | ||
122 | ADDCS D1Ar3,D1Ar3,#1 /* ... with carry */ | ||
123 | SETL [A0.3],D0Ar4,D1Ar3 /* Update ___TBITime(B/I) */ | ||
124 | MOV PC,D1RtP /* Return */ | ||
125 | .size ___TBITimerAdd,.-___TBITimerAdd | ||
126 | |||
127 | #ifdef TBI_1_4 | ||
128 | /* | ||
129 | * Perform ___TBITimerDeadline logic | ||
130 | * NB: Delays are positive, compared to the Wait values which are negative | ||
131 | * | ||
132 | * Register Usage: D1Ar1 holds Id | ||
133 | * D0Ar2 holds Delay requested | ||
134 | * D0Re0 is used for the result - old TIMER Delay value | ||
135 | * D1Ar5, D0Ar6 are used as scratch | ||
136 | * Other registers are those set by ___TBITimeCore | ||
137 | * D0Ar6 is assumed to be the timer value read | ||
138 | * D0Ar4, D1Ar3 is the current value of ___TBITime(B/I) | ||
139 | * | ||
140 | */ | ||
141 | .text | ||
142 | .balign 4 | ||
143 | .global ___TBITimerDeadline | ||
144 | .type ___TBITimerDeadline,function | ||
145 | ___TBITimerDeadline: | ||
146 | MOV D1Ar5,#TIMER_SET_BIT /* Timer SET request */ | ||
147 | MOVT D0FrT,#HI(___TBITimeCore) /* Get timer core reg values */ | ||
148 | CALL D0FrT,#LO(___TBITimeCore) /* with no register update */ | ||
149 | MOV D0Re0,D0Ar6 /* Old value read = result */ | ||
150 | SUB D0Ar2,D0Ar6,D0Ar2 /* Delta from (old - new) */ | ||
151 | ASR D1Re0,D0Ar2,#31 /* Sign extend delta */ | ||
152 | ADDS D0Ar4,D0Ar4,D0Ar2 /* Add time added to ... */ | ||
153 | ADD D1Ar3,D1Ar3,D1Re0 /* ... real timer ... */ | ||
154 | ADDCS D1Ar3,D1Ar3,#1 /* ... with carry */ | ||
155 | SETL [A0.3],D0Ar4,D1Ar3 /* Update ___TBITime(B/I) */ | ||
156 | MOV PC,D1RtP /* Return */ | ||
157 | .size ___TBITimerDeadline,.-___TBITimerDeadline | ||
158 | #endif /* TBI_1_4 */ | ||
159 | |||
160 | /* | ||
161 | * Perform core timer access logic | ||
162 | * | ||
163 | * Register Usage: D1Ar1 holds Id, D0Ar2 holds input value for SET and | ||
164 | * input value for ADD | ||
165 | * D1Ar5 controls op as SET or ADD as bit values | ||
166 | * On return D0Ar6, D1Ar5 holds the old 64-bit timer value | ||
167 | * A0.3 is setup to point at ___TBITime(I/B) | ||
168 | * A1.3 is setup to point at ___TBITimes | ||
169 | * D0Ar4, D1Ar3 is setup to value of ___TBITime(I/B) | ||
170 | */ | ||
171 | .text | ||
172 | .balign 4 | ||
173 | .global ___TBITimeCore | ||
174 | .type ___TBITimeCore,function | ||
175 | ___TBITimeCore: | ||
176 | #ifndef METAC_0_1 | ||
177 | TSTT D1Ar1,#HI(TBID_ISTAT_BIT) /* Interrupt level timer? */ | ||
178 | #endif | ||
179 | MOVT A1LbP,#HI(___TBITimes) | ||
180 | ADD A1LbP,A1LbP,#LO(___TBITimes) | ||
181 | MOV A1.3,A1LbP /* Get ___TBITimes address */ | ||
182 | #ifndef METAC_0_1 | ||
183 | BNZ $LTimeCoreI /* Yes: Service TXTIMERI! */ | ||
184 | #endif | ||
185 | LSRS D1Ar5,D1Ar5,#1 /* Carry = SET, Zero = !ADD */ | ||
186 | GETD A0.3,[A1.3+#0] /* A0.3 == &___TBITimeB */ | ||
187 | MOV D0Ar6,TXTIMER /* Always GET old value */ | ||
188 | MOVCS TXTIMER,D0Ar2 /* Conditional SET operation */ | ||
189 | ADDNZ TXTIMER,D0Ar2,D0Ar6 /* Conditional ADD operation */ | ||
190 | #ifndef METAC_0_1 | ||
191 | B $LTimeCoreEnd | ||
192 | $LTimeCoreI: | ||
193 | LSRS D1Ar5,D1Ar5,#1 /* Carry = SET, Zero = !ADD */ | ||
194 | GETD A0.3,[A1.3+#4] /* A0.3 == &___TBITimeI */ | ||
195 | MOV D0Ar6,TXTIMERI /* Always GET old value */ | ||
196 | MOVCS TXTIMERI,D0Ar2 /* Conditional SET operation */ | ||
197 | ADDNZ TXTIMERI,D0Ar2,D0Ar6 /* Conditional ADD operation */ | ||
198 | $LTimeCoreEnd: | ||
199 | #endif | ||
200 | ASR D1Ar5,D0Ar6,#31 /* Sign extend D0Ar6 into D1Ar5 */ | ||
201 | GETL D0Ar4,D1Ar3,[A0.3] /* Read ___TBITime(B/I) */ | ||
202 | MOV PC,D0FrT /* Return quickly */ | ||
203 | .size ___TBITimeCore,.-___TBITimeCore | ||
204 | |||
205 | /* | ||
206 | * End of tbitimer.S | ||
207 | */ | ||
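Putting ___TBITimeCore and ___TBITimeStamp together: the hardware counter is only 32 bits wide, so a 64-bit software base is kept and the sign-extended counter is added to it with carry propagation. As a sketch in C (illustrative only, same assumptions as above):

    /* Sketch only: C equivalent of the ___TBITimeStamp arithmetic. */
    static long long tbi_timestamp_sketch(unsigned int hw_counter,
                                          long long time_base)
    {
            /* ASR #31 in ___TBITimeCore: sign-extend the counter */
            long long counter64 = (long long)(int)hw_counter;

            /* ADDS/ADD/ADDCS above: 64-bit add with carry from the low word */
            return time_base + counter64;
    }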
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index e920cbe519fa..e507ab7df60b 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
@@ -62,3 +62,8 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK | |||
62 | 62 | ||
63 | config ARM_ARCH_TIMER | 63 | config ARM_ARCH_TIMER |
64 | bool | 64 | bool |
65 | |||
66 | config CLKSRC_METAG_GENERIC | ||
67 | def_bool y if METAG | ||
68 | help | ||
69 | This option enables support for the Meta per-thread timers. | ||
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 7d671b85a98e..4d8283aec5b5 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile | |||
@@ -21,3 +21,4 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o | |||
21 | obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o | 21 | obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o |
22 | 22 | ||
23 | obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o | 23 | obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o |
24 | obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o | ||
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c new file mode 100644 index 000000000000..ade7513a11d1 --- /dev/null +++ b/drivers/clocksource/metag_generic.c | |||
@@ -0,0 +1,198 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005-2013 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | * | ||
16 | * | ||
17 | * Support for Meta per-thread timers. | ||
18 | * | ||
19 | * Meta hardware threads have 2 timers. The background timer (TXTIMER) is used | ||
20 | * as a free-running time base (hz clocksource), and the interrupt timer | ||
21 | * (TXTIMERI) is used for the timer interrupt (clock event). Both counters | ||
22 | * traditionally count at approximately 1MHz. | ||
23 | */ | ||
24 | |||
25 | #include <clocksource/metag_generic.h> | ||
26 | #include <linux/cpu.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/sched.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/param.h> | ||
31 | #include <linux/time.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/proc_fs.h> | ||
34 | #include <linux/clocksource.h> | ||
35 | #include <linux/clockchips.h> | ||
36 | #include <linux/interrupt.h> | ||
37 | |||
38 | #include <asm/clock.h> | ||
39 | #include <asm/hwthread.h> | ||
40 | #include <asm/core_reg.h> | ||
41 | #include <asm/metag_mem.h> | ||
42 | #include <asm/tbx.h> | ||
43 | |||
44 | #define HARDWARE_FREQ 1000000 /* 1MHz */ | ||
45 | #define HARDWARE_DIV 1 /* divide by 1 = 1MHz clock */ | ||
46 | #define HARDWARE_TO_NS_SHIFT 10 /* convert ticks to ns */ | ||
47 | |||
48 | static unsigned int hwtimer_freq = HARDWARE_FREQ; | ||
49 | static DEFINE_PER_CPU(struct clock_event_device, local_clockevent); | ||
50 | static DEFINE_PER_CPU(char [11], local_clockevent_name); | ||
51 | |||
52 | static int metag_timer_set_next_event(unsigned long delta, | ||
53 | struct clock_event_device *dev) | ||
54 | { | ||
55 | __core_reg_set(TXTIMERI, -delta); | ||
56 | return 0; | ||
57 | } | ||
58 | |||
59 | static void metag_timer_set_mode(enum clock_event_mode mode, | ||
60 | struct clock_event_device *evt) | ||
61 | { | ||
62 | switch (mode) { | ||
63 | case CLOCK_EVT_MODE_ONESHOT: | ||
64 | case CLOCK_EVT_MODE_RESUME: | ||
65 | break; | ||
66 | |||
67 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
68 | /* We should disable the IRQ here */ | ||
69 | break; | ||
70 | |||
71 | case CLOCK_EVT_MODE_PERIODIC: | ||
72 | case CLOCK_EVT_MODE_UNUSED: | ||
73 | WARN_ON(1); | ||
74 | break; | ||
75 | } | ||
76 | } | ||
77 | |||
78 | static cycle_t metag_clocksource_read(struct clocksource *cs) | ||
79 | { | ||
80 | return __core_reg_get(TXTIMER); | ||
81 | } | ||
82 | |||
83 | static struct clocksource clocksource_metag = { | ||
84 | .name = "META", | ||
85 | .rating = 200, | ||
86 | .mask = CLOCKSOURCE_MASK(32), | ||
87 | .read = metag_clocksource_read, | ||
88 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
89 | }; | ||
90 | |||
91 | static irqreturn_t metag_timer_interrupt(int irq, void *dummy) | ||
92 | { | ||
93 | struct clock_event_device *evt = &__get_cpu_var(local_clockevent); | ||
94 | |||
95 | evt->event_handler(evt); | ||
96 | |||
97 | return IRQ_HANDLED; | ||
98 | } | ||
99 | |||
100 | static struct irqaction metag_timer_irq = { | ||
101 | .name = "META core timer", | ||
102 | .handler = metag_timer_interrupt, | ||
103 | .flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU, | ||
104 | }; | ||
105 | |||
106 | unsigned long long sched_clock(void) | ||
107 | { | ||
108 | unsigned long long ticks = __core_reg_get(TXTIMER); | ||
109 | return ticks << HARDWARE_TO_NS_SHIFT; | ||
110 | } | ||
111 | |||
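The shift by HARDWARE_TO_NS_SHIFT (10) is an approximation: at the nominal 1MHz tick rate one tick is ~1000ns, while a shift by 10 multiplies by 1024, so sched_clock() runs ~2.4% fast but needs no division. A sketch of the trade-off (not patch code):

    /* Sketch: exact vs. shifted tick-to-ns conversion at 1 MHz. */
    static unsigned long long ticks_to_ns_exact(unsigned long long ticks)
    {
            return ticks * 1000;    /* true nanoseconds at 1 MHz */
    }

    static unsigned long long ticks_to_ns_shifted(unsigned long long ticks)
    {
            return ticks << 10;     /* ticks * 1024: ~2.4% fast, divide-free */
    }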
112 | static void __cpuinit arch_timer_setup(unsigned int cpu) | ||
113 | { | ||
114 | unsigned int txdivtime; | ||
115 | struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); | ||
116 | char *name = per_cpu(local_clockevent_name, cpu); | ||
117 | |||
118 | txdivtime = __core_reg_get(TXDIVTIME); | ||
119 | |||
120 | txdivtime &= ~TXDIVTIME_DIV_BITS; | ||
121 | txdivtime |= (HARDWARE_DIV & TXDIVTIME_DIV_BITS); | ||
122 | |||
123 | __core_reg_set(TXDIVTIME, txdivtime); | ||
124 | |||
125 | sprintf(name, "META %d", cpu); | ||
126 | clk->name = name; | ||
127 | clk->features = CLOCK_EVT_FEAT_ONESHOT; | ||
128 |||
129 | clk->rating = 200; | ||
130 | clk->shift = 12; | ||
131 | clk->irq = tbisig_map(TBID_SIGNUM_TRT); | ||
132 | clk->set_mode = metag_timer_set_mode; | ||
133 | clk->set_next_event = metag_timer_set_next_event; | ||
134 | |||
135 | clk->mult = div_sc(hwtimer_freq, NSEC_PER_SEC, clk->shift); | ||
136 | clk->max_delta_ns = clockevent_delta2ns(0x7fffffff, clk); | ||
137 | clk->min_delta_ns = clockevent_delta2ns(0xf, clk); | ||
138 | clk->cpumask = cpumask_of(cpu); | ||
139 | |||
140 | clockevents_register_device(clk); | ||
141 | |||
142 | /* | ||
143 | * For all non-boot CPUs we need to synchronize our free | ||
144 | * running clock (TXTIMER) with the boot CPU's clock. | ||
145 | * | ||
146 | * While this won't be accurate, it should be close enough. | ||
147 | */ | ||
148 | if (cpu) { | ||
149 | unsigned int thread0 = cpu_2_hwthread_id[0]; | ||
150 | unsigned long val; | ||
151 | |||
152 | val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0); | ||
153 | __core_reg_set(TXTIMER, val); | ||
154 | } | ||
155 | } | ||
156 | |||
157 | static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self, | ||
158 | unsigned long action, void *hcpu) | ||
159 | { | ||
160 | int cpu = (long)hcpu; | ||
161 | |||
162 | switch (action) { | ||
163 | case CPU_STARTING: | ||
164 | case CPU_STARTING_FROZEN: | ||
165 | arch_timer_setup(cpu); | ||
166 | break; | ||
167 | } | ||
168 | |||
169 | return NOTIFY_OK; | ||
170 | } | ||
171 | |||
172 | static struct notifier_block __cpuinitdata arch_timer_cpu_nb = { | ||
173 | .notifier_call = arch_timer_cpu_notify, | ||
174 | }; | ||
175 | |||
176 | int __init metag_generic_timer_init(void) | ||
177 | { | ||
178 | /* | ||
179 | * On Meta 2 SoCs, the actual frequency of the timer is based on the | ||
180 | * Meta core clock speed divided by an integer, so it is only | ||
181 | * approximately 1MHz. Calculating the real frequency here drastically | ||
182 | * reduces clock skew on these SoCs. | ||
183 | */ | ||
184 | #ifdef CONFIG_METAG_META21 | ||
185 | hwtimer_freq = get_coreclock() / (metag_in32(EXPAND_TIMER_DIV) + 1); | ||
186 | #endif | ||
187 | clocksource_register_hz(&clocksource_metag, hwtimer_freq); | ||
188 | |||
189 | setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq); | ||
190 | |||
191 | /* Configure timer on boot CPU */ | ||
192 | arch_timer_setup(smp_processor_id()); | ||
193 | |||
194 | /* Hook cpu boot to configure other CPU's timers */ | ||
195 | register_cpu_notifier(&arch_timer_cpu_nb); | ||
196 | |||
197 | return 0; | ||
198 | } | ||
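The clk->mult/clk->shift pair set up in arch_timer_setup() is the standard clockevents fixed-point conversion between nanoseconds and timer ticks. Roughly, and only as a sketch of the generic math rather than code from this patch:

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* div_sc(): mult = (freq << shift) / NSEC_PER_SEC; freq here is ~1 MHz */
    static uint32_t div_sc_sketch(uint32_t freq, uint32_t shift)
    {
            return (uint32_t)(((uint64_t)freq << shift) / NSEC_PER_SEC);
    }

    /* how the clockevents core turns a nanosecond delay into a tick delta */
    static uint32_t ns_to_ticks_sketch(uint64_t ns, uint32_t mult, uint32_t shift)
    {
            return (uint32_t)((ns * mult) >> shift);
    }

With hwtimer_freq = 1000000 and shift = 12 this gives mult = 4, so a 1ms request becomes (1000000 * 4) >> 12 = 976 ticks, slightly under the ideal 1000 because mult truncates 4.096 down to 4; min_delta_ns guards the short end, and the result is negated and written to TXTIMERI by metag_timer_set_next_event().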
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index e65fbf2cdf71..98e3b87bdf1b 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile | |||
@@ -2,6 +2,8 @@ obj-$(CONFIG_IRQCHIP) += irqchip.o | |||
2 | 2 | ||
3 | obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o | 3 | obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o |
4 | obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o | 4 | obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o |
5 | obj-$(CONFIG_METAG) += irq-metag-ext.o | ||
6 | obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o | ||
5 | obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o | 7 | obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o |
6 | obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o | 8 | obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o |
7 | obj-$(CONFIG_ARM_GIC) += irq-gic.o | 9 | obj-$(CONFIG_ARM_GIC) += irq-gic.o |
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c new file mode 100644 index 000000000000..92c41ab4dbfd --- /dev/null +++ b/drivers/irqchip/irq-metag-ext.c | |||
@@ -0,0 +1,868 @@ | |||
1 | /* | ||
2 | * Meta External interrupt code. | ||
3 | * | ||
4 | * Copyright (C) 2005-2012 Imagination Technologies Ltd. | ||
5 | * | ||
6 | * External interrupts on Meta are configured at two levels: in the CPU core and | ||
7 | * in the external trigger block. Interrupts from SoC peripherals are | ||
8 | * multiplexed onto a single Meta CPU "trigger" - traditionally it has always | ||
9 | * been trigger 2 (TR2). For info on how de-multiplexing happens check out | ||
10 | * meta_intc_irq_demux(). | ||
11 | */ | ||
12 | |||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/irqchip/metag-ext.h> | ||
15 | #include <linux/irqdomain.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/of.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/syscore_ops.h> | ||
20 | |||
21 | #include <asm/irq.h> | ||
22 | #include <asm/hwthread.h> | ||
23 | |||
24 | #define HWSTAT_STRIDE 8 | ||
25 | #define HWVEC_BLK_STRIDE 0x1000 | ||
26 | |||
27 | /** | ||
28 | * struct meta_intc_priv - private meta external interrupt data | ||
29 | * @nr_banks: Number of interrupt banks | ||
30 | * @domain: IRQ domain for all banks of external IRQs | ||
31 | * @unmasked: Record of unmasked IRQs | ||
32 | * @levels_altered: Record of altered level bits | ||
33 | */ | ||
34 | struct meta_intc_priv { | ||
35 | unsigned int nr_banks; | ||
36 | struct irq_domain *domain; | ||
37 | |||
38 | unsigned long unmasked[4]; | ||
39 | |||
40 | #ifdef CONFIG_METAG_SUSPEND_MEM | ||
41 | unsigned long levels_altered[4]; | ||
42 | #endif | ||
43 | }; | ||
44 | |||
45 | /* Private data for the one and only external interrupt controller */ | ||
46 | static struct meta_intc_priv meta_intc_priv; | ||
47 | |||
48 | /** | ||
49 | * meta_intc_offset() - Get the offset into the bank of a hardware IRQ number | ||
50 | * @hw: Hardware IRQ number (within external trigger block) | ||
51 | * | ||
52 | * Returns: Bit offset into the IRQ's bank registers | ||
53 | */ | ||
54 | static unsigned int meta_intc_offset(irq_hw_number_t hw) | ||
55 | { | ||
56 | return hw & 0x1f; | ||
57 | } | ||
58 | |||
59 | /** | ||
60 | * meta_intc_bank() - Get the bank number of a hardware IRQ number | ||
61 | * @hw: Hardware IRQ number (within external trigger block) | ||
62 | * | ||
63 | * Returns: Bank number indicating which register the IRQ's bits are in | ||
64 | */ | ||
65 | static unsigned int meta_intc_bank(irq_hw_number_t hw) | ||
66 | { | ||
67 | return hw >> 5; | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * meta_intc_stat_addr() - Get the address of a HWSTATEXT register | ||
72 | * @hw: Hardware IRQ number (within external trigger block) | ||
73 | * | ||
74 | * Returns: Address of a HWSTATEXT register containing the status bit for | ||
75 | * the specified hardware IRQ number | ||
76 | */ | ||
77 | static void __iomem *meta_intc_stat_addr(irq_hw_number_t hw) | ||
78 | { | ||
79 | return (void __iomem *)(HWSTATEXT + | ||
80 | HWSTAT_STRIDE * meta_intc_bank(hw)); | ||
81 | } | ||
82 | |||
83 | /** | ||
84 | * meta_intc_level_addr() - Get the address of a HWLEVELEXT register | ||
85 | * @hw: Hardware IRQ number (within external trigger block) | ||
86 | * | ||
87 | * Returns: Address of a HWLEVELEXT register containing the sense bit for | ||
88 | * the specified hardware IRQ number | ||
89 | */ | ||
90 | static void __iomem *meta_intc_level_addr(irq_hw_number_t hw) | ||
91 | { | ||
92 | return (void __iomem *)(HWLEVELEXT + | ||
93 | HWSTAT_STRIDE * meta_intc_bank(hw)); | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * meta_intc_mask_addr() - Get the address of a HWMASKEXT register | ||
98 | * @hw: Hardware IRQ number (within external trigger block) | ||
99 | * | ||
100 | * Returns: Address of a HWMASKEXT register containing the mask bit for the | ||
101 | * specified hardware IRQ number | ||
102 | */ | ||
103 | static void __iomem *meta_intc_mask_addr(irq_hw_number_t hw) | ||
104 | { | ||
105 | return (void __iomem *)(HWMASKEXT + | ||
106 | HWSTAT_STRIDE * meta_intc_bank(hw)); | ||
107 | } | ||
108 | |||
109 | /** | ||
110 | * meta_intc_vec_addr() - Get the vector address of a hardware interrupt | ||
111 | * @hw: Hardware IRQ number (within external trigger block) | ||
112 | * | ||
113 | * Returns: Address of a HWVECEXT register controlling the core trigger to | ||
114 | * vector the IRQ onto | ||
115 | */ | ||
116 | static inline void __iomem *meta_intc_vec_addr(irq_hw_number_t hw) | ||
117 | { | ||
118 | return (void __iomem *)(HWVEC0EXT + | ||
119 | HWVEC_BLK_STRIDE * meta_intc_bank(hw) + | ||
120 | HWVECnEXT_STRIDE * meta_intc_offset(hw)); | ||
121 | } | ||
122 | |||
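To make the stride arithmetic concrete: each bank covers 32 IRQs, the per-bank HWSTATEXT/HWLEVELEXT/HWMASKEXT registers are HWSTAT_STRIDE (8) bytes apart, and each bank's vector registers occupy their own 4KB block. A sketch (not patch code):

    /* Sketch: decomposition performed by the helpers above. */
    static unsigned int bank_of(unsigned int hw)   { return hw >> 5;   }
    static unsigned int offset_of(unsigned int hw) { return hw & 0x1f; }

    /*
     * e.g. hw == 37 -> bank 1, offset 5: its status bit is bit 5 of
     * (HWSTATEXT + 8) and its vector register sits at
     * HWVEC0EXT + 0x1000 + 5 * HWVECnEXT_STRIDE.
     */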
123 | /** | ||
124 | * meta_intc_startup_irq() - set up an external irq | ||
125 | * @data: data for the external irq to start up | ||
126 | * | ||
127 | * Multiplex interrupts for the irq onto TR2. Clear any pending interrupts and | ||
128 | * unmask the irq, both using the appropriate callbacks. | ||
129 | */ | ||
130 | static unsigned int meta_intc_startup_irq(struct irq_data *data) | ||
131 | { | ||
132 | irq_hw_number_t hw = data->hwirq; | ||
133 | void __iomem *vec_addr = meta_intc_vec_addr(hw); | ||
134 | int thread = hard_processor_id(); | ||
135 | |||
136 | /* Perform any necessary acking. */ | ||
137 | if (data->chip->irq_ack) | ||
138 | data->chip->irq_ack(data); | ||
139 | |||
140 | /* Wire up this interrupt to the core with HWVECxEXT. */ | ||
141 | metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr); | ||
142 | |||
143 | /* Perform any necessary unmasking. */ | ||
144 | data->chip->irq_unmask(data); | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | /** | ||
150 | * meta_intc_shutdown_irq() - turn off an external irq | ||
151 | * @data: data for the external irq to turn off | ||
152 | * | ||
153 | * Mask irq using the appropriate callback and stop muxing it onto TR2. | ||
154 | */ | ||
155 | static void meta_intc_shutdown_irq(struct irq_data *data) | ||
156 | { | ||
157 | irq_hw_number_t hw = data->hwirq; | ||
158 | void __iomem *vec_addr = meta_intc_vec_addr(hw); | ||
159 | |||
160 | /* Mask the IRQ */ | ||
161 | data->chip->irq_mask(data); | ||
162 | |||
163 | /* | ||
164 | * Disable the IRQ at the core by removing the interrupt from | ||
165 | * the HW vector mapping. | ||
166 | */ | ||
167 | metag_out32(0, vec_addr); | ||
168 | } | ||
169 | |||
170 | /** | ||
171 | * meta_intc_ack_irq() - acknowledge an external irq | ||
172 | * @data: data for the external irq to ack | ||
173 | * | ||
174 | * Clear down an edge interrupt in the status register. | ||
175 | */ | ||
176 | static void meta_intc_ack_irq(struct irq_data *data) | ||
177 | { | ||
178 | irq_hw_number_t hw = data->hwirq; | ||
179 | unsigned int bit = 1 << meta_intc_offset(hw); | ||
180 | void __iomem *stat_addr = meta_intc_stat_addr(hw); | ||
181 | |||
182 | /* Ack the int, if it is still 'on'. | ||
183 | * NOTE - this only works for edge triggered interrupts. | ||
184 | */ | ||
185 | if (metag_in32(stat_addr) & bit) | ||
186 | metag_out32(bit, stat_addr); | ||
187 | } | ||
188 | |||
189 | /** | ||
190 | * record_irq_is_masked() - record the IRQ masked so it doesn't get handled | ||
191 | * @data: data for the external irq to record | ||
192 | * | ||
193 | * This should get called whenever an external IRQ is masked (by whichever | ||
194 | * callback is used). It records the IRQ masked so that it doesn't get handled | ||
195 | * if it still shows up in the status register. | ||
196 | */ | ||
197 | static void record_irq_is_masked(struct irq_data *data) | ||
198 | { | ||
199 | struct meta_intc_priv *priv = &meta_intc_priv; | ||
200 | irq_hw_number_t hw = data->hwirq; | ||
201 | |||
202 | clear_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]); | ||
203 | } | ||
204 | |||
205 | /** | ||
206 | * record_irq_is_unmasked() - record the IRQ unmasked so it can be handled | ||
207 | * @data: data for the external irq to record | ||
208 | * | ||
209 | * This should get called whenever an external IRQ is unmasked (by whichever | ||
210 | * callback is used). It records the IRQ unmasked so that it gets handled if it | ||
211 | * shows up in the status register. | ||
212 | */ | ||
213 | static void record_irq_is_unmasked(struct irq_data *data) | ||
214 | { | ||
215 | struct meta_intc_priv *priv = &meta_intc_priv; | ||
216 | irq_hw_number_t hw = data->hwirq; | ||
217 | |||
218 | set_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]); | ||
219 | } | ||
220 | |||
221 | /* | ||
222 | * For use by wrapper IRQ drivers | ||
223 | */ | ||
224 | |||
225 | /** | ||
226 | * meta_intc_mask_irq_simple() - minimal mask used by wrapper IRQ drivers | ||
227 | * @data: data for the external irq being masked | ||
228 | * | ||
229 | * This should be called by any wrapper IRQ driver mask functions. It doesn't do | ||
230 | * any masking but records the IRQ as masked so that the core code knows the | ||
231 | * mask has taken place. It is the caller's responsibility to ensure that the IRQ | ||
232 | * won't trigger an interrupt to the core. | ||
233 | */ | ||
234 | void meta_intc_mask_irq_simple(struct irq_data *data) | ||
235 | { | ||
236 | record_irq_is_masked(data); | ||
237 | } | ||
238 | |||
239 | /** | ||
240 | * meta_intc_unmask_irq_simple() - minimal unmask used by wrapper IRQ drivers | ||
241 | * @data: data for the external irq being unmasked | ||
242 | * | ||
243 | * This should be called by any wrapper IRQ driver unmask functions. It doesn't | ||
244 | * do any unmasking but records the IRQ as unmasked so that the core code knows | ||
245 | * the unmask has taken place. It is the caller's responsibility to ensure that | ||
246 | * the IRQ can now trigger an interrupt to the core. | ||
247 | */ | ||
248 | void meta_intc_unmask_irq_simple(struct irq_data *data) | ||
249 | { | ||
250 | record_irq_is_unmasked(data); | ||
251 | } | ||
252 | |||
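As a concrete example of the contract described above, a SoC driver with its own mask registers might wrap these helpers as below. This is a hypothetical sketch: mysoc_write_mask() stands in for whatever SoC-level masking actually exists.

    /* Hypothetical wrapper callbacks for a SoC with its own mask registers. */
    static void mysoc_mask_irq(struct irq_data *data)
    {
            mysoc_write_mask(data->hwirq, false); /* SoC-level mask (invented) */
            meta_intc_mask_irq_simple(data);      /* record it for the demux */
    }

    static void mysoc_unmask_irq(struct irq_data *data)
    {
            meta_intc_unmask_irq_simple(data);
            mysoc_write_mask(data->hwirq, true);  /* SoC-level unmask (invented) */
    }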
253 | |||
254 | /** | ||
255 | * meta_intc_mask_irq() - mask an external irq using HWMASKEXT | ||
256 | * @data: data for the external irq to mask | ||
257 | * | ||
258 | * This is a default implementation of a mask function which makes use of the | ||
259 | * HWMASKEXT registers available in newer versions. | ||
260 | * | ||
261 | * Earlier versions without these registers should use SoC level IRQ masking | ||
262 | * which call the meta_intc_*_simple() functions above, or if that isn't | ||
263 | * available should use the fallback meta_intc_*_nomask() functions below. | ||
264 | */ | ||
265 | static void meta_intc_mask_irq(struct irq_data *data) | ||
266 | { | ||
267 | irq_hw_number_t hw = data->hwirq; | ||
268 | unsigned int bit = 1 << meta_intc_offset(hw); | ||
269 | void __iomem *mask_addr = meta_intc_mask_addr(hw); | ||
270 | unsigned long flags; | ||
271 | |||
272 | record_irq_is_masked(data); | ||
273 | |||
274 | /* update the interrupt mask */ | ||
275 | __global_lock2(flags); | ||
276 | metag_out32(metag_in32(mask_addr) & ~bit, mask_addr); | ||
277 | __global_unlock2(flags); | ||
278 | } | ||
279 | |||
280 | /** | ||
281 | * meta_intc_unmask_irq() - unmask an external irq using HWMASKEXT | ||
282 | * @data: data for the external irq to unmask | ||
283 | * | ||
284 | * This is a default implementation of an unmask function which makes use of the | ||
285 | * HWMASKEXT registers available in newer versions. It should be paired with | ||
286 | * meta_intc_mask_irq() above. | ||
287 | */ | ||
288 | static void meta_intc_unmask_irq(struct irq_data *data) | ||
289 | { | ||
290 | irq_hw_number_t hw = data->hwirq; | ||
291 | unsigned int bit = 1 << meta_intc_offset(hw); | ||
292 | void __iomem *mask_addr = meta_intc_mask_addr(hw); | ||
293 | unsigned long flags; | ||
294 | |||
295 | record_irq_is_unmasked(data); | ||
296 | |||
297 | /* update the interrupt mask */ | ||
298 | __global_lock2(flags); | ||
299 | metag_out32(metag_in32(mask_addr) | bit, mask_addr); | ||
300 | __global_unlock2(flags); | ||
301 | } | ||
302 | |||
303 | /** | ||
304 | * meta_intc_mask_irq_nomask() - mask an external irq by unvectoring | ||
305 | * @data: data for the external irq to mask | ||
306 | * | ||
307 | * This is the mask function for older hardware versions which don't have | ||
308 | * HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the IRQ is | ||
309 | * unvectored from the core and retriggered if necessary later. | ||
310 | */ | ||
311 | static void meta_intc_mask_irq_nomask(struct irq_data *data) | ||
312 | { | ||
313 | irq_hw_number_t hw = data->hwirq; | ||
314 | void __iomem *vec_addr = meta_intc_vec_addr(hw); | ||
315 | |||
316 | record_irq_is_masked(data); | ||
317 | |||
318 | /* there is no interrupt mask, so unvector the interrupt */ | ||
319 | metag_out32(0, vec_addr); | ||
320 | } | ||
321 | |||
322 | /** | ||
323 | * meta_intc_unmask_edge_irq_nomask() - unmask an edge irq by revectoring | ||
324 | * @data: data for the external irq to unmask | ||
325 | * | ||
326 | * This is the unmask function for older hardware versions which don't | ||
327 | * have HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the | ||
328 | * IRQ is revectored back to the core and retriggered if necessary. | ||
329 | * | ||
330 | * The retriggering done by this function is specific to edge interrupts. | ||
331 | */ | ||
332 | static void meta_intc_unmask_edge_irq_nomask(struct irq_data *data) | ||
333 | { | ||
334 | irq_hw_number_t hw = data->hwirq; | ||
335 | unsigned int bit = 1 << meta_intc_offset(hw); | ||
336 | void __iomem *stat_addr = meta_intc_stat_addr(hw); | ||
337 | void __iomem *vec_addr = meta_intc_vec_addr(hw); | ||
338 | unsigned int thread = hard_processor_id(); | ||
339 | |||
340 | record_irq_is_unmasked(data); | ||
341 | |||
342 | /* there is no interrupt mask, so revector the interrupt */ | ||
343 | metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr); | ||
344 | |||
345 | /* | ||
346 | * Re-trigger interrupt | ||
347 | * | ||
348 | * Writing a 1 toggles, and a 0->1 transition triggers. We only | ||
349 | * retrigger if the status bit is already set, which means we | ||
350 | * need to clear it first. Retriggering is fundamentally racy | ||
351 | * because if the interrupt fires again after we clear it we | ||
352 | * could end up clearing it again and the interrupt handler | ||
353 | * thinking it hasn't fired. Therefore we need to keep trying to | ||
354 | * retrigger until the bit is set. | ||
355 | */ | ||
356 | if (metag_in32(stat_addr) & bit) { | ||
357 | metag_out32(bit, stat_addr); | ||
358 | while (!(metag_in32(stat_addr) & bit)) | ||
359 | metag_out32(bit, stat_addr); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | /** | ||
364 | * meta_intc_unmask_level_irq_nomask() - unmask a level irq by revectoring | ||
365 | * @data: data for the external irq to unmask | ||
366 | * | ||
367 | * This is the unmask function for older hardware versions which don't | ||
368 | * have HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the | ||
369 | * IRQ is revectored back to the core and retriggered if necessary. | ||
370 | * | ||
371 | * The retriggering done by this function is specific to level interrupts. | ||
372 | */ | ||
373 | static void meta_intc_unmask_level_irq_nomask(struct irq_data *data) | ||
374 | { | ||
375 | irq_hw_number_t hw = data->hwirq; | ||
376 | unsigned int bit = 1 << meta_intc_offset(hw); | ||
377 | void __iomem *stat_addr = meta_intc_stat_addr(hw); | ||
378 | void __iomem *vec_addr = meta_intc_vec_addr(hw); | ||
379 | unsigned int thread = hard_processor_id(); | ||
380 | |||
381 | record_irq_is_unmasked(data); | ||
382 | |||
383 | /* there is no interrupt mask, so revector the interrupt */ | ||
384 | metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr); | ||
385 | |||
386 | /* Re-trigger interrupt */ | ||
387 | /* Writing a 1 triggers interrupt */ | ||
388 | if (metag_in32(stat_addr) & bit) | ||
389 | metag_out32(bit, stat_addr); | ||
390 | } | ||
391 | |||
392 | /** | ||
393 | * meta_intc_irq_set_type() - set the type of an external irq | ||
394 | * @data: data for the external irq to set the type of | ||
395 | * @flow_type: new irq flow type | ||
396 | * | ||
397 | * Set the flow type of an external interrupt. This updates the irq chip and irq | ||
398 | * handler depending on whether the irq is edge or level sensitive (the polarity | ||
399 | * is ignored), and also sets up the bit in HWLEVELEXT so the hardware knows | ||
400 | * when to trigger. | ||
401 | */ | ||
402 | static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type) | ||
403 | { | ||
404 | #ifdef CONFIG_METAG_SUSPEND_MEM | ||
405 | struct meta_intc_priv *priv = &meta_intc_priv; | ||
406 | #endif | ||
407 | unsigned int irq = data->irq; | ||
408 | irq_hw_number_t hw = data->hwirq; | ||
409 | unsigned int bit = 1 << meta_intc_offset(hw); | ||
410 | void __iomem *level_addr = meta_intc_level_addr(hw); | ||
411 | unsigned long flags; | ||
412 | unsigned int level; | ||
413 | |||
414 | /* update the chip/handler */ | ||
415 | if (flow_type & IRQ_TYPE_LEVEL_MASK) | ||
416 | __irq_set_chip_handler_name_locked(irq, &meta_intc_level_chip, | ||
417 | handle_level_irq, NULL); | ||
418 | else | ||
419 | __irq_set_chip_handler_name_locked(irq, &meta_intc_edge_chip, | ||
420 | handle_edge_irq, NULL); | ||
421 | |||
422 | /* and clear/set the bit in HWLEVELEXT */ | ||
423 | __global_lock2(flags); | ||
424 | level = metag_in32(level_addr); | ||
425 | if (flow_type & IRQ_TYPE_LEVEL_MASK) | ||
426 | level |= bit; | ||
427 | else | ||
428 | level &= ~bit; | ||
429 | metag_out32(level, level_addr); | ||
430 | #ifdef CONFIG_METAG_SUSPEND_MEM | ||
431 | priv->levels_altered[meta_intc_bank(hw)] |= bit; | ||
432 | #endif | ||
433 | __global_unlock2(flags); | ||
434 | |||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | /** | ||
439 | * meta_intc_irq_demux() - external irq de-multiplexer | ||
440 | * @irq: the virtual interrupt number | ||
441 | * @desc: the interrupt description structure for this irq | ||
442 | * | ||
443 | * The cpu receives an interrupt on TR2 when a SoC interrupt has occurred. It is | ||
444 | * this function's job to demux this irq and figure out exactly which external | ||
445 | * irq needs servicing. | ||
446 | * | ||
447 | * Whilst using TR2 to detect external interrupts is a software convention, it is | ||
448 | * (hopefully) unlikely to change. | ||
449 | */ | ||
450 | static void meta_intc_irq_demux(unsigned int irq, struct irq_desc *desc) | ||
451 | { | ||
452 | struct meta_intc_priv *priv = &meta_intc_priv; | ||
453 | irq_hw_number_t hw; | ||
454 | unsigned int bank, irq_no, status; | ||
455 | void __iomem *stat_addr = meta_intc_stat_addr(0); | ||
456 | |||
457 | /* | ||
458 | * Locate which interrupt has caused our handler to run. | ||
459 | */ | ||
460 | for (bank = 0; bank < priv->nr_banks; ++bank) { | ||
461 | /* Which interrupts are currently pending in this bank? */ | ||
462 | recalculate: | ||
463 | status = metag_in32(stat_addr) & priv->unmasked[bank]; | ||
464 | |||
465 | for (hw = bank*32; status; status >>= 1, ++hw) { | ||
466 | if (status & 0x1) { | ||
467 | /* | ||
468 | * Map the hardware IRQ number to a virtual | ||
469 | * Linux IRQ number. | ||
470 | */ | ||
471 | irq_no = irq_linear_revmap(priv->domain, hw); | ||
472 | |||
473 | /* | ||
474 | * Only fire off external interrupts that are | ||
475 | * registered to be handled by the kernel. | ||
476 | * Other external interrupts are probably being | ||
477 | * handled by other Meta hardware threads. | ||
478 | */ | ||
479 | generic_handle_irq(irq_no); | ||
480 | |||
481 | /* | ||
482 | * The handler may have re-enabled interrupts | ||
483 | * which could have caused a nested invocation | ||
484 | * of this code and make the copy of the | ||
485 | * status register we are using invalid. | ||
486 | */ | ||
487 | goto recalculate; | ||
488 | } | ||
489 | } | ||
490 | stat_addr += HWSTAT_STRIDE; | ||
491 | } | ||
492 | } | ||
493 | |||
494 | #ifdef CONFIG_SMP | ||
495 | /** | ||
496 | * meta_intc_set_affinity() - set the affinity for an interrupt | ||
497 | * @data: data for the external irq to set the affinity of | ||
498 | * @cpumask: cpu mask representing cpus which can handle the interrupt | ||
499 | * @force: whether to force (ignored) | ||
500 | * | ||
501 | * Revector the specified external irq onto a specific cpu's TR2 trigger, so | ||
502 | * that that cpu tends to be the one that handles it. | ||
503 | */ | ||
504 | static int meta_intc_set_affinity(struct irq_data *data, | ||
505 | const struct cpumask *cpumask, bool force) | ||
506 | { | ||
507 | irq_hw_number_t hw = data->hwirq; | ||
508 | void __iomem *vec_addr = meta_intc_vec_addr(hw); | ||
509 | unsigned int cpu, thread; | ||
510 | |||
511 | /* | ||
512 | * Wire up this interrupt from HWVECxEXT to the Meta core. | ||
513 | * | ||
514 | * Note that we can't wire up HWVECxEXT to interrupt more than | ||
515 | * one cpu (the interrupt code doesn't support it), so we just | ||
516 | * pick the first cpu we find in 'cpumask'. | ||
517 | */ | ||
518 | cpu = cpumask_any(cpumask); | ||
519 | thread = cpu_2_hwthread_id[cpu]; | ||
520 | |||
521 | metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr); | ||
522 | |||
523 | return 0; | ||
524 | } | ||
525 | #else | ||
526 | #define meta_intc_set_affinity NULL | ||
527 | #endif | ||
528 | |||
529 | #ifdef CONFIG_PM_SLEEP | ||
530 | #define META_INTC_CHIP_FLAGS (IRQCHIP_MASK_ON_SUSPEND \ | ||
531 | | IRQCHIP_SKIP_SET_WAKE) | ||
532 | #else | ||
533 | #define META_INTC_CHIP_FLAGS 0 | ||
534 | #endif | ||
535 | |||
536 | /* public edge/level irq chips which SoCs can override */ | ||
537 | |||
538 | struct irq_chip meta_intc_edge_chip = { | ||
539 | .irq_startup = meta_intc_startup_irq, | ||
540 | .irq_shutdown = meta_intc_shutdown_irq, | ||
541 | .irq_ack = meta_intc_ack_irq, | ||
542 | .irq_mask = meta_intc_mask_irq, | ||
543 | .irq_unmask = meta_intc_unmask_irq, | ||
544 | .irq_set_type = meta_intc_irq_set_type, | ||
545 | .irq_set_affinity = meta_intc_set_affinity, | ||
546 | .flags = META_INTC_CHIP_FLAGS, | ||
547 | }; | ||
548 | |||
549 | struct irq_chip meta_intc_level_chip = { | ||
550 | .irq_startup = meta_intc_startup_irq, | ||
551 | .irq_shutdown = meta_intc_shutdown_irq, | ||
552 | .irq_set_type = meta_intc_irq_set_type, | ||
553 | .irq_mask = meta_intc_mask_irq, | ||
554 | .irq_unmask = meta_intc_unmask_irq, | ||
555 | .irq_set_affinity = meta_intc_set_affinity, | ||
556 | .flags = META_INTC_CHIP_FLAGS, | ||
557 | }; | ||
558 | |||
559 | /** | ||
560 | * meta_intc_map() - map an external irq | ||
561 | * @d: irq domain of external trigger block | ||
562 | * @irq: virtual irq number | ||
563 | * @hw: hardware irq number within external trigger block | ||
564 | * | ||
565 | * This sets up a virtual irq for a specified hardware interrupt. The irq chip | ||
566 | * and handler is configured, using the HWLEVELEXT registers to determine | ||
567 | * edge/level flow type. These registers will have been set when the irq type is | ||
568 | * set (or set to a default at init time). | ||
569 | */ | ||
570 | static int meta_intc_map(struct irq_domain *d, unsigned int irq, | ||
571 | irq_hw_number_t hw) | ||
572 | { | ||
573 | unsigned int bit = 1 << meta_intc_offset(hw); | ||
574 | void __iomem *level_addr = meta_intc_level_addr(hw); | ||
575 | |||
576 | /* Go by the current sense in the HWLEVELEXT register */ | ||
577 | if (metag_in32(level_addr) & bit) | ||
578 | irq_set_chip_and_handler(irq, &meta_intc_level_chip, | ||
579 | handle_level_irq); | ||
580 | else | ||
581 | irq_set_chip_and_handler(irq, &meta_intc_edge_chip, | ||
582 | handle_edge_irq); | ||
583 | return 0; | ||
584 | } | ||
585 | |||
586 | static const struct irq_domain_ops meta_intc_domain_ops = { | ||
587 | .map = meta_intc_map, | ||
588 | .xlate = irq_domain_xlate_twocell, | ||
589 | }; | ||
590 | |||
591 | #ifdef CONFIG_METAG_SUSPEND_MEM | ||
592 | |||
593 | /** | ||
594 | * struct meta_intc_context - suspend context | ||
595 | * @levels: State of HWLEVELEXT registers | ||
596 | * @masks: State of HWMASKEXT registers | ||
597 | * @vectors: State of HWVECEXT registers | ||
598 | * @txvecint: State of TxVECINT registers | ||
599 | * | ||
600 | * This structure stores the IRQ state across suspend. | ||
601 | */ | ||
602 | struct meta_intc_context { | ||
603 | u32 levels[4]; | ||
604 | u32 masks[4]; | ||
605 | u8 vectors[4*32]; | ||
606 | |||
607 | u8 txvecint[4][4]; | ||
608 | }; | ||
609 | |||
610 | /* suspend context */ | ||
611 | static struct meta_intc_context *meta_intc_context; | ||
612 | |||
613 | /** | ||
614 | * meta_intc_suspend() - store irq state | ||
615 | * | ||
616 | * To avoid interfering with other threads, we only save the IRQ state of IRQs in | ||
617 | * use by Linux. | ||
618 | */ | ||
619 | static int meta_intc_suspend(void) | ||
620 | { | ||
621 | struct meta_intc_priv *priv = &meta_intc_priv; | ||
622 | int i, j; | ||
623 | irq_hw_number_t hw; | ||
624 | unsigned int bank; | ||
625 | unsigned long flags; | ||
626 | struct meta_intc_context *context; | ||
627 | void __iomem *level_addr, *mask_addr, *vec_addr; | ||
628 | u32 mask, bit; | ||
629 | |||
630 | context = kzalloc(sizeof(*context), GFP_ATOMIC); | ||
631 | if (!context) | ||
632 | return -ENOMEM; | ||
633 | |||
634 | hw = 0; | ||
635 | level_addr = meta_intc_level_addr(0); | ||
636 | mask_addr = meta_intc_mask_addr(0); | ||
637 | for (bank = 0; bank < priv->nr_banks; ++bank) { | ||
638 | vec_addr = meta_intc_vec_addr(hw); | ||
639 | |||
640 | /* create mask of interrupts in use */ | ||
641 | mask = 0; | ||
642 | for (bit = 1; bit; bit <<= 1) { | ||
643 | i = irq_linear_revmap(priv->domain, hw); | ||
644 | /* save mapped irqs which are enabled or have actions */ | ||
645 | if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) || | ||
646 | irq_has_action(i))) { | ||
647 | mask |= bit; | ||
648 | |||
649 | /* save trigger vector */ | ||
650 | context->vectors[hw] = metag_in32(vec_addr); | ||
651 | } | ||
652 | |||
653 | ++hw; | ||
654 | vec_addr += HWVECnEXT_STRIDE; | ||
655 | } | ||
656 | |||
657 | /* save level state if any IRQ levels altered */ | ||
658 | if (priv->levels_altered[bank]) | ||
659 | context->levels[bank] = metag_in32(level_addr); | ||
660 | /* save mask state if any IRQs in use */ | ||
661 | if (mask) | ||
662 | context->masks[bank] = metag_in32(mask_addr); | ||
663 | |||
664 | level_addr += HWSTAT_STRIDE; | ||
665 | mask_addr += HWSTAT_STRIDE; | ||
666 | } | ||
667 | |||
668 | /* save trigger matrixing */ | ||
669 | __global_lock2(flags); | ||
670 | for (i = 0; i < 4; ++i) | ||
671 | for (j = 0; j < 4; ++j) | ||
672 | context->txvecint[i][j] = metag_in32(T0VECINT_BHALT + | ||
673 | TnVECINT_STRIDE*i + | ||
674 | 8*j); | ||
675 | __global_unlock2(flags); | ||
676 | |||
677 | meta_intc_context = context; | ||
678 | return 0; | ||
679 | } | ||
680 | |||
681 | /** | ||
682 | * meta_intc_resume() - restore saved irq state | ||
683 | * | ||
684 | * Restore the saved IRQ state and drop it. | ||
685 | */ | ||
686 | static void meta_intc_resume(void) | ||
687 | { | ||
688 | struct meta_intc_priv *priv = &meta_intc_priv; | ||
689 | int i, j; | ||
690 | irq_hw_number_t hw; | ||
691 | unsigned int bank; | ||
692 | unsigned long flags; | ||
693 | struct meta_intc_context *context = meta_intc_context; | ||
694 | void __iomem *level_addr, *mask_addr, *vec_addr; | ||
695 | u32 mask, bit, tmp; | ||
696 | |||
697 | meta_intc_context = NULL; | ||
698 | |||
699 | hw = 0; | ||
700 | level_addr = meta_intc_level_addr(0); | ||
701 | mask_addr = meta_intc_mask_addr(0); | ||
702 | for (bank = 0; bank < priv->nr_banks; ++bank) { | ||
703 | vec_addr = meta_intc_vec_addr(hw); | ||
704 | |||
705 | /* create mask of interrupts in use */ | ||
706 | mask = 0; | ||
707 | for (bit = 1; bit; bit <<= 1) { | ||
708 | i = irq_linear_revmap(priv->domain, hw); | ||
709 | /* restore mapped irqs, enabled or with actions */ | ||
710 | if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) || | ||
711 | irq_has_action(i))) { | ||
712 | mask |= bit; | ||
713 | |||
714 | /* restore trigger vector */ | ||
715 | metag_out32(context->vectors[hw], vec_addr); | ||
716 | } | ||
717 | |||
718 | ++hw; | ||
719 | vec_addr += HWVECnEXT_STRIDE; | ||
720 | } | ||
721 | |||
722 | if (mask) { | ||
723 | /* restore mask state */ | ||
724 | __global_lock2(flags); | ||
725 | tmp = metag_in32(mask_addr); | ||
726 | tmp = (tmp & ~mask) | (context->masks[bank] & mask); | ||
727 | metag_out32(tmp, mask_addr); | ||
728 | __global_unlock2(flags); | ||
729 | } | ||
730 | |||
731 | mask = priv->levels_altered[bank]; | ||
732 | if (mask) { | ||
733 | /* restore level state */ | ||
734 | __global_lock2(flags); | ||
735 | tmp = metag_in32(level_addr); | ||
736 | tmp = (tmp & ~mask) | (context->levels[bank] & mask); | ||
737 | metag_out32(tmp, level_addr); | ||
738 | __global_unlock2(flags); | ||
739 | } | ||
740 | |||
741 | level_addr += HWSTAT_STRIDE; | ||
742 | mask_addr += HWSTAT_STRIDE; | ||
743 | } | ||
744 | |||
745 | /* restore trigger matrixing */ | ||
746 | __global_lock2(flags); | ||
747 | for (i = 0; i < 4; ++i) { | ||
748 | for (j = 0; j < 4; ++j) { | ||
749 | metag_out32(context->txvecint[i][j], | ||
750 | T0VECINT_BHALT + | ||
751 | TnVECINT_STRIDE*i + | ||
752 | 8*j); | ||
753 | } | ||
754 | } | ||
755 | __global_unlock2(flags); | ||
756 | |||
757 | kfree(context); | ||
758 | } | ||
759 | |||
760 | static struct syscore_ops meta_intc_syscore_ops = { | ||
761 | .suspend = meta_intc_suspend, | ||
762 | .resume = meta_intc_resume, | ||
763 | }; | ||
764 | |||
765 | static void __init meta_intc_init_syscore_ops(struct meta_intc_priv *priv) | ||
766 | { | ||
767 | register_syscore_ops(&meta_intc_syscore_ops); | ||
768 | } | ||
769 | #else | ||
770 | #define meta_intc_init_syscore_ops(priv) do {} while (0) | ||
771 | #endif | ||
772 | |||
773 | /** | ||
774 | * meta_intc_init_cpu() - register with a Meta cpu | ||
775 | * @priv: private interrupt controller data | ||
776 | * @cpu: the CPU to register on | ||
777 | * | ||
778 | * Configure @cpu's TR2 irq so that we can demux external irqs. | ||
779 | */ | ||
780 | static void __init meta_intc_init_cpu(struct meta_intc_priv *priv, int cpu) | ||
781 | { | ||
782 | unsigned int thread = cpu_2_hwthread_id[cpu]; | ||
783 | unsigned int signum = TBID_SIGNUM_TR2(thread); | ||
784 | int irq = tbisig_map(signum); | ||
785 | |||
786 | /* Register the multiplexed IRQ handler */ | ||
787 | irq_set_chained_handler(irq, meta_intc_irq_demux); | ||
788 | irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); | ||
789 | } | ||
790 | |||
791 | /** | ||
792 | * meta_intc_no_mask() - indicate lack of HWMASKEXT registers | ||
793 | * | ||
794 | * Called from SoC code (or init code below) to dynamically indicate the lack of | ||
795 | * HWMASKEXT registers (for example depending on some SoC revision register). | ||
796 | * This alters the irq mask and unmask callbacks to use the fallback | ||
797 | * unvectoring/retriggering technique instead of using HWMASKEXT registers. | ||
798 | */ | ||
799 | void __init meta_intc_no_mask(void) | ||
800 | { | ||
801 | meta_intc_edge_chip.irq_mask = meta_intc_mask_irq_nomask; | ||
802 | meta_intc_edge_chip.irq_unmask = meta_intc_unmask_edge_irq_nomask; | ||
803 | meta_intc_level_chip.irq_mask = meta_intc_mask_irq_nomask; | ||
804 | meta_intc_level_chip.irq_unmask = meta_intc_unmask_level_irq_nomask; | ||
805 | } | ||
806 | |||
807 | /** | ||
808 | * init_external_IRQ() - initialise the external irq controller | ||
809 | * | ||
810 | * Set up the external irq controller using device tree properties. This is | ||
811 | * called from init_IRQ(). | ||
812 | */ | ||
813 | int __init init_external_IRQ(void) | ||
814 | { | ||
815 | struct meta_intc_priv *priv = &meta_intc_priv; | ||
816 | struct device_node *node; | ||
817 | int ret, cpu; | ||
818 | u32 val; | ||
819 | bool no_masks = false; | ||
820 | |||
821 | node = of_find_compatible_node(NULL, NULL, "img,meta-intc"); | ||
822 | if (!node) | ||
823 | return -ENOENT; | ||
824 | |||
825 | /* Get number of banks */ | ||
826 | ret = of_property_read_u32(node, "num-banks", &val); | ||
827 | if (ret) { | ||
828 | pr_err("meta-intc: No num-banks property found\n"); | ||
829 | return ret; | ||
830 | } | ||
831 | if (val < 1 || val > 4) { | ||
832 | pr_err("meta-intc: num-banks (%u) out of range\n", val); | ||
833 | return -EINVAL; | ||
834 | } | ||
835 | priv->nr_banks = val; | ||
836 | |||
837 | /* Are any mask registers present? */ | ||
838 | if (of_get_property(node, "no-mask", NULL)) | ||
839 | no_masks = true; | ||
840 | |||
841 | /* No HWMASKEXT registers present? */ | ||
842 | if (no_masks) | ||
843 | meta_intc_no_mask(); | ||
844 | |||
845 | /* Set up an IRQ domain */ | ||
846 | /* | ||
847 | * The IRQ domain setup here is transitional until all the platform setup | ||
848 | * code has been converted to devicetree. | ||
849 | */ | ||
850 | priv->domain = irq_domain_add_linear(node, priv->nr_banks*32, | ||
851 | &meta_intc_domain_ops, priv); | ||
852 | if (unlikely(!priv->domain)) { | ||
853 | pr_err("meta-intc: cannot add IRQ domain\n"); | ||
854 | return -ENOMEM; | ||
855 | } | ||
856 | |||
857 | /* Setup TR2 for all cpus. */ | ||
858 | for_each_possible_cpu(cpu) | ||
859 | meta_intc_init_cpu(priv, cpu); | ||
860 | |||
861 | /* Set up system suspend/resume callbacks */ | ||
862 | meta_intc_init_syscore_ops(priv); | ||
863 | |||
864 | pr_info("meta-intc: External IRQ controller initialised (%u IRQs)\n", | ||
865 | priv->nr_banks*32); | ||
866 | |||
867 | return 0; | ||
868 | } | ||
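For completeness, this is roughly how a peripheral driver would end up using the controller on a devicetree platform: the two-cell specifier is translated by irq_domain_xlate_twocell(), the flow type from the second cell reaches meta_intc_irq_set_type(), and request_irq() lands in the chip callbacks above. A hypothetical probe sketch (my_handler_sketch and the device name are invented):

    #include <linux/interrupt.h>
    #include <linux/of_irq.h>
    #include <linux/platform_device.h>

    static irqreturn_t my_handler_sketch(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int mydev_probe_sketch(struct platform_device *pdev)
    {
            int irq = irq_of_parse_and_map(pdev->dev.of_node, 0);

            if (!irq)
                    return -EINVAL;

            /* handled via meta_intc_edge_chip or meta_intc_level_chip */
            return request_irq(irq, my_handler_sketch, 0, "mydev", NULL);
    }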
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c new file mode 100644 index 000000000000..8e94d7a3b20d --- /dev/null +++ b/drivers/irqchip/irq-metag.c | |||
@@ -0,0 +1,343 @@ | |||
1 | /* | ||
2 | * Meta internal (HWSTATMETA) interrupt code. | ||
3 | * | ||
4 | * Copyright (C) 2011-2012 Imagination Technologies Ltd. | ||
5 | * | ||
6 | * This code is based on the code in SoC/common/irq.c and SoC/comet/irq.c. | ||
7 | * The code base could be generalised/merged as a lot of the functionality is | ||
8 | * similar. Until this is done, we try to keep the code simple here. | ||
9 | */ | ||
10 | |||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <linux/irqdomain.h> | ||
14 | |||
15 | #include <asm/irq.h> | ||
16 | #include <asm/hwthread.h> | ||
17 | |||
18 | #define PERF0VECINT 0x04820580 | ||
19 | #define PERF1VECINT 0x04820588 | ||
20 | #define PERF0TRIG_OFFSET 16 | ||
21 | #define PERF1TRIG_OFFSET 17 | ||
22 | |||
23 | /** | ||
24 | * struct metag_internal_irq_priv - private meta internal interrupt data | ||
25 | * @domain: IRQ domain for all internal Meta IRQs (HWSTATMETA) | ||
26 | * @unmasked: Record of unmasked IRQs | ||
27 | */ | ||
28 | struct metag_internal_irq_priv { | ||
29 | struct irq_domain *domain; | ||
30 | |||
31 | unsigned long unmasked; | ||
32 | }; | ||
33 | |||
34 | /* Private data for the one and only internal interrupt controller */ | ||
35 | static struct metag_internal_irq_priv metag_internal_irq_priv; | ||
36 | |||
37 | static unsigned int metag_internal_irq_startup(struct irq_data *data); | ||
38 | static void metag_internal_irq_shutdown(struct irq_data *data); | ||
39 | static void metag_internal_irq_ack(struct irq_data *data); | ||
40 | static void metag_internal_irq_mask(struct irq_data *data); | ||
41 | static void metag_internal_irq_unmask(struct irq_data *data); | ||
42 | #ifdef CONFIG_SMP | ||
43 | static int metag_internal_irq_set_affinity(struct irq_data *data, | ||
44 | const struct cpumask *cpumask, bool force); | ||
45 | #endif | ||
46 | |||
47 | static struct irq_chip internal_irq_edge_chip = { | ||
48 | .name = "HWSTATMETA-IRQ", | ||
49 | .irq_startup = metag_internal_irq_startup, | ||
50 | .irq_shutdown = metag_internal_irq_shutdown, | ||
51 | .irq_ack = metag_internal_irq_ack, | ||
52 | .irq_mask = metag_internal_irq_mask, | ||
53 | .irq_unmask = metag_internal_irq_unmask, | ||
54 | #ifdef CONFIG_SMP | ||
55 | .irq_set_affinity = metag_internal_irq_set_affinity, | ||
56 | #endif | ||
57 | }; | ||
58 | |||
59 | /* | ||
60 | * metag_hwvec_addr - get the address of the *VECINT register for an irq | ||
61 | * | ||
62 | * This function serves as a table of the triggers supported on HWSTATMETA. | ||
63 | * It could use a data structure, but it's better to keep it simple; changes | ||
64 | * to this code should be rare. | ||
65 | */ | ||
66 | static inline void __iomem *metag_hwvec_addr(irq_hw_number_t hw) | ||
67 | { | ||
68 | void __iomem *addr; | ||
69 | |||
70 | switch (hw) { | ||
71 | case PERF0TRIG_OFFSET: | ||
72 | addr = (void __iomem *)PERF0VECINT; | ||
73 | break; | ||
74 | case PERF1TRIG_OFFSET: | ||
75 | addr = (void __iomem *)PERF1VECINT; | ||
76 | break; | ||
77 | default: | ||
78 | addr = NULL; | ||
79 | break; | ||
80 | } | ||
81 | return addr; | ||
82 | } | ||
83 | |||
84 | /* | ||
85 | * metag_internal_irq_startup - set up an internal irq | ||
86 | * @data: irq data for the internal irq to start up | ||
87 | * | ||
88 | * Multiplex interrupts for the irq onto TR1. Clear any pending | ||
89 | * interrupts. | ||
90 | */ | ||
91 | static unsigned int metag_internal_irq_startup(struct irq_data *data) | ||
92 | { | ||
93 | /* Clear (toggle) the bit in HWSTATMETA for our interrupt. */ | ||
94 | metag_internal_irq_ack(data); | ||
95 | |||
96 | /* Enable the interrupt by unmasking it */ | ||
97 | metag_internal_irq_unmask(data); | ||
98 | |||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * metag_internal_irq_shutdown - turn off the irq | ||
104 | * @data: irq data for the internal irq to turn off | ||
105 | * | ||
106 | * Mask the irq and clear any pending interrupts. | ||
107 | * Stop muxing it onto TR1. | ||
108 | */ | ||
109 | static void metag_internal_irq_shutdown(struct irq_data *data) | ||
110 | { | ||
111 | /* Disable the IRQ at the core by masking it. */ | ||
112 | metag_internal_irq_mask(data); | ||
113 | |||
114 | /* Clear (toggle) the bit in HWSTATMETA for our interrupt. */ | ||
115 | metag_internal_irq_ack(data); | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * metag_internal_irq_ack - acknowledge irq | ||
120 | * @data: irq data for the internal irq to ack | ||
121 | */ | ||
122 | static void metag_internal_irq_ack(struct irq_data *data) | ||
123 | { | ||
124 | irq_hw_number_t hw = data->hwirq; | ||
125 | unsigned int bit = 1 << hw; | ||
126 | |||
127 | if (metag_in32(HWSTATMETA) & bit) | ||
128 | metag_out32(bit, HWSTATMETA); | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * metag_internal_irq_mask() - mask an internal irq by unvectoring | ||
133 | * @data: data for the internal irq to mask | ||
134 | * | ||
135 | * HWSTATMETA has no mask register. Instead the IRQ is unvectored from the core | ||
136 | * and retriggered if necessary later. | ||
137 | */ | ||
138 | static void metag_internal_irq_mask(struct irq_data *data) | ||
139 | { | ||
140 | struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; | ||
141 | irq_hw_number_t hw = data->hwirq; | ||
142 | void __iomem *vec_addr = metag_hwvec_addr(hw); | ||
143 | |||
144 | clear_bit(hw, &priv->unmasked); | ||
145 | |||
146 | /* there is no interrupt mask, so unvector the interrupt */ | ||
147 | metag_out32(0, vec_addr); | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * metag_internal_irq_unmask() - unmask an internal irq by revectoring | ||
152 | * @data: data for the internal irq to unmask | ||
153 | * | ||
154 | * HWSTATMETA has no mask register. Instead the IRQ is revectored back to the | ||
155 | * core and retriggered if necessary. | ||
156 | */ | ||
157 | static void metag_internal_irq_unmask(struct irq_data *data) | ||
158 | { | ||
159 | struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; | ||
160 | irq_hw_number_t hw = data->hwirq; | ||
161 | unsigned int bit = 1 << hw; | ||
162 | void __iomem *vec_addr = metag_hwvec_addr(hw); | ||
163 | unsigned int thread = hard_processor_id(); | ||
164 | |||
165 | set_bit(hw, &priv->unmasked); | ||
166 | |||
167 | /* there is no interrupt mask, so revector the interrupt */ | ||
168 | metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), vec_addr); | ||
169 | |||
170 | /* | ||
171 | * Re-trigger interrupt | ||
172 | * | ||
173 | * Writing a 1 toggles, and a 0->1 transition triggers. We only | ||
174 | * retrigger if the status bit is already set, which means we | ||
175 | * need to clear it first. Retriggering is fundamentally racy | ||
176 | * because if the interrupt fires again after we clear it we | ||
177 | * could end up clearing it again and the interrupt handler | ||
178 | * thinking it hasn't fired. Therefore we need to keep trying to | ||
179 | * retrigger until the bit is set. | ||
180 | */ | ||
181 | if (metag_in32(HWSTATMETA) & bit) { | ||
182 | metag_out32(bit, HWSTATMETA); | ||
183 | while (!(metag_in32(HWSTATMETA) & bit)) | ||
184 | metag_out32(bit, HWSTATMETA); | ||
185 | } | ||
186 | } | ||
187 | |||
188 | #ifdef CONFIG_SMP | ||
189 | /* | ||
190 | * metag_internal_irq_set_affinity - set the affinity for an interrupt | ||
191 | */ | ||
192 | static int metag_internal_irq_set_affinity(struct irq_data *data, | ||
193 | const struct cpumask *cpumask, bool force) | ||
194 | { | ||
195 | unsigned int cpu, thread; | ||
196 | irq_hw_number_t hw = data->hwirq; | ||
197 | /* | ||
198 | * Wire up this interrupt from *VECINT to the Meta core. | ||
199 | * | ||
200 | * Note that we can't wire up *VECINT to interrupt more than | ||
201 | * one cpu (the interrupt code doesn't support it), so we just | ||
202 | * pick the first cpu we find in 'cpumask'. | ||
203 | */ | ||
204 | cpu = cpumask_any(cpumask); | ||
205 | thread = cpu_2_hwthread_id[cpu]; | ||
206 | |||
207 | metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), | ||
208 | metag_hwvec_addr(hw)); | ||
209 | |||
210 | return 0; | ||
211 | } | ||
212 | #endif | ||
213 | |||
214 | /* | ||
215 | * metag_internal_irq_demux - irq de-multiplexer | ||
216 | * @irq: the interrupt number | ||
217 | * @desc: the interrupt description structure for this irq | ||
218 | * | ||
219 | * The cpu receives an interrupt on TR1 when an interrupt has | ||
220 | * occurred. It is this function's job to demux this irq and | ||
221 | * figure out exactly which trigger needs servicing. | ||
222 | */ | ||
223 | static void metag_internal_irq_demux(unsigned int irq, struct irq_desc *desc) | ||
224 | { | ||
225 | struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc); | ||
226 | irq_hw_number_t hw; | ||
227 | unsigned int irq_no; | ||
228 | u32 status; | ||
229 | |||
230 | recalculate: | ||
231 | status = metag_in32(HWSTATMETA) & priv->unmasked; | ||
232 | |||
233 | for (hw = 0; status != 0; status >>= 1, ++hw) { | ||
234 | if (status & 0x1) { | ||
235 | /* | ||
236 | * Map the hardware IRQ number to a virtual Linux IRQ | ||
237 | * number. | ||
238 | */ | ||
239 | irq_no = irq_linear_revmap(priv->domain, hw); | ||
240 | |||
241 | /* | ||
242 | * Only fire off interrupts that are | ||
243 | * registered to be handled by the kernel. | ||
244 | * Other interrupts are probably being | ||
245 | * handled by other Meta hardware threads. | ||
246 | */ | ||
247 | generic_handle_irq(irq_no); | ||
248 | |||
249 | /* | ||
250 | * The handler may have re-enabled interrupts | ||
251 | * which could have caused a nested invocation | ||
252 | * of this code and make the copy of the | ||
253 | * status register we are using invalid. | ||
254 | */ | ||
255 | goto recalculate; | ||
256 | } | ||
257 | } | ||
258 | } | ||
259 | |||
260 | /** | ||
261 | * internal_irq_map() - Map an internal Meta IRQ to a virtual IRQ number. | ||
262 | * @hw: Number of the internal IRQ. Must be in range. | ||
263 | * | ||
264 | * Returns: The virtual IRQ number of the Meta internal IRQ specified by | ||
265 | * @hw. | ||
266 | */ | ||
267 | int internal_irq_map(unsigned int hw) | ||
268 | { | ||
269 | struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; | ||
270 | if (!priv->domain) | ||
271 | return -ENODEV; | ||
272 | return irq_create_mapping(priv->domain, hw); | ||
273 | } | ||
274 | |||
275 | /** | ||
276 | * metag_internal_irq_init_cpu - register with a Meta cpu | ||
277 | * @cpu: the CPU to register on | ||
278 | * | ||
279 | * Configure @cpu's TR1 irq so that we can demux irqs. | ||
280 | */ | ||
281 | static void metag_internal_irq_init_cpu(struct metag_internal_irq_priv *priv, | ||
282 | int cpu) | ||
283 | { | ||
284 | unsigned int thread = cpu_2_hwthread_id[cpu]; | ||
285 | unsigned int signum = TBID_SIGNUM_TR1(thread); | ||
286 | int irq = tbisig_map(signum); | ||
287 | |||
288 | /* Register the multiplexed IRQ handler */ | ||
289 | irq_set_handler_data(irq, priv); | ||
290 | irq_set_chained_handler(irq, metag_internal_irq_demux); | ||
291 | irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); | ||
292 | } | ||
293 | |||
294 | /** | ||
295 | * metag_internal_intc_map() - map an internal irq | ||
296 | * @d: irq domain of internal trigger block | ||
297 | * @irq: virtual irq number | ||
298 | * @hw: hardware irq number within internal trigger block | ||
299 | * | ||
300 | * This sets up a virtual irq for a specified hardware interrupt. The irq chip | ||
301 | * and handler is configured. | ||
302 | */ | ||
303 | static int metag_internal_intc_map(struct irq_domain *d, unsigned int irq, | ||
304 | irq_hw_number_t hw) | ||
305 | { | ||
306 | /* only register interrupt if it is mapped */ | ||
307 | if (!metag_hwvec_addr(hw)) | ||
308 | return -EINVAL; | ||
309 | |||
310 | irq_set_chip_and_handler(irq, &internal_irq_edge_chip, | ||
311 | handle_edge_irq); | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static const struct irq_domain_ops metag_internal_intc_domain_ops = { | ||
316 | .map = metag_internal_intc_map, | ||
317 | }; | ||
318 | |||
319 | /** | ||
320 | * init_internal_IRQ() - register internal IRQs | ||
321 | * | ||
322 | * Register the irq chip and handler function for all internal IRQs | ||
323 | */ | ||
324 | int __init init_internal_IRQ(void) | ||
325 | { | ||
326 | struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; | ||
327 | unsigned int cpu; | ||
328 | |||
329 | /* Set up an IRQ domain */ | ||
330 | priv->domain = irq_domain_add_linear(NULL, 32, | ||
331 | &metag_internal_intc_domain_ops, | ||
332 | priv); | ||
333 | if (unlikely(!priv->domain)) { | ||
334 | pr_err("meta-internal-intc: cannot add IRQ domain\n"); | ||
335 | return -ENOMEM; | ||
336 | } | ||
337 | |||
338 | /* Set up TR1 for all CPUs. */ | ||
339 | for_each_possible_cpu(cpu) | ||
340 | metag_internal_irq_init_cpu(priv, cpu); | ||
341 | |||
342 | return 0; | ||
343 | } | ||
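
A consumer of one of these internal triggers would first turn the hardware trigger number into a Linux IRQ with internal_irq_map() and then request it as usual. A minimal sketch, assuming a hypothetical trigger number HWIRQ_PERF and handler perf_irq_handler (neither name is part of this patch):

	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <linux/irqchip/metag.h>

	#define HWIRQ_PERF 2	/* assumed internal trigger number */

	static irqreturn_t perf_irq_handler(int irq, void *dev_id)
	{
		/* service the trigger, e.g. a counter overflow */
		return IRQ_HANDLED;
	}

	static int __init perf_irq_setup(void)
	{
		int irq = internal_irq_map(HWIRQ_PERF);

		if (irq <= 0)	/* 0 means irq_create_mapping() failed */
			return irq ? irq : -ENXIO;
		return request_irq(irq, perf_irq_handler, 0, "metag-perf", NULL);
	}

The error handling mirrors the two failure modes above: -ENODEV before init_internal_IRQ() has run, and a zero mapping from irq_create_mapping().
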
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index a5702d74d2bd..3939829f6c5c 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -322,6 +322,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, | |||
322 | return 0; | 322 | return 0; |
323 | } | 323 | } |
324 | 324 | ||
325 | #ifndef elf_map | ||
326 | |||
325 | static unsigned long elf_map(struct file *filep, unsigned long addr, | 327 | static unsigned long elf_map(struct file *filep, unsigned long addr, |
326 | struct elf_phdr *eppnt, int prot, int type, | 328 | struct elf_phdr *eppnt, int prot, int type, |
327 | unsigned long total_size) | 329 | unsigned long total_size) |
@@ -356,6 +358,8 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, | |||
356 | return(map_addr); | 358 | return(map_addr); |
357 | } | 359 | } |
358 | 360 | ||
361 | #endif /* !elf_map */ | ||
362 | |||
359 | static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr) | 363 | static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr) |
360 | { | 364 | { |
361 | int i, first_idx = -1, last_idx = -1; | 365 | int i, first_idx = -1, last_idx = -1; |
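
The new #ifndef elf_map guard lets an architecture supply its own mapping routine from <asm/elf.h> (presumably what the Meta port needs), compiling out the generic version above. The override only has to keep the generic signature; a hypothetical sketch, with all names illustrative:

	/* in the architecture's <asm/elf.h>; names are illustrative */
	struct file;
	struct elf_phdr;

	extern unsigned long metag_elf_map(struct file *filep, unsigned long addr,
					   struct elf_phdr *eppnt, int prot,
					   int type, unsigned long total_size);
	#define elf_map metag_elf_map

Since fs/binfmt_elf.c only tests #ifndef elf_map, it is the macro definition that disables the generic routine; the function itself can live anywhere in arch code.
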
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index aba53083297d..ac9da00e9f2c 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h | |||
@@ -346,6 +346,7 @@ extern void ioport_unmap(void __iomem *p); | |||
346 | #define xlate_dev_kmem_ptr(p) p | 346 | #define xlate_dev_kmem_ptr(p) p |
347 | #define xlate_dev_mem_ptr(p) __va(p) | 347 | #define xlate_dev_mem_ptr(p) __va(p) |
348 | 348 | ||
349 | #ifdef CONFIG_VIRT_TO_BUS | ||
349 | #ifndef virt_to_bus | 350 | #ifndef virt_to_bus |
350 | static inline unsigned long virt_to_bus(volatile void *address) | 351 | static inline unsigned long virt_to_bus(volatile void *address) |
351 | { | 352 | { |
@@ -357,6 +358,7 @@ static inline void *bus_to_virt(unsigned long address) | |||
357 | return (void *) address; | 358 | return (void *) address; |
358 | } | 359 | } |
359 | #endif | 360 | #endif |
361 | #endif | ||
360 | 362 | ||
361 | #ifndef memset_io | 363 | #ifndef memset_io |
362 | #define memset_io(a, b, c) memset(__io_virt(a), (b), (c)) | 364 | #define memset_io(a, b, c) memset(__io_virt(a), (b), (c)) |
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index 257c55ec4f77..4077b5d9ff81 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h | |||
@@ -17,5 +17,12 @@ | |||
17 | * but it doesn't work on all toolchains, so we just do it by hand | 17 | * but it doesn't work on all toolchains, so we just do it by hand |
18 | */ | 18 | */ |
19 | #ifndef cond_syscall | 19 | #ifndef cond_syscall |
20 | #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") | 20 | #ifdef CONFIG_SYMBOL_PREFIX |
21 | #define __SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX | ||
22 | #else | ||
23 | #define __SYMBOL_PREFIX | ||
24 | #endif | ||
25 | #define cond_syscall(x) asm(".weak\t" __SYMBOL_PREFIX #x "\n\t" \ | ||
26 | ".set\t" __SYMBOL_PREFIX #x "," \ | ||
27 | __SYMBOL_PREFIX "sys_ni_syscall") | ||
21 | #endif | 28 | #endif |
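
With CONFIG_SYMBOL_PREFIX defined as "_" (for toolchains that prepend an underscore to C symbols, as the genksyms change later in this patch suggests metag's does), the string concatenation now yields prefixed assembler names. For example, cond_syscall(sys_fork) expands to:

	asm(".weak\t_sys_fork\n\t.set\t_sys_fork,_sys_ni_syscall");

so the weak alias is created for the symbol name the assembler and linker actually see. Without CONFIG_SYMBOL_PREFIX the expansion is unchanged from before.
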
diff --git a/include/clocksource/metag_generic.h b/include/clocksource/metag_generic.h new file mode 100644 index 000000000000..ac17e7d06cfb --- /dev/null +++ b/include/clocksource/metag_generic.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | */ | ||
16 | #ifndef __CLKSOURCE_METAG_GENERIC_H | ||
17 | #define __CLKSOURCE_METAG_GENERIC_H | ||
18 | |||
19 | extern int metag_generic_timer_init(void); | ||
20 | |||
21 | #endif /* __CLKSOURCE_METAG_GENERIC_H */ | ||
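
The caller is expected to be the architecture's clock setup code; a hypothetical sketch (only metag_generic_timer_init() comes from this header, the surrounding hook is illustrative):

	#include <linux/init.h>
	#include <clocksource/metag_generic.h>

	void __init time_init(void)
	{
		/* register the generic Meta TXTIMER clocksource/events */
		metag_generic_timer_init();
	}
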
diff --git a/include/linux/irqchip/metag-ext.h b/include/linux/irqchip/metag-ext.h new file mode 100644 index 000000000000..697af0fe7c5a --- /dev/null +++ b/include/linux/irqchip/metag-ext.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Imagination Technologies | ||
3 | */ | ||
4 | |||
5 | #ifndef _LINUX_IRQCHIP_METAG_EXT_H_ | ||
6 | #define _LINUX_IRQCHIP_METAG_EXT_H_ | ||
7 | |||
8 | struct irq_data; | ||
9 | struct platform_device; | ||
10 | |||
11 | /* called from core irq code at init */ | ||
12 | int init_external_IRQ(void); | ||
13 | |||
14 | /* | ||
15 | * called from SoC init_irq() callback to dynamically indicate the lack of | ||
16 | * HWMASKEXT registers. | ||
17 | */ | ||
18 | void meta_intc_no_mask(void); | ||
19 | |||
20 | /* | ||
21 | * These allow SoCs to specialise the interrupt controller from their init_irq | ||
22 | * callbacks. | ||
23 | */ | ||
24 | |||
25 | extern struct irq_chip meta_intc_edge_chip; | ||
26 | extern struct irq_chip meta_intc_level_chip; | ||
27 | |||
28 | /* this should be called in the mask callback */ | ||
29 | void meta_intc_mask_irq_simple(struct irq_data *data); | ||
30 | /* this should be called in the unmask callback */ | ||
31 | void meta_intc_unmask_irq_simple(struct irq_data *data); | ||
32 | |||
33 | #endif /* _LINUX_IRQCHIP_METAG_EXT_H_ */ | ||
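
A SoC's init_irq() callback might use these hooks along the following lines; the SoC name and the shape of the mask callback are illustrative, not from this patch:

	#include <linux/init.h>
	#include <linux/irq.h>
	#include <linux/irqchip/metag-ext.h>

	static void mysoc_mask_irq(struct irq_data *data)
	{
		/* SoC-specific masking would go here first ... */

		/* ... then the generic part, as the comments above suggest */
		meta_intc_mask_irq_simple(data);
	}

	static void __init mysoc_init_irq(void)
	{
		/* this SoC has no HWMASKEXT registers */
		meta_intc_no_mask();

		/* specialise the exported edge chip's mask callback */
		meta_intc_edge_chip.irq_mask = mysoc_mask_irq;
	}
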
diff --git a/include/linux/irqchip/metag.h b/include/linux/irqchip/metag.h new file mode 100644 index 000000000000..4ebdfb3101ab --- /dev/null +++ b/include/linux/irqchip/metag.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Imagination Technologies | ||
3 | */ | ||
4 | |||
5 | #ifndef _LINUX_IRQCHIP_METAG_H_ | ||
6 | #define _LINUX_IRQCHIP_METAG_H_ | ||
7 | |||
8 | #include <linux/errno.h> | ||
9 | |||
10 | #ifdef CONFIG_METAG_PERFCOUNTER_IRQS | ||
11 | extern int init_internal_IRQ(void); | ||
12 | extern int internal_irq_map(unsigned int hw); | ||
13 | #else | ||
14 | static inline int init_internal_IRQ(void) | ||
15 | { | ||
16 | return 0; | ||
17 | } | ||
18 | static inline int internal_irq_map(unsigned int hw) | ||
19 | { | ||
20 | return -EINVAL; | ||
21 | } | ||
22 | #endif | ||
23 | |||
24 | #endif /* _LINUX_IRQCHIP_METAG_H_ */ | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index 1ede55f292c2..7acc9dc73c9f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -115,6 +115,8 @@ extern unsigned int kobjsize(const void *objp); | |||
115 | # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ | 115 | # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ |
116 | #elif defined(CONFIG_PARISC) | 116 | #elif defined(CONFIG_PARISC) |
117 | # define VM_GROWSUP VM_ARCH_1 | 117 | # define VM_GROWSUP VM_ARCH_1 |
118 | #elif defined(CONFIG_METAG) | ||
119 | # define VM_GROWSUP VM_ARCH_1 | ||
118 | #elif defined(CONFIG_IA64) | 120 | #elif defined(CONFIG_IA64) |
119 | # define VM_GROWSUP VM_ARCH_1 | 121 | # define VM_GROWSUP VM_ARCH_1 |
120 | #elif !defined(CONFIG_MMU) | 122 | #elif !defined(CONFIG_MMU) |
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 900b9484445b..8072d352b98f 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h | |||
@@ -395,6 +395,8 @@ typedef struct elf64_shdr { | |||
395 | #define NT_ARM_TLS 0x401 /* ARM TLS register */ | 395 | #define NT_ARM_TLS 0x401 /* ARM TLS register */ |
396 | #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ | 396 | #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ |
397 | #define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ | 397 | #define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ |
398 | #define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */ | ||
399 | #define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */ | ||
398 | 400 | ||
399 | 401 | ||
400 | /* Note header in a PT_NOTE section */ | 402 | /* Note header in a PT_NOTE section */ |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 7244acde77b0..6989df2ba194 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -178,7 +178,7 @@ void tracing_off_permanent(void) | |||
178 | #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) | 178 | #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) |
179 | #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ | 179 | #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ |
180 | 180 | ||
181 | #if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | 181 | #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS |
182 | # define RB_FORCE_8BYTE_ALIGNMENT 0 | 182 | # define RB_FORCE_8BYTE_ALIGNMENT 0 |
183 | # define RB_ARCH_ALIGNMENT RB_ALIGNMENT | 183 | # define RB_ARCH_ALIGNMENT RB_ALIGNMENT |
184 | #else | 184 | #else |
@@ -186,6 +186,8 @@ void tracing_off_permanent(void) | |||
186 | # define RB_ARCH_ALIGNMENT 8U | 186 | # define RB_ARCH_ALIGNMENT 8U |
187 | #endif | 187 | #endif |
188 | 188 | ||
189 | #define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT) | ||
190 | |||
189 | /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ | 191 | /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ |
190 | #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX | 192 | #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX |
191 | 193 | ||
@@ -334,7 +336,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data); | |||
334 | struct buffer_data_page { | 336 | struct buffer_data_page { |
335 | u64 time_stamp; /* page time stamp */ | 337 | u64 time_stamp; /* page time stamp */ |
336 | local_t commit; /* write committed index */ | 338 | local_t commit; /* write committed index */ |
337 | unsigned char data[]; /* data of buffer page */ | 339 | unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */ |
338 | }; | 340 | }; |
339 | 341 | ||
340 | /* | 342 | /* |
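
The new Kconfig symbol expresses the real constraint more directly than the old !CONFIG_64BIT test: what matters is not the word size but whether the architecture requires 64-bit quantities to be 64-bit aligned (a 32-bit port such as metag presumably selects HAVE_64BIT_ALIGNED_ACCESS). The added RB_ALIGN_DATA attribute then keeps buffer_data_page's flexible data[] array on an 8-byte boundary so the u64 timestamps written into it stay naturally aligned. A standalone illustration of the layout effect (plain C, not kernel code):

	#include <stddef.h>

	struct example_page {
		unsigned long long time_stamp;	/* 8 bytes, offset 0 */
		long commit;			/* 4 bytes on a 32-bit arch */
		unsigned char data[] __attribute__((aligned(8)));
	};

	/* without the aligned attribute, data would start at offset 12
	 * on a 32-bit arch; with it, the start is padded to offset 16 */
	_Static_assert(offsetof(struct example_page, data) % 8 == 0,
		       "page data must start 64-bit aligned");
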
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index e4a7f808fa06..28be08c09bab 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -674,7 +674,7 @@ config STACKTRACE | |||
674 | 674 | ||
675 | config DEBUG_STACK_USAGE | 675 | config DEBUG_STACK_USAGE |
676 | bool "Stack utilization instrumentation" | 676 | bool "Stack utilization instrumentation" |
677 | depends on DEBUG_KERNEL && !IA64 && !PARISC | 677 | depends on DEBUG_KERNEL && !IA64 && !PARISC && !METAG |
678 | help | 678 | help |
679 | Enables the display of the minimum amount of free stack which each | 679 | Enables the display of the minimum amount of free stack which each |
680 | task has ever had available in the sysrq-T and sysrq-P debug output. | 680 | task has ever had available in the sysrq-T and sysrq-P debug output. |
@@ -855,7 +855,7 @@ config FRAME_POINTER | |||
855 | bool "Compile the kernel with frame pointers" | 855 | bool "Compile the kernel with frame pointers" |
856 | depends on DEBUG_KERNEL && \ | 856 | depends on DEBUG_KERNEL && \ |
857 | (CRIS || M68K || FRV || UML || \ | 857 | (CRIS || M68K || FRV || UML || \ |
858 | AVR32 || SUPERH || BLACKFIN || MN10300) || \ | 858 | AVR32 || SUPERH || BLACKFIN || MN10300 || METAG) || \ |
859 | ARCH_WANT_FRAME_POINTERS | 859 | ARCH_WANT_FRAME_POINTERS |
860 | default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS | 860 | default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS |
861 | help | 861 | help |
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl index 17e384396705..544aa56b6200 100755 --- a/scripts/checkstack.pl +++ b/scripts/checkstack.pl | |||
@@ -34,7 +34,7 @@ use strict; | |||
34 | # $1 (first bracket) matches the dynamic amount of the stack growth | 34 | # $1 (first bracket) matches the dynamic amount of the stack growth |
35 | # | 35 | # |
36 | # use anything else and feel the pain ;) | 36 | # use anything else and feel the pain ;) |
37 | my (@stack, $re, $dre, $x, $xs); | 37 | my (@stack, $re, $dre, $x, $xs, $funcre); |
38 | { | 38 | { |
39 | my $arch = shift; | 39 | my $arch = shift; |
40 | if ($arch eq "") { | 40 | if ($arch eq "") { |
@@ -44,6 +44,7 @@ my (@stack, $re, $dre, $x, $xs); | |||
44 | 44 | ||
45 | $x = "[0-9a-f]"; # hex character | 45 | $x = "[0-9a-f]"; # hex character |
46 | $xs = "[0-9a-f ]"; # hex character or space | 46 | $xs = "[0-9a-f ]"; # hex character or space |
47 | $funcre = qr/^$x* <(.*)>:$/; | ||
47 | if ($arch eq 'arm') { | 48 | if ($arch eq 'arm') { |
48 | #c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64 | 49 | #c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64 |
49 | $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o; | 50 | $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o; |
@@ -66,6 +67,10 @@ my (@stack, $re, $dre, $x, $xs); | |||
66 | # 2b6c: 4e56 fb70 linkw %fp,#-1168 | 67 | # 2b6c: 4e56 fb70 linkw %fp,#-1168 |
67 | # 1df770: defc ffe4 addaw #-28,%sp | 68 | # 1df770: defc ffe4 addaw #-28,%sp |
68 | $re = qr/.*(?:linkw %fp,|addaw )#-([0-9]{1,4})(?:,%sp)?$/o; | 69 | $re = qr/.*(?:linkw %fp,|addaw )#-([0-9]{1,4})(?:,%sp)?$/o; |
70 | } elsif ($arch eq 'metag') { | ||
71 | #400026fc: 40 00 00 82 ADD A0StP,A0StP,#0x8 | ||
72 | $re = qr/.*ADD.*A0StP,A0StP,\#(0x$x{1,8})/o; | ||
73 | $funcre = qr/^$x* <[^\$](.*)>:$/; | ||
69 | } elsif ($arch eq 'mips64') { | 74 | } elsif ($arch eq 'mips64') { |
70 | #8800402c: 67bdfff0 daddiu sp,sp,-16 | 75 | #8800402c: 67bdfff0 daddiu sp,sp,-16 |
71 | $re = qr/.*daddiu.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o; | 76 | $re = qr/.*daddiu.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o; |
@@ -109,7 +114,6 @@ my (@stack, $re, $dre, $x, $xs); | |||
109 | # | 114 | # |
110 | # main() | 115 | # main() |
111 | # | 116 | # |
112 | my $funcre = qr/^$x* <(.*)>:$/; | ||
113 | my ($func, $file, $lastslash); | 117 | my ($func, $file, $lastslash); |
114 | 118 | ||
115 | while (my $line = <STDIN>) { | 119 | while (my $line = <STDIN>) { |
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c index 8a106499ec4f..d25e4a118d37 100644 --- a/scripts/genksyms/genksyms.c +++ b/scripts/genksyms/genksyms.c | |||
@@ -826,7 +826,8 @@ int main(int argc, char **argv) | |||
826 | genksyms_usage(); | 826 | genksyms_usage(); |
827 | return 1; | 827 | return 1; |
828 | } | 828 | } |
829 | if ((strcmp(arch, "h8300") == 0) || (strcmp(arch, "blackfin") == 0)) | 829 | if ((strcmp(arch, "h8300") == 0) || (strcmp(arch, "blackfin") == 0) || |
830 | (strcmp(arch, "metag") == 0)) | ||
830 | mod_prefix = "_"; | 831 | mod_prefix = "_"; |
831 | { | 832 | { |
832 | extern int yydebug; | 833 | extern int yydebug; |
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index ee52cb8e17ad..9c22317778eb 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c | |||
@@ -33,6 +33,13 @@ | |||
33 | #include <string.h> | 33 | #include <string.h> |
34 | #include <unistd.h> | 34 | #include <unistd.h> |
35 | 35 | ||
36 | #ifndef EM_METAG | ||
37 | /* Remove this when these make it to the standard system elf.h. */ | ||
38 | #define EM_METAG 174 | ||
39 | #define R_METAG_ADDR32 2 | ||
40 | #define R_METAG_NONE 3 | ||
41 | #endif | ||
42 | |||
36 | static int fd_map; /* File descriptor for file being modified. */ | 43 | static int fd_map; /* File descriptor for file being modified. */ |
37 | static int mmap_failed; /* Boolean flag. */ | 44 | static int mmap_failed; /* Boolean flag. */ |
38 | static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */ | 45 | static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */ |
@@ -341,6 +348,12 @@ do_file(char const *const fname) | |||
341 | altmcount = "__gnu_mcount_nc"; | 348 | altmcount = "__gnu_mcount_nc"; |
342 | break; | 349 | break; |
343 | case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break; | 350 | case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break; |
351 | case EM_METAG: reltype = R_METAG_ADDR32; | ||
352 | altmcount = "_mcount_wrapper"; | ||
353 | rel_type_nop = R_METAG_NONE; | ||
354 | /* We happen to have the same requirement as MIPS */ | ||
355 | is_fake_mcount32 = MIPS32_is_fake_mcount; | ||
356 | break; | ||
344 | case EM_MIPS: /* reltype: e_class */ gpfx = '_'; break; | 357 | case EM_MIPS: /* reltype: e_class */ gpfx = '_'; break; |
345 | case EM_PPC: reltype = R_PPC_ADDR32; gpfx = '_'; break; | 358 | case EM_PPC: reltype = R_PPC_ADDR32; gpfx = '_'; break; |
346 | case EM_PPC64: reltype = R_PPC64_ADDR64; gpfx = '_'; break; | 359 | case EM_PPC64: reltype = R_PPC64_ADDR64; gpfx = '_'; break; |
diff --git a/tools/perf/perf.h b/tools/perf/perf.h index d5818c98d051..74659ecf93e0 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h | |||
@@ -100,6 +100,12 @@ | |||
100 | #define CPUINFO_PROC "Processor" | 100 | #define CPUINFO_PROC "Processor" |
101 | #endif | 101 | #endif |
102 | 102 | ||
103 | #ifdef __metag__ | ||
104 | #define rmb() asm volatile("" ::: "memory") | ||
105 | #define cpu_relax() asm volatile("" ::: "memory") | ||
106 | #define CPUINFO_PROC "CPU" | ||
107 | #endif | ||
108 | |||
103 | #include <time.h> | 109 | #include <time.h> |
104 | #include <unistd.h> | 110 | #include <unistd.h> |
105 | #include <sys/types.h> | 111 | #include <sys/types.h> |