Diffstat (limited to 'arch/tile/kernel')
35 files changed, 14250 insertions, 0 deletions
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
new file mode 100644
index 000000000000..112b1e248f05
--- /dev/null
+++ b/arch/tile/kernel/Makefile
@@ -0,0 +1,17 @@
#
# Makefile for the Linux/TILE kernel.
#

extra-y := vmlinux.lds head_$(BITS).o
obj-y := backtrace.o entry.o init_task.o irq.o messaging.o \
        pci-dma.o proc.o process.o ptrace.o reboot.o \
        setup.o signal.o single_step.o stack.o sys.o time.o traps.o \
        intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o

obj-$(CONFIG_HARDWALL)          += hardwall.o
obj-$(CONFIG_TILEGX)            += futex_64.o
obj-$(CONFIG_COMPAT)            += compat.o compat_signal.o
obj-$(CONFIG_SMP)               += smpboot.o smp.o tlb.o
obj-$(CONFIG_MODULES)           += module.o
obj-$(CONFIG_EARLY_PRINTK)      += early_printk.o
obj-$(CONFIG_KEXEC)             += machine_kexec.o relocate_kernel.o
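
A note on the kbuild idiom above, for readers new to kernel Makefiles: each config symbol expands to "y", "m", or nothing, so a conditional line lands in the linked obj-y list only when the feature is enabled. An illustrative expansion (comment only, not part of the commit):

    # With CONFIG_SMP=y, the line
    #   obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
    # expands to
    #   obj-y += smpboot.o smp.o tlb.o
    # With CONFIG_SMP unset it becomes "obj- += ...", a list kbuild never builds.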
diff --git a/arch/tile/kernel/asm-offsets.c b/arch/tile/kernel/asm-offsets.c
new file mode 100644
index 000000000000..01ddf19cc36d
--- /dev/null
+++ b/arch/tile/kernel/asm-offsets.c
@@ -0,0 +1,76 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Generates definitions from C-type structures used by assembly sources.
 */

#include <linux/kbuild.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/ptrace.h>
#include <hv/hypervisor.h>

/* Check for compatible compiler early in the build. */
#ifdef CONFIG_TILEGX
# ifndef __tilegx__
#  error Can only build TILE-Gx configurations with tilegx compiler
# endif
# ifndef __LP64__
#  error Must not specify -m32 when building the TILE-Gx kernel
# endif
#else
# ifdef __tilegx__
#  error Cannot build TILEPro/TILE64 configurations with tilegx compiler
# endif
#endif

void foo(void)
{
        DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET, \
               offsetof(struct single_step_state, buffer));
        DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET, \
               offsetof(struct single_step_state, flags));
        DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET, \
               offsetof(struct single_step_state, orig_pc));
        DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET, \
               offsetof(struct single_step_state, next_pc));
        DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET, \
               offsetof(struct single_step_state, branch_next_pc));
        DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET, \
               offsetof(struct single_step_state, update_value));

        DEFINE(THREAD_INFO_TASK_OFFSET, \
               offsetof(struct thread_info, task));
        DEFINE(THREAD_INFO_FLAGS_OFFSET, \
               offsetof(struct thread_info, flags));
        DEFINE(THREAD_INFO_STATUS_OFFSET, \
               offsetof(struct thread_info, status));
        DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET, \
               offsetof(struct thread_info, homecache_cpu));
        DEFINE(THREAD_INFO_STEP_STATE_OFFSET, \
               offsetof(struct thread_info, step_state));

        DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET,
               offsetof(struct task_struct, thread.ksp));
        DEFINE(TASK_STRUCT_THREAD_PC_OFFSET,
               offsetof(struct task_struct, thread.pc));

        DEFINE(HV_TOPOLOGY_WIDTH_OFFSET, \
               offsetof(HV_Topology, width));
        DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET, \
               offsetof(HV_Topology, height));

        DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET, \
               offsetof(irq_cpustat_t, irq_syscall_count));
}
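
This file is compiled but never linked into the kernel; kbuild extracts the DEFINE() markers from the generated assembly into a header that assembly sources include. A hedged sketch of that flow (the exact marker text and output path follow the usual kbuild convention and are not shown in this diff):

    /* DEFINE(THREAD_INFO_TASK_OFFSET, offsetof(struct thread_info, task))
     * emits an assembly-level marker roughly like:
     *     ->THREAD_INFO_TASK_OFFSET $0 offsetof(struct thread_info, task)
     * which kbuild rewrites into:
     *     #define THREAD_INFO_TASK_OFFSET 0   (value illustrative)
     * so that .S files can use the structure offset as a plain immediate.
     */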
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
new file mode 100644
index 000000000000..77265f3b58d6
--- /dev/null
+++ b/arch/tile/kernel/backtrace.c
@@ -0,0 +1,621 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/string.h>

#include <asm/backtrace.h>

#include <arch/chip.h>

#if TILE_CHIP < 10


#include <asm/opcode-tile.h>


#define TREG_SP 54
#define TREG_LR 55


/** A decoded bundle used for backtracer analysis. */
struct BacktraceBundle {
        tile_bundle_bits bits;
        int num_insns;
        struct tile_decoded_instruction
                insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
};


/* This implementation only makes sense for native tools. */
/** Default function to read memory. */
static bool bt_read_memory(void *result, VirtualAddress addr,
                           size_t size, void *extra)
{
        /* FIXME: this should do some horrible signal stuff to catch
         * SEGV cleanly and fail.
         *
         * Or else the caller should do the setjmp for efficiency.
         */

        memcpy(result, (const void *)addr, size);
        return true;
}


/** Locates an instruction inside the given bundle that
 * has the specified mnemonic, and whose first 'num_operands_to_match'
 * operands exactly match those in 'operand_values'.
 */
static const struct tile_decoded_instruction *find_matching_insn(
        const struct BacktraceBundle *bundle,
        tile_mnemonic mnemonic,
        const int *operand_values,
        int num_operands_to_match)
{
        int i, j;
        bool match;

        for (i = 0; i < bundle->num_insns; i++) {
                const struct tile_decoded_instruction *insn =
                        &bundle->insns[i];

                if (insn->opcode->mnemonic != mnemonic)
                        continue;

                match = true;
                for (j = 0; j < num_operands_to_match; j++) {
                        if (operand_values[j] != insn->operand_values[j]) {
                                match = false;
                                break;
                        }
                }

                if (match)
                        return insn;
        }

        return NULL;
}

/** Does this bundle contain an 'iret' instruction? */
static inline bool bt_has_iret(const struct BacktraceBundle *bundle)
{
        return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
}

/** Does this bundle contain an 'addi sp, sp, OFFSET' or
 * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
 */
static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
{
        static const int vals[2] = { TREG_SP, TREG_SP };

        const struct tile_decoded_instruction *insn =
                find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
        if (insn == NULL)
                insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
        if (insn == NULL)
                return false;

        *adjust = insn->operand_values[2];
        return true;
}

/** Does this bundle contain any 'info OP' or 'infol OP'
 * instructions, and if so, what are their OPs?  Note that OP is interpreted
 * as an unsigned value by this code since that's what the caller wants.
 * Returns the number of info ops found.
 */
static int bt_get_info_ops(const struct BacktraceBundle *bundle,
                           int operands[MAX_INFO_OPS_PER_BUNDLE])
{
        int num_ops = 0;
        int i;

        for (i = 0; i < bundle->num_insns; i++) {
                const struct tile_decoded_instruction *insn =
                        &bundle->insns[i];

                if (insn->opcode->mnemonic == TILE_OPC_INFO ||
                    insn->opcode->mnemonic == TILE_OPC_INFOL) {
                        operands[num_ops++] = insn->operand_values[0];
                }
        }

        return num_ops;
}

/** Does this bundle contain a jrp instruction, and if so, to which
 * register is it jumping?
 */
static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg)
{
        const struct tile_decoded_instruction *insn =
                find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0);
        if (insn == NULL)
                return false;

        *target_reg = insn->operand_values[0];
        return true;
}

/** Does this bundle modify the specified register in any way? */
static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg)
{
        int i, j;
        for (i = 0; i < bundle->num_insns; i++) {
                const struct tile_decoded_instruction *insn =
                        &bundle->insns[i];

                if (insn->opcode->implicitly_written_register == reg)
                        return true;

                for (j = 0; j < insn->opcode->num_operands; j++)
                        if (insn->operands[j]->is_dest_reg &&
                            insn->operand_values[j] == reg)
                                return true;
        }

        return false;
}

/** Does this bundle modify sp? */
static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle)
{
        return bt_modifies_reg(bundle, TREG_SP);
}

/** Does this bundle modify lr? */
static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle)
{
        return bt_modifies_reg(bundle, TREG_LR);
}

/** Does this bundle contain the instruction 'move fp, sp'? */
static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle)
{
        static const int vals[2] = { 52, TREG_SP };
        return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
}

/** Does this bundle contain the instruction 'sw sp, lr'? */
static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
{
        static const int vals[2] = { TREG_SP, TREG_LR };
        return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL;
}

/** Locates the caller's PC and SP for a program starting at the
 * given address.
 */
static void find_caller_pc_and_caller_sp(CallerLocation *location,
                                         const VirtualAddress start_pc,
                                         BacktraceMemoryReader read_memory_func,
                                         void *read_memory_func_extra)
{
        /* Have we explicitly decided what the sp is,
         * rather than just the default?
         */
        bool sp_determined = false;

        /* Has any bundle seen so far modified lr? */
        bool lr_modified = false;

        /* Have we seen a move from sp to fp? */
        bool sp_moved_to_r52 = false;

        /* Have we seen a terminating bundle? */
        bool seen_terminating_bundle = false;

        /* Cut down on round-trip reading overhead by reading several
         * bundles at a time.
         */
        tile_bundle_bits prefetched_bundles[32];
        int num_bundles_prefetched = 0;
        int next_bundle = 0;
        VirtualAddress pc;

        /* Default to assuming that the caller's sp is the current sp.
         * This is necessary to handle the case where we start backtracing
         * right at the end of the epilog.
         */
        location->sp_location = SP_LOC_OFFSET;
        location->sp_offset = 0;

        /* Default to having no idea where the caller PC is. */
        location->pc_location = PC_LOC_UNKNOWN;

        /* Don't even try if the PC is not aligned. */
        if (start_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0)
                return;

        for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) {

                struct BacktraceBundle bundle;
                int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE];
                int one_ago, jrp_reg;
                bool has_jrp;

                if (next_bundle >= num_bundles_prefetched) {
                        /* Prefetch some bytes, but don't cross a page
                         * boundary since that might cause a read failure we
                         * don't care about if we only need the first few
                         * bytes.  Note: we don't care what the actual page
                         * size is; using the minimum possible page size will
                         * prevent any problems.
                         */
                        unsigned int bytes_to_prefetch = 4096 - (pc & 4095);
                        if (bytes_to_prefetch > sizeof prefetched_bundles)
                                bytes_to_prefetch = sizeof prefetched_bundles;

                        if (!read_memory_func(prefetched_bundles, pc,
                                              bytes_to_prefetch,
                                              read_memory_func_extra)) {
                                if (pc == start_pc) {
                                        /* The program probably called a bad
                                         * address, such as a NULL pointer.
                                         * So treat this as if we are at the
                                         * start of the function prolog so the
                                         * backtrace will show how we got here.
                                         */
                                        location->pc_location = PC_LOC_IN_LR;
                                        return;
                                }

                                /* Unreadable address.  Give up. */
                                break;
                        }

                        next_bundle = 0;
                        num_bundles_prefetched =
                                bytes_to_prefetch / sizeof(tile_bundle_bits);
                }

                /* Decode the next bundle. */
                bundle.bits = prefetched_bundles[next_bundle++];
                bundle.num_insns =
                        parse_insn_tile(bundle.bits, pc, bundle.insns);
                num_info_ops = bt_get_info_ops(&bundle, info_operands);

                /* First look at any one_ago info ops if they are interesting,
                 * since they should shadow any non-one-ago info ops.
                 */
                for (one_ago = (pc != start_pc) ? 1 : 0;
                     one_ago >= 0; one_ago--) {
                        int i;
                        for (i = 0; i < num_info_ops; i++) {
                                int info_operand = info_operands[i];
                                if (info_operand < CALLER_UNKNOWN_BASE) {
                                        /* Weird; reserved value, ignore it. */
                                        continue;
                                }

                                /* Skip info ops which are not in the
                                 * "one_ago" mode we want right now.
                                 */
                                if (((info_operand & ONE_BUNDLE_AGO_FLAG) != 0)
                                    != (one_ago != 0))
                                        continue;

                                /* Clear the flag to make later checking
                                 * easier. */
                                info_operand &= ~ONE_BUNDLE_AGO_FLAG;

                                /* Default to looking at PC_IN_LR_FLAG. */
                                if (info_operand & PC_IN_LR_FLAG)
                                        location->pc_location =
                                                PC_LOC_IN_LR;
                                else
                                        location->pc_location =
                                                PC_LOC_ON_STACK;

                                switch (info_operand) {
                                case CALLER_UNKNOWN_BASE:
                                        location->pc_location = PC_LOC_UNKNOWN;
                                        location->sp_location = SP_LOC_UNKNOWN;
                                        return;

                                case CALLER_SP_IN_R52_BASE:
                                case CALLER_SP_IN_R52_BASE | PC_IN_LR_FLAG:
                                        location->sp_location = SP_LOC_IN_R52;
                                        return;

                                default:
                                {
                                        const unsigned int val = info_operand
                                                - CALLER_SP_OFFSET_BASE;
                                        const unsigned int sp_offset =
                                                (val >> NUM_INFO_OP_FLAGS) * 8;
                                        if (sp_offset < 32768) {
                                                /* This is a properly encoded
                                                 * SP offset. */
                                                location->sp_location =
                                                        SP_LOC_OFFSET;
                                                location->sp_offset =
                                                        sp_offset;
                                                return;
                                        } else {
                                                /* This looked like an SP
                                                 * offset, but it's outside
                                                 * the legal range, so this
                                                 * must be an unrecognized
                                                 * info operand.  Ignore it.
                                                 */
                                        }
                                }
                                        break;
                                }
                        }
                }

                if (seen_terminating_bundle) {
                        /* We saw a terminating bundle during the previous
                         * iteration, so we were only looking for an info op.
                         */
                        break;
                }

                if (bundle.bits == 0) {
                        /* Wacky terminating bundle.  Stop looping, and hope
                         * we've already seen enough to find the caller.
                         */
                        break;
                }

                /*
                 * Try to determine caller's SP.
                 */

                if (!sp_determined) {
                        int adjust;
                        if (bt_has_addi_sp(&bundle, &adjust)) {
                                location->sp_location = SP_LOC_OFFSET;

                                if (adjust <= 0) {
                                        /* We are in prolog about to adjust
                                         * SP. */
                                        location->sp_offset = 0;
                                } else {
                                        /* We are in epilog restoring SP. */
                                        location->sp_offset = adjust;
                                }

                                sp_determined = true;
                        } else {
                                if (bt_has_move_r52_sp(&bundle)) {
                                        /* Maybe in prolog, creating an
                                         * alloca-style frame.  But maybe in
                                         * the middle of a fixed-size frame
                                         * clobbering r52 with SP.
                                         */
                                        sp_moved_to_r52 = true;
                                }

                                if (bt_modifies_sp(&bundle)) {
                                        if (sp_moved_to_r52) {
                                                /* We saw SP get saved into
                                                 * r52 earlier (or now), which
                                                 * must have been in the
                                                 * prolog, so we now know that
                                                 * SP is still holding the
                                                 * caller's sp value.
                                                 */
                                                location->sp_location =
                                                        SP_LOC_OFFSET;
                                                location->sp_offset = 0;
                                        } else {
                                                /* Someone must have saved
                                                 * aside the caller's SP value
                                                 * into r52, so r52 holds the
                                                 * current value.
                                                 */
                                                location->sp_location =
                                                        SP_LOC_IN_R52;
                                        }
                                        sp_determined = true;
                                }
                        }
                }

                if (bt_has_iret(&bundle)) {
                        /* This is a terminating bundle. */
                        seen_terminating_bundle = true;
                        continue;
                }

                /*
                 * Try to determine caller's PC.
                 */

                jrp_reg = -1;
                has_jrp = bt_has_jrp(&bundle, &jrp_reg);
                if (has_jrp)
                        seen_terminating_bundle = true;

                if (location->pc_location == PC_LOC_UNKNOWN) {
                        if (has_jrp) {
                                if (jrp_reg == TREG_LR && !lr_modified) {
                                        /* Looks like a leaf function, or else
                                         * lr is already restored. */
                                        location->pc_location =
                                                PC_LOC_IN_LR;
                                } else {
                                        location->pc_location =
                                                PC_LOC_ON_STACK;
                                }
                        } else if (bt_has_sw_sp_lr(&bundle)) {
                                /* In prolog, spilling initial lr to stack. */
                                location->pc_location = PC_LOC_IN_LR;
                        } else if (bt_modifies_lr(&bundle)) {
                                lr_modified = true;
                        }
                }
        }
}
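
/* Editor's aside, a worked example of the info-op decoding above
 * (numbers illustrative; not part of the original commit): an operand of
 * CALLER_SP_OFFSET_BASE + (24 << NUM_INFO_OP_FLAGS) with PC_IN_LR_FLAG
 * clear decodes to sp_offset = 24 * 8 = 192 bytes and pc_location =
 * PC_LOC_ON_STACK; with PC_IN_LR_FLAG set, the same SP offset is
 * reported but the caller's PC is taken from lr instead.
 */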
void backtrace_init(BacktraceIterator *state,
                    BacktraceMemoryReader read_memory_func,
                    void *read_memory_func_extra,
                    VirtualAddress pc, VirtualAddress lr,
                    VirtualAddress sp, VirtualAddress r52)
{
        CallerLocation location;
        VirtualAddress fp, initial_frame_caller_pc;

        if (read_memory_func == NULL) {
                read_memory_func = bt_read_memory;
        }

        /* Find out where we are in the initial frame. */
        find_caller_pc_and_caller_sp(&location, pc,
                                     read_memory_func, read_memory_func_extra);

        switch (location.sp_location) {
        case SP_LOC_UNKNOWN:
                /* Give up. */
                fp = -1;
                break;

        case SP_LOC_IN_R52:
                fp = r52;
                break;

        case SP_LOC_OFFSET:
                fp = sp + location.sp_offset;
                break;

        default:
                /* Give up. */
                fp = -1;
                break;
        }

        /* The frame pointer should theoretically be aligned mod 8.  If
         * it's not even aligned mod 4 then something terrible happened
         * and we should mark it as invalid.
         */
        if (fp % 4 != 0)
                fp = -1;

        /* -1 means "don't know initial_frame_caller_pc". */
        initial_frame_caller_pc = -1;

        switch (location.pc_location) {
        case PC_LOC_UNKNOWN:
                /* Give up. */
                fp = -1;
                break;

        case PC_LOC_IN_LR:
                if (lr == 0 || lr % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
                        /* Give up. */
                        fp = -1;
                } else {
                        initial_frame_caller_pc = lr;
                }
                break;

        case PC_LOC_ON_STACK:
                /* Leave initial_frame_caller_pc as -1,
                 * meaning check the stack.
                 */
                break;

        default:
                /* Give up. */
                fp = -1;
                break;
        }

        state->pc = pc;
        state->sp = sp;
        state->fp = fp;
        state->initial_frame_caller_pc = initial_frame_caller_pc;
        state->read_memory_func = read_memory_func;
        state->read_memory_func_extra = read_memory_func_extra;
}

bool backtrace_next(BacktraceIterator *state)
{
        VirtualAddress next_fp, next_pc, next_frame[2];

        if (state->fp == -1) {
                /* No parent frame. */
                return false;
        }

        /* Try to read the frame linkage data chaining to the next function. */
        if (!state->read_memory_func(&next_frame, state->fp, sizeof next_frame,
                                     state->read_memory_func_extra)) {
                return false;
        }

        next_fp = next_frame[1];
        if (next_fp % 4 != 0) {
                /* Caller's frame pointer is suspect, so give up.
                 * Technically it should be aligned mod 8, but we will
                 * be forgiving here.
                 */
                return false;
        }

        if (state->initial_frame_caller_pc != -1) {
                /* We must be in the initial stack frame and already know the
                 * caller PC.
                 */
                next_pc = state->initial_frame_caller_pc;

                /* Force reading the stack next time, in case we were in the
                 * initial frame.  We don't do this above simply so that, if
                 * we return false, the struct is left completely unchanged.
                 */
                state->initial_frame_caller_pc = -1;
        } else {
                /* Get the caller PC from the frame linkage area. */
                next_pc = next_frame[0];
                if (next_pc == 0 ||
                    next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
                        /* The PC is suspect, so give up. */
                        return false;
                }
        }

        /* Update state to become the caller's stack frame. */
        state->pc = next_pc;
        state->sp = state->fp;
        state->fp = next_fp;

        return true;
}

#else /* TILE_CHIP < 10 */

void backtrace_init(BacktraceIterator *state,
                    BacktraceMemoryReader read_memory_func,
                    void *read_memory_func_extra,
                    VirtualAddress pc, VirtualAddress lr,
                    VirtualAddress sp, VirtualAddress r52)
{
        state->pc = pc;
        state->sp = sp;
        state->fp = -1;
        state->initial_frame_caller_pc = -1;
        state->read_memory_func = read_memory_func;
        state->read_memory_func_extra = read_memory_func_extra;
}

bool backtrace_next(BacktraceIterator *state) { return false; }

#endif /* TILE_CHIP < 10 */
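
Taken together, backtrace_init() and backtrace_next() form a small iterator API. A minimal usage sketch (illustrative, not code from this commit; passing a NULL reader selects the native bt_read_memory() and so only works on directly readable addresses):

    BacktraceIterator it;

    backtrace_init(&it, NULL, NULL, pc, lr, sp, r52);
    do {
            printk("  frame pc %#lx sp %#lx\n",
                   (unsigned long)it.pc, (unsigned long)it.sp);
    } while (backtrace_next(&it));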
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
new file mode 100644
index 000000000000..b1e06d041555
--- /dev/null
+++ b/arch/tile/kernel/compat.c
@@ -0,0 +1,167 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

/* Adjust unistd.h to provide 32-bit numbers and functions. */
#define __SYSCALL_COMPAT

#include <linux/compat.h>
#include <linux/msg.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/uaccess.h>
#include <linux/signal.h>
#include <asm/syscalls.h>

/*
 * Syscalls that take 64-bit numbers traditionally take them in 32-bit
 * "high" and "low" value parts on 32-bit architectures.
 * In principle, one could imagine passing some register arguments as
 * fully 64-bit on TILE-Gx in 32-bit mode, but it seems easier to
 * adopt the usual convention.
 */

long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high)
{
        return sys_truncate(filename, ((loff_t)high << 32) | low);
}

long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high)
{
        return sys_ftruncate(fd, ((loff_t)high << 32) | low);
}

long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
                        u32 dummy, u32 low, u32 high)
{
        return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low);
}

long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
                         u32 dummy, u32 low, u32 high)
{
        return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
}

long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len)
{
        return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len);
}

long compat_sys_sync_file_range2(int fd, unsigned int flags,
                                 u32 offset_lo, u32 offset_hi,
                                 u32 nbytes_lo, u32 nbytes_hi)
{
        return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
                                   ((loff_t)nbytes_hi << 32) | nbytes_lo,
                                   flags);
}

long compat_sys_fallocate(int fd, int mode,
                          u32 offset_lo, u32 offset_hi,
                          u32 len_lo, u32 len_hi)
{
        return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
                             ((loff_t)len_hi << 32) | len_lo);
}



long compat_sys_sched_rr_get_interval(compat_pid_t pid,
                                      struct compat_timespec __user *interval)
{
        struct timespec t;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_sched_rr_get_interval(pid,
                                        (struct timespec __force __user *)&t);
        set_fs(old_fs);
        if (put_compat_timespec(&t, interval))
                return -EFAULT;
        return ret;
}
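
/* Editor's aside (not part of the original commit): the get_fs()/
 * set_fs(KERNEL_DS) dance above is the usual compat-layer pattern.
 * Temporarily widening the addressable range lets the native syscall
 * accept a kernel-stack timespec (cast with __force __user), after
 * which the result is converted to the 32-bit layout the caller
 * actually passed in.
 */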
/*
 * The usual compat_sys_msgsnd() and _msgrcv() seem to assume a
 * different calling convention than our normal 32-bit tile code uses.
 */

/* Already defined in ipc/compat.c, but we need it here. */
struct compat_msgbuf {
        compat_long_t mtype;
        char mtext[1];
};

long tile_compat_sys_msgsnd(int msqid,
                            struct compat_msgbuf __user *msgp,
                            size_t msgsz, int msgflg)
{
        compat_long_t mtype;

        if (get_user(mtype, &msgp->mtype))
                return -EFAULT;
        return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}

long tile_compat_sys_msgrcv(int msqid,
                            struct compat_msgbuf __user *msgp,
                            size_t msgsz, long msgtyp, int msgflg)
{
        long err, mtype;

        err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
        if (err < 0)
                goto out;

        if (put_user(mtype, &msgp->mtype))
                err = -EFAULT;
out:
        return err;
}

/* Provide the compat syscall number to call mapping. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (compat_##call),

/* The generic versions of these don't work for Tile. */
#define compat_sys_msgrcv tile_compat_sys_msgrcv
#define compat_sys_msgsnd tile_compat_sys_msgsnd

/* See comments in sys.c */
#define compat_sys_fadvise64 sys32_fadvise64
#define compat_sys_fadvise64_64 sys32_fadvise64_64
#define compat_sys_readahead sys32_readahead
#define compat_sys_sync_file_range compat_sys_sync_file_range2

/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
#define compat_sys_stat64 sys_newstat
#define compat_sys_lstat64 sys_newlstat
#define compat_sys_fstat64 sys_newfstat
#define compat_sys_fstatat64 sys_newfstatat

/* Pass full 64-bit values through ptrace. */
#define compat_sys_ptrace tile_compat_sys_ptrace

/*
 * Note that we can't include <linux/unistd.h> here since the header
 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
 */
void *compat_sys_call_table[__NR_syscalls] = {
        [0 ... __NR_syscalls-1] = sys_ni_syscall,
#include <asm/unistd.h>
};
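
The table above is built by re-including the syscall list with __SYSCALL redefined. A sketch of one expansion (the exact header line is not shown in this diff, so treat the entry as illustrative):

    /* A header line such as
     *     __SYSCALL(__NR_msgsnd, sys_msgsnd)
     * expands under the #define above to the designated initializer
     *     [__NR_msgsnd] = (compat_sys_msgsnd),
     * which the earlier #define of compat_sys_msgsnd resolves to
     * tile_compat_sys_msgsnd; unlisted slots keep the sys_ni_syscall
     * default from the [0 ... __NR_syscalls-1] range initializer.
     */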
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
new file mode 100644
index 000000000000..d5efb215dd5f
--- /dev/null
+++ b/arch/tile/kernel/compat_signal.c
@@ -0,0 +1,435 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/suspend.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <asm/syscalls.h>
#include <arch/interrupts.h>

struct compat_sigaction {
        compat_uptr_t sa_handler;
        compat_ulong_t sa_flags;
        compat_uptr_t sa_restorer;
        sigset_t sa_mask __packed;
};

struct compat_sigaltstack {
        compat_uptr_t ss_sp;
        int ss_flags;
        compat_size_t ss_size;
};

struct compat_ucontext {
        compat_ulong_t uc_flags;
        compat_uptr_t uc_link;
        struct compat_sigaltstack uc_stack;
        struct sigcontext uc_mcontext;
        sigset_t uc_sigmask;            /* mask last for extensibility */
};

struct compat_siginfo {
        int si_signo;
        int si_errno;
        int si_code;

        union {
                int _pad[SI_PAD_SIZE];

                /* kill() */
                struct {
                        unsigned int _pid;      /* sender's pid */
                        unsigned int _uid;      /* sender's uid */
                } _kill;

                /* POSIX.1b timers */
                struct {
                        compat_timer_t _tid;    /* timer id */
                        int _overrun;           /* overrun count */
                        compat_sigval_t _sigval;  /* same as below */
                        int _sys_private;       /* not to be passed to user */
                        int _overrun_incr;      /* amount to add to overrun */
                } _timer;

                /* POSIX.1b signals */
                struct {
                        unsigned int _pid;      /* sender's pid */
                        unsigned int _uid;      /* sender's uid */
                        compat_sigval_t _sigval;
                } _rt;

                /* SIGCHLD */
                struct {
                        unsigned int _pid;      /* which child */
                        unsigned int _uid;      /* sender's uid */
                        int _status;            /* exit code */
                        compat_clock_t _utime;
                        compat_clock_t _stime;
                } _sigchld;

                /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
                struct {
                        unsigned int _addr;     /* faulting insn/memory ref. */
#ifdef __ARCH_SI_TRAPNO
                        int _trapno;            /* TRAP # which caused the signal */
#endif
                } _sigfault;

                /* SIGPOLL */
                struct {
                        int _band;              /* POLL_IN, POLL_OUT, POLL_MSG */
                        int _fd;
                } _sigpoll;
        } _sifields;
};

struct compat_rt_sigframe {
        unsigned char save_area[C_ABI_SAVE_AREA_SIZE];  /* caller save area */
        struct compat_siginfo info;
        struct compat_ucontext uc;
};

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
                             struct compat_sigaction __user *oact,
                             size_t sigsetsize)
{
        struct k_sigaction new_sa, old_sa;
        int ret = -EINVAL;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (act) {
                compat_uptr_t handler, restorer;

                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(handler, &act->sa_handler) ||
                    __get_user(new_sa.sa.sa_flags, &act->sa_flags) ||
                    __get_user(restorer, &act->sa_restorer) ||
                    __copy_from_user(&new_sa.sa.sa_mask, &act->sa_mask,
                                     sizeof(sigset_t)))
                        return -EFAULT;
                new_sa.sa.sa_handler = compat_ptr(handler);
                new_sa.sa.sa_restorer = compat_ptr(restorer);
        }

        ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(ptr_to_compat(old_sa.sa.sa_handler),
                               &oact->sa_handler) ||
                    __put_user(ptr_to_compat(old_sa.sa.sa_restorer),
                               &oact->sa_restorer) ||
                    __put_user(old_sa.sa.sa_flags, &oact->sa_flags) ||
                    __copy_to_user(&oact->sa_mask, &old_sa.sa.sa_mask,
                                   sizeof(sigset_t)))
                        return -EFAULT;
        }
out:
        return ret;
}

long compat_sys_rt_sigqueueinfo(int pid, int sig,
                                struct compat_siginfo __user *uinfo)
{
        siginfo_t info;
        int ret;
        mm_segment_t old_fs = get_fs();

        if (copy_siginfo_from_user32(&info, uinfo))
                return -EFAULT;
        set_fs(KERNEL_DS);
        ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *)&info);
        set_fs(old_fs);
        return ret;
}

int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from)
{
        int err;

        if (!access_ok(VERIFY_WRITE, to, sizeof(struct compat_siginfo)))
                return -EFAULT;

        /* If you change the siginfo_t structure, please make sure that
           this code is fixed accordingly.
           It should never copy any pad contained in the structure
           to avoid security leaks, but must copy the generic
           3 ints plus the relevant union member.  */
        err = __put_user(from->si_signo, &to->si_signo);
        err |= __put_user(from->si_errno, &to->si_errno);
        err |= __put_user((short)from->si_code, &to->si_code);

        if (from->si_code < 0) {
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
        } else {
                /*
                 * First 32bits of unions are always present:
                 * si_pid === si_band === si_tid === si_addr(LS half)
                 */
                err |= __put_user(from->_sifields._pad[0],
                                  &to->_sifields._pad[0]);
                switch (from->si_code >> 16) {
                case __SI_FAULT >> 16:
                        break;
                case __SI_CHLD >> 16:
                        err |= __put_user(from->si_utime, &to->si_utime);
                        err |= __put_user(from->si_stime, &to->si_stime);
                        err |= __put_user(from->si_status, &to->si_status);
                        /* FALL THROUGH */
                default:
                case __SI_KILL >> 16:
                        err |= __put_user(from->si_uid, &to->si_uid);
                        break;
                case __SI_POLL >> 16:
                        err |= __put_user(from->si_fd, &to->si_fd);
                        break;
                case __SI_TIMER >> 16:
                        err |= __put_user(from->si_overrun, &to->si_overrun);
                        err |= __put_user(ptr_to_compat(from->si_ptr),
                                          &to->si_ptr);
                        break;
                /* This is not generated by the kernel as of now. */
                case __SI_RT >> 16:
                case __SI_MESGQ >> 16:
                        err |= __put_user(from->si_uid, &to->si_uid);
                        err |= __put_user(from->si_int, &to->si_int);
                        break;
                }
        }
        return err;
}

int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
        int err;
        u32 ptr32;

        if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
                return -EFAULT;

        err = __get_user(to->si_signo, &from->si_signo);
        err |= __get_user(to->si_errno, &from->si_errno);
        err |= __get_user(to->si_code, &from->si_code);

        err |= __get_user(to->si_pid, &from->si_pid);
        err |= __get_user(to->si_uid, &from->si_uid);
        err |= __get_user(ptr32, &from->si_ptr);
        to->si_ptr = compat_ptr(ptr32);

        return err;
}

long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
                             struct compat_sigaltstack __user *uoss_ptr,
                             struct pt_regs *regs)
{
        stack_t uss, uoss;
        int ret;
        mm_segment_t seg;

        if (uss_ptr) {
                u32 ptr;

                memset(&uss, 0, sizeof(stack_t));
                if (!access_ok(VERIFY_READ, uss_ptr, sizeof(*uss_ptr)) ||
                    __get_user(ptr, &uss_ptr->ss_sp) ||
                    __get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
                    __get_user(uss.ss_size, &uss_ptr->ss_size))
                        return -EFAULT;
                uss.ss_sp = compat_ptr(ptr);
        }
        seg = get_fs();
        set_fs(KERNEL_DS);
        ret = do_sigaltstack(uss_ptr ? (stack_t __user __force *)&uss : NULL,
                             (stack_t __user __force *)&uoss,
                             (unsigned long)compat_ptr(regs->sp));
        set_fs(seg);
        if (ret >= 0 && uoss_ptr) {
                if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(*uoss_ptr)) ||
                    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
                    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
                    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
                        ret = -EFAULT;
        }
        return ret;
}

long _compat_sys_rt_sigreturn(struct pt_regs *regs)
{
        struct compat_rt_sigframe __user *frame =
                (struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
        sigset_t set;
        long r0;

        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
                goto badframe;

        if (_compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
                goto badframe;

        return r0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}

/*
 * Determine which stack to use.
 */
static inline void __user *compat_get_sigframe(struct k_sigaction *ka,
                                               struct pt_regs *regs,
                                               size_t frame_size)
{
        unsigned long sp;

        /* Default to using normal stack */
        sp = (unsigned long)compat_ptr(regs->sp);

        /*
         * If we are on the alternate signal stack and would overflow
         * it, don't.  Return an always-bogus address instead so we
         * will die with SIGSEGV.
         */
        if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
                return (void __user __force *)-1UL;

        /* This is the X/Open sanctioned signal stack switching. */
        if (ka->sa.sa_flags & SA_ONSTACK) {
                if (sas_ss_flags(sp) == 0)
                        sp = current->sas_ss_sp + current->sas_ss_size;
        }

        sp -= frame_size;
        /*
         * Align the stack pointer according to the TILE ABI,
         * i.e. so that on function entry (sp & 15) == 0.
         */
        sp &= -16UL;
        return (void __user *) sp;
}

int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                          sigset_t *set, struct pt_regs *regs)
{
        unsigned long restorer;
        struct compat_rt_sigframe __user *frame;
        int err = 0;
        int usig;

        frame = compat_get_sigframe(ka, regs, sizeof(*frame));

        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
                goto give_sigsegv;

        usig = current_thread_info()->exec_domain
                && current_thread_info()->exec_domain->signal_invmap
                && sig < 32
                ? current_thread_info()->exec_domain->signal_invmap[sig]
                : sig;

        /* Always write at least the signal number for the stack backtracer. */
        if (ka->sa.sa_flags & SA_SIGINFO) {
                /* At sigreturn time, restore the callee-save registers too. */
                err |= copy_siginfo_to_user32(&frame->info, info);
                regs->flags |= PT_FLAGS_RESTORE_REGS;
        } else {
                err |= __put_user(info->si_signo, &frame->info.si_signo);
        }

        /* Create the ucontext. */
        err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
        err |= __put_user(0, &frame->uc.uc_flags);
        err |= __put_user(0, &frame->uc.uc_link);
        err |= __put_user(ptr_to_compat((void *)(current->sas_ss_sp)),
                          &frame->uc.uc_stack.ss_sp);
        err |= __put_user(sas_ss_flags(regs->sp),
                          &frame->uc.uc_stack.ss_flags);
        err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
        err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
        err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
        if (err)
                goto give_sigsegv;

        restorer = VDSO_BASE;
        if (ka->sa.sa_flags & SA_RESTORER)
                restorer = ptr_to_compat_reg(ka->sa.sa_restorer);

        /*
         * Set up registers for signal handler.
         * Registers that we don't modify keep the value they had from
         * user-space at the time we took the signal.
         */
        regs->pc = ptr_to_compat_reg(ka->sa.sa_handler);
        regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
        regs->sp = ptr_to_compat_reg(frame);
        regs->lr = restorer;
        regs->regs[0] = (unsigned long) usig;

        if (ka->sa.sa_flags & SA_SIGINFO) {
                /* Need extra arguments, so mark to restore caller-saves. */
                regs->regs[1] = ptr_to_compat_reg(&frame->info);
                regs->regs[2] = ptr_to_compat_reg(&frame->uc);
                regs->flags |= PT_FLAGS_CALLER_SAVES;
        }

        /*
         * Notify any tracer that was single-stepping it.
         * The tracer may want to single-step inside the
         * handler too.
         */
        if (test_thread_flag(TIF_SINGLESTEP))
                ptrace_notify(SIGTRAP);

        return 0;

give_sigsegv:
        force_sigsegv(sig, current);
        return -EFAULT;
}
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
new file mode 100644
index 000000000000..2c54fd43a8a0
--- /dev/null
+++ b/arch/tile/kernel/early_printk.c
@@ -0,0 +1,109 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <asm/setup.h>
#include <hv/hypervisor.h>

static void early_hv_write(struct console *con, const char *s, unsigned n)
{
        hv_console_write((HV_VirtAddr) s, n);
}

static struct console early_hv_console = {
        .name =         "earlyhv",
        .write =        early_hv_write,
        .flags =        CON_PRINTBUFFER,
        .index =        -1,
};

/* Direct interface for emergencies */
static struct console *early_console = &early_hv_console;
static int early_console_initialized;
static int early_console_complete;

static void early_vprintk(const char *fmt, va_list ap)
{
        char buf[512];
        int n = vscnprintf(buf, sizeof(buf), fmt, ap);
        early_console->write(early_console, buf, n);
}

void early_printk(const char *fmt, ...)
{
        va_list ap;
        va_start(ap, fmt);
        early_vprintk(fmt, ap);
        va_end(ap);
}

void early_panic(const char *fmt, ...)
{
        va_list ap;
        raw_local_irq_disable_all();
        va_start(ap, fmt);
        early_printk("Kernel panic - not syncing: ");
        early_vprintk(fmt, ap);
        early_console->write(early_console, "\n", 1);
        va_end(ap);
        dump_stack();
        hv_halt();
}

static int __initdata keep_early;

static int __init setup_early_printk(char *str)
{
        if (early_console_initialized)
                return 1;

        if (str != NULL && strncmp(str, "keep", 4) == 0)
                keep_early = 1;

        early_console = &early_hv_console;
        early_console_initialized = 1;
        register_console(early_console);

        return 0;
}

void __init disable_early_printk(void)
{
        early_console_complete = 1;
        if (!early_console_initialized || !early_console)
                return;
        if (!keep_early) {
                early_printk("disabling early console\n");
                unregister_console(early_console);
                early_console_initialized = 0;
        } else {
                early_printk("keeping early console\n");
        }
}

void warn_early_printk(void)
{
        if (early_console_complete || early_console_initialized)
                return;
        early_printk("\
Machine shutting down before console output is fully initialized.\n\
You may wish to reboot and add the option 'earlyprintk' to your\n\
boot command line to see any diagnostic early console output.\n\
");
}

early_param("earlyprintk", setup_early_printk);
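
The early_param() hook above means the console is controlled from the kernel boot command line; assuming the standard kernel conventions, usage looks like:

    earlyprintk         # register the hypervisor early console, then drop it
                        # once the real console is up (disable_early_printk)
    earlyprintk=keep    # as above, but leave the early console registered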
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S new file mode 100644 index 000000000000..3d01383b1b0e --- /dev/null +++ b/arch/tile/kernel/entry.S | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/linkage.h> | ||
16 | #include <linux/unistd.h> | ||
17 | #include <asm/irqflags.h> | ||
18 | #include <arch/abi.h> | ||
19 | |||
20 | #ifdef __tilegx__ | ||
21 | #define bnzt bnezt | ||
22 | #endif | ||
23 | |||
24 | STD_ENTRY(current_text_addr) | ||
25 | { move r0, lr; jrp lr } | ||
26 | STD_ENDPROC(current_text_addr) | ||
27 | |||
28 | STD_ENTRY(_sim_syscall) | ||
29 | /* | ||
30 | * Wait for r0-r9 to be ready (and lr on the off chance we | ||
31 | * want the syscall to locate its caller), then make a magic | ||
32 | * simulator syscall. | ||
33 | * | ||
34 | * We carefully stall until the registers are readable in case they | ||
35 | * are the target of a slow load, etc. so that tile-sim will | ||
36 | * definitely be able to read all of them inside the magic syscall. | ||
37 | * | ||
38 | * Technically this is wrong for r3-r9 and lr, since an interrupt | ||
39 | * could come in and restore the registers with a slow load right | ||
40 | * before executing the mtspr. We may need to modify tile-sim to | ||
41 | * explicitly stall for this case, but we do not yet have | ||
42 | * a way to implement such a stall. | ||
43 | */ | ||
44 | { and zero, lr, r9 ; and zero, r8, r7 } | ||
45 | { and zero, r6, r5 ; and zero, r4, r3 } | ||
46 | { and zero, r2, r1 ; mtspr SIM_CONTROL, r0 } | ||
47 | { jrp lr } | ||
48 | STD_ENDPROC(_sim_syscall) | ||
49 | |||
50 | /* | ||
51 | * Implement execve(). The i386 code has a note that forking from kernel | ||
52 | * space results in no copy on write until the execve, so we should be | ||
53 | * careful not to write to the stack here. | ||
54 | */ | ||
55 | STD_ENTRY(kernel_execve) | ||
56 | moveli TREG_SYSCALL_NR_NAME, __NR_execve | ||
57 | swint1 | ||
58 | jrp lr | ||
59 | STD_ENDPROC(kernel_execve) | ||
60 | |||
61 | /* Delay a fixed number of cycles. */ | ||
62 | STD_ENTRY(__delay) | ||
63 | { addi r0, r0, -1; bnzt r0, . } | ||
64 | jrp lr | ||
65 | STD_ENDPROC(__delay) | ||
66 | |||
67 | /* | ||
68 | * We don't run this function directly, but instead copy it to a page | ||
69 | * we map into every user process. See vdso_setup(). | ||
70 | * | ||
71 | * Note that libc has a copy of this function that it uses to compare | ||
72 | * against the PC when a stack backtrace ends, so if this code is | ||
73 | * changed, the libc implementation(s) should also be updated. | ||
74 | */ | ||
75 | .pushsection .data | ||
76 | ENTRY(__rt_sigreturn) | ||
77 | moveli TREG_SYSCALL_NR_NAME,__NR_rt_sigreturn | ||
78 | swint1 | ||
79 | ENDPROC(__rt_sigreturn) | ||
80 | ENTRY(__rt_sigreturn_end) | ||
81 | .popsection | ||
82 | |||
83 | STD_ENTRY(dump_stack) | ||
84 | { move r2, lr; lnk r1 } | ||
85 | { move r4, r52; addli r1, r1, dump_stack - . } | ||
86 | { move r3, sp; j _dump_stack } | ||
87 | jrp lr /* keep backtracer happy */ | ||
88 | STD_ENDPROC(dump_stack) | ||
89 | |||
90 | STD_ENTRY(KBacktraceIterator_init_current) | ||
91 | { move r2, lr; lnk r1 } | ||
92 | { move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . } | ||
93 | { move r3, sp; j _KBacktraceIterator_init_current } | ||
94 | jrp lr /* keep backtracer happy */ | ||
95 | STD_ENDPROC(KBacktraceIterator_init_current) | ||
96 | |||
97 | /* | ||
98 | * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then | ||
99 | * free the old stack (passed in r0) and re-invoke cpu_idle(). | ||
100 | * We update sp and ksp0 simultaneously to avoid backtracer warnings. | ||
101 | */ | ||
102 | STD_ENTRY(cpu_idle_on_new_stack) | ||
103 | { | ||
104 | move sp, r1 | ||
105 | mtspr SYSTEM_SAVE_1_0, r2 | ||
106 | } | ||
107 | jal free_thread_info | ||
108 | j cpu_idle | ||
109 | STD_ENDPROC(cpu_idle_on_new_stack) | ||
110 | |||
111 | /* Loop forever on a nap during SMP boot. */ | ||
112 | STD_ENTRY(smp_nap) | ||
113 | nap | ||
114 | j smp_nap /* we are not architecturally guaranteed not to exit nap */ | ||
115 | jrp lr /* clue in the backtracer */ | ||
116 | STD_ENDPROC(smp_nap) | ||
117 | |||
118 | /* | ||
119 | * Enable interrupts racelessly and then nap until interrupted. | ||
120 | * This function's _cpu_idle_nap address is special; see intvec.S. | ||
121 | * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and | ||
122 | * as a result return to the function that called _cpu_idle(). | ||
123 | */ | ||
124 | STD_ENTRY(_cpu_idle) | ||
125 | { | ||
126 | lnk r0 | ||
127 | movei r1, 1 | ||
128 | } | ||
129 | { | ||
130 | addli r0, r0, _cpu_idle_nap - . | ||
131 | mtspr INTERRUPT_CRITICAL_SECTION, r1 | ||
132 | } | ||
133 | IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */ | ||
134 | mtspr EX_CONTEXT_1_1, r1 /* PL1, ICS clear */ | ||
135 | mtspr EX_CONTEXT_1_0, r0 | ||
136 | iret | ||
137 | .global _cpu_idle_nap | ||
138 | _cpu_idle_nap: | ||
139 | nap | ||
140 | jrp lr | ||
141 | STD_ENDPROC(_cpu_idle) | ||
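The ICS/iret dance above closes the classic lost-wakeup window: if we simply enabled interrupts and then napped, a wakeup arriving between the two instructions would be missed until the next interrupt. The user-space analogue of the same pattern is sigprocmask() plus sigsuspend(), which atomically unmasks and sleeps; a minimal runnable illustration (an analogy, not the kernel mechanism):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t woken;
    static void on_alarm(int sig) { (void)sig; woken = 1; }

    int main(void)
    {
            sigset_t block, old;
            struct sigaction sa = { .sa_handler = on_alarm };

            sigaction(SIGALRM, &sa, NULL);

            /* "Set ICS": keep the wakeup masked while deciding to sleep. */
            sigemptyset(&block);
            sigaddset(&block, SIGALRM);
            sigprocmask(SIG_BLOCK, &block, &old);

            alarm(1);               /* the wakeup may fire any time from here */

            if (!woken)
                    sigsuspend(&old);  /* atomic unmask + sleep, like iret->nap */

            puts("woken without losing the wakeup");
            return 0;
    }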
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c new file mode 100644 index 000000000000..584b965dc824 --- /dev/null +++ b/arch/tile/kernel/hardwall.c | |||
@@ -0,0 +1,796 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/fs.h> | ||
16 | #include <linux/proc_fs.h> | ||
17 | #include <linux/seq_file.h> | ||
18 | #include <linux/rwsem.h> | ||
19 | #include <linux/kprobes.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/hardirq.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <linux/cdev.h> | ||
25 | #include <linux/compat.h> | ||
26 | #include <asm/hardwall.h> | ||
27 | #include <asm/traps.h> | ||
28 | #include <asm/siginfo.h> | ||
29 | #include <asm/irq_regs.h> | ||
30 | |||
31 | #include <arch/interrupts.h> | ||
32 | #include <arch/spr_def.h> | ||
33 | |||
34 | |||
35 | /* | ||
36 | * This data structure tracks the rectangle data, etc., associated | ||
37 | * one-to-one with a "struct file *" from opening HARDWALL_FILE. | ||
38 | * Note that the file's private data points back to this structure. | ||
39 | */ | ||
40 | struct hardwall_info { | ||
41 | struct list_head list; /* "rectangles" list */ | ||
42 | struct list_head task_head; /* head of tasks in this hardwall */ | ||
43 | int ulhc_x; /* upper left hand corner x coord */ | ||
44 | int ulhc_y; /* upper left hand corner y coord */ | ||
45 | int width; /* rectangle width */ | ||
46 | int height; /* rectangle height */ | ||
47 | int teardown_in_progress; /* are we tearing this one down? */ | ||
48 | }; | ||
49 | |||
50 | /* Currently allocated hardwall rectangles */ | ||
51 | static LIST_HEAD(rectangles); | ||
52 | |||
53 | /* | ||
54 | * Guard changes to the hardwall data structures. | ||
55 | * This could be finer grained (e.g. one lock for the list of hardwall | ||
56 | * rectangles, then separate embedded locks for each one's list of tasks), | ||
57 | * but there are subtle correctness issues when trying to start with | ||
58 | * a task's "hardwall" pointer and lock the correct rectangle's embedded | ||
59 | * lock in the presence of a simultaneous deactivation, so it seems | ||
60 | * easier to have a single lock, given that none of these data | ||
61 | * structures are touched very frequently during normal operation. | ||
62 | */ | ||
63 | static DEFINE_SPINLOCK(hardwall_lock); | ||
64 | |||
65 | /* Allow disabling UDN access. */ | ||
66 | static int udn_disabled; | ||
67 | static int __init noudn(char *str) | ||
68 | { | ||
69 | pr_info("User-space UDN access is disabled\n"); | ||
70 | udn_disabled = 1; | ||
71 | return 0; | ||
72 | } | ||
73 | early_param("noudn", noudn); | ||
74 | |||
75 | |||
76 | /* | ||
77 | * Low-level primitives | ||
78 | */ | ||
79 | |||
80 | /* Set a CPU bit if the CPU is online. */ | ||
81 | #define cpu_online_set(cpu, dst) do { \ | ||
82 | if (cpu_online(cpu)) \ | ||
83 | cpumask_set_cpu(cpu, dst); \ | ||
84 | } while (0) | ||
85 | |||
86 | |||
87 | /* Does the given rectangle contain the given x,y coordinate? */ | ||
88 | static int contains(struct hardwall_info *r, int x, int y) | ||
89 | { | ||
90 | return (x >= r->ulhc_x && x < r->ulhc_x + r->width) && | ||
91 | (y >= r->ulhc_y && y < r->ulhc_y + r->height); | ||
92 | } | ||
93 | |||
94 | /* Compute the rectangle parameters and validate the cpumask. */ | ||
95 | static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask) | ||
96 | { | ||
97 | int x, y, cpu, ulhc, lrhc; | ||
98 | |||
99 | /* The first cpu is the ULHC, the last the LRHC. */ | ||
100 | ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits); | ||
101 | lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits); | ||
102 | |||
103 | /* Compute the rectangle attributes from the cpus. */ | ||
104 | r->ulhc_x = cpu_x(ulhc); | ||
105 | r->ulhc_y = cpu_y(ulhc); | ||
106 | r->width = cpu_x(lrhc) - r->ulhc_x + 1; | ||
107 | r->height = cpu_y(lrhc) - r->ulhc_y + 1; | ||
108 | |||
109 | /* Width and height must be positive */ | ||
110 | if (r->width <= 0 || r->height <= 0) | ||
111 | return -EINVAL; | ||
112 | |||
113 | /* Confirm that the cpumask is exactly the rectangle. */ | ||
114 | for (y = 0, cpu = 0; y < smp_height; ++y) | ||
115 | for (x = 0; x < smp_width; ++x, ++cpu) | ||
116 | if (cpumask_test_cpu(cpu, mask) != contains(r, x, y)) | ||
117 | return -EINVAL; | ||
118 | |||
119 | /* | ||
120 | * Note that offline cpus can't be drained when this UDN | ||
121 | * rectangle eventually closes. We used to detect this | ||
122 | * situation and print a warning, but it annoyed users and | ||
123 | * they ignored it anyway, so now we just return without a | ||
124 | * warning. | ||
125 | */ | ||
126 | return 0; | ||
127 | } | ||
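Cpus are numbered row-major (cpu == y * smp_width + x), so the lowest and highest set bits in the mask give the upper-left and lower-right corners directly. A standalone sketch of the same arithmetic on a hypothetical 4x4 grid (the 2x3 rectangle and the grid size are made up for illustration):

    #include <stdio.h>

    /* Standalone model of setup_rectangle() on a hypothetical 4x4 grid. */
    #define W 4     /* smp_width */
    #define H 4     /* smp_height */

    int main(void)
    {
            /* 2x3 rectangle with its upper left corner at (1,0). */
            unsigned mask = 1u << 1 | 1u << 2 | 1u << 5 | 1u << 6 |
                            1u << 9 | 1u << 10;
            int first = -1, last = -1, cpu;

            for (cpu = 0; cpu < W * H; ++cpu)
                    if (mask & (1u << cpu)) {
                            if (first < 0)
                                    first = cpu;
                            last = cpu;
                    }

            int ulhc_x = first % W, ulhc_y = first / W;     /* cpu_x/cpu_y */
            int width = last % W - ulhc_x + 1;
            int height = last / W - ulhc_y + 1;

            /* Mirror the kernel's check: the mask must be exactly the rect. */
            for (cpu = 0; cpu < W * H; ++cpu) {
                    int x = cpu % W, y = cpu / W;
                    int in = x >= ulhc_x && x < ulhc_x + width &&
                             y >= ulhc_y && y < ulhc_y + height;
                    if (!!(mask & (1u << cpu)) != in)
                            return 1;       /* -EINVAL in the kernel */
            }

            printf("ulhc=(%d,%d) %dx%d\n", ulhc_x, ulhc_y, width, height);
            /* prints: ulhc=(1,0) 2x3 */
            return 0;
    }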
128 | |||
129 | /* Do the two given rectangles overlap on any cpu? */ | ||
130 | static int overlaps(struct hardwall_info *a, struct hardwall_info *b) | ||
131 | { | ||
132 | return a->ulhc_x + a->width > b->ulhc_x && /* A not to the left */ | ||
133 | b->ulhc_x + b->width > a->ulhc_x && /* B not to the left */ | ||
134 | a->ulhc_y + a->height > b->ulhc_y && /* A not above */ | ||
135 | b->ulhc_y + b->height > a->ulhc_y; /* B not above */ | ||
136 | } | ||
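The four conjuncts are the standard "not disjoint on either axis" test: two rectangles overlap iff neither is entirely to the left of, or above, the other. A minimal standalone sketch with made-up rectangles; note that abutting rectangles do not count as overlapping:

    #include <stdio.h>

    struct rect { int x, y, w, h; };        /* ulhc_x, ulhc_y, width, height */

    static int overlaps(const struct rect *a, const struct rect *b)
    {
            return a->x + a->w > b->x && b->x + b->w > a->x &&
                   a->y + a->h > b->y && b->y + b->h > a->y;
    }

    int main(void)
    {
            struct rect a = { 0, 0, 2, 2 };
            struct rect b = { 2, 0, 2, 2 };         /* abuts a, no shared tile */
            struct rect c = { 1, 1, 2, 2 };         /* shares tile (1,1) with a */

            printf("a-b: %d, a-c: %d\n", overlaps(&a, &b), overlaps(&a, &c));
            /* prints: a-b: 0, a-c: 1 */
            return 0;
    }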
137 | |||
138 | |||
139 | /* | ||
140 | * Hardware management of hardwall setup, teardown, trapping, | ||
141 | * and enabling/disabling PL0 access to the networks. | ||
142 | */ | ||
143 | |||
144 | /* Bit field values to OR together for writes to SPR_XDN_DIRECTION_PROTECT */ | ||
145 | enum direction_protect { | ||
146 | N_PROTECT = (1 << 0), | ||
147 | E_PROTECT = (1 << 1), | ||
148 | S_PROTECT = (1 << 2), | ||
149 | W_PROTECT = (1 << 3) | ||
150 | }; | ||
151 | |||
152 | static void enable_firewall_interrupts(void) | ||
153 | { | ||
154 | raw_local_irq_unmask_now(INT_UDN_FIREWALL); | ||
155 | } | ||
156 | |||
157 | static void disable_firewall_interrupts(void) | ||
158 | { | ||
159 | raw_local_irq_mask_now(INT_UDN_FIREWALL); | ||
160 | } | ||
161 | |||
162 | /* Set up hardwall on this cpu based on the passed hardwall_info. */ | ||
163 | static void hardwall_setup_ipi_func(void *info) | ||
164 | { | ||
165 | struct hardwall_info *r = info; | ||
166 | int cpu = smp_processor_id(); | ||
167 | int x = cpu % smp_width; | ||
168 | int y = cpu / smp_width; | ||
169 | int bits = 0; | ||
170 | if (x == r->ulhc_x) | ||
171 | bits |= W_PROTECT; | ||
172 | if (x == r->ulhc_x + r->width - 1) | ||
173 | bits |= E_PROTECT; | ||
174 | if (y == r->ulhc_y) | ||
175 | bits |= N_PROTECT; | ||
176 | if (y == r->ulhc_y + r->height - 1) | ||
177 | bits |= S_PROTECT; | ||
178 | BUG_ON(bits == 0); | ||
179 | __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits); | ||
180 | enable_firewall_interrupts(); | ||
181 | |||
182 | } | ||
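Only tiles on the rectangle's perimeter are sent this IPI (see hardwall_setup() below), which is why bits == 0 is a BUG: every edge tile matches at least one of the four tests, and corners match two. A standalone sketch of the bit assignment for a hypothetical 3x3 rectangle at (1,1):

    #include <stdio.h>

    /* Standalone model of the edge-bit computation for a hypothetical
     * 3x3 rectangle at (1,1).  In the kernel only perimeter tiles run
     * this, so bits == 0 (the interior) can never happen there. */
    enum { N_PROTECT = 1, E_PROTECT = 2, S_PROTECT = 4, W_PROTECT = 8 };

    int main(void)
    {
            int ulhc_x = 1, ulhc_y = 1, width = 3, height = 3, x, y;

            for (y = ulhc_y; y < ulhc_y + height; ++y) {
                    for (x = ulhc_x; x < ulhc_x + width; ++x) {
                            int bits = 0;

                            if (x == ulhc_x)
                                    bits |= W_PROTECT;
                            if (x == ulhc_x + width - 1)
                                    bits |= E_PROTECT;
                            if (y == ulhc_y)
                                    bits |= N_PROTECT;
                            if (y == ulhc_y + height - 1)
                                    bits |= S_PROTECT;
                            printf("%x ", bits);
                    }
                    printf("\n");
            }
            /* prints:  9 1 3 / 8 0 2 / c 4 6 -- corners get two bits */
            return 0;
    }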
183 | |||
184 | /* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */ | ||
185 | static void hardwall_setup(struct hardwall_info *r) | ||
186 | { | ||
187 | int x, y, cpu, delta; | ||
188 | struct cpumask rect_cpus; | ||
189 | |||
190 | cpumask_clear(&rect_cpus); | ||
191 | |||
192 | /* First include the top and bottom edges */ | ||
193 | cpu = r->ulhc_y * smp_width + r->ulhc_x; | ||
194 | delta = (r->height - 1) * smp_width; | ||
195 | for (x = 0; x < r->width; ++x, ++cpu) { | ||
196 | cpu_online_set(cpu, &rect_cpus); | ||
197 | cpu_online_set(cpu + delta, &rect_cpus); | ||
198 | } | ||
199 | |||
200 | /* Then the left and right edges */ | ||
201 | cpu -= r->width; | ||
202 | delta = r->width - 1; | ||
203 | for (y = 0; y < r->height; ++y, cpu += smp_width) { | ||
204 | cpu_online_set(cpu, &rect_cpus); | ||
205 | cpu_online_set(cpu + delta, &rect_cpus); | ||
206 | } | ||
207 | |||
208 | /* Then tell all the cpus to set up their protection SPR */ | ||
209 | on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1); | ||
210 | } | ||
211 | |||
212 | void __kprobes do_hardwall_trap(struct pt_regs *regs, int fault_num) | ||
213 | { | ||
214 | struct hardwall_info *rect; | ||
215 | struct task_struct *p; | ||
216 | struct siginfo info; | ||
217 | int x, y; | ||
218 | int cpu = smp_processor_id(); | ||
219 | int found_processes; | ||
220 | unsigned long flags; | ||
221 | |||
222 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
223 | irq_enter(); | ||
224 | |||
225 | /* This tile trapped a network access; find the rectangle. */ | ||
226 | x = cpu % smp_width; | ||
227 | y = cpu / smp_width; | ||
228 | spin_lock_irqsave(&hardwall_lock, flags); | ||
229 | list_for_each_entry(rect, &rectangles, list) { | ||
230 | if (contains(rect, x, y)) | ||
231 | break; | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * It shouldn't be possible not to find this cpu on the | ||
236 | * rectangle list, since only cpus in rectangles get hardwalled. | ||
237 | * The hardwall is only removed after the UDN is drained. | ||
238 | */ | ||
239 | BUG_ON(&rect->list == &rectangles); | ||
240 | |||
241 | /* | ||
242 | * If we already started teardown on this hardwall, don't worry; | ||
243 | * the abort signal has been sent and we are just waiting for things | ||
244 | * to quiesce. | ||
245 | */ | ||
246 | if (rect->teardown_in_progress) { | ||
247 | pr_notice("cpu %d: detected hardwall violation %#lx" | ||
248 | " while teardown already in progress\n", | ||
249 | cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT)); | ||
250 | goto done; | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * Kill off any process that is activated in this rectangle. | ||
255 | * We bypass security to deliver the signal, since it must be | ||
256 | * one of the activated processes that generated the UDN | ||
257 | * message that caused this trap, and all the activated | ||
258 | * processes share a single open file, so they are already | ||
259 | * tightly bound together from a security point of view. | ||
260 | */ | ||
261 | rect->teardown_in_progress = 1; | ||
262 | wmb(); /* Ensure visibility of rectangle before notifying processes. */ | ||
263 | pr_notice("cpu %d: detected hardwall violation %#lx...\n", | ||
264 | cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT)); | ||
265 | info.si_signo = SIGILL; | ||
266 | info.si_errno = 0; | ||
267 | info.si_code = ILL_HARDWALL; | ||
268 | found_processes = 0; | ||
269 | list_for_each_entry(p, &rect->task_head, thread.hardwall_list) { | ||
270 | BUG_ON(p->thread.hardwall != rect); | ||
271 | if (p->sighand) { | ||
272 | found_processes = 1; | ||
273 | pr_notice("hardwall: killing %d\n", p->pid); | ||
274 | spin_lock(&p->sighand->siglock); | ||
275 | __group_send_sig_info(info.si_signo, &info, p); | ||
276 | spin_unlock(&p->sighand->siglock); | ||
277 | } | ||
278 | } | ||
279 | if (!found_processes) | ||
280 | pr_notice("hardwall: no associated processes!\n"); | ||
281 | |||
282 | done: | ||
283 | spin_unlock_irqrestore(&hardwall_lock, flags); | ||
284 | |||
285 | /* | ||
286 | * We have to disable firewall interrupts now, or else when we | ||
287 | * return from this handler, we will simply re-interrupt back to | ||
288 | * it. However, we can't clear the protection bits, since we | ||
289 | * haven't yet drained the network, and that would allow packets | ||
290 | * to cross out of the hardwall region. | ||
291 | */ | ||
292 | disable_firewall_interrupts(); | ||
293 | |||
294 | irq_exit(); | ||
295 | set_irq_regs(old_regs); | ||
296 | } | ||
297 | |||
298 | /* Allow access from user space to the UDN. */ | ||
299 | void grant_network_mpls(void) | ||
300 | { | ||
301 | __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1); | ||
302 | __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1); | ||
303 | __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1); | ||
304 | __insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1); | ||
305 | #if !CHIP_HAS_REV1_XDN() | ||
306 | __insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1); | ||
307 | __insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1); | ||
308 | #endif | ||
309 | } | ||
310 | |||
311 | /* Deny access from user space to the UDN. */ | ||
312 | void restrict_network_mpls(void) | ||
313 | { | ||
314 | __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1); | ||
315 | __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1); | ||
316 | __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1); | ||
317 | __insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1); | ||
318 | #if !CHIP_HAS_REV1_XDN() | ||
319 | __insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1); | ||
320 | __insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1); | ||
321 | #endif | ||
322 | } | ||
323 | |||
324 | |||
325 | /* | ||
326 | * Code to create, activate, deactivate, and destroy hardwall rectangles. | ||
327 | */ | ||
328 | |||
329 | /* Create a hardwall for the given rectangle */ | ||
330 | static struct hardwall_info *hardwall_create( | ||
331 | size_t size, const unsigned char __user *bits) | ||
332 | { | ||
333 | struct hardwall_info *iter, *rect; | ||
334 | struct cpumask mask; | ||
335 | unsigned long flags; | ||
336 | int rc; | ||
337 | |||
338 | /* Reject crazy sizes out of hand, a la sys_mbind(). */ | ||
339 | if (size > PAGE_SIZE) | ||
340 | return ERR_PTR(-EINVAL); | ||
341 | |||
342 | /* Copy whatever fits into a cpumask. */ | ||
343 | if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size))) | ||
344 | return ERR_PTR(-EFAULT); | ||
345 | |||
346 | /* | ||
347 | * If the size was short, clear the rest of the mask; | ||
348 | * otherwise validate that the rest of the user mask was zero | ||
349 | * (we don't try hard to be efficient when validating huge masks). | ||
350 | */ | ||
351 | if (size < sizeof(struct cpumask)) { | ||
352 | memset((char *)&mask + size, 0, sizeof(struct cpumask) - size); | ||
353 | } else if (size > sizeof(struct cpumask)) { | ||
354 | size_t i; | ||
355 | for (i = sizeof(struct cpumask); i < size; ++i) { | ||
356 | char c; | ||
357 | if (get_user(c, &bits[i])) | ||
358 | return ERR_PTR(-EFAULT); | ||
359 | if (c) | ||
360 | return ERR_PTR(-EINVAL); | ||
361 | } | ||
362 | } | ||
363 | |||
364 | /* Allocate a new rectangle optimistically. */ | ||
365 | rect = kmalloc(sizeof(struct hardwall_info), | ||
366 | GFP_KERNEL | __GFP_ZERO); | ||
367 | if (rect == NULL) | ||
368 | return ERR_PTR(-ENOMEM); | ||
369 | INIT_LIST_HEAD(&rect->task_head); | ||
370 | |||
371 | /* Compute the rectangle size and validate that it's plausible. */ | ||
372 | rc = setup_rectangle(rect, &mask); | ||
373 | if (rc != 0) { | ||
374 | kfree(rect); | ||
375 | return ERR_PTR(rc); | ||
376 | } | ||
377 | |||
378 | /* Confirm it doesn't overlap and add it to the list. */ | ||
379 | spin_lock_irqsave(&hardwall_lock, flags); | ||
380 | list_for_each_entry(iter, &rectangles, list) { | ||
381 | if (overlaps(iter, rect)) { | ||
382 | spin_unlock_irqrestore(&hardwall_lock, flags); | ||
383 | kfree(rect); | ||
384 | return ERR_PTR(-EBUSY); | ||
385 | } | ||
386 | } | ||
387 | list_add_tail(&rect->list, &rectangles); | ||
388 | spin_unlock_irqrestore(&hardwall_lock, flags); | ||
389 | |||
390 | /* Set up appropriate hardwalling on all affected cpus. */ | ||
391 | hardwall_setup(rect); | ||
392 | |||
393 | return rect; | ||
394 | } | ||
395 | |||
396 | /* Activate a given hardwall on this cpu for this process. */ | ||
397 | static int hardwall_activate(struct hardwall_info *rect) | ||
398 | { | ||
399 | int cpu, x, y; | ||
400 | unsigned long flags; | ||
401 | struct task_struct *p = current; | ||
402 | struct thread_struct *ts = &p->thread; | ||
403 | |||
404 | /* Require a rectangle. */ | ||
405 | if (rect == NULL) | ||
406 | return -ENODATA; | ||
407 | |||
408 | /* Not allowed to activate a rectangle that is being torn down. */ | ||
409 | if (rect->teardown_in_progress) | ||
410 | return -EINVAL; | ||
411 | |||
412 | /* | ||
413 | * Get our affinity; if we're not bound to this tile uniquely, | ||
414 | * we can't access the network registers. | ||
415 | */ | ||
416 | if (cpumask_weight(&p->cpus_allowed) != 1) | ||
417 | return -EPERM; | ||
418 | |||
419 | /* Make sure we are bound to a cpu in this rectangle. */ | ||
420 | cpu = smp_processor_id(); | ||
421 | BUG_ON(cpumask_first(&p->cpus_allowed) != cpu); | ||
422 | x = cpu_x(cpu); | ||
423 | y = cpu_y(cpu); | ||
424 | if (!contains(rect, x, y)) | ||
425 | return -EINVAL; | ||
426 | |||
427 | /* If we are already bound to this hardwall, it's a no-op. */ | ||
428 | if (ts->hardwall) { | ||
429 | BUG_ON(ts->hardwall != rect); | ||
430 | return 0; | ||
431 | } | ||
432 | |||
433 | /* Success! This process gets to use the user networks on this cpu. */ | ||
434 | ts->hardwall = rect; | ||
435 | spin_lock_irqsave(&hardwall_lock, flags); | ||
436 | list_add(&ts->hardwall_list, &rect->task_head); | ||
437 | spin_unlock_irqrestore(&hardwall_lock, flags); | ||
438 | grant_network_mpls(); | ||
439 | printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n", | ||
440 | p->pid, p->comm, cpu); | ||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | /* | ||
445 | * Deactivate a task's hardwall. Must hold hardwall_lock. | ||
446 | * This function may be called from free_task(), so we don't want to | ||
447 | * rely on too many fields of struct task_struct still being valid. | ||
448 | * We assume the cpus_allowed, pid, and comm fields are still valid. | ||
449 | */ | ||
450 | static void _hardwall_deactivate(struct task_struct *task) | ||
451 | { | ||
452 | struct thread_struct *ts = &task->thread; | ||
453 | |||
454 | if (cpumask_weight(&task->cpus_allowed) != 1) { | ||
455 | pr_err("pid %d (%s) releasing networks with" | ||
456 | " an affinity mask containing %d cpus!\n", | ||
457 | task->pid, task->comm, | ||
458 | cpumask_weight(&task->cpus_allowed)); | ||
459 | BUG(); | ||
460 | } | ||
461 | |||
462 | BUG_ON(ts->hardwall == NULL); | ||
463 | ts->hardwall = NULL; | ||
464 | list_del(&ts->hardwall_list); | ||
465 | if (task == current) | ||
466 | restrict_network_mpls(); | ||
467 | } | ||
468 | |||
469 | /* Deactivate a task's hardwall. */ | ||
470 | int hardwall_deactivate(struct task_struct *task) | ||
471 | { | ||
472 | unsigned long flags; | ||
473 | int activated; | ||
474 | |||
475 | spin_lock_irqsave(&hardwall_lock, flags); | ||
476 | activated = (task->thread.hardwall != NULL); | ||
477 | if (activated) | ||
478 | _hardwall_deactivate(task); | ||
479 | spin_unlock_irqrestore(&hardwall_lock, flags); | ||
480 | |||
481 | if (!activated) | ||
482 | return -EINVAL; | ||
483 | |||
484 | printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n", | ||
485 | task->pid, task->comm, smp_processor_id()); | ||
486 | return 0; | ||
487 | } | ||
488 | |||
489 | /* Stop a UDN switch before draining the network. */ | ||
490 | static void stop_udn_switch(void *ignored) | ||
491 | { | ||
492 | #if !CHIP_HAS_REV1_XDN() | ||
493 | /* Freeze the switch and the demux. */ | ||
494 | __insn_mtspr(SPR_UDN_SP_FREEZE, | ||
495 | SPR_UDN_SP_FREEZE__SP_FRZ_MASK | | ||
496 | SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK | | ||
497 | SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK); | ||
498 | #endif | ||
499 | } | ||
500 | |||
501 | /* Drain all the state from a stopped switch. */ | ||
502 | static void drain_udn_switch(void *ignored) | ||
503 | { | ||
504 | #if !CHIP_HAS_REV1_XDN() | ||
505 | int i; | ||
506 | int from_tile_words, ca_count; | ||
507 | |||
508 | /* Empty out the 5 switch point fifos. */ | ||
509 | for (i = 0; i < 5; i++) { | ||
510 | int words, j; | ||
511 | __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i); | ||
512 | words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF; | ||
513 | for (j = 0; j < words; j++) | ||
514 | (void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA); | ||
515 | BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0); | ||
516 | } | ||
517 | |||
518 | /* Dump out the 3-word FIFO at the top. */ | ||
519 | from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3; | ||
520 | for (i = 0; i < from_tile_words; i++) | ||
521 | (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO); | ||
522 | |||
523 | /* Empty out demuxes. */ | ||
524 | while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0)) | ||
525 | (void) __tile_udn0_receive(); | ||
526 | while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1)) | ||
527 | (void) __tile_udn1_receive(); | ||
528 | while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2)) | ||
529 | (void) __tile_udn2_receive(); | ||
530 | while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3)) | ||
531 | (void) __tile_udn3_receive(); | ||
532 | BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0); | ||
533 | |||
534 | /* Empty out catch all. */ | ||
535 | ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT); | ||
536 | for (i = 0; i < ca_count; i++) | ||
537 | (void) __insn_mfspr(SPR_UDN_CA_DATA); | ||
538 | BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0); | ||
539 | |||
540 | /* Clear demux logic. */ | ||
541 | __insn_mtspr(SPR_UDN_DEMUX_CTL, 1); | ||
542 | |||
543 | /* | ||
544 | * Write switch state; experimentation indicates that 0xc3000 | ||
545 | * is an idle switch point. | ||
546 | */ | ||
547 | for (i = 0; i < 5; i++) { | ||
548 | __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i); | ||
549 | __insn_mtspr(SPR_UDN_SP_STATE, 0xc3000); | ||
550 | } | ||
551 | #endif | ||
552 | } | ||
553 | |||
554 | /* Reset random UDN state registers at boot up and during hardwall teardown. */ | ||
555 | void reset_network_state(void) | ||
556 | { | ||
557 | #if !CHIP_HAS_REV1_XDN() | ||
558 | /* Reset UDN coordinates to their standard value */ | ||
559 | unsigned int cpu = smp_processor_id(); | ||
560 | unsigned int x = cpu % smp_width; | ||
561 | unsigned int y = cpu / smp_width; | ||
562 | #endif | ||
563 | |||
564 | if (udn_disabled) | ||
565 | return; | ||
566 | |||
567 | #if !CHIP_HAS_REV1_XDN() | ||
568 | __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7)); | ||
569 | |||
570 | /* Set demux tags to predefined values and enable them. */ | ||
571 | __insn_mtspr(SPR_UDN_TAG_VALID, 0xf); | ||
572 | __insn_mtspr(SPR_UDN_TAG_0, (1 << 0)); | ||
573 | __insn_mtspr(SPR_UDN_TAG_1, (1 << 1)); | ||
574 | __insn_mtspr(SPR_UDN_TAG_2, (1 << 2)); | ||
575 | __insn_mtspr(SPR_UDN_TAG_3, (1 << 3)); | ||
576 | #endif | ||
577 | |||
578 | /* Clear out other random registers so we have a clean slate. */ | ||
579 | __insn_mtspr(SPR_UDN_AVAIL_EN, 0); | ||
580 | __insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0); | ||
581 | #if !CHIP_HAS_REV1_XDN() | ||
582 | __insn_mtspr(SPR_UDN_REFILL_EN, 0); | ||
583 | __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0); | ||
584 | __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0); | ||
585 | #endif | ||
586 | |||
587 | /* Start the switch and demux. */ | ||
588 | #if !CHIP_HAS_REV1_XDN() | ||
589 | __insn_mtspr(SPR_UDN_SP_FREEZE, 0); | ||
590 | #endif | ||
591 | } | ||
592 | |||
593 | /* Restart a UDN switch after draining. */ | ||
594 | static void restart_udn_switch(void *ignored) | ||
595 | { | ||
596 | reset_network_state(); | ||
597 | |||
598 | /* Disable firewall interrupts. */ | ||
599 | __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0); | ||
600 | disable_firewall_interrupts(); | ||
601 | } | ||
602 | |||
603 | /* Build a struct cpumask containing all valid tiles in the bounding rectangle. */ | ||
604 | static void fill_mask(struct hardwall_info *r, struct cpumask *result) | ||
605 | { | ||
606 | int x, y, cpu; | ||
607 | |||
608 | cpumask_clear(result); | ||
609 | |||
610 | cpu = r->ulhc_y * smp_width + r->ulhc_x; | ||
611 | for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) { | ||
612 | for (x = 0; x < r->width; ++x, ++cpu) | ||
613 | cpu_online_set(cpu, result); | ||
614 | } | ||
615 | } | ||
616 | |||
617 | /* Last reference to a hardwall is gone, so clear the network. */ | ||
618 | static void hardwall_destroy(struct hardwall_info *rect) | ||
619 | { | ||
620 | struct task_struct *task; | ||
621 | unsigned long flags; | ||
622 | struct cpumask mask; | ||
623 | |||
624 | /* Make sure this file actually represents a rectangle. */ | ||
625 | if (rect == NULL) | ||
626 | return; | ||
627 | |||
628 | /* | ||
629 | * Deactivate any remaining tasks. It's possible to race with | ||
630 | * some other thread that is exiting and hasn't yet called | ||
631 | * deactivate (when freeing its thread_info), so we carefully | ||
632 | * deactivate any remaining tasks before freeing the | ||
633 | * hardwall_info object itself. | ||
634 | */ | ||
635 | spin_lock_irqsave(&hardwall_lock, flags); | ||
636 | list_for_each_entry(task, &rect->task_head, thread.hardwall_list) | ||
637 | _hardwall_deactivate(task); | ||
638 | spin_unlock_irqrestore(&hardwall_lock, flags); | ||
639 | |||
640 | /* Drain the UDN. */ | ||
641 | printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n", | ||
642 | rect->width, rect->height, rect->ulhc_x, rect->ulhc_y); | ||
643 | fill_mask(rect, &mask); | ||
644 | on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1); | ||
645 | on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1); | ||
646 | |||
647 | /* Restart switch and disable firewall. */ | ||
648 | on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1); | ||
649 | |||
650 | /* Now free the rectangle from the list. */ | ||
651 | spin_lock_irqsave(&hardwall_lock, flags); | ||
652 | BUG_ON(!list_empty(&rect->task_head)); | ||
653 | list_del(&rect->list); | ||
654 | spin_unlock_irqrestore(&hardwall_lock, flags); | ||
655 | kfree(rect); | ||
656 | } | ||
657 | |||
658 | |||
659 | /* | ||
660 | * Dump hardwall state via /proc; initialized in arch/tile/kernel/proc.c. | ||
661 | */ | ||
662 | int proc_tile_hardwall_show(struct seq_file *sf, void *v) | ||
663 | { | ||
664 | struct hardwall_info *r; | ||
665 | |||
666 | if (udn_disabled) { | ||
667 | seq_printf(sf, "%dx%d 0,0 pids:\n", smp_width, smp_height); | ||
668 | return 0; | ||
669 | } | ||
670 | |||
671 | spin_lock_irq(&hardwall_lock); | ||
672 | list_for_each_entry(r, &rectangles, list) { | ||
673 | struct task_struct *p; | ||
674 | seq_printf(sf, "%dx%d %d,%d pids:", | ||
675 | r->width, r->height, r->ulhc_x, r->ulhc_y); | ||
676 | list_for_each_entry(p, &r->task_head, thread.hardwall_list) { | ||
677 | unsigned int cpu = cpumask_first(&p->cpus_allowed); | ||
678 | unsigned int x = cpu % smp_width; | ||
679 | unsigned int y = cpu / smp_width; | ||
680 | seq_printf(sf, " %d@%d,%d", p->pid, x, y); | ||
681 | } | ||
682 | seq_printf(sf, "\n"); | ||
683 | } | ||
684 | spin_unlock_irq(&hardwall_lock); | ||
685 | return 0; | ||
686 | } | ||
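For reference, the output format this produces, derived from the seq_printf() format strings above: one line per rectangle, then one pid@x,y entry per activated task. A hypothetical two-task 2x2 rectangle at (4,4) would read:

    2x2 4,4 pids: 1234@4,4 1235@5,4

With "noudn" on the command line, the file instead shows the whole chip as a single rectangle with no pids.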
687 | |||
688 | |||
689 | /* | ||
690 | * Character device support via ioctl/close. | ||
691 | */ | ||
692 | |||
693 | static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b) | ||
694 | { | ||
695 | struct hardwall_info *rect = file->private_data; | ||
696 | |||
697 | if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE) | ||
698 | return -EINVAL; | ||
699 | |||
700 | switch (_IOC_NR(a)) { | ||
701 | case _HARDWALL_CREATE: | ||
702 | if (udn_disabled) | ||
703 | return -ENOSYS; | ||
704 | if (rect != NULL) | ||
705 | return -EALREADY; | ||
706 | rect = hardwall_create(_IOC_SIZE(a), | ||
707 | (const unsigned char __user *)b); | ||
708 | if (IS_ERR(rect)) | ||
709 | return PTR_ERR(rect); | ||
710 | file->private_data = rect; | ||
711 | return 0; | ||
712 | |||
713 | case _HARDWALL_ACTIVATE: | ||
714 | return hardwall_activate(rect); | ||
715 | |||
716 | case _HARDWALL_DEACTIVATE: | ||
717 | if (current->thread.hardwall != rect) | ||
718 | return -EINVAL; | ||
719 | return hardwall_deactivate(current); | ||
720 | |||
721 | default: | ||
722 | return -EINVAL; | ||
723 | } | ||
724 | } | ||
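Taken together with the flush/release handlers below, the life cycle from user space is: open the device, CREATE with a cpumask, pin each thread to one tile in the rectangle, then ACTIVATE. A hedged sketch, assuming the HARDWALL_CREATE()/HARDWALL_ACTIVATE request macros exported by <asm/hardwall.h> and a /dev/hardwall node created by user space (see the note after dev_hardwall_init() below):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm/hardwall.h>       /* HARDWALL_CREATE(), HARDWALL_ACTIVATE */

    int main(void)
    {
            /* 2x2 rectangle at (0,0) on an 8-wide grid: cpus 0, 1, 8, 9. */
            unsigned char bits[8];

            memset(bits, 0, sizeof(bits));
            bits[0] = 0x03;         /* cpus 0 and 1 */
            bits[1] = 0x03;         /* cpus 8 and 9 */

            int fd = open("/dev/hardwall", O_RDWR);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* _IOC_SIZE() of the request carries sizeof(bits) to the kernel. */
            if (ioctl(fd, HARDWALL_CREATE(sizeof(bits)), bits) < 0) {
                    perror("HARDWALL_CREATE");
                    return 1;
            }

            /* ACTIVATE fails with EPERM unless we are bound to a single
               tile inside the rectangle, e.g. via sched_setaffinity(). */
            if (ioctl(fd, HARDWALL_ACTIVATE) < 0)
                    perror("HARDWALL_ACTIVATE");

            pause();        /* closing fd tears the hardwall down */
            return 0;
    }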
725 | |||
726 | #ifdef CONFIG_COMPAT | ||
727 | static long hardwall_compat_ioctl(struct file *file, | ||
728 | unsigned int a, unsigned long b) | ||
729 | { | ||
730 | /* Sign-extend the argument so it can be used as a pointer. */ | ||
731 | return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b)); | ||
732 | } | ||
733 | #endif | ||
734 | |||
735 | /* The user process closed the file; revoke access to user networks. */ | ||
736 | static int hardwall_flush(struct file *file, fl_owner_t owner) | ||
737 | { | ||
738 | struct hardwall_info *rect = file->private_data; | ||
739 | struct task_struct *task, *tmp; | ||
740 | unsigned long flags; | ||
741 | |||
742 | if (rect) { | ||
743 | /* | ||
744 | * NOTE: if multiple threads are activated on this hardwall | ||
745 | * file, the other threads will continue having access to the | ||
746 | * UDN until they are context-switched out and back in again. | ||
747 | * | ||
748 | * NOTE: A NULL files pointer means the task is being torn | ||
749 | * down, so in that case we also deactivate it. | ||
750 | */ | ||
751 | spin_lock_irqsave(&hardwall_lock, flags); | ||
752 | list_for_each_entry_safe(task, tmp, &rect->task_head, | ||
753 | thread.hardwall_list) { | ||
754 | if (task->files == owner || task->files == NULL) | ||
755 | _hardwall_deactivate(task); | ||
756 | } | ||
757 | spin_unlock_irqrestore(&hardwall_lock, flags); | ||
758 | } | ||
759 | |||
760 | return 0; | ||
761 | } | ||
762 | |||
763 | /* This hardwall is gone, so destroy it. */ | ||
764 | static int hardwall_release(struct inode *inode, struct file *file) | ||
765 | { | ||
766 | hardwall_destroy(file->private_data); | ||
767 | return 0; | ||
768 | } | ||
769 | |||
770 | static const struct file_operations dev_hardwall_fops = { | ||
771 | .unlocked_ioctl = hardwall_ioctl, | ||
772 | #ifdef CONFIG_COMPAT | ||
773 | .compat_ioctl = hardwall_compat_ioctl, | ||
774 | #endif | ||
775 | .flush = hardwall_flush, | ||
776 | .release = hardwall_release, | ||
777 | }; | ||
778 | |||
779 | static struct cdev hardwall_dev; | ||
780 | |||
781 | static int __init dev_hardwall_init(void) | ||
782 | { | ||
783 | int rc; | ||
784 | dev_t dev; | ||
785 | |||
786 | rc = alloc_chrdev_region(&dev, 0, 1, "hardwall"); | ||
787 | if (rc < 0) | ||
788 | return rc; | ||
789 | cdev_init(&hardwall_dev, &dev_hardwall_fops); | ||
790 | rc = cdev_add(&hardwall_dev, dev, 1); | ||
791 | if (rc < 0) | ||
792 | return rc; | ||
793 | |||
794 | return 0; | ||
795 | } | ||
796 | late_initcall(dev_hardwall_init); | ||
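Nothing here creates a device node, and the major number is dynamic, so user space (or an init script) has to look it up in /proc/devices and mknod the node itself. A hedged standalone sketch; the /dev/hardwall path is a convention, not something this patch establishes:

    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <sys/sysmacros.h>
    #include <sys/types.h>

    /* Sketch: look up the dynamic major and create the node by hand. */
    int main(void)
    {
            FILE *f = fopen("/proc/devices", "r");
            char line[128], name[64];
            int major;

            if (!f) {
                    perror("/proc/devices");
                    return 1;
            }
            while (fgets(line, sizeof(line), f)) {
                    if (sscanf(line, "%d %63s", &major, name) == 2 &&
                        strcmp(name, "hardwall") == 0) {
                            fclose(f);
                            if (mknod("/dev/hardwall", S_IFCHR | 0600,
                                      makedev(major, 0)) != 0) {
                                    perror("mknod");
                                    return 1;
                            }
                            return 0;
                    }
            }
            fclose(f);
            fprintf(stderr, "hardwall: not in /proc/devices\n");
            return 1;
    }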
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S new file mode 100644 index 000000000000..2b4f6c091701 --- /dev/null +++ b/arch/tile/kernel/head_32.S | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * TILE startup code. | ||
15 | */ | ||
16 | |||
17 | #include <linux/linkage.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/pgtable.h> | ||
21 | #include <asm/thread_info.h> | ||
22 | #include <asm/processor.h> | ||
23 | #include <asm/asm-offsets.h> | ||
24 | #include <hv/hypervisor.h> | ||
25 | #include <arch/chip.h> | ||
26 | |||
27 | /* | ||
28 | * This module contains the entry code for kernel images. It performs the | ||
29 | * minimal setup needed to call the generic C routines. | ||
30 | */ | ||
31 | |||
32 | __HEAD | ||
33 | ENTRY(_start) | ||
34 | /* Notify the hypervisor of what version of the API we want */ | ||
35 | { | ||
36 | movei r1, TILE_CHIP | ||
37 | movei r2, TILE_CHIP_REV | ||
38 | } | ||
39 | { | ||
40 | moveli r0, _HV_VERSION | ||
41 | jal hv_init | ||
42 | } | ||
43 | /* Get a reasonable default ASID in r0 */ | ||
44 | { | ||
45 | move r0, zero | ||
46 | jal hv_inquire_asid | ||
47 | } | ||
48 | /* Install the default page table */ | ||
49 | { | ||
50 | moveli r6, lo16(swapper_pgprot - PAGE_OFFSET) | ||
51 | move r4, r0 /* use starting ASID of range for this page table */ | ||
52 | } | ||
53 | { | ||
54 | moveli r0, lo16(swapper_pg_dir - PAGE_OFFSET) | ||
55 | auli r6, r6, ha16(swapper_pgprot - PAGE_OFFSET) | ||
56 | } | ||
57 | { | ||
58 | lw r2, r6 | ||
59 | addi r6, r6, 4 | ||
60 | } | ||
61 | { | ||
62 | lw r3, r6 | ||
63 | auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET) | ||
64 | } | ||
65 | { | ||
66 | inv r6 | ||
67 | move r1, zero /* high 32 bits of CPA is zero */ | ||
68 | } | ||
69 | { | ||
70 | moveli lr, lo16(1f) | ||
71 | move r5, zero | ||
72 | } | ||
73 | { | ||
74 | auli lr, lr, ha16(1f) | ||
75 | j hv_install_context | ||
76 | } | ||
77 | 1: | ||
78 | |||
79 | /* Get our processor number and save it away in SAVE_1_0. */ | ||
80 | jal hv_inquire_topology | ||
81 | mulll_uu r4, r1, r2 /* r1 == y, r2 == width */ | ||
82 | add r4, r4, r0 /* r0 == x, so r4 == cpu == y*width + x */ | ||
83 | |||
84 | #ifdef CONFIG_SMP | ||
85 | /* | ||
86 | * Load up our per-cpu offset. When the first (master) tile | ||
87 | * boots, this value is still zero, so we will load boot_pc | ||
88 | * with start_kernel, and boot_sp with init_stack + THREAD_SIZE. | ||
89 | * The master tile initializes the per-cpu offset array, so that | ||
90 | * when subsequent (secondary) tiles boot, they will instead load | ||
91 | * from their per-cpu versions of boot_sp and boot_pc. | ||
92 | */ | ||
93 | moveli r5, lo16(__per_cpu_offset) | ||
94 | auli r5, r5, ha16(__per_cpu_offset) | ||
95 | s2a r5, r4, r5 | ||
96 | lw r5, r5 | ||
97 | bnz r5, 1f | ||
98 | |||
99 | /* | ||
100 | * Save the width and height to the smp_topology variable | ||
101 | * for later use. | ||
102 | */ | ||
103 | moveli r0, lo16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET) | ||
104 | auli r0, r0, ha16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET) | ||
105 | { | ||
106 | sw r0, r2 | ||
107 | addi r0, r0, (HV_TOPOLOGY_HEIGHT_OFFSET - HV_TOPOLOGY_WIDTH_OFFSET) | ||
108 | } | ||
109 | sw r0, r3 | ||
110 | 1: | ||
111 | #else | ||
112 | move r5, zero | ||
113 | #endif | ||
114 | |||
115 | /* Load and go with the correct pc and sp. */ | ||
116 | { | ||
117 | addli r1, r5, lo16(boot_sp) | ||
118 | addli r0, r5, lo16(boot_pc) | ||
119 | } | ||
120 | { | ||
121 | auli r1, r1, ha16(boot_sp) | ||
122 | auli r0, r0, ha16(boot_pc) | ||
123 | } | ||
124 | lw r0, r0 | ||
125 | lw sp, r1 | ||
126 | or r4, sp, r4 | ||
127 | mtspr SYSTEM_SAVE_1_0, r4 /* save ksp0 + cpu */ | ||
128 | addi sp, sp, -STACK_TOP_DELTA | ||
129 | { | ||
130 | move lr, zero /* stop backtraces in the called function */ | ||
131 | jr r0 | ||
132 | } | ||
133 | ENDPROC(_start) | ||
134 | |||
135 | .section ".bss.page_aligned","w" | ||
136 | .align PAGE_SIZE | ||
137 | ENTRY(empty_zero_page) | ||
138 | .fill PAGE_SIZE,1,0 | ||
139 | END(empty_zero_page) | ||
140 | |||
141 | .macro PTE va, cpa, bits1, no_org=0 | ||
142 | .ifeq \no_org | ||
143 | .org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE | ||
144 | .endif | ||
145 | .word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \ | ||
146 | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) | ||
147 | .word (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN) | ||
148 | .endm | ||
149 | |||
150 | .section ".data.page_aligned","wa" | ||
151 | .align PAGE_SIZE | ||
152 | ENTRY(swapper_pg_dir) | ||
153 | /* | ||
154 | * All data pages from PAGE_OFFSET to MEM_USER_INTRPT are mapped as | ||
155 | * VA = PA + PAGE_OFFSET. We remap things with more precise access | ||
156 | * permissions and more respect for size of RAM later. | ||
157 | */ | ||
158 | .set addr, 0 | ||
159 | .rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT | ||
160 | PTE addr + PAGE_OFFSET, addr, HV_PTE_READABLE | HV_PTE_WRITABLE | ||
161 | .set addr, addr + PGDIR_SIZE | ||
162 | .endr | ||
163 | |||
164 | /* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */ | ||
165 | PTE MEM_SV_INTRPT, 0, HV_PTE_READABLE | HV_PTE_EXECUTABLE | ||
166 | .org swapper_pg_dir + HV_L1_SIZE | ||
167 | END(swapper_pg_dir) | ||
168 | |||
169 | /* | ||
170 | * Isolate swapper_pgprot to its own cache line, since each cpu | ||
171 | * starting up will read it using VA-is-PA and local homing. | ||
172 | * This would otherwise likely conflict with other data on the cache | ||
173 | * line, once we have set its permanent home in the page tables. | ||
174 | */ | ||
175 | __INITDATA | ||
176 | .align CHIP_L2_LINE_SIZE() | ||
177 | ENTRY(swapper_pgprot) | ||
178 | PTE 0, 0, HV_PTE_READABLE | HV_PTE_WRITABLE, 1 | ||
179 | .align CHIP_L2_LINE_SIZE() | ||
180 | END(swapper_pgprot) | ||
diff --git a/arch/tile/kernel/hvglue.lds b/arch/tile/kernel/hvglue.lds new file mode 100644 index 000000000000..2b7cd0a659a9 --- /dev/null +++ b/arch/tile/kernel/hvglue.lds | |||
@@ -0,0 +1,58 @@ | |||
1 | /* Hypervisor call vector addresses; see <hv/hypervisor.h> */ | ||
2 | hv_init = TEXT_OFFSET + 0x10020; | ||
3 | hv_install_context = TEXT_OFFSET + 0x10040; | ||
4 | hv_sysconf = TEXT_OFFSET + 0x10060; | ||
5 | hv_get_rtc = TEXT_OFFSET + 0x10080; | ||
6 | hv_set_rtc = TEXT_OFFSET + 0x100a0; | ||
7 | hv_flush_asid = TEXT_OFFSET + 0x100c0; | ||
8 | hv_flush_page = TEXT_OFFSET + 0x100e0; | ||
9 | hv_flush_pages = TEXT_OFFSET + 0x10100; | ||
10 | hv_restart = TEXT_OFFSET + 0x10120; | ||
11 | hv_halt = TEXT_OFFSET + 0x10140; | ||
12 | hv_power_off = TEXT_OFFSET + 0x10160; | ||
13 | hv_inquire_physical = TEXT_OFFSET + 0x10180; | ||
14 | hv_inquire_memory_controller = TEXT_OFFSET + 0x101a0; | ||
15 | hv_inquire_virtual = TEXT_OFFSET + 0x101c0; | ||
16 | hv_inquire_asid = TEXT_OFFSET + 0x101e0; | ||
17 | hv_nanosleep = TEXT_OFFSET + 0x10200; | ||
18 | hv_console_read_if_ready = TEXT_OFFSET + 0x10220; | ||
19 | hv_console_write = TEXT_OFFSET + 0x10240; | ||
20 | hv_downcall_dispatch = TEXT_OFFSET + 0x10260; | ||
21 | hv_inquire_topology = TEXT_OFFSET + 0x10280; | ||
22 | hv_fs_findfile = TEXT_OFFSET + 0x102a0; | ||
23 | hv_fs_fstat = TEXT_OFFSET + 0x102c0; | ||
24 | hv_fs_pread = TEXT_OFFSET + 0x102e0; | ||
25 | hv_physaddr_read64 = TEXT_OFFSET + 0x10300; | ||
26 | hv_physaddr_write64 = TEXT_OFFSET + 0x10320; | ||
27 | hv_get_command_line = TEXT_OFFSET + 0x10340; | ||
28 | hv_set_caching = TEXT_OFFSET + 0x10360; | ||
29 | hv_bzero_page = TEXT_OFFSET + 0x10380; | ||
30 | hv_register_message_state = TEXT_OFFSET + 0x103a0; | ||
31 | hv_send_message = TEXT_OFFSET + 0x103c0; | ||
32 | hv_receive_message = TEXT_OFFSET + 0x103e0; | ||
33 | hv_inquire_context = TEXT_OFFSET + 0x10400; | ||
34 | hv_start_all_tiles = TEXT_OFFSET + 0x10420; | ||
35 | hv_dev_open = TEXT_OFFSET + 0x10440; | ||
36 | hv_dev_close = TEXT_OFFSET + 0x10460; | ||
37 | hv_dev_pread = TEXT_OFFSET + 0x10480; | ||
38 | hv_dev_pwrite = TEXT_OFFSET + 0x104a0; | ||
39 | hv_dev_poll = TEXT_OFFSET + 0x104c0; | ||
40 | hv_dev_poll_cancel = TEXT_OFFSET + 0x104e0; | ||
41 | hv_dev_preada = TEXT_OFFSET + 0x10500; | ||
42 | hv_dev_pwritea = TEXT_OFFSET + 0x10520; | ||
43 | hv_flush_remote = TEXT_OFFSET + 0x10540; | ||
44 | hv_console_putc = TEXT_OFFSET + 0x10560; | ||
45 | hv_inquire_tiles = TEXT_OFFSET + 0x10580; | ||
46 | hv_confstr = TEXT_OFFSET + 0x105a0; | ||
47 | hv_reexec = TEXT_OFFSET + 0x105c0; | ||
48 | hv_set_command_line = TEXT_OFFSET + 0x105e0; | ||
49 | hv_clear_intr = TEXT_OFFSET + 0x10600; | ||
50 | hv_enable_intr = TEXT_OFFSET + 0x10620; | ||
51 | hv_disable_intr = TEXT_OFFSET + 0x10640; | ||
52 | hv_raise_intr = TEXT_OFFSET + 0x10660; | ||
53 | hv_trigger_ipi = TEXT_OFFSET + 0x10680; | ||
54 | hv_store_mapping = TEXT_OFFSET + 0x106a0; | ||
55 | hv_inquire_realpa = TEXT_OFFSET + 0x106c0; | ||
56 | hv_flush_all = TEXT_OFFSET + 0x106e0; | ||
57 | hv_get_ipi_pte = TEXT_OFFSET + 0x10700; | ||
58 | hv_glue_internals = TEXT_OFFSET + 0x10720; | ||
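Every entry above sits 0x20 bytes past the previous one, i.e. the hypervisor exposes a dispatch table of 32-byte slots starting at TEXT_OFFSET + 0x10000, with hv_init occupying slot 1. A sketch of the addressing; the slot numbering is inferred from the spacing, and the TEXT_OFFSET value is a placeholder, not the real kernel constant:

    #include <stdio.h>

    #define TEXT_OFFSET 0x10000UL           /* placeholder for illustration */
    #define HV_DISPATCH_ENTRY_SIZE 0x20UL   /* inferred from the table above */

    static unsigned long hv_slot_addr(int slot)
    {
            return TEXT_OFFSET + 0x10000UL + slot * HV_DISPATCH_ENTRY_SIZE;
    }

    int main(void)
    {
            printf("hv_init            = %#lx\n", hv_slot_addr(1));  /* +0x10020 */
            printf("hv_install_context = %#lx\n", hv_slot_addr(2));  /* +0x10040 */
            printf("hv_glue_internals  = %#lx\n", hv_slot_addr(57)); /* +0x10720 */
            return 0;
    }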
diff --git a/arch/tile/kernel/init_task.c b/arch/tile/kernel/init_task.c new file mode 100644 index 000000000000..928b31870669 --- /dev/null +++ b/arch/tile/kernel/init_task.c | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/mm.h> | ||
16 | #include <linux/fs.h> | ||
17 | #include <linux/init_task.h> | ||
18 | #include <linux/mqueue.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/start_kernel.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | |||
23 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); | ||
24 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | ||
25 | |||
26 | /* | ||
27 | * Initial thread structure. | ||
28 | * | ||
29 | * We need to make sure that this is THREAD_SIZE aligned due to the | ||
30 | * way process stacks are handled. This is done by having a special | ||
31 | * "init_task" linker map entry.. | ||
32 | */ | ||
33 | union thread_union init_thread_union __init_task_data = { | ||
34 | INIT_THREAD_INFO(init_task) | ||
35 | }; | ||
36 | |||
37 | /* | ||
38 | * Initial task structure. | ||
39 | * | ||
40 | * All other task structs will be allocated on slabs in fork.c | ||
41 | */ | ||
42 | struct task_struct init_task = INIT_TASK(init_task); | ||
43 | EXPORT_SYMBOL(init_task); | ||
44 | |||
45 | /* | ||
46 | * per-CPU stack and boot info. | ||
47 | */ | ||
48 | DEFINE_PER_CPU(unsigned long, boot_sp) = | ||
49 | (unsigned long)init_stack + THREAD_SIZE; | ||
50 | |||
51 | #ifdef CONFIG_SMP | ||
52 | DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel; | ||
53 | #else | ||
54 | /* | ||
55 | * The variable must be __initdata since it references __init code. | ||
56 | * With CONFIG_SMP it is per-cpu data, which is exempt from validation. | ||
57 | */ | ||
58 | unsigned long __initdata boot_pc = (unsigned long)start_kernel; | ||
59 | #endif | ||
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S new file mode 100644 index 000000000000..3404c75f8e64 --- /dev/null +++ b/arch/tile/kernel/intvec_32.S | |||
@@ -0,0 +1,2008 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Linux interrupt vectors. | ||
15 | */ | ||
16 | |||
17 | #include <linux/linkage.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/unistd.h> | ||
21 | #include <asm/ptrace.h> | ||
22 | #include <asm/thread_info.h> | ||
23 | #include <asm/irqflags.h> | ||
24 | #include <asm/atomic.h> | ||
25 | #include <asm/asm-offsets.h> | ||
26 | #include <hv/hypervisor.h> | ||
27 | #include <arch/abi.h> | ||
28 | #include <arch/interrupts.h> | ||
29 | #include <arch/spr_def.h> | ||
30 | |||
31 | #ifdef CONFIG_PREEMPT | ||
32 | # error "No support for kernel preemption currently" | ||
33 | #endif | ||
34 | |||
35 | #if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48 | ||
36 | # error INT_INTCTRL_1 coded to set high interrupt mask | ||
37 | #endif | ||
38 | |||
39 | #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg) | ||
40 | |||
41 | #define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR) | ||
42 | |||
43 | #if !CHIP_HAS_WH64() | ||
44 | /* By defining wh64 as a no-op here, the code below can use it unconditionally. */ | ||
45 | .macro wh64 reg | ||
46 | .endm | ||
47 | #endif | ||
48 | |||
49 | .macro push_reg reg, ptr=sp, delta=-4 | ||
50 | { | ||
51 | sw \ptr, \reg | ||
52 | addli \ptr, \ptr, \delta | ||
53 | } | ||
54 | .endm | ||
55 | |||
56 | .macro pop_reg reg, ptr=sp, delta=4 | ||
57 | { | ||
58 | lw \reg, \ptr | ||
59 | addli \ptr, \ptr, \delta | ||
60 | } | ||
61 | .endm | ||
62 | |||
63 | .macro pop_reg_zero reg, zreg, ptr=sp, delta=4 | ||
64 | { | ||
65 | move \zreg, zero | ||
66 | lw \reg, \ptr | ||
67 | addi \ptr, \ptr, \delta | ||
68 | } | ||
69 | .endm | ||
70 | |||
71 | .macro push_extra_callee_saves reg | ||
72 | PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51)) | ||
73 | push_reg r51, \reg | ||
74 | push_reg r50, \reg | ||
75 | push_reg r49, \reg | ||
76 | push_reg r48, \reg | ||
77 | push_reg r47, \reg | ||
78 | push_reg r46, \reg | ||
79 | push_reg r45, \reg | ||
80 | push_reg r44, \reg | ||
81 | push_reg r43, \reg | ||
82 | push_reg r42, \reg | ||
83 | push_reg r41, \reg | ||
84 | push_reg r40, \reg | ||
85 | push_reg r39, \reg | ||
86 | push_reg r38, \reg | ||
87 | push_reg r37, \reg | ||
88 | push_reg r36, \reg | ||
89 | push_reg r35, \reg | ||
90 | push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34) | ||
91 | .endm | ||
92 | |||
93 | .macro panic str | ||
94 | .pushsection .rodata, "a" | ||
95 | 1: | ||
96 | .asciz "\str" | ||
97 | .popsection | ||
98 | { | ||
99 | moveli r0, lo16(1b) | ||
100 | } | ||
101 | { | ||
102 | auli r0, r0, ha16(1b) | ||
103 | jal panic | ||
104 | } | ||
105 | .endm | ||
106 | |||
107 | #ifdef __COLLECT_LINKER_FEEDBACK__ | ||
108 | .pushsection .text.intvec_feedback,"ax" | ||
109 | intvec_feedback: | ||
110 | .popsection | ||
111 | #endif | ||
112 | |||
113 | /* | ||
114 | * Default interrupt handler. | ||
115 | * | ||
116 | * vecnum is the vector number; the handler is placed at (vecnum << 8). | ||
117 | * c_routine is the C routine we'll call. | ||
118 | * | ||
119 | * The C routine is passed two arguments: | ||
120 | * - A pointer to the pt_regs state. | ||
121 | * - The interrupt vector number. | ||
122 | * | ||
123 | * The "processing" argument specifies the code for processing | ||
124 | * the interrupt. Defaults to "handle_interrupt". | ||
125 | */ | ||
126 | .macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt | ||
127 | .org (\vecnum << 8) | ||
128 | intvec_\vecname: | ||
129 | .ifc \vecnum, INT_SWINT_1 | ||
130 | blz TREG_SYSCALL_NR_NAME, sys_cmpxchg | ||
131 | .endif | ||
132 | |||
133 | /* Temporarily save a register so we have somewhere to work. */ | ||
134 | |||
135 | mtspr SYSTEM_SAVE_1_1, r0 | ||
136 | mfspr r0, EX_CONTEXT_1_1 | ||
137 | |||
138 | /* The cmpxchg code clears sp to force us to reset it here on fault. */ | ||
139 | { | ||
140 | bz sp, 2f | ||
141 | andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ | ||
142 | } | ||
143 | |||
144 | .ifc \vecnum, INT_DOUBLE_FAULT | ||
145 | /* | ||
146 | * For double-faults from user-space, fall through to the normal | ||
147 | * register save and stack setup path. Otherwise, it's the | ||
148 | * hypervisor giving us one last chance to dump diagnostics, and we | ||
149 | * branch to the kernel_double_fault routine to do so. | ||
150 | */ | ||
151 | bz r0, 1f | ||
152 | j _kernel_double_fault | ||
153 | 1: | ||
154 | .else | ||
155 | /* | ||
156 | * If we're coming from user-space, then set sp to the top of | ||
157 | * the kernel stack. Otherwise, assume sp is already valid. | ||
158 | */ | ||
159 | { | ||
160 | bnz r0, 0f | ||
161 | move r0, sp | ||
162 | } | ||
163 | .endif | ||
164 | |||
165 | .ifc \c_routine, do_page_fault | ||
166 | /* | ||
167 | * The page_fault handler may be downcalled directly by the | ||
168 | * hypervisor even when Linux is running and has ICS set. | ||
169 | * | ||
170 | * In this case the contents of EX_CONTEXT_1_1 reflect the | ||
171 | * previous fault and can't be relied on to choose whether or | ||
172 | * not to reinitialize the stack pointer. So we add a test | ||
173 | * to see whether SYSTEM_SAVE_1_2 has the high bit set, | ||
174 | * and if so we don't reinitialize sp, since we must be coming | ||
175 | * from Linux. (In fact the precise case is !(val & ~1), | ||
176 | * but any Linux PC has to have the high bit set.) | ||
177 | * | ||
178 | * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for | ||
179 | * any path that turns into a downcall to one of our TLB handlers. | ||
180 | */ | ||
181 | mfspr r0, SYSTEM_SAVE_1_2 | ||
182 | { | ||
183 | blz r0, 0f /* high bit in S_S_1_2 is for a PC to use */ | ||
184 | move r0, sp | ||
185 | } | ||
186 | .endif | ||
187 | |||
188 | 2: | ||
189 | /* | ||
190 | * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and | ||
191 | * the current stack top in the higher bits. So we recover | ||
192 | * our stack top by just masking off the low bits, then | ||
193 | * point sp at the top aligned address on the actual stack page. | ||
194 | */ | ||
195 | mfspr r0, SYSTEM_SAVE_1_0 | ||
196 | mm r0, r0, zero, LOG2_THREAD_SIZE, 31 | ||
197 | |||
198 | 0: | ||
199 | /* | ||
200 | * Align the stack mod 64 so we can properly predict what | ||
201 | * cache lines we need to write-hint to reduce memory fetch | ||
202 | * latency as we enter the kernel. The layout of memory is | ||
203 | * as follows, with cache line 0 at the lowest VA, and cache | ||
204 | * line 4 just below the r0 value this "andi" computes. | ||
205 | * Note that we never write to cache line 4, and we skip | ||
206 | * cache line 1 for syscalls. | ||
207 | * | ||
208 | * cache line 4: ptregs padding (two words) | ||
209 | * cache line 3: r46...lr, pc, ex1, faultnum, orig_r0, flags, pad | ||
210 | * cache line 2: r30...r45 | ||
211 | * cache line 1: r14...r29 | ||
212 | * cache line 0: 2 x frame, r0..r13 | ||
213 | */ | ||
214 | andi r0, r0, -64 | ||
215 | |||
216 | /* | ||
217 | * Push the first four registers on the stack, so that we can set | ||
218 | * them to vector-unique values before we jump to the common code. | ||
219 | * | ||
220 | * Registers are pushed on the stack as a struct pt_regs, | ||
221 | * with the sp initially just above the struct, and when we're | ||
222 | * done, sp points to the base of the struct, minus | ||
223 | * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code. | ||
224 | * | ||
225 | * This routine saves just the first four registers, plus the | ||
226 | * stack context so we can do proper backtracing right away, | ||
227 | * and defers to handle_interrupt to save the rest. | ||
228 | * The backtracer needs pc, ex1, lr, sp, r52, and faultnum. | ||
229 | */ | ||
230 | addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP) | ||
231 | wh64 r0 /* cache line 3 */ | ||
232 | { | ||
233 | sw r0, lr | ||
234 | addli r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR | ||
235 | } | ||
236 | { | ||
237 | sw r0, sp | ||
238 | addli sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP | ||
239 | } | ||
240 | { | ||
241 | sw sp, r52 | ||
242 | addli sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52) | ||
243 | } | ||
244 | wh64 sp /* cache line 0 */ | ||
245 | { | ||
246 | sw sp, r1 | ||
247 | addli sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1) | ||
248 | } | ||
249 | { | ||
250 | sw sp, r2 | ||
251 | addli sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2) | ||
252 | } | ||
253 | { | ||
254 | sw sp, r3 | ||
255 | addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3) | ||
256 | } | ||
257 | mfspr r0, EX_CONTEXT_1_0 | ||
258 | .ifc \processing,handle_syscall | ||
259 | /* | ||
260 | * Bump the saved PC by one bundle so that when we return, we won't | ||
261 | * execute the same swint instruction again. We need to do this while | ||
262 | * we're in the critical section. | ||
263 | */ | ||
264 | addi r0, r0, 8 | ||
265 | .endif | ||
266 | { | ||
267 | sw sp, r0 | ||
268 | addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC | ||
269 | } | ||
270 | mfspr r0, EX_CONTEXT_1_1 | ||
271 | { | ||
272 | sw sp, r0 | ||
273 | addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1 | ||
274 | /* | ||
275 | * Use r0 for syscalls so it's a temporary; use r1 for interrupts | ||
276 | * so that it gets passed through unchanged to the handler routine. | ||
277 | * Note that the .if conditional confusingly spans bundles. | ||
278 | */ | ||
279 | .ifc \processing,handle_syscall | ||
280 | movei r0, \vecnum | ||
281 | } | ||
282 | { | ||
283 | sw sp, r0 | ||
284 | .else | ||
285 | movei r1, \vecnum | ||
286 | } | ||
287 | { | ||
288 | sw sp, r1 | ||
289 | .endif | ||
290 | addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM | ||
291 | } | ||
292 | mfspr r0, SYSTEM_SAVE_1_1 /* Original r0 */ | ||
293 | { | ||
294 | sw sp, r0 | ||
295 | addi sp, sp, -PTREGS_OFFSET_REG(0) - 4 | ||
296 | } | ||
297 | { | ||
298 | sw sp, zero /* write zero into "Next SP" frame pointer */ | ||
299 | addi sp, sp, -4 /* leave SP pointing at bottom of frame */ | ||
300 | } | ||
301 | .ifc \processing,handle_syscall | ||
302 | j handle_syscall | ||
303 | .else | ||
304 | /* | ||
305 | * Capture per-interrupt SPR context to registers. | ||
306 | * We overload the meaning of r3 on this path such that if its bit 31 | ||
307 | * is set, we have to mask all interrupts including NMIs before | ||
308 | * clearing the interrupt critical section bit. | ||
309 | * See discussion below at "finish_interrupt_save". | ||
310 | */ | ||
311 | .ifc \c_routine, do_page_fault | ||
312 | mfspr r2, SYSTEM_SAVE_1_3 /* address of page fault */ | ||
313 | mfspr r3, SYSTEM_SAVE_1_2 /* info about page fault */ | ||
314 | .else | ||
315 | .ifc \vecnum, INT_DOUBLE_FAULT | ||
316 | { | ||
317 | mfspr r2, SYSTEM_SAVE_1_2 /* double fault info from HV */ | ||
318 | movei r3, 0 | ||
319 | } | ||
320 | .else | ||
321 | .ifc \c_routine, do_trap | ||
322 | { | ||
323 | mfspr r2, GPV_REASON | ||
324 | movei r3, 0 | ||
325 | } | ||
326 | .else | ||
327 | .ifc \c_routine, op_handle_perf_interrupt | ||
328 | { | ||
329 | mfspr r2, PERF_COUNT_STS | ||
330 | movei r3, -1 /* not used, but set for consistency */ | ||
331 | } | ||
332 | .else | ||
333 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
334 | .ifc \c_routine, op_handle_aux_perf_interrupt | ||
335 | { | ||
336 | mfspr r2, AUX_PERF_COUNT_STS | ||
337 | movei r3, -1 /* not used, but set for consistency */ | ||
338 | } | ||
339 | .else | ||
340 | #endif | ||
341 | movei r3, 0 | ||
342 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
343 | .endif | ||
344 | #endif | ||
345 | .endif | ||
346 | .endif | ||
347 | .endif | ||
348 | .endif | ||
349 | /* Put function pointer in r0 */ | ||
350 | moveli r0, lo16(\c_routine) | ||
351 | { | ||
352 | auli r0, r0, ha16(\c_routine) | ||
353 | j \processing | ||
354 | } | ||
355 | .endif | ||
356 | ENDPROC(intvec_\vecname) | ||
357 | |||
358 | #ifdef __COLLECT_LINKER_FEEDBACK__ | ||
359 | .pushsection .text.intvec_feedback,"ax" | ||
360 | .org (\vecnum << 5) | ||
361 | FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8) | ||
362 | jrp lr | ||
363 | .popsection | ||
364 | #endif | ||
365 | |||
366 | .endm | ||
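Later in this file each vector is emitted with a single invocation of this macro, for example int_hand INT_ITLB_MISS, ITLB_MISS, do_page_fault for TLB misses; syscall vectors additionally pass handle_syscall as the processing argument, so they take the lighter-weight register-save path described at finish_interrupt_save below.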
367 | |||
368 | |||
369 | /* | ||
370 | * Save the rest of the registers that we didn't save in the actual | ||
371 | * vector itself. We can't use r0-r10 inclusive here. | ||
372 | */ | ||
373 | .macro finish_interrupt_save, function | ||
374 | |||
375 | /* If it's a syscall, save a proper orig_r0, otherwise just zero. */ | ||
376 | PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0) | ||
377 | { | ||
378 | .ifc \function,handle_syscall | ||
379 | sw r52, r0 | ||
380 | .else | ||
381 | sw r52, zero | ||
382 | .endif | ||
383 | PTREGS_PTR(r52, PTREGS_OFFSET_TP) | ||
384 | } | ||
385 | |||
386 | /* | ||
387 | * For ordinary syscalls, we save neither caller- nor callee- | ||
388 | * save registers, since the syscall invoker doesn't expect the | ||
389 | * caller-saves to be saved, and the called kernel functions will | ||
390 | * take care of saving the callee-saves for us. | ||
391 | * | ||
392 | * For interrupts we save just the caller-save registers. Saving | ||
393 | * them is required (since the "caller" can't save them). Again, | ||
394 | * the called kernel functions will restore the callee-save | ||
395 | * registers for us appropriately. | ||
396 | * | ||
397 | * On return, we normally restore nothing special for syscalls, | ||
398 | * and just the caller-save registers for interrupts. | ||
399 | * | ||
400 | * However, there are some important caveats to all this: | ||
401 | * | ||
402 | * - We always save a few callee-save registers to give us | ||
403 | * some scratchpad registers to carry across function calls. | ||
404 | * | ||
405 | * - fork/vfork/etc require us to save all the callee-save | ||
406 | * registers, which we do in PTREGS_SYSCALL_ALL_REGS, below. | ||
407 | * | ||
408 | * - We always save r0..r5 and r10 for syscalls, since we need | ||
409 | * to reload them a bit later for the actual kernel call, and | ||
410 | * since we might need them for -ERESTARTNOINTR, etc. | ||
411 | * | ||
412 | * - Before invoking a signal handler, we save the unsaved | ||
413 | * callee-save registers so they are visible to the | ||
414 | * signal handler or any ptracer. | ||
415 | * | ||
416 | * - If the unsaved callee-save registers are modified, we set | ||
417 | * a bit in pt_regs so we know to reload them from pt_regs | ||
418 | * and not just rely on the kernel function unwinding. | ||
419 | * (Done for ptrace register writes and SA_SIGINFO handler.) | ||
420 | */ | ||
421 | { | ||
422 | sw r52, tp | ||
423 | PTREGS_PTR(r52, PTREGS_OFFSET_REG(33)) | ||
424 | } | ||
425 | wh64 r52 /* cache line 2 */ | ||
426 | push_reg r33, r52 | ||
427 | push_reg r32, r52 | ||
428 | push_reg r31, r52 | ||
429 | .ifc \function,handle_syscall | ||
430 | push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30) | ||
431 | push_reg TREG_SYSCALL_NR_NAME, r52, \ | ||
432 | PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL | ||
433 | .else | ||
434 | |||
435 | push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30) | ||
436 | wh64 r52 /* cache line 1 */ | ||
437 | push_reg r29, r52 | ||
438 | push_reg r28, r52 | ||
439 | push_reg r27, r52 | ||
440 | push_reg r26, r52 | ||
441 | push_reg r25, r52 | ||
442 | push_reg r24, r52 | ||
443 | push_reg r23, r52 | ||
444 | push_reg r22, r52 | ||
445 | push_reg r21, r52 | ||
446 | push_reg r20, r52 | ||
447 | push_reg r19, r52 | ||
448 | push_reg r18, r52 | ||
449 | push_reg r17, r52 | ||
450 | push_reg r16, r52 | ||
451 | push_reg r15, r52 | ||
452 | push_reg r14, r52 | ||
453 | push_reg r13, r52 | ||
454 | push_reg r12, r52 | ||
455 | push_reg r11, r52 | ||
456 | push_reg r10, r52 | ||
457 | push_reg r9, r52 | ||
458 | push_reg r8, r52 | ||
459 | push_reg r7, r52 | ||
460 | push_reg r6, r52 | ||
461 | |||
462 | .endif | ||
463 | |||
464 | push_reg r5, r52 | ||
465 | sw r52, r4 | ||
466 | |||
467 | /* Load tp with our per-cpu offset. */ | ||
468 | #ifdef CONFIG_SMP | ||
469 | { | ||
470 | mfspr r20, SYSTEM_SAVE_1_0 | ||
471 | moveli r21, lo16(__per_cpu_offset) | ||
472 | } | ||
473 | { | ||
474 | auli r21, r21, ha16(__per_cpu_offset) | ||
475 | mm r20, r20, zero, 0, LOG2_THREAD_SIZE-1 | ||
476 | } | ||
477 | s2a r20, r20, r21 | ||
478 | lw tp, r20 | ||
479 | #else | ||
480 | move tp, zero | ||
481 | #endif | ||
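In rough C terms, the SMP branch above computes the following (a sketch only, assuming the cpu number sits in the low LOG2_THREAD_SIZE bits of SYSTEM_SAVE_1_0, as the "mm" mask suggests; function and parameter names are illustrative):

    /*
     * Illustrative only, not kernel code: recover the cpu number and
     * index the 32-bit __per_cpu_offset table. The "s2a" above does
     * the multiply-by-4 that C pointer indexing does here.
     */
    static unsigned long compute_tp(unsigned long system_save_1_0,
                                    const unsigned long *per_cpu_offset)
    {
            unsigned long cpu = system_save_1_0 & ((1UL << LOG2_THREAD_SIZE) - 1);
            return per_cpu_offset[cpu];
    }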
482 | |||
483 | /* | ||
484 | * If we will be returning to the kernel, we will need to | ||
485 | * reset the interrupt masks to the state they had before. | ||
486 | * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled. | ||
487 | * We load flags in r32 here so we can jump to .Lrestore_regs | ||
488 | * directly after do_page_fault_ics() if necessary. | ||
489 | */ | ||
490 | mfspr r32, EX_CONTEXT_1_1 | ||
491 | { | ||
492 | andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ | ||
493 | PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS) | ||
494 | } | ||
495 | bzt r32, 1f /* zero if from user space */ | ||
496 | IRQS_DISABLED(r32) /* zero if irqs enabled */ | ||
497 | #if PT_FLAGS_DISABLE_IRQ != 1 | ||
498 | # error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix | ||
499 | #endif | ||
500 | 1: | ||
501 | .ifnc \function,handle_syscall | ||
502 | /* Record the fact that we saved the caller-save registers above. */ | ||
503 | ori r32, r32, PT_FLAGS_CALLER_SAVES | ||
504 | .endif | ||
505 | sw r21, r32 | ||
506 | |||
507 | #ifdef __COLLECT_LINKER_FEEDBACK__ | ||
508 | /* | ||
509 | * Notify the feedback routines that we were in the | ||
510 | * appropriate fixed interrupt vector area. Note that we | ||
511 | * still have ICS set at this point, so we can't invoke any | ||
512 | * atomic operations or we will panic. The feedback | ||
513 | * routines internally preserve r0..r10 and r30 up. | ||
514 | */ | ||
515 | .ifnc \function,handle_syscall | ||
516 | shli r20, r1, 5 | ||
517 | .else | ||
518 | moveli r20, INT_SWINT_1 << 5 | ||
519 | .endif | ||
520 | addli r20, r20, lo16(intvec_feedback) | ||
521 | auli r20, r20, ha16(intvec_feedback) | ||
522 | jalr r20 | ||
523 | |||
524 | /* And now notify the feedback routines that we are here. */ | ||
525 | FEEDBACK_ENTER(\function) | ||
526 | #endif | ||
527 | |||
528 | /* | ||
529 |  * We've now captured enough state on the stack (including in | ||
530 |  * particular our EX_CONTEXT state) that we can release | ||
531 | * the interrupt critical section and replace it with our | ||
532 | * standard "interrupts disabled" mask value. This allows | ||
533 | * synchronous interrupts (and profile interrupts) to punch | ||
534 | * through from this point onwards. | ||
535 | * | ||
536 | * If bit 31 of r3 is set during a non-NMI interrupt, we know we | ||
537 | * are on the path where the hypervisor has punched through our | ||
538 | * ICS with a page fault, so we call out to do_page_fault_ics() | ||
539 |  * to figure out what to do with it. If the fault was in a | ||
540 |  * user-space atomic op, we unlock the atomic lock, adjust the | ||
541 | * saved register state a little, and return "zero" in r4, | ||
542 | * falling through into the normal page-fault interrupt code. | ||
543 | * If the fault was in a kernel-space atomic operation, then | ||
544 | * do_page_fault_ics() resolves it itself, returns "one" in r4, | ||
545 | * and as a result goes directly to restoring registers and iret, | ||
546 | * without trying to adjust the interrupt masks at all. | ||
547 | * The do_page_fault_ics() API involves passing and returning | ||
548 | * a five-word struct (in registers) to avoid writing the | ||
549 | * save and restore code here. | ||
550 | */ | ||
551 | .ifc \function,handle_nmi | ||
552 | IRQ_DISABLE_ALL(r20) | ||
553 | .else | ||
554 | .ifnc \function,handle_syscall | ||
555 | bgezt r3, 1f | ||
556 | { | ||
557 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
558 | jal do_page_fault_ics | ||
559 | } | ||
560 | FEEDBACK_REENTER(\function) | ||
561 | bzt r4, 1f | ||
562 | j .Lrestore_regs | ||
563 | 1: | ||
564 | .endif | ||
565 | IRQ_DISABLE(r20, r21) | ||
566 | .endif | ||
567 | mtspr INTERRUPT_CRITICAL_SECTION, zero | ||
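In C-flavored pseudocode, the dispatch described in the comment above looks roughly like this (a sketch: "handled" stands in for the r4 return value, and the flat argument list is illustrative, since the real call passes and returns a five-word struct in registers as noted):

    if (r3 & (1U << 31)) {                  /* hv punched through our ICS */
            int handled = do_page_fault_ics(regs, fault_num, address, info);
            if (handled)                    /* "one": resolved in-kernel */
                    goto restore_regs;      /* skip mask adjustment, iret */
            /* "zero": fall through to the normal page-fault path */
    }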
568 | |||
569 | #if CHIP_HAS_WH64() | ||
570 | /* | ||
571 | * Prepare the first 256 stack bytes to be rapidly accessible | ||
572 | * without having to fetch the background data. We don't really | ||
573 | * know how far to write-hint, but kernel stacks generally | ||
574 | * aren't that big, and write-hinting here does take some time. | ||
575 | */ | ||
576 | addi r52, sp, -64 | ||
577 | { | ||
578 | wh64 r52 | ||
579 | addi r52, r52, -64 | ||
580 | } | ||
581 | { | ||
582 | wh64 r52 | ||
583 | addi r52, r52, -64 | ||
584 | } | ||
585 | { | ||
586 | wh64 r52 | ||
587 | addi r52, r52, -64 | ||
588 | } | ||
589 | wh64 r52 | ||
590 | #endif | ||
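The four write-hints amount to this loop (a C sketch, treating wh64() as an intrinsic standing in for the instruction):

    char *p;
    for (p = (char *)sp - 64; p >= (char *)sp - 256; p -= 64)
            wh64(p);  /* mark the 64-byte line as all-will-be-written */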
591 | |||
592 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
593 | .ifnc \function,handle_nmi | ||
594 | /* | ||
595 | * We finally have enough state set up to notify the irq | ||
596 | * tracing code that irqs were disabled on entry to the handler. | ||
597 | * The TRACE_IRQS_OFF call clobbers registers r0-r29. | ||
598 | * For syscalls, we already have the register state saved away | ||
599 | * on the stack, so we don't bother to do any register saves here, | ||
600 | * and later we pop the registers back off the kernel stack. | ||
601 | * For interrupt handlers, save r0-r3 in callee-saved registers. | ||
602 | */ | ||
603 | .ifnc \function,handle_syscall | ||
604 | { move r30, r0; move r31, r1 } | ||
605 | { move r32, r2; move r33, r3 } | ||
606 | .endif | ||
607 | TRACE_IRQS_OFF | ||
608 | .ifnc \function,handle_syscall | ||
609 | { move r0, r30; move r1, r31 } | ||
610 | { move r2, r32; move r3, r33 } | ||
611 | .endif | ||
612 | .endif | ||
613 | #endif | ||
614 | |||
615 | .endm | ||
616 | |||
617 | .macro check_single_stepping, kind, not_single_stepping | ||
618 | /* | ||
619 |  * Check whether we are single-stepping at user-level privilege. | ||
620 |  * \kind can be "normal", "ill", or "syscall". | ||
621 |  * If we fall through (no branch taken), the registers hold: | ||
622 | * r29: thread_info->step_state | ||
623 | * r28: &pt_regs->pc | ||
624 | * r27: pt_regs->pc | ||
625 | * r26: thread_info->step_state->buffer | ||
626 | */ | ||
627 | |||
628 | /* Check for single stepping */ | ||
629 | GET_THREAD_INFO(r29) | ||
630 | { | ||
631 | /* Get pointer to field holding step state */ | ||
632 | addi r29, r29, THREAD_INFO_STEP_STATE_OFFSET | ||
633 | |||
634 | /* Get pointer to EX1 in register state */ | ||
635 | PTREGS_PTR(r27, PTREGS_OFFSET_EX1) | ||
636 | } | ||
637 | { | ||
638 | /* Get pointer to field holding PC */ | ||
639 | PTREGS_PTR(r28, PTREGS_OFFSET_PC) | ||
640 | |||
641 | /* Load the pointer to the step state */ | ||
642 | lw r29, r29 | ||
643 | } | ||
644 | /* Load EX1 */ | ||
645 | lw r27, r27 | ||
646 | { | ||
647 | /* Points to flags */ | ||
648 | addi r23, r29, SINGLESTEP_STATE_FLAGS_OFFSET | ||
649 | |||
650 | /* No single stepping if there is no step state structure */ | ||
651 | bzt r29, \not_single_stepping | ||
652 | } | ||
653 | { | ||
654 | /* mask off ICS and any other high bits */ | ||
655 | andi r27, r27, SPR_EX_CONTEXT_1_1__PL_MASK | ||
656 | |||
657 | /* Load pointer to single step instruction buffer */ | ||
658 | lw r26, r29 | ||
659 | } | ||
660 | /* Check priv state */ | ||
661 | bnz r27, \not_single_stepping | ||
662 | |||
663 | /* Get flags */ | ||
664 | lw r22, r23 | ||
665 | { | ||
666 | /* Branch if single-step mode not enabled */ | ||
667 | bbnst r22, \not_single_stepping | ||
668 | |||
669 | /* Clear enabled flag */ | ||
670 | andi r22, r22, ~SINGLESTEP_STATE_MASK_IS_ENABLED | ||
671 | } | ||
672 | .ifc \kind,normal | ||
673 | { | ||
674 | /* Load PC */ | ||
675 | lw r27, r28 | ||
676 | |||
677 | /* Point to the entry containing the original PC */ | ||
678 | addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET | ||
679 | } | ||
680 | { | ||
681 | /* Disable single stepping flag */ | ||
682 | sw r23, r22 | ||
683 | } | ||
684 | { | ||
685 | /* Get the original pc */ | ||
686 | lw r24, r24 | ||
687 | |||
688 | /* See if the PC is at the start of the single step buffer */ | ||
689 | seq r25, r26, r27 | ||
690 | } | ||
691 | /* | ||
692 | * NOTE: it is really expected that the PC be in the single step buffer | ||
693 | * at this point | ||
694 | */ | ||
695 | bzt r25, \not_single_stepping | ||
696 | |||
697 | /* Restore the original PC */ | ||
698 | sw r28, r24 | ||
699 | .else | ||
700 | .ifc \kind,syscall | ||
701 | { | ||
702 | /* Load PC */ | ||
703 | lw r27, r28 | ||
704 | |||
705 | /* Point to the entry containing the next PC */ | ||
706 | addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET | ||
707 | } | ||
708 | { | ||
709 | /* Increment the stopped PC by the bundle size */ | ||
710 | addi r26, r26, 8 | ||
711 | |||
712 | /* Disable single stepping flag */ | ||
713 | sw r23, r22 | ||
714 | } | ||
715 | { | ||
716 | /* Get the next pc */ | ||
717 | lw r24, r24 | ||
718 | |||
719 | /* | ||
720 | * See if the PC is one bundle past the start of the | ||
721 | * single step buffer | ||
722 | */ | ||
723 | seq r25, r26, r27 | ||
724 | } | ||
725 | { | ||
726 | /* | ||
727 | * NOTE: it is really expected that the PC be in the | ||
728 | * single step buffer at this point | ||
729 | */ | ||
730 | bzt r25, \not_single_stepping | ||
731 | } | ||
732 | /* Set to the next PC */ | ||
733 | sw r28, r24 | ||
734 | .else | ||
735 | { | ||
736 | /* Point to 3rd bundle in buffer */ | ||
737 | addi r25, r26, 16 | ||
738 | |||
739 | /* Load PC */ | ||
740 | lw r27, r28 | ||
741 | } | ||
742 | { | ||
743 | /* Disable single stepping flag */ | ||
744 | sw r23, r22 | ||
745 | |||
746 | /* See if the PC is in the single step buffer */ | ||
747 | slte_u r24, r26, r27 | ||
748 | } | ||
749 | { | ||
750 | slte_u r25, r27, r25 | ||
751 | |||
752 | /* | ||
753 | * NOTE: it is really expected that the PC be in the | ||
754 | * single step buffer at this point | ||
755 | */ | ||
756 | bzt r24, \not_single_stepping | ||
757 | } | ||
758 | bzt r25, \not_single_stepping | ||
759 | .endif | ||
760 | .endif | ||
761 | .endm | ||
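The macro's control flow, rendered as a C sketch (field names follow the SINGLESTEP_STATE_*_OFFSET symbols used above; the per-\kind PC fixups are summarized in comments):

    struct single_step_state *s = current_thread_info()->step_state;
    if (s == NULL ||
        (regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) != 0 ||  /* not user PL */
        !(s->flags & SINGLESTEP_STATE_MASK_IS_ENABLED))
            goto not_single_stepping;
    s->flags &= ~SINGLESTEP_STATE_MASK_IS_ENABLED;
    /* "normal":  require pc == s->buffer, then pc = s->orig_pc         */
    /* "syscall": require pc == s->buffer + 8, then pc = s->next_pc     */
    /* "ill":     require s->buffer <= pc < s->buffer + 24; the caller
     *            (handle_ill) picks orig/next/branch_next_pc itself.   */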
762 | |||
763 | /* | ||
764 | * Redispatch a downcall. | ||
765 | */ | ||
766 | .macro dc_dispatch vecnum, vecname | ||
767 | .org (\vecnum << 8) | ||
768 | intvec_\vecname: | ||
769 | j hv_downcall_dispatch | ||
770 | ENDPROC(intvec_\vecname) | ||
771 | .endm | ||
772 | |||
773 | /* | ||
774 | * Common code for most interrupts. The C function we're eventually | ||
775 | * going to is in r0, and the faultnum is in r1; the original | ||
776 | * values for those registers are on the stack. | ||
777 | */ | ||
778 | .pushsection .text.handle_interrupt,"ax" | ||
779 | handle_interrupt: | ||
780 | finish_interrupt_save handle_interrupt | ||
781 | |||
782 | /* | ||
783 |  * Check whether we are single-stepping at user level. If so, | ||
784 |  * we need to restore the PC. | ||
785 | */ | ||
786 | |||
787 | check_single_stepping normal, .Ldispatch_interrupt | ||
788 | .Ldispatch_interrupt: | ||
789 | |||
790 | /* Jump to the C routine; it should enable irqs as soon as possible. */ | ||
791 | { | ||
792 | jalr r0 | ||
793 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
794 | } | ||
795 | FEEDBACK_REENTER(handle_interrupt) | ||
796 | { | ||
797 | movei r30, 0 /* not an NMI */ | ||
798 | j interrupt_return | ||
799 | } | ||
800 | STD_ENDPROC(handle_interrupt) | ||
801 | |||
802 | /* | ||
803 | * This routine takes a boolean in r30 indicating if this is an NMI. | ||
804 | * If so, we also expect a boolean in r31 indicating whether to | ||
805 | * re-enable the oprofile interrupts. | ||
806 | */ | ||
807 | STD_ENTRY(interrupt_return) | ||
808 | /* If we're resuming to kernel space, don't check thread flags. */ | ||
809 | { | ||
810 | bnz r30, .Lrestore_all /* NMIs don't special-case user-space */ | ||
811 | PTREGS_PTR(r29, PTREGS_OFFSET_EX1) | ||
812 | } | ||
813 | lw r29, r29 | ||
814 | andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ | ||
815 | { | ||
816 | bzt r29, .Lresume_userspace | ||
817 | PTREGS_PTR(r29, PTREGS_OFFSET_PC) | ||
818 | } | ||
819 | |||
820 | /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */ | ||
821 | { | ||
822 | lw r28, r29 | ||
823 | moveli r27, lo16(_cpu_idle_nap) | ||
824 | } | ||
825 | { | ||
826 | auli r27, r27, ha16(_cpu_idle_nap) | ||
827 | } | ||
828 | { | ||
829 | seq r27, r27, r28 | ||
830 | } | ||
831 | { | ||
832 | bbns r27, .Lrestore_all | ||
833 | addi r28, r28, 8 | ||
834 | } | ||
835 | sw r29, r28 | ||
836 | j .Lrestore_all | ||
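Equivalently, in C (a sketch; one TILEPro bundle is 8 bytes):

    if (regs->pc == (unsigned long)_cpu_idle_nap)
            regs->pc += 8;  /* step past the nap bundle before resuming */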
837 | |||
838 | .Lresume_userspace: | ||
839 | FEEDBACK_REENTER(interrupt_return) | ||
840 | |||
841 | /* | ||
842 | * Disable interrupts so as to make sure we don't | ||
843 | * miss an interrupt that sets any of the thread flags (like | ||
844 | * need_resched or sigpending) between sampling and the iret. | ||
845 | * Routines like schedule() or do_signal() may re-enable | ||
846 | * interrupts before returning. | ||
847 | */ | ||
848 | IRQ_DISABLE(r20, r21) | ||
849 | TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */ | ||
850 | |||
851 | /* Get base of stack in r32; note r30/31 are used as arguments here. */ | ||
852 | GET_THREAD_INFO(r32) | ||
853 | |||
854 | |||
855 | /* Check to see if there is any work to do before returning to user. */ | ||
856 | { | ||
857 | addi r29, r32, THREAD_INFO_FLAGS_OFFSET | ||
858 | moveli r28, lo16(_TIF_ALLWORK_MASK) | ||
859 | } | ||
860 | { | ||
861 | lw r29, r29 | ||
862 | auli r28, r28, ha16(_TIF_ALLWORK_MASK) | ||
863 | } | ||
864 | and r28, r29, r28 | ||
865 | bnz r28, .Lwork_pending | ||
866 | |||
867 | /* | ||
868 |  * In the NMI case we omit the call to single_process_check_nohz, | ||
869 |  * which normally checks to see if we should start or stop the | ||
870 |  * scheduler tick, because we can't call arbitrary Linux code | ||
871 |  * from an NMI context. | ||
872 | * We always call the homecache TLB deferral code to re-trigger | ||
873 | * the deferral mechanism. | ||
874 | * | ||
875 |  * The other chunk of responsibility this code has is to restore | ||
876 |  * the interrupt masks appropriately for irqs and NMIs. We have | ||
877 | * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the | ||
878 | * lockdep-type stuff, but we can't set ICS until afterwards, since | ||
879 | * ICS can only be used in very tight chunks of code to avoid | ||
880 | * tripping over various assertions that it is off. | ||
881 | * | ||
882 | * (There is what looks like a window of vulnerability here since | ||
883 | * we might take a profile interrupt between the two SPR writes | ||
884 | * that set the mask, but since we write the low SPR word first, | ||
885 | * and our interrupt entry code checks the low SPR word, any | ||
886 | * profile interrupt will actually disable interrupts in both SPRs | ||
887 | * before returning, which is OK.) | ||
888 | */ | ||
889 | .Lrestore_all: | ||
890 | PTREGS_PTR(r0, PTREGS_OFFSET_EX1) | ||
891 | { | ||
892 | lw r0, r0 | ||
893 | PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS) | ||
894 | } | ||
895 | { | ||
896 | andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK | ||
897 | lw r32, r32 | ||
898 | } | ||
899 | bnz r0, 1f | ||
900 | j 2f | ||
901 | #if PT_FLAGS_DISABLE_IRQ != 1 | ||
902 | # error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use bbnst below | ||
903 | #endif | ||
904 | 1: bbnst r32, 2f | ||
905 | IRQ_DISABLE(r20,r21) | ||
906 | TRACE_IRQS_OFF | ||
907 | movei r0, 1 | ||
908 | mtspr INTERRUPT_CRITICAL_SECTION, r0 | ||
909 | bzt r30, .Lrestore_regs | ||
910 | j 3f | ||
911 | 2: TRACE_IRQS_ON | ||
912 | movei r0, 1 | ||
913 | mtspr INTERRUPT_CRITICAL_SECTION, r0 | ||
914 | IRQ_ENABLE(r20, r21) | ||
915 | bzt r30, .Lrestore_regs | ||
916 | 3: | ||
917 | |||
918 | |||
919 | /* | ||
920 | * We now commit to returning from this interrupt, since we will be | ||
921 | * doing things like setting EX_CONTEXT SPRs and unwinding the stack | ||
922 | * frame. No calls should be made to any other code after this point. | ||
923 | * This code should only be entered with ICS set. | ||
924 | * r32 must still be set to ptregs.flags. | ||
925 | * We launch loads to each cache line separately first, so we can | ||
926 | * get some parallelism out of the memory subsystem. | ||
927 | * We start zeroing caller-saved registers throughout, since | ||
928 | * that will save some cycles if this turns out to be a syscall. | ||
929 | */ | ||
930 | .Lrestore_regs: | ||
931 | FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */ | ||
932 | |||
933 | /* | ||
934 | * Rotate so we have one high bit and one low bit to test. | ||
935 | * - low bit says whether to restore all the callee-saved registers, | ||
936 | * or just r30-r33, and r52 up. | ||
937 | * - high bit (i.e. sign bit) says whether to restore all the | ||
938 | * caller-saved registers, or just r0. | ||
939 | */ | ||
940 | #if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4 | ||
941 | # error Rotate trick does not work :-) | ||
942 | #endif | ||
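A C sketch of the rotate trick (variable names are illustrative): with PT_FLAGS_CALLER_SAVES == 2 and PT_FLAGS_RESTORE_REGS == 4, rotating left by 30 lands RESTORE_REGS in bit 0 and CALLER_SAVES in bit 31 (the sign bit):

    uint32_t r20 = (flags << 30) | (flags >> 2);    /* "rli r20, r32, 30" */
    int restore_callees = r20 & 1;                  /* bit 0: "bbs" below */
    int restore_caller_saves = (int32_t)r20 < 0;    /* bit 31: "blzt" below */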
943 | { | ||
944 | rli r20, r32, 30 | ||
945 | PTREGS_PTR(sp, PTREGS_OFFSET_REG(0)) | ||
946 | } | ||
947 | |||
948 | /* | ||
949 | * Load cache lines 0, 2, and 3 in that order, then use | ||
950 | * the last loaded value, which makes it likely that the other | ||
951 | * cache lines have also loaded, at which point we should be | ||
952 | * able to safely read all the remaining words on those cache | ||
953 | * lines without waiting for the memory subsystem. | ||
954 | */ | ||
955 | pop_reg_zero r0, r1, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0) | ||
956 | pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30) | ||
957 | pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC | ||
958 | pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1 | ||
959 | { | ||
960 | mtspr EX_CONTEXT_1_0, r21 | ||
961 | move r5, zero | ||
962 | } | ||
963 | { | ||
964 | mtspr EX_CONTEXT_1_1, lr | ||
965 | andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ | ||
966 | } | ||
967 | |||
968 | /* Restore callee-saveds that we actually use. */ | ||
969 | pop_reg_zero r52, r6, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_REG(52) | ||
970 | pop_reg_zero r31, r7 | ||
971 | pop_reg_zero r32, r8 | ||
972 | pop_reg_zero r33, r9, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33) | ||
973 | |||
974 | /* | ||
975 | * If we modified other callee-saveds, restore them now. | ||
976 | * This is rare, but could be via ptrace or signal handler. | ||
977 | */ | ||
978 | { | ||
979 | move r10, zero | ||
980 | bbs r20, .Lrestore_callees | ||
981 | } | ||
982 | .Lcontinue_restore_regs: | ||
983 | |||
984 | /* Check if we're returning from a syscall. */ | ||
985 | { | ||
986 | move r11, zero | ||
987 |  blzt r20, 1f /* no, so go restore caller-save registers */ | ||
988 | } | ||
989 | |||
990 | /* | ||
991 | * Check if we're returning to userspace. | ||
992 | * Note that if we're not, we don't worry about zeroing everything. | ||
993 | */ | ||
994 | { | ||
995 | addli sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29) | ||
996 | bnz lr, .Lkernel_return | ||
997 | } | ||
998 | |||
999 | /* | ||
1000 | * On return from syscall, we've restored r0 from pt_regs, but we | ||
1001 | * clear the remainder of the caller-saved registers. We could | ||
1002 |  * restore the syscall arguments, but there's not much point; | ||
1003 |  * clearing them ensures user programs aren't relying on stale | ||
1004 |  * caller-save values, and it avoids leaking kernel pointers | ||
1005 |  * into userspace. | ||
1006 | */ | ||
1007 | pop_reg_zero lr, r12, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR | ||
1008 | pop_reg_zero tp, r13, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP | ||
1009 | { | ||
1010 | lw sp, sp | ||
1011 | move r14, zero | ||
1012 | move r15, zero | ||
1013 | } | ||
1014 | { move r16, zero; move r17, zero } | ||
1015 | { move r18, zero; move r19, zero } | ||
1016 | { move r20, zero; move r21, zero } | ||
1017 | { move r22, zero; move r23, zero } | ||
1018 | { move r24, zero; move r25, zero } | ||
1019 | { move r26, zero; move r27, zero } | ||
1020 | { move r28, zero; move r29, zero } | ||
1021 | iret | ||
1022 | |||
1023 | /* | ||
1024 | * Not a syscall, so restore caller-saved registers. | ||
1025 | * First kick off a load for cache line 1, which we're touching | ||
1026 | * for the first time here. | ||
1027 | */ | ||
1028 | .align 64 | ||
1029 | 1: pop_reg r29, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(29) | ||
1030 | pop_reg r1 | ||
1031 | pop_reg r2 | ||
1032 | pop_reg r3 | ||
1033 | pop_reg r4 | ||
1034 | pop_reg r5 | ||
1035 | pop_reg r6 | ||
1036 | pop_reg r7 | ||
1037 | pop_reg r8 | ||
1038 | pop_reg r9 | ||
1039 | pop_reg r10 | ||
1040 | pop_reg r11 | ||
1041 | pop_reg r12 | ||
1042 | pop_reg r13 | ||
1043 | pop_reg r14 | ||
1044 | pop_reg r15 | ||
1045 | pop_reg r16 | ||
1046 | pop_reg r17 | ||
1047 | pop_reg r18 | ||
1048 | pop_reg r19 | ||
1049 | pop_reg r20 | ||
1050 | pop_reg r21 | ||
1051 | pop_reg r22 | ||
1052 | pop_reg r23 | ||
1053 | pop_reg r24 | ||
1054 | pop_reg r25 | ||
1055 | pop_reg r26 | ||
1056 | pop_reg r27 | ||
1057 | pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28) | ||
1058 | /* r29 already restored above */ | ||
1059 | bnz lr, .Lkernel_return | ||
1060 | pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR | ||
1061 | pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP | ||
1062 | lw sp, sp | ||
1063 | iret | ||
1064 | |||
1065 | /* | ||
1066 | * We can't restore tp when in kernel mode, since a thread might | ||
1067 | * have migrated from another cpu and brought a stale tp value. | ||
1068 | */ | ||
1069 | .Lkernel_return: | ||
1070 | pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR | ||
1071 | lw sp, sp | ||
1072 | iret | ||
1073 | |||
1074 | /* Restore callee-saved registers from r34 to r51. */ | ||
1075 | .Lrestore_callees: | ||
1076 | addli sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29) | ||
1077 | pop_reg r34 | ||
1078 | pop_reg r35 | ||
1079 | pop_reg r36 | ||
1080 | pop_reg r37 | ||
1081 | pop_reg r38 | ||
1082 | pop_reg r39 | ||
1083 | pop_reg r40 | ||
1084 | pop_reg r41 | ||
1085 | pop_reg r42 | ||
1086 | pop_reg r43 | ||
1087 | pop_reg r44 | ||
1088 | pop_reg r45 | ||
1089 | pop_reg r46 | ||
1090 | pop_reg r47 | ||
1091 | pop_reg r48 | ||
1092 | pop_reg r49 | ||
1093 | pop_reg r50 | ||
1094 | pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51) | ||
1095 | j .Lcontinue_restore_regs | ||
1096 | |||
1097 | .Lwork_pending: | ||
1098 | /* Mask the reschedule flag */ | ||
1099 | andi r28, r29, _TIF_NEED_RESCHED | ||
1100 | |||
1101 | { | ||
1102 | /* | ||
1103 |  * If the NEED_RESCHED flag is set, we call schedule(), which | ||
1104 | * may drop this context right here and go do something else. | ||
1105 | * On return, jump back to .Lresume_userspace and recheck. | ||
1106 | */ | ||
1107 | bz r28, .Lasync_tlb | ||
1108 | |||
1109 | /* Mask the async-tlb flag */ | ||
1110 | andi r28, r29, _TIF_ASYNC_TLB | ||
1111 | } | ||
1112 | |||
1113 | jal schedule | ||
1114 | FEEDBACK_REENTER(interrupt_return) | ||
1115 | |||
1116 | /* Reload the flags and check again */ | ||
1117 | j .Lresume_userspace | ||
1118 | |||
1119 | .Lasync_tlb: | ||
1120 | { | ||
1121 | bz r28, .Lneed_sigpending | ||
1122 | |||
1123 | /* Mask the sigpending flag */ | ||
1124 | andi r28, r29, _TIF_SIGPENDING | ||
1125 | } | ||
1126 | |||
1127 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
1128 | jal do_async_page_fault | ||
1129 | FEEDBACK_REENTER(interrupt_return) | ||
1130 | |||
1131 | /* | ||
1132 | * Go restart the "resume userspace" process. We may have | ||
1133 | * fired a signal, and we need to disable interrupts again. | ||
1134 | */ | ||
1135 | j .Lresume_userspace | ||
1136 | |||
1137 | .Lneed_sigpending: | ||
1138 | /* | ||
1139 | * At this point we are either doing signal handling or single-step, | ||
1140 | * so either way make sure we have all the registers saved. | ||
1141 | */ | ||
1142 | push_extra_callee_saves r0 | ||
1143 | |||
1144 | { | ||
1145 | /* If no signal pending, skip to singlestep check */ | ||
1146 | bz r28, .Lneed_singlestep | ||
1147 | |||
1148 | /* Mask the singlestep flag */ | ||
1149 | andi r28, r29, _TIF_SINGLESTEP | ||
1150 | } | ||
1151 | |||
1152 | jal do_signal | ||
1153 | FEEDBACK_REENTER(interrupt_return) | ||
1154 | |||
1155 | /* Reload the flags and check again */ | ||
1156 | j .Lresume_userspace | ||
1157 | |||
1158 | .Lneed_singlestep: | ||
1159 | { | ||
1160 | /* Get a pointer to the EX1 field */ | ||
1161 | PTREGS_PTR(r29, PTREGS_OFFSET_EX1) | ||
1162 | |||
1163 | /* If we get here, our bit must be set. */ | ||
1164 | bz r28, .Lwork_confusion | ||
1165 | } | ||
1166 | /* If we are in priv mode, don't single step */ | ||
1167 | lw r28, r29 | ||
1168 | andi r28, r28, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ | ||
1169 | bnz r28, .Lrestore_all | ||
1170 | |||
1171 | /* Allow interrupts within the single step code */ | ||
1172 | TRACE_IRQS_ON /* Note: clobbers registers r0-r29 */ | ||
1173 | IRQ_ENABLE(r20, r21) | ||
1174 | |||
1175 | /* try to single-step the current instruction */ | ||
1176 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
1177 | jal single_step_once | ||
1178 | FEEDBACK_REENTER(interrupt_return) | ||
1179 | |||
1180 | /* Re-disable interrupts. TRACE_IRQS_OFF in .Lrestore_all. */ | ||
1181 | IRQ_DISABLE(r20,r21) | ||
1182 | |||
1183 | j .Lrestore_all | ||
1184 | |||
1185 | .Lwork_confusion: | ||
1186 | move r0, r28 | ||
1187 | panic "thread_info allwork flags unhandled on userspace resume: %#x" | ||
1188 | |||
1189 | STD_ENDPROC(interrupt_return) | ||
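As a C sketch, the .Lwork_pending dispatch chain above reads as follows (the goto labels map onto the assembly labels, "flags" is the thread_info value sampled in .Lresume_userspace, and push_extra_callee_saves() stands in for the assembly macro):

    if (flags & _TIF_NEED_RESCHED) {
            schedule();                     /* may context-switch away */
            goto resume_userspace;          /* re-sample the flags */
    }
    if (flags & _TIF_ASYNC_TLB) {
            do_async_page_fault(regs);
            goto resume_userspace;
    }
    /* signal delivery and single-step both need all registers saved */
    push_extra_callee_saves();
    if (flags & _TIF_SIGPENDING) {
            do_signal(regs);
            goto resume_userspace;
    }
    if (flags & _TIF_SINGLESTEP) {
            if (user_mode(regs))            /* don't single-step PL1 */
                    single_step_once(regs); /* with irqs briefly enabled */
            goto restore_all;
    }
    panic("thread_info allwork flags unhandled on userspace resume: %#x",
          flags);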
1190 | |||
1191 | /* | ||
1192 | * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit | ||
1193 | * before returning, so we can properly get more downcalls. | ||
1194 | */ | ||
1195 | .pushsection .text.handle_interrupt_downcall,"ax" | ||
1196 | handle_interrupt_downcall: | ||
1197 | finish_interrupt_save handle_interrupt_downcall | ||
1198 | check_single_stepping normal, .Ldispatch_downcall | ||
1199 | .Ldispatch_downcall: | ||
1200 | |||
1201 | /* Clear INTCTRL_1 from the set of interrupts we ever enable. */ | ||
1202 | GET_INTERRUPTS_ENABLED_MASK_PTR(r30) | ||
1203 | { | ||
1204 | addi r30, r30, 4 | ||
1205 | movei r31, INT_MASK(INT_INTCTRL_1) | ||
1206 | } | ||
1207 | { | ||
1208 | lw r20, r30 | ||
1209 | nor r21, r31, zero | ||
1210 | } | ||
1211 | and r20, r20, r21 | ||
1212 | sw r30, r20 | ||
1213 | |||
1214 | { | ||
1215 | jalr r0 | ||
1216 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
1217 | } | ||
1218 | FEEDBACK_REENTER(handle_interrupt_downcall) | ||
1219 | |||
1220 | /* Allow INTCTRL_1 to be enabled next time we enable interrupts. */ | ||
1221 | lw r20, r30 | ||
1222 | or r20, r20, r31 | ||
1223 | sw r30, r20 | ||
1224 | |||
1225 | { | ||
1226 | movei r30, 0 /* not an NMI */ | ||
1227 | j interrupt_return | ||
1228 | } | ||
1229 | STD_ENDPROC(handle_interrupt_downcall) | ||
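The bracketing it performs, sketched in C (the layout follows the assembly: INTCTRL_1's bit lives in the high word of the enabled-interrupts mask, hence the "+4" there and the "+1" here; the pointer-accessor name is illustrative):

    unsigned long *mask_hi = get_interrupts_enabled_mask_ptr() + 1;
    *mask_hi &= ~INT_MASK(INT_INTCTRL_1);   /* never re-enable it ...    */
    handler(regs);                          /* ... while the handler runs */
    *mask_hi |= INT_MASK(INT_INTCTRL_1);    /* allow it again afterwards  */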
1230 | |||
1231 | /* | ||
1232 | * Some interrupts don't check for single stepping | ||
1233 | */ | ||
1234 | .pushsection .text.handle_interrupt_no_single_step,"ax" | ||
1235 | handle_interrupt_no_single_step: | ||
1236 | finish_interrupt_save handle_interrupt_no_single_step | ||
1237 | { | ||
1238 | jalr r0 | ||
1239 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
1240 | } | ||
1241 | FEEDBACK_REENTER(handle_interrupt_no_single_step) | ||
1242 | { | ||
1243 | movei r30, 0 /* not an NMI */ | ||
1244 | j interrupt_return | ||
1245 | } | ||
1246 | STD_ENDPROC(handle_interrupt_no_single_step) | ||
1247 | |||
1248 | /* | ||
1249 | * "NMI" interrupts mask ALL interrupts before calling the | ||
1250 | * handler, and don't check thread flags, etc., on the way | ||
1251 | * back out. In general, the only things we do here for NMIs | ||
1252 | * are the register save/restore, fixing the PC if we were | ||
1253 | * doing single step, and the dataplane kernel-TLB management. | ||
1254 | * We don't (for example) deal with start/stop of the sched tick. | ||
1255 | */ | ||
1256 | .pushsection .text.handle_nmi,"ax" | ||
1257 | handle_nmi: | ||
1258 | finish_interrupt_save handle_nmi | ||
1259 | check_single_stepping normal, .Ldispatch_nmi | ||
1260 | .Ldispatch_nmi: | ||
1261 | { | ||
1262 | jalr r0 | ||
1263 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
1264 | } | ||
1265 | FEEDBACK_REENTER(handle_nmi) | ||
1266 | j interrupt_return | ||
1267 | STD_ENDPROC(handle_nmi) | ||
1268 | |||
1269 | /* | ||
1270 | * Parallel code for syscalls to handle_interrupt. | ||
1271 | */ | ||
1272 | .pushsection .text.handle_syscall,"ax" | ||
1273 | handle_syscall: | ||
1274 | finish_interrupt_save handle_syscall | ||
1275 | |||
1276 | /* | ||
1277 |  * Check whether we are single-stepping at user level. If so, | ||
1278 |  * we need to restore the PC. | ||
1279 | */ | ||
1280 | check_single_stepping syscall, .Ldispatch_syscall | ||
1281 | .Ldispatch_syscall: | ||
1282 | |||
1283 | /* Enable irqs. */ | ||
1284 | TRACE_IRQS_ON | ||
1285 | IRQ_ENABLE(r20, r21) | ||
1286 | |||
1287 | /* Bump the counter for syscalls made on this tile. */ | ||
1288 | moveli r20, lo16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET) | ||
1289 | auli r20, r20, ha16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET) | ||
1290 | add r20, r20, tp | ||
1291 | lw r21, r20 | ||
1292 | addi r21, r21, 1 | ||
1293 | sw r20, r21 | ||
1294 | |||
1295 | /* Trace syscalls, if requested. */ | ||
1296 | GET_THREAD_INFO(r31) | ||
1297 | addi r31, r31, THREAD_INFO_FLAGS_OFFSET | ||
1298 | lw r30, r31 | ||
1299 | andi r30, r30, _TIF_SYSCALL_TRACE | ||
1300 | bzt r30, .Lrestore_syscall_regs | ||
1301 | jal do_syscall_trace | ||
1302 | FEEDBACK_REENTER(handle_syscall) | ||
1303 | |||
1304 | /* | ||
1305 | * We always reload our registers from the stack at this | ||
1306 | * point. They might be valid, if we didn't build with | ||
1307 | * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not | ||
1308 | * doing syscall tracing, but there are enough cases now that it | ||
1309 | * seems simplest just to do the reload unconditionally. | ||
1310 | */ | ||
1311 | .Lrestore_syscall_regs: | ||
1312 | PTREGS_PTR(r11, PTREGS_OFFSET_REG(0)) | ||
1313 | pop_reg r0, r11 | ||
1314 | pop_reg r1, r11 | ||
1315 | pop_reg r2, r11 | ||
1316 | pop_reg r3, r11 | ||
1317 | pop_reg r4, r11 | ||
1318 | pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5) | ||
1319 | pop_reg TREG_SYSCALL_NR_NAME, r11 | ||
1320 | |||
1321 | /* Ensure that the syscall number is within the legal range. */ | ||
1322 | moveli r21, __NR_syscalls | ||
1323 | { | ||
1324 | slt_u r21, TREG_SYSCALL_NR_NAME, r21 | ||
1325 | moveli r20, lo16(sys_call_table) | ||
1326 | } | ||
1327 | { | ||
1328 | bbns r21, .Linvalid_syscall | ||
1329 | auli r20, r20, ha16(sys_call_table) | ||
1330 | } | ||
1331 | s2a r20, TREG_SYSCALL_NR_NAME, r20 | ||
1332 | lw r20, r20 | ||
1333 | |||
1334 | /* Jump to syscall handler. */ | ||
1335 | jalr r20; .Lhandle_syscall_link: | ||
1336 | FEEDBACK_REENTER(handle_syscall) | ||
1337 | |||
1338 | /* | ||
1339 | * Write our r0 onto the stack so it gets restored instead | ||
1340 | * of whatever the user had there before. | ||
1341 | */ | ||
1342 | PTREGS_PTR(r29, PTREGS_OFFSET_REG(0)) | ||
1343 | sw r29, r0 | ||
1344 | |||
1345 | /* Do syscall trace again, if requested. */ | ||
1346 | lw r30, r31 | ||
1347 | andi r30, r30, _TIF_SYSCALL_TRACE | ||
1348 | bzt r30, 1f | ||
1349 | jal do_syscall_trace | ||
1350 | FEEDBACK_REENTER(handle_syscall) | ||
1351 | 1: j .Lresume_userspace /* jump into middle of interrupt_return */ | ||
1352 | |||
1353 | .Linvalid_syscall: | ||
1354 | /* Report an invalid syscall back to the user program */ | ||
1355 | { | ||
1356 | PTREGS_PTR(r29, PTREGS_OFFSET_REG(0)) | ||
1357 | movei r28, -ENOSYS | ||
1358 | } | ||
1359 | sw r29, r28 | ||
1360 | j .Lresume_userspace /* jump into middle of interrupt_return */ | ||
1361 | STD_ENDPROC(handle_syscall) | ||
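The number check and table dispatch boil down to this C sketch ("nr" is the value in TREG_SYSCALL_NR_NAME and the six arguments are the r0-r5 values just reloaded from pt_regs):

    if (nr >= __NR_syscalls) {
            regs->regs[0] = -ENOSYS;        /* .Linvalid_syscall */
    } else {
            long (*fn)(long, long, long, long, long, long) = sys_call_table[nr];
            regs->regs[0] = fn(r0, r1, r2, r3, r4, r5);
    }
    /* either way, rejoin interrupt_return at .Lresume_userspace */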
1362 | |||
1363 | /* Return the address for oprofile to suppress in backtraces. */ | ||
1364 | STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall) | ||
1365 | lnk r0 | ||
1366 | { | ||
1367 | addli r0, r0, .Lhandle_syscall_link - . | ||
1368 | jrp lr | ||
1369 | } | ||
1370 | STD_ENDPROC(handle_syscall_link_address) | ||
1371 | |||
1372 | STD_ENTRY(ret_from_fork) | ||
1373 | jal sim_notify_fork | ||
1374 | jal schedule_tail | ||
1375 | FEEDBACK_REENTER(ret_from_fork) | ||
1376 | j .Lresume_userspace /* jump into middle of interrupt_return */ | ||
1377 | STD_ENDPROC(ret_from_fork) | ||
1378 | |||
1379 | /* | ||
1380 | * Code for ill interrupt. | ||
1381 | */ | ||
1382 | .pushsection .text.handle_ill,"ax" | ||
1383 | handle_ill: | ||
1384 | finish_interrupt_save handle_ill | ||
1385 | |||
1386 | /* | ||
1387 |  * Check whether we are single-stepping at user level. If so, | ||
1388 |  * we need to restore the PC. | ||
1389 | */ | ||
1390 | check_single_stepping ill, .Ldispatch_normal_ill | ||
1391 | |||
1392 | { | ||
1393 | /* See if the PC is the 1st bundle in the buffer */ | ||
1394 | seq r25, r27, r26 | ||
1395 | |||
1396 | /* Point to the 2nd bundle in the buffer */ | ||
1397 | addi r26, r26, 8 | ||
1398 | } | ||
1399 | { | ||
1400 | /* Point to the original pc */ | ||
1401 | addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET | ||
1402 | |||
1403 | /* Branch if the PC is the 1st bundle in the buffer */ | ||
1404 | bnz r25, 3f | ||
1405 | } | ||
1406 | { | ||
1407 | /* See if the PC is the 2nd bundle of the buffer */ | ||
1408 | seq r25, r27, r26 | ||
1409 | |||
1410 | /* Set PC to next instruction */ | ||
1411 | addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET | ||
1412 | } | ||
1413 | { | ||
1414 | /* Point to flags */ | ||
1415 | addi r25, r29, SINGLESTEP_STATE_FLAGS_OFFSET | ||
1416 | |||
1417 |  /* Branch (to the third-bundle case) if PC is not in the 2nd bundle */ | ||
1418 | bz r25, 2f | ||
1419 | } | ||
1420 | /* Load flags */ | ||
1421 | lw r25, r25 | ||
1422 | { | ||
1423 | /* | ||
1424 | * Get the offset for the register to restore | ||
1425 | * Note: the lower bound is 2, so we have implicit scaling by 4. | ||
1426 | * No multiplication of the register number by the size of a register | ||
1427 | * is needed. | ||
1428 | */ | ||
1429 | mm r27, r25, zero, SINGLESTEP_STATE_TARGET_LB, \ | ||
1430 | SINGLESTEP_STATE_TARGET_UB | ||
1431 | |||
1432 | /* Mask Rewrite_LR */ | ||
1433 | andi r25, r25, SINGLESTEP_STATE_MASK_UPDATE | ||
1434 | } | ||
1435 | { | ||
1436 | addi r29, r29, SINGLESTEP_STATE_UPDATE_VALUE_OFFSET | ||
1437 | |||
1438 | /* Don't rewrite temp register */ | ||
1439 | bz r25, 3f | ||
1440 | } | ||
1441 | { | ||
1442 | /* Get the temp value */ | ||
1443 | lw r29, r29 | ||
1444 | |||
1445 | /* Point to where the register is stored */ | ||
1446 | add r27, r27, sp | ||
1447 | } | ||
1448 | |||
1449 |  /* Add the C ABI save area size to the register offset */ | ||
1450 | addi r27, r27, C_ABI_SAVE_AREA_SIZE | ||
1451 | |||
1452 | /* Restore the user's register with the temp value */ | ||
1453 | sw r27, r29 | ||
1454 | j 3f | ||
1455 | |||
1456 | 2: | ||
1457 | /* Must be in the third bundle */ | ||
1458 | addi r24, r29, SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET | ||
1459 | |||
1460 | 3: | ||
1461 | /* set PC and continue */ | ||
1462 | lw r26, r24 | ||
1463 | sw r28, r26 | ||
1464 | |||
1465 | /* Clear TIF_SINGLESTEP */ | ||
1466 | GET_THREAD_INFO(r0) | ||
1467 | |||
1468 | addi r1, r0, THREAD_INFO_FLAGS_OFFSET | ||
1469 | { | ||
1470 | lw r2, r1 | ||
1471 | addi r0, r0, THREAD_INFO_TASK_OFFSET /* currently a no-op */ | ||
1472 | } | ||
1473 | andi r2, r2, ~_TIF_SINGLESTEP | ||
1474 | sw r1, r2 | ||
1475 | |||
1476 | /* Issue a sigtrap */ | ||
1477 | { | ||
1478 |  lw r0, r0 /* indirect through thread_info to get the task pointer */ | ||
1479 | addi r1, sp, C_ABI_SAVE_AREA_SIZE /* put ptregs pointer into r1 */ | ||
1480 | move r2, zero /* load error code into r2 */ | ||
1481 | } | ||
1482 | |||
1483 | jal send_sigtrap /* issue a SIGTRAP */ | ||
1484 | FEEDBACK_REENTER(handle_ill) | ||
1485 | j .Lresume_userspace /* jump into middle of interrupt_return */ | ||
1486 | |||
1487 | .Ldispatch_normal_ill: | ||
1488 | { | ||
1489 | jalr r0 | ||
1490 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
1491 | } | ||
1492 | FEEDBACK_REENTER(handle_ill) | ||
1493 | { | ||
1494 | movei r30, 0 /* not an NMI */ | ||
1495 | j interrupt_return | ||
1496 | } | ||
1497 | STD_ENDPROC(handle_ill) | ||
1498 | |||
1499 | .pushsection .rodata, "a" | ||
1500 | .align 8 | ||
1501 | bpt_code: | ||
1502 | bpt | ||
1503 | ENDPROC(bpt_code) | ||
1504 | .popsection | ||
1505 | |||
1506 | /* Various stub interrupt handlers and syscall handlers */ | ||
1507 | |||
1508 | STD_ENTRY_LOCAL(_kernel_double_fault) | ||
1509 | mfspr r1, EX_CONTEXT_1_0 | ||
1510 | move r2, lr | ||
1511 | move r3, sp | ||
1512 | move r4, r52 | ||
1513 | addi sp, sp, -C_ABI_SAVE_AREA_SIZE | ||
1514 | j kernel_double_fault | ||
1515 | STD_ENDPROC(_kernel_double_fault) | ||
1516 | |||
1517 | STD_ENTRY_LOCAL(bad_intr) | ||
1518 | mfspr r2, EX_CONTEXT_1_0 | ||
1519 | panic "Unhandled interrupt %#x: PC %#lx" | ||
1520 | STD_ENDPROC(bad_intr) | ||
1521 | |||
1522 | /* Put address of pt_regs in reg and jump. */ | ||
1523 | #define PTREGS_SYSCALL(x, reg) \ | ||
1524 | STD_ENTRY(x); \ | ||
1525 | { \ | ||
1526 | PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \ | ||
1527 | j _##x \ | ||
1528 | }; \ | ||
1529 | STD_ENDPROC(x) | ||
1530 | |||
1531 | PTREGS_SYSCALL(sys_execve, r3) | ||
1532 | PTREGS_SYSCALL(sys_sigaltstack, r2) | ||
1533 | PTREGS_SYSCALL(sys_rt_sigreturn, r0) | ||
1534 | |||
1535 | /* Save additional callee-saves to pt_regs, put address in reg and jump. */ | ||
1536 | #define PTREGS_SYSCALL_ALL_REGS(x, reg) \ | ||
1537 | STD_ENTRY(x); \ | ||
1538 | push_extra_callee_saves reg; \ | ||
1539 | j _##x; \ | ||
1540 | STD_ENDPROC(x) | ||
1541 | |||
1542 | PTREGS_SYSCALL_ALL_REGS(sys_fork, r0) | ||
1543 | PTREGS_SYSCALL_ALL_REGS(sys_vfork, r0) | ||
1544 | PTREGS_SYSCALL_ALL_REGS(sys_clone, r4) | ||
1545 | PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1) | ||
1546 | |||
1547 | /* | ||
1548 | * This entrypoint is taken for the cmpxchg and atomic_update fast | ||
1549 | * swints. We may wish to generalize it to other fast swints at some | ||
1550 | * point, but for now there are just two very similar ones, which | ||
1551 | * makes it faster. | ||
1552 | * | ||
1553 | * The fast swint code is designed to have a small footprint. It does | ||
1554 | * not save or restore any GPRs, counting on the caller-save registers | ||
1555 | * to be available to it on entry. It does not modify any callee-save | ||
1556 | * registers (including "lr"). It does not check what PL it is being | ||
1557 | * called at, so you'd better not call it other than at PL0. | ||
1558 | * | ||
1559 | * It does not use the stack, but since it might be re-interrupted by | ||
1560 | * a page fault which would assume the stack was valid, it does | ||
1561 | * save/restore the stack pointer and zero it out to make sure it gets reset. | ||
1562 | * Since we always keep interrupts disabled, the hypervisor won't | ||
1563 | * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them | ||
1564 | * (other than to advance the PC on return). | ||
1565 | * | ||
1566 | * We have to manually validate the user vs kernel address range | ||
1567 | * (since at PL1 we can read/write both), and for performance reasons | ||
1568 | * we don't allow cmpxchg on the fc000000 memory region, since we only | ||
1569 | * validate that the user address is below PAGE_OFFSET. | ||
1570 | * | ||
1571 | * We place it in the __HEAD section to ensure it is relatively | ||
1572 | * near to the intvec_SWINT_1 code (reachable by a conditional branch). | ||
1573 | * | ||
1574 | * Must match register usage in do_page_fault(). | ||
1575 | */ | ||
1576 | __HEAD | ||
1577 | .align 64 | ||
1578 | /* Pad so that the much later jump below lands at the start of a cache line. */ | ||
1579 | #if !ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
1580 | nop; nop | ||
1581 | #endif | ||
1582 | ENTRY(sys_cmpxchg) | ||
1583 | |||
1584 | /* | ||
1585 | * Save "sp" and set it zero for any possible page fault. | ||
1586 | * | ||
1587 | * HACK: We want to both zero sp and check r0's alignment, | ||
1588 | * so we do both at once. If "sp" becomes nonzero we | ||
1589 | * know r0 is unaligned and branch to the error handler that | ||
1590 | * restores sp, so this is OK. | ||
1591 | * | ||
1592 | * ICS is disabled right now so having a garbage but nonzero | ||
1593 | * sp is OK, since we won't execute any faulting instructions | ||
1594 | * when it is nonzero. | ||
1595 | */ | ||
1596 | { | ||
1597 | move r27, sp | ||
1598 | andi sp, r0, 3 | ||
1599 | } | ||
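In C terms, that bundle does two jobs at once (a sketch; "saved_sp" and "addr" are illustrative names for r27 and r0):

    saved_sp = sp;          /* r27: restored on any error exit */
    sp = addr & 3;          /* 0 if r0 is word-aligned; a nonzero sp both
                               flags the error and is harmless, since no
                               faulting instruction executes meanwhile */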
1600 | |||
1601 | /* | ||
1602 | * Get the lock address in ATOMIC_LOCK_REG, and also validate that the | ||
1603 | * address is less than PAGE_OFFSET, since that won't trap at PL1. | ||
1604 | * We only use bits less than PAGE_SHIFT to avoid having to worry | ||
1605 | * about aliasing among multiple mappings of the same physical page, | ||
1606 | * and we ignore the low 3 bits so we have one lock that covers | ||
1607 | * both a cmpxchg64() and a cmpxchg() on either its low or high word. | ||
1608 | * NOTE: this code must match __atomic_hashed_lock() in lib/atomic.c. | ||
1609 | */ | ||
1610 | |||
1611 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
1612 | { | ||
1613 | /* Check for unaligned input. */ | ||
1614 | bnz sp, .Lcmpxchg_badaddr | ||
1615 | mm r25, r0, zero, 3, PAGE_SHIFT-1 | ||
1616 | } | ||
1617 | { | ||
1618 | crc32_32 r25, zero, r25 | ||
1619 | moveli r21, lo16(atomic_lock_ptr) | ||
1620 | } | ||
1621 | { | ||
1622 | auli r21, r21, ha16(atomic_lock_ptr) | ||
1623 | auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */ | ||
1624 | } | ||
1625 | { | ||
1626 | shri r20, r25, 32 - ATOMIC_HASH_L1_SHIFT | ||
1627 | slt_u r23, r0, r23 | ||
1628 | |||
1629 | /* | ||
1630 | * Ensure that the TLB is loaded before we take out the lock. | ||
1631 | * On TILEPro, this will start fetching the value all the way | ||
1632 | * into our L1 as well (and if it gets modified before we | ||
1633 | * grab the lock, it will be invalidated from our cache | ||
1634 | * before we reload it). On tile64, we'll start fetching it | ||
1635 | * into our L1 if we're the home, and if we're not, we'll | ||
1636 | * still at least start fetching it into the home's L2. | ||
1637 | */ | ||
1638 | lw r26, r0 | ||
1639 | } | ||
1640 | { | ||
1641 | s2a r21, r20, r21 | ||
1642 | bbns r23, .Lcmpxchg_badaddr | ||
1643 | } | ||
1644 | { | ||
1645 | lw r21, r21 | ||
1646 | seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64 | ||
1647 | andi r25, r25, ATOMIC_HASH_L2_SIZE - 1 | ||
1648 | } | ||
1649 | { | ||
1650 | /* Branch away at this point if we're doing a 64-bit cmpxchg. */ | ||
1651 | bbs r23, .Lcmpxchg64 | ||
1652 | andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */ | ||
1653 | } | ||
1654 | |||
1655 | { | ||
1656 | /* | ||
1657 | * We very carefully align the code that actually runs with | ||
1658 | * the lock held (nine bundles) so that we know it is all in | ||
1659 | * the icache when we start. This instruction (the jump) is | ||
1660 | * at the start of the first cache line, address zero mod 64; | ||
1661 | * we jump to somewhere in the second cache line to issue the | ||
1662 | * tns, then jump back to finish up. | ||
1663 | */ | ||
1664 | s2a ATOMIC_LOCK_REG_NAME, r25, r21 | ||
1665 | j .Lcmpxchg32_tns | ||
1666 | } | ||
1667 | |||
1668 | #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
1669 | { | ||
1670 | /* Check for unaligned input. */ | ||
1671 | bnz sp, .Lcmpxchg_badaddr | ||
1672 | auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */ | ||
1673 | } | ||
1674 | { | ||
1675 | /* | ||
1676 | * Slide bits into position for 'mm'. We want to ignore | ||
1677 | * the low 3 bits of r0, and consider only the next | ||
1678 | * ATOMIC_HASH_SHIFT bits. | ||
1679 | * Because of C pointer arithmetic, we want to compute this: | ||
1680 | * | ||
1681 | * ((char*)atomic_locks + | ||
1682 |  *  (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2)) | ||
1683 | * | ||
1684 | * Instead of two shifts we just ">> 1", and use 'mm' | ||
1685 | * to ignore the low and high bits we don't want. | ||
1686 | */ | ||
1687 | shri r25, r0, 1 | ||
1688 | |||
1689 | slt_u r23, r0, r23 | ||
1690 | |||
1691 | /* | ||
1692 | * Ensure that the TLB is loaded before we take out the lock. | ||
1693 | * On tilepro, this will start fetching the value all the way | ||
1694 | * into our L1 as well (and if it gets modified before we | ||
1695 | * grab the lock, it will be invalidated from our cache | ||
1696 | * before we reload it). On tile64, we'll start fetching it | ||
1697 | * into our L1 if we're the home, and if we're not, we'll | ||
1698 | * still at least start fetching it into the home's L2. | ||
1699 | */ | ||
1700 | lw r26, r0 | ||
1701 | } | ||
1702 | { | ||
1703 | /* atomic_locks is page aligned so this suffices to get its addr. */ | ||
1704 | auli r21, zero, hi16(atomic_locks) | ||
1705 | |||
1706 | bbns r23, .Lcmpxchg_badaddr | ||
1707 | } | ||
1708 | { | ||
1709 | /* | ||
1710 | * Insert the hash bits into the page-aligned pointer. | ||
1711 | * ATOMIC_HASH_SHIFT is so big that we don't actually hash | ||
1712 | * the unmasked address bits, as that may cause unnecessary | ||
1713 | * collisions. | ||
1714 | */ | ||
1715 | mm ATOMIC_LOCK_REG_NAME, r25, r21, 2, (ATOMIC_HASH_SHIFT + 2) - 1 | ||
1716 | |||
1717 | seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64 | ||
1718 | } | ||
1719 | { | ||
1720 | /* Branch away at this point if we're doing a 64-bit cmpxchg. */ | ||
1721 | bbs r23, .Lcmpxchg64 | ||
1722 | andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */ | ||
1723 | } | ||
1724 | { | ||
1725 | /* | ||
1726 | * We very carefully align the code that actually runs with | ||
1727 | * the lock held (nine bundles) so that we know it is all in | ||
1728 | * the icache when we start. This instruction (the jump) is | ||
1729 | * at the start of the first cache line, address zero mod 64; | ||
1730 | * we jump to somewhere in the second cache line to issue the | ||
1731 | * tns, then jump back to finish up. | ||
1732 | */ | ||
1733 | j .Lcmpxchg32_tns | ||
1734 | } | ||
1735 | |||
1736 | #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
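Either way, the lock address reduces to this hash (a C sketch of the non-table variant; as the comment above notes, it must stay in sync with __atomic_hashed_lock() in lib/atomic.c):

    int *lock = (int *)((char *)atomic_locks +
                        (((addr >> 3) & ((1UL << ATOMIC_HASH_SHIFT) - 1)) << 2));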
1737 | |||
1738 | ENTRY(__sys_cmpxchg_grab_lock) | ||
1739 | |||
1740 | /* | ||
1741 | * Perform the actual cmpxchg or atomic_update. | ||
1742 | * Note that __futex_mark_unlocked() in uClibc relies on | ||
1743 | * atomic_update() to always perform an "mf", so don't make | ||
1744 | * it optional or conditional without modifying that code. | ||
1745 | */ | ||
1746 | .Ldo_cmpxchg32: | ||
1747 | { | ||
1748 | lw r21, r0 | ||
1749 | seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_atomic_update | ||
1750 | move r24, r2 | ||
1751 | } | ||
1752 | { | ||
1753 | seq r22, r21, r1 /* See if cmpxchg matches. */ | ||
1754 | and r25, r21, r1 /* If atomic_update, compute (*mem & mask) */ | ||
1755 | } | ||
1756 | { | ||
1757 | or r22, r22, r23 /* Skip compare branch for atomic_update. */ | ||
1758 | add r25, r25, r2 /* Compute (*mem & mask) + addend. */ | ||
1759 | } | ||
1760 | { | ||
1761 | mvnz r24, r23, r25 /* Use atomic_update value if appropriate. */ | ||
1762 | bbns r22, .Lcmpxchg32_mismatch | ||
1763 | } | ||
1764 | sw r0, r24 | ||
1765 | |||
1766 | /* Do slow mtspr here so the following "mf" waits less. */ | ||
1767 | { | ||
1768 | move sp, r27 | ||
1769 | mtspr EX_CONTEXT_1_0, r28 | ||
1770 | } | ||
1771 | mf | ||
1772 | |||
1773 | /* The following instruction is the start of the second cache line. */ | ||
1774 | { | ||
1775 | move r0, r21 | ||
1776 | sw ATOMIC_LOCK_REG_NAME, zero | ||
1777 | } | ||
1778 | iret | ||
1779 | |||
1780 | /* Duplicated code here in the case where we don't overlap "mf" */ | ||
1781 | .Lcmpxchg32_mismatch: | ||
1782 | { | ||
1783 | move r0, r21 | ||
1784 | sw ATOMIC_LOCK_REG_NAME, zero | ||
1785 | } | ||
1786 | { | ||
1787 | move sp, r27 | ||
1788 | mtspr EX_CONTEXT_1_0, r28 | ||
1789 | } | ||
1790 | iret | ||
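Summarizing the read-modify-write above in C (a sketch; r1 doubles as the cmpxchg compare value or the atomic_update mask, r2 as the new value or the addend):

    static int fast_op32(int *addr, int r1, int r2, int is_atomic_update)
    {
            int old = *addr;
            int new = r2;
            if (is_atomic_update)
                    new = (old & r1) + r2;
            else if (old != r1)
                    return old;     /* mismatch: no store, just unlock */
            *addr = new;            /* followed by "mf", unlock, iret */
            return old;             /* the old value goes back in r0 */
    }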
1791 | |||
1792 | /* | ||
1793 | * The locking code is the same for 32-bit cmpxchg/atomic_update, | ||
1794 | * and for 64-bit cmpxchg. We provide it as a macro and put | ||
1795 | * it into both versions. We can't share the code literally | ||
1796 | * since it depends on having the right branch-back address. | ||
1797 | * Note that the first few instructions should share the cache | ||
1798 | * line with the second half of the actual locked code. | ||
1799 | */ | ||
1800 | .macro cmpxchg_lock, bitwidth | ||
1801 | |||
1802 | /* Lock; if we succeed, jump back up to the read-modify-write. */ | ||
1803 | #ifdef CONFIG_SMP | ||
1804 | tns r21, ATOMIC_LOCK_REG_NAME | ||
1805 | #else | ||
1806 | /* | ||
1807 | * Non-SMP preserves all the lock infrastructure, to keep the | ||
1808 | * code simpler for the interesting (SMP) case. However, we do | ||
1809 | * one small optimization here and in atomic_asm.S, which is | ||
1810 | * to fake out acquiring the actual lock in the atomic_lock table. | ||
1811 | */ | ||
1812 | movei r21, 0 | ||
1813 | #endif | ||
1814 | |||
1815 | /* Issue the slow SPR here while the tns result is in flight. */ | ||
1816 | mfspr r28, EX_CONTEXT_1_0 | ||
1817 | |||
1818 | { | ||
1819 | addi r28, r28, 8 /* return to the instruction after the swint1 */ | ||
1820 | bzt r21, .Ldo_cmpxchg\bitwidth | ||
1821 | } | ||
1822 | /* | ||
1823 | * The preceding instruction is the last thing that must be | ||
1824 | * on the second cache line. | ||
1825 | */ | ||
1826 | |||
1827 | #ifdef CONFIG_SMP | ||
1828 | /* | ||
1829 | * We failed to acquire the tns lock on our first try. Now use | ||
1830 | * bounded exponential backoff to retry, like __atomic_spinlock(). | ||
1831 | */ | ||
1832 | { | ||
1833 | moveli r23, 2048 /* maximum backoff time in cycles */ | ||
1834 | moveli r25, 32 /* starting backoff time in cycles */ | ||
1835 | } | ||
1836 | 1: mfspr r26, CYCLE_LOW /* get start point for this backoff */ | ||
1837 | 2: mfspr r22, CYCLE_LOW /* test to see if we've backed off enough */ | ||
1838 | sub r22, r22, r26 | ||
1839 | slt r22, r22, r25 | ||
1840 | bbst r22, 2b | ||
1841 | { | ||
1842 | shli r25, r25, 1 /* double the backoff; retry the tns */ | ||
1843 | tns r21, ATOMIC_LOCK_REG_NAME | ||
1844 | } | ||
1845 | slt r26, r23, r25 /* is the proposed backoff too big? */ | ||
1846 | { | ||
1847 | mvnz r25, r26, r23 | ||
1848 | bzt r21, .Ldo_cmpxchg\bitwidth | ||
1849 | } | ||
1850 | j 1b | ||
1851 | #endif /* CONFIG_SMP */ | ||
1852 | .endm | ||
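The SMP retry path corresponds to this C sketch (tns() and get_cycles() stand in for the tns instruction and the CYCLE_LOW SPR):

    unsigned int backoff = 32;              /* starting backoff, in cycles */
    while (tns(lock) != 0) {                /* nonzero: lock already held */
            unsigned int start = get_cycles();
            while (get_cycles() - start < backoff)
                    ;                       /* spin out the backoff window */
            if (backoff < 2048)
                    backoff *= 2;           /* bounded exponential growth */
    }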
1853 | |||
1854 | .Lcmpxchg32_tns: | ||
1855 | cmpxchg_lock 32 | ||
1856 | |||
1857 | /* | ||
1858 | * This code is invoked from sys_cmpxchg after most of the | ||
1859 | * preconditions have been checked. We still need to check | ||
1860 | * that r0 is 8-byte aligned, since if it's not we won't | ||
1861 | * actually be atomic. However, ATOMIC_LOCK_REG has the atomic | ||
1862 | * lock pointer and r27/r28 have the saved SP/PC. | ||
1863 | * r23 is holding "r0 & 7" so we can test for alignment. | ||
1864 | * The compare value is in r2/r3; the new value is in r4/r5. | ||
1865 | * On return, we must put the old value in r0/r1. | ||
1866 | */ | ||
1867 | .align 64 | ||
1868 | .Lcmpxchg64: | ||
1869 | { | ||
1870 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
1871 | s2a ATOMIC_LOCK_REG_NAME, r25, r21 | ||
1872 | #endif | ||
1873 | bzt r23, .Lcmpxchg64_tns | ||
1874 | } | ||
1875 | j .Lcmpxchg_badaddr | ||
1876 | |||
1877 | .Ldo_cmpxchg64: | ||
1878 | { | ||
1879 | lw r21, r0 | ||
1880 | addi r25, r0, 4 | ||
1881 | } | ||
1882 | { | ||
1883 | lw r1, r25 | ||
1884 | } | ||
1885 | seq r26, r21, r2 | ||
1886 | { | ||
1887 | bz r26, .Lcmpxchg64_mismatch | ||
1888 | seq r26, r1, r3 | ||
1889 | } | ||
1890 | { | ||
1891 | bz r26, .Lcmpxchg64_mismatch | ||
1892 | } | ||
1893 | sw r0, r4 | ||
1894 | sw r25, r5 | ||
1895 | |||
1896 | /* | ||
1897 | * The 32-bit path provides optimized "match" and "mismatch" | ||
1898 | * iret paths, but we don't have enough bundles in this cache line | ||
1899 | * to do that, so we just make even the "mismatch" path do an "mf". | ||
1900 | */ | ||
1901 | .Lcmpxchg64_mismatch: | ||
1902 | { | ||
1903 | move sp, r27 | ||
1904 | mtspr EX_CONTEXT_1_0, r28 | ||
1905 | } | ||
1906 | mf | ||
1907 | { | ||
1908 | move r0, r21 | ||
1909 | sw ATOMIC_LOCK_REG_NAME, zero | ||
1910 | } | ||
1911 | iret | ||
1912 | |||
1913 | .Lcmpxchg64_tns: | ||
1914 | cmpxchg_lock 64 | ||
1915 | |||
1916 | |||
1917 | /* | ||
1918 | * Reset sp and revector to sys_cmpxchg_badaddr(), which will | ||
1919 | * just raise the appropriate signal and exit. Doing it this | ||
1920 | * way means we don't have to duplicate the code in intvec.S's | ||
1921 | * int_hand macro that locates the top of the stack. | ||
1922 | */ | ||
1923 | .Lcmpxchg_badaddr: | ||
1924 | { | ||
1925 | moveli TREG_SYSCALL_NR_NAME, __NR_cmpxchg_badaddr | ||
1926 | move sp, r27 | ||
1927 | } | ||
1928 | j intvec_SWINT_1 | ||
1929 | ENDPROC(sys_cmpxchg) | ||
1930 | ENTRY(__sys_cmpxchg_end) | ||
1931 | |||
1932 | |||
1933 | /* The single-step support may need to read all the registers. */ | ||
1934 | int_unalign: | ||
1935 | push_extra_callee_saves r0 | ||
1936 | j do_trap | ||
1937 | |||
1938 | /* Include .intrpt1 array of interrupt vectors */ | ||
1939 | .section ".intrpt1", "ax" | ||
1940 | |||
1941 | #define op_handle_perf_interrupt bad_intr | ||
1942 | #define op_handle_aux_perf_interrupt bad_intr | ||
1943 | |||
1944 | #ifndef CONFIG_HARDWALL | ||
1945 | #define do_hardwall_trap bad_intr | ||
1946 | #endif | ||
1947 | |||
1948 | int_hand INT_ITLB_MISS, ITLB_MISS, \ | ||
1949 | do_page_fault, handle_interrupt_no_single_step | ||
1950 | int_hand INT_MEM_ERROR, MEM_ERROR, bad_intr | ||
1951 | int_hand INT_ILL, ILL, do_trap, handle_ill | ||
1952 | int_hand INT_GPV, GPV, do_trap | ||
1953 | int_hand INT_SN_ACCESS, SN_ACCESS, do_trap | ||
1954 | int_hand INT_IDN_ACCESS, IDN_ACCESS, do_trap | ||
1955 | int_hand INT_UDN_ACCESS, UDN_ACCESS, do_trap | ||
1956 | int_hand INT_IDN_REFILL, IDN_REFILL, bad_intr | ||
1957 | int_hand INT_UDN_REFILL, UDN_REFILL, bad_intr | ||
1958 | int_hand INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr | ||
1959 | int_hand INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr | ||
1960 | int_hand INT_SWINT_3, SWINT_3, do_trap | ||
1961 | int_hand INT_SWINT_2, SWINT_2, do_trap | ||
1962 | int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall | ||
1963 | int_hand INT_SWINT_0, SWINT_0, do_trap | ||
1964 | int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign | ||
1965 | int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault | ||
1966 | int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault | ||
1967 | int_hand INT_DMATLB_MISS, DMATLB_MISS, do_page_fault | ||
1968 | int_hand INT_DMATLB_ACCESS, DMATLB_ACCESS, do_page_fault | ||
1969 | int_hand INT_SNITLB_MISS, SNITLB_MISS, do_page_fault | ||
1970 | int_hand INT_SN_NOTIFY, SN_NOTIFY, bad_intr | ||
1971 | int_hand INT_SN_FIREWALL, SN_FIREWALL, do_hardwall_trap | ||
1972 | int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr | ||
1973 | int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap | ||
1974 | int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt | ||
1975 | int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr | ||
1976 | int_hand INT_UDN_TIMER, UDN_TIMER, bad_intr | ||
1977 | int_hand INT_DMA_NOTIFY, DMA_NOTIFY, bad_intr | ||
1978 | int_hand INT_IDN_CA, IDN_CA, bad_intr | ||
1979 | int_hand INT_UDN_CA, UDN_CA, bad_intr | ||
1980 | int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr | ||
1981 | int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr | ||
1982 | int_hand INT_PERF_COUNT, PERF_COUNT, \ | ||
1983 | op_handle_perf_interrupt, handle_nmi | ||
1984 | int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr | ||
1985 | int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr | ||
1986 | dc_dispatch INT_INTCTRL_1, INTCTRL_1 | ||
1987 | int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr | ||
1988 | int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \ | ||
1989 | hv_message_intr, handle_interrupt_downcall | ||
1990 | int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \ | ||
1991 | tile_dev_intr, handle_interrupt_downcall | ||
1992 | int_hand INT_I_ASID, I_ASID, bad_intr | ||
1993 | int_hand INT_D_ASID, D_ASID, bad_intr | ||
1994 | int_hand INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \ | ||
1995 | do_page_fault, handle_interrupt_downcall | ||
1996 | int_hand INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \ | ||
1997 | do_page_fault, handle_interrupt_downcall | ||
1998 | int_hand INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \ | ||
1999 | do_page_fault, handle_interrupt_downcall | ||
2000 | int_hand INT_SN_CPL, SN_CPL, bad_intr | ||
2001 | int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap | ||
2002 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
2003 | int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \ | ||
2004 | op_handle_aux_perf_interrupt, handle_nmi | ||
2005 | #endif | ||
2006 | |||
2007 | /* Synthetic interrupt delivered only by the simulator */ | ||
2008 | int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint | ||
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c new file mode 100644 index 000000000000..596c60086930 --- /dev/null +++ b/arch/tile/kernel/irq.c | |||
@@ -0,0 +1,334 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/irq.h> | ||
19 | #include <linux/kernel_stat.h> | ||
20 | #include <linux/uaccess.h> | ||
21 | #include <hv/drv_pcie_rc_intf.h> | ||
22 | #include <arch/spr_def.h> | ||
23 | #include <asm/traps.h> | ||
24 | |||
25 | /* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */ | ||
26 | #define IS_HW_CLEARED 1 | ||
27 | |||
28 | /* | ||
29 | * The set of interrupts we enable for raw_local_irq_enable(). | ||
30 | * This is initialized to contain a single sentinel interrupt that | ||
31 | * the kernel doesn't actually use. During kernel init, | ||
32 | * interrupts are added as the kernel gets prepared to support them. | ||
33 | * NOTE: we could probably initialize them all statically up front. | ||
34 | */ | ||
35 | DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) = | ||
36 | INITIAL_INTERRUPTS_ENABLED; | ||
37 | EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask); | ||
38 | |||
39 | /* Define per-tile device interrupt statistics state. */ | ||
40 | DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp; | ||
41 | EXPORT_PER_CPU_SYMBOL(irq_stat); | ||
42 | |||
43 | /* | ||
44 | * Define per-tile irq disable mask; the hardware/HV only has a single | ||
45 | * mask that we use to implement both masking and disabling. | ||
46 | */ | ||
47 | static DEFINE_PER_CPU(unsigned long, irq_disable_mask) | ||
48 | ____cacheline_internodealigned_in_smp; | ||
49 | |||
50 | /* | ||
51 | * Per-tile IRQ nesting depth. Used to make sure that IRQs enabled | ||
52 | * while nested are only unmasked when the outermost interrupt exits. | ||
53 | */ | ||
54 | static DEFINE_PER_CPU(int, irq_depth); | ||
55 | |||
56 | /* State for allocating IRQs on Gx. */ | ||
57 | #if CHIP_HAS_IPI() | ||
58 | static unsigned long available_irqs = ~(1UL << IRQ_RESCHEDULE); | ||
59 | static DEFINE_SPINLOCK(available_irqs_lock); | ||
60 | #endif | ||
61 | |||
62 | #if CHIP_HAS_IPI() | ||
63 | /* Use SPRs to manipulate device interrupts. */ | ||
64 | #define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask) | ||
65 | #define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask) | ||
66 | #define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask) | ||
67 | #else | ||
68 | /* Use HV to manipulate device interrupts. */ | ||
69 | #define mask_irqs(irq_mask) hv_disable_intr(irq_mask) | ||
70 | #define unmask_irqs(irq_mask) hv_enable_intr(irq_mask) | ||
71 | #define clear_irqs(irq_mask) hv_clear_intr(irq_mask) | ||
72 | #endif | ||
73 | |||
74 | /* | ||
75 | * The interrupt handling path, implemented in terms of HV interrupt | ||
76 | * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx. | ||
77 | */ | ||
78 | void tile_dev_intr(struct pt_regs *regs, int intnum) | ||
79 | { | ||
80 | int depth = __get_cpu_var(irq_depth)++; | ||
81 | unsigned long original_irqs; | ||
82 | unsigned long remaining_irqs; | ||
83 | struct pt_regs *old_regs; | ||
84 | |||
85 | #if CHIP_HAS_IPI() | ||
86 | /* | ||
87 | * Pending interrupts are listed in an SPR. We might be | ||
88 | * nested, so be sure to only handle irqs that weren't already | ||
89 | * masked by a previous interrupt. Then, mask out the ones | ||
90 | * we're going to handle. | ||
91 | */ | ||
92 | unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1); | ||
93 | original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked; | ||
94 | __insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs); | ||
95 | #else | ||
96 | /* | ||
97 | * Hypervisor performs the equivalent of the Gx code above and | ||
98 | * then puts the pending interrupt mask into a system save reg | ||
99 | * for us to find. | ||
100 | */ | ||
101 | original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3); | ||
102 | #endif | ||
103 | remaining_irqs = original_irqs; | ||
104 | |||
105 | /* Track time spent here in an interrupt context. */ | ||
106 | old_regs = set_irq_regs(regs); | ||
107 | irq_enter(); | ||
108 | |||
109 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | ||
110 | /* Debugging check for stack overflow: less than 1/8th stack free? */ | ||
111 | { | ||
112 | long sp = stack_pointer - (long) current_thread_info(); | ||
113 | if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { | ||
114 | pr_emerg("tile_dev_intr: " | ||
115 | "stack overflow: %ld\n", | ||
116 | sp - sizeof(struct thread_info)); | ||
117 | dump_stack(); | ||
118 | } | ||
119 | } | ||
120 | #endif | ||
121 | while (remaining_irqs) { | ||
122 | unsigned long irq = __ffs(remaining_irqs); | ||
123 | remaining_irqs &= ~(1UL << irq); | ||
124 | |||
125 | /* Count device irqs; Linux IPIs are counted elsewhere. */ | ||
126 | if (irq != IRQ_RESCHEDULE) | ||
127 | __get_cpu_var(irq_stat).irq_dev_intr_count++; | ||
128 | |||
129 | generic_handle_irq(irq); | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * If we weren't nested, turn on all enabled interrupts, | ||
134 | * including any that were re-enabled during interrupt | ||
135 | * handling. | ||
136 | */ | ||
137 | if (depth == 0) | ||
138 | unmask_irqs(~__get_cpu_var(irq_disable_mask)); | ||
139 | |||
140 | __get_cpu_var(irq_depth)--; | ||
141 | |||
142 | /* | ||
143 | * Track time spent against the current process again and | ||
144 | * process any softirqs if they are waiting. | ||
145 | */ | ||
146 | irq_exit(); | ||
147 | set_irq_regs(old_regs); | ||
148 | } | ||
149 | |||
150 | |||
151 | /* | ||
152 | * Remove an irq from the disabled mask. If we're in an interrupt | ||
153 | * context, defer enabling the HW interrupt until we leave. | ||
154 | */ | ||
155 | void enable_percpu_irq(unsigned int irq) | ||
156 | { | ||
157 | get_cpu_var(irq_disable_mask) &= ~(1UL << irq); | ||
158 | if (__get_cpu_var(irq_depth) == 0) | ||
159 | unmask_irqs(1UL << irq); | ||
160 | put_cpu_var(irq_disable_mask); | ||
161 | } | ||
162 | EXPORT_SYMBOL(enable_percpu_irq); | ||
163 | |||
164 | /* | ||
165 | * Add an irq to the disabled mask. We disable the HW interrupt | ||
166 | * immediately so that there's no possibility of it firing. If we're | ||
167 | * in an interrupt context, the return path is careful to avoid | ||
168 | * unmasking a newly disabled interrupt. | ||
169 | */ | ||
170 | void disable_percpu_irq(unsigned int irq) | ||
171 | { | ||
172 | get_cpu_var(irq_disable_mask) |= (1UL << irq); | ||
173 | mask_irqs(1UL << irq); | ||
174 | put_cpu_var(irq_disable_mask); | ||
175 | } | ||
176 | EXPORT_SYMBOL(disable_percpu_irq); | ||
177 | |||
178 | /* Mask an interrupt. */ | ||
179 | static void tile_irq_chip_mask(unsigned int irq) | ||
180 | { | ||
181 | mask_irqs(1UL << irq); | ||
182 | } | ||
183 | |||
184 | /* Unmask an interrupt. */ | ||
185 | static void tile_irq_chip_unmask(unsigned int irq) | ||
186 | { | ||
187 | unmask_irqs(1UL << irq); | ||
188 | } | ||
189 | |||
190 | /* | ||
191 | * Clear an interrupt before processing it so that any new assertions | ||
192 | * will trigger another irq. | ||
193 | */ | ||
194 | static void tile_irq_chip_ack(unsigned int irq) | ||
195 | { | ||
196 | if ((unsigned long)get_irq_chip_data(irq) != IS_HW_CLEARED) | ||
197 | clear_irqs(1UL << irq); | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * For per-cpu interrupts, we need to avoid unmasking any interrupts | ||
202 | * that we disabled via disable_percpu_irq(). | ||
203 | */ | ||
204 | static void tile_irq_chip_eoi(unsigned int irq) | ||
205 | { | ||
206 | if (!(__get_cpu_var(irq_disable_mask) & (1UL << irq))) | ||
207 | unmask_irqs(1UL << irq); | ||
208 | } | ||
209 | |||
210 | static struct irq_chip tile_irq_chip = { | ||
211 | .typename = "tile_irq_chip", | ||
212 | .ack = tile_irq_chip_ack, | ||
213 | .eoi = tile_irq_chip_eoi, | ||
214 | .mask = tile_irq_chip_mask, | ||
215 | .unmask = tile_irq_chip_unmask, | ||
216 | }; | ||
217 | |||
218 | void __init init_IRQ(void) | ||
219 | { | ||
220 | ipi_init(); | ||
221 | } | ||
222 | |||
223 | void __cpuinit setup_irq_regs(void) | ||
224 | { | ||
225 | /* Enable interrupt delivery. */ | ||
226 | unmask_irqs(~0UL); | ||
227 | #if CHIP_HAS_IPI() | ||
228 | raw_local_irq_unmask(INT_IPI_1); | ||
229 | #endif | ||
230 | } | ||
231 | |||
232 | void tile_irq_activate(unsigned int irq, int tile_irq_type) | ||
233 | { | ||
234 | /* | ||
235 | * We use handle_level_irq() by default because the pending | ||
236 | * interrupt vector (whether modeled by the HV on TILE64 and | ||
237 | * TILEPro or implemented in hardware on TILE-Gx) has | ||
238 | * level-style semantics for each bit. An interrupt fires | ||
239 | * whenever a bit is high, not just at edges. | ||
240 | */ | ||
241 | irq_flow_handler_t handle = handle_level_irq; | ||
242 | if (tile_irq_type == TILE_IRQ_PERCPU) | ||
243 | handle = handle_percpu_irq; | ||
244 | set_irq_chip_and_handler(irq, &tile_irq_chip, handle); | ||
245 | |||
246 | /* | ||
247 | * Flag interrupts that are hardware-cleared so that ack() | ||
248 | * won't clear them. | ||
249 | */ | ||
250 | if (tile_irq_type == TILE_IRQ_HW_CLEAR) | ||
251 | set_irq_chip_data(irq, (void *)IS_HW_CLEARED); | ||
252 | } | ||
253 | EXPORT_SYMBOL(tile_irq_activate); | ||
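A hypothetical driver on a CHIP_HAS_IPI() platform would compose these exports roughly as follows; my_handler, "mydev", and the headers are illustrative, while create_irq()/destroy_irq() are defined later in this file:

```c
#include <linux/interrupt.h>
#include <asm/irq.h>

static irqreturn_t my_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_setup(void)
{
	int irq = create_irq();                  /* grab a free IPI event bit */
	if (irq < 0)
		return irq;
	tile_irq_activate(irq, TILE_IRQ_PERCPU); /* install tile_irq_chip */
	if (request_irq(irq, my_handler, 0, "mydev", NULL)) {
		destroy_irq(irq);
		return -EBUSY;
	}
	return irq;
}
```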
254 | |||
255 | |||
256 | void ack_bad_irq(unsigned int irq) | ||
257 | { | ||
258 | pr_err("unexpected IRQ trap at vector %02x\n", irq); | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * Generic, controller-independent functions: | ||
263 | */ | ||
264 | |||
265 | int show_interrupts(struct seq_file *p, void *v) | ||
266 | { | ||
267 | int i = *(loff_t *) v, j; | ||
268 | struct irqaction *action; | ||
269 | unsigned long flags; | ||
270 | |||
271 | if (i == 0) { | ||
272 | seq_printf(p, " "); | ||
273 | for (j = 0; j < NR_CPUS; j++) | ||
274 | if (cpu_online(j)) | ||
275 | seq_printf(p, "CPU%-8d", j); | ||
276 | seq_putc(p, '\n'); | ||
277 | } | ||
278 | |||
279 | if (i < NR_IRQS) { | ||
280 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
281 | action = irq_desc[i].action; | ||
282 | if (!action) | ||
283 | goto skip; | ||
284 | seq_printf(p, "%3d: ", i); | ||
285 | #ifndef CONFIG_SMP | ||
286 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
287 | #else | ||
288 | for_each_online_cpu(j) | ||
289 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
290 | #endif | ||
291 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | ||
292 | seq_printf(p, " %s", action->name); | ||
293 | |||
294 | for (action = action->next; action; action = action->next) | ||
295 | seq_printf(p, ", %s", action->name); | ||
296 | |||
297 | seq_putc(p, '\n'); | ||
298 | skip: | ||
299 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
300 | } | ||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | #if CHIP_HAS_IPI() | ||
305 | int create_irq(void) | ||
306 | { | ||
307 | unsigned long flags; | ||
308 | int result; | ||
309 | |||
310 | spin_lock_irqsave(&available_irqs_lock, flags); | ||
311 | if (available_irqs == 0) | ||
312 | result = -ENOMEM; | ||
313 | else { | ||
314 | result = __ffs(available_irqs); | ||
315 | available_irqs &= ~(1UL << result); | ||
316 | dynamic_irq_init(result); | ||
317 | } | ||
318 | spin_unlock_irqrestore(&available_irqs_lock, flags); | ||
319 | |||
320 | return result; | ||
321 | } | ||
322 | EXPORT_SYMBOL(create_irq); | ||
323 | |||
324 | void destroy_irq(unsigned int irq) | ||
325 | { | ||
326 | unsigned long flags; | ||
327 | |||
328 | spin_lock_irqsave(&available_irqs_lock, flags); | ||
329 | available_irqs |= (1UL << irq); | ||
330 | dynamic_irq_cleanup(irq); | ||
331 | spin_unlock_irqrestore(&available_irqs_lock, flags); | ||
332 | } | ||
333 | EXPORT_SYMBOL(destroy_irq); | ||
334 | #endif | ||
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c new file mode 100644 index 000000000000..ba7a265d6179 --- /dev/null +++ b/arch/tile/kernel/machine_kexec.c | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * based on machine_kexec.c from other architectures in linux-2.6.18 | ||
15 | */ | ||
16 | |||
17 | #include <linux/mm.h> | ||
18 | #include <linux/kexec.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/reboot.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/vmalloc.h> | ||
23 | #include <linux/cpumask.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/elf.h> | ||
26 | #include <linux/highmem.h> | ||
27 | #include <linux/mmu_context.h> | ||
28 | #include <linux/io.h> | ||
29 | #include <linux/timex.h> | ||
30 | #include <asm/pgtable.h> | ||
31 | #include <asm/pgalloc.h> | ||
32 | #include <asm/cacheflush.h> | ||
33 | #include <asm/checksum.h> | ||
34 | #include <hv/hypervisor.h> | ||
35 | |||
36 | |||
37 | /* | ||
38 | * These definitions are not in elf.h or any other kernel header. | ||
39 | * They are needed below by the little boot-notes parser that | ||
40 | * extracts the command line so we can pass it to the hypervisor. | ||
41 | */ | ||
42 | struct Elf32_Bhdr { | ||
43 | Elf32_Word b_signature; | ||
44 | Elf32_Word b_size; | ||
45 | Elf32_Half b_checksum; | ||
46 | Elf32_Half b_records; | ||
47 | }; | ||
48 | #define ELF_BOOT_MAGIC 0x0E1FB007 | ||
49 | #define EBN_COMMAND_LINE 0x00000004 | ||
50 | #define roundupsz(X) (((X) + 3) & ~3) | ||
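A sketch of the boot-notes page layout that kexec_bn2cl() below walks, reconstructed from the definitions above (a reading aid, not normative):

```c
/*
 *   page start -> struct Elf32_Bhdr   b_signature == ELF_BOOT_MAGIC,
 *                                     b_size covers the whole region
 *                 Elf32_Nhdr + desc   note records, descriptors padded
 *                 ...                 to 4 bytes with roundupsz()
 *                 Elf32_Nhdr          n_type == EBN_COMMAND_LINE
 *                 "root=... \0"       the command-line string itself
 */
```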
51 | |||
52 | /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ | ||
53 | |||
54 | |||
55 | void machine_shutdown(void) | ||
56 | { | ||
57 | /* | ||
58 | * Normally we would stop all the other processors here, but | ||
59 | * the check in machine_kexec_prepare below ensures we'll only | ||
60 | * get this far if we've been booted with "nosmp" on the | ||
61 | * command line or without CONFIG_SMP, so there's nothing to do | ||
62 | * here (for now). | ||
63 | */ | ||
64 | } | ||
65 | |||
66 | void machine_crash_shutdown(struct pt_regs *regs) | ||
67 | { | ||
68 | /* | ||
69 | * Cannot happen. This type of kexec is disabled on this | ||
70 | * architecture (and enforced in machine_kexec_prepare below). | ||
71 | */ | ||
72 | } | ||
73 | |||
74 | |||
75 | int machine_kexec_prepare(struct kimage *image) | ||
76 | { | ||
77 | if (num_online_cpus() > 1) { | ||
78 | pr_warning("%s: detected attempt to kexec " | ||
79 | "with num_online_cpus() > 1\n", | ||
80 | __func__); | ||
81 | return -ENOSYS; | ||
82 | } | ||
83 | if (image->type != KEXEC_TYPE_DEFAULT) { | ||
84 | pr_warning("%s: detected attempt to kexec " | ||
85 | "with unsupported type: %d\n", | ||
86 | __func__, | ||
87 | image->type); | ||
88 | return -ENOSYS; | ||
89 | } | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | void machine_kexec_cleanup(struct kimage *image) | ||
94 | { | ||
95 | /* | ||
96 | * We did nothing in machine_kexec_prepare, | ||
97 | * so we have nothing to do here. | ||
98 | */ | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * If we can find ELF boot notes on this page, return the command | ||
103 | * line. Otherwise, silently return NULL. Somewhat kludgy, but no | ||
104 | * good way to do this without significantly rearchitecting the | ||
105 | * architecture-independent kexec code. | ||
106 | */ | ||
107 | |||
108 | static unsigned char *kexec_bn2cl(void *pg) | ||
109 | { | ||
110 | struct Elf32_Bhdr *bhdrp; | ||
111 | Elf32_Nhdr *nhdrp; | ||
112 | unsigned char *desc; | ||
113 | unsigned char *command_line; | ||
114 | __sum16 csum; | ||
115 | |||
116 | bhdrp = (struct Elf32_Bhdr *) pg; | ||
117 | |||
118 | /* | ||
119 | * This routine is invoked for every source page, so make | ||
120 | * sure to quietly ignore every impossible page. | ||
121 | */ | ||
122 | if (bhdrp->b_signature != ELF_BOOT_MAGIC || | ||
123 | bhdrp->b_size > PAGE_SIZE) | ||
124 | return NULL; | ||
125 | |||
126 | /* | ||
127 | * If we get a checksum mismatch, warn with the checksum | ||
128 | * so we can diagnose better. | ||
129 | */ | ||
130 | csum = ip_compute_csum(pg, bhdrp->b_size); | ||
131 | if (csum != 0) { | ||
132 | pr_warning("%s: bad checksum %#x (size %d)\n", | ||
133 | __func__, csum, bhdrp->b_size); | ||
134 | return NULL; | ||
135 | } | ||
136 | |||
137 | nhdrp = (Elf32_Nhdr *) (bhdrp + 1); | ||
138 | |||
139 | while (nhdrp->n_type != EBN_COMMAND_LINE) { | ||
140 | |||
141 | desc = (unsigned char *) (nhdrp + 1); | ||
142 | desc += roundupsz(nhdrp->n_descsz); | ||
143 | |||
144 | nhdrp = (Elf32_Nhdr *) desc; | ||
145 | |||
146 | /* still in bounds? */ | ||
147 | if ((unsigned char *) (nhdrp + 1) > | ||
148 | ((unsigned char *) pg) + bhdrp->b_size) { | ||
149 | |||
150 | pr_info("%s: out of bounds\n", __func__); | ||
151 | return NULL; | ||
152 | } | ||
153 | } | ||
154 | |||
155 | command_line = (unsigned char *) (nhdrp + 1); | ||
156 | desc = command_line; | ||
157 | |||
158 | while (*desc != '\0') { | ||
159 | desc++; | ||
160 | if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) { | ||
161 | pr_info("%s: ran off end of page\n", | ||
162 | __func__); | ||
163 | return NULL; | ||
164 | } | ||
165 | } | ||
166 | |||
167 | return command_line; | ||
168 | } | ||
169 | |||
170 | static void kexec_find_and_set_command_line(struct kimage *image) | ||
171 | { | ||
172 | kimage_entry_t *ptr, entry; | ||
173 | |||
174 | unsigned char *command_line = NULL; | ||
175 | unsigned char *r; | ||
176 | HV_Errno hverr; | ||
177 | |||
178 | for (ptr = &image->head; | ||
179 | (entry = *ptr) && !(entry & IND_DONE); | ||
180 | ptr = (entry & IND_INDIRECTION) ? | ||
181 | phys_to_virt((entry & PAGE_MASK)) : ptr + 1) { | ||
182 | |||
183 | if ((entry & IND_SOURCE)) { | ||
184 | void *va = | ||
185 | kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0); | ||
186 | r = kexec_bn2cl(va); | ||
187 | if (r) { | ||
188 | command_line = r; | ||
189 | break; | ||
190 | } | ||
191 | kunmap_atomic(va, KM_USER0); | ||
192 | } | ||
193 | } | ||
194 | |||
195 | if (command_line != NULL) { | ||
196 | pr_info("setting new command line to \"%s\"\n", | ||
197 | command_line); | ||
198 | |||
199 | hverr = hv_set_command_line( | ||
200 | (HV_VirtAddr) command_line, strlen(command_line)); | ||
201 | kunmap_atomic(command_line, KM_USER0); | ||
202 | } else { | ||
203 | pr_info("%s: no command line found; making empty\n", | ||
204 | __func__); | ||
205 | hverr = hv_set_command_line((HV_VirtAddr) command_line, 0); | ||
206 | } | ||
207 | if (hverr) | ||
208 | pr_warning("%s: hv_set_command_line returned error: %d\n", | ||
209 | __func__, hverr); | ||
210 | } | ||
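For context, the entry-list walk above follows the generic kexec list format: each kimage_entry_t is a physical address with flag bits in the low bits, roughly as sketched here:

```c
/*
 * entry & IND_INDIRECTION -> PA of the next page of entries
 * entry & IND_SOURCE      -> PA of a source page (worth scanning here)
 * entry & IND_DONE        -> end of the list
 * otherwise                advance to the next entry with ptr + 1
 */
```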
211 | |||
212 | /* | ||
213 | * The kexec code range-checks all its PAs, so to avoid having it run | ||
214 | * amok and allocate memory and then sequester it from every other | ||
215 | * controller, we force it to come from controller zero. We also | ||
216 | * disable the oom-killer since if we do end up running out of memory, | ||
217 | * that almost certainly won't help. | ||
218 | */ | ||
219 | struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order) | ||
220 | { | ||
221 | gfp_mask |= __GFP_THISNODE | __GFP_NORETRY; | ||
222 | return alloc_pages_node(0, gfp_mask, order); | ||
223 | } | ||
224 | |||
225 | static void setup_quasi_va_is_pa(void) | ||
226 | { | ||
227 | HV_PTE *pgtable; | ||
228 | HV_PTE pte; | ||
229 | int i; | ||
230 | |||
231 | /* | ||
232 | * Flush our TLB to prevent conflicts between the previous contents | ||
233 | * and the new stuff we're about to add. | ||
234 | */ | ||
235 | local_flush_tlb_all(); | ||
236 | |||
237 | /* Set up a VA == PA mapping, at least up to PAGE_OFFSET. */ | ||
238 | |||
239 | pgtable = (HV_PTE *)current->mm->pgd; | ||
240 | pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE); | ||
241 | pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3); | ||
242 | |||
243 | for (i = 0; i < pgd_index(PAGE_OFFSET); i++) | ||
244 | pgtable[i] = pfn_pte(i << (HPAGE_SHIFT - PAGE_SHIFT), pte); | ||
245 | } | ||
246 | |||
247 | |||
248 | NORET_TYPE void machine_kexec(struct kimage *image) | ||
249 | { | ||
250 | void *reboot_code_buffer; | ||
251 | NORET_TYPE void (*rnk)(unsigned long, void *, unsigned long) | ||
252 | ATTRIB_NORET; | ||
253 | |||
254 | /* Mask all interrupts before starting to reboot. */ | ||
255 | interrupt_mask_set_mask(~0ULL); | ||
256 | |||
257 | kexec_find_and_set_command_line(image); | ||
258 | |||
259 | /* | ||
260 | * Adjust the home caching of the control page to be cached on | ||
261 | * this cpu, and copy the assembly helper into the control | ||
262 | * code page, which we map in the vmalloc area. | ||
263 | */ | ||
264 | homecache_change_page_home(image->control_code_page, 0, | ||
265 | smp_processor_id()); | ||
266 | reboot_code_buffer = vmap(&image->control_code_page, 1, 0, | ||
267 | __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE)); | ||
268 | memcpy(reboot_code_buffer, relocate_new_kernel, | ||
269 | relocate_new_kernel_size); | ||
270 | __flush_icache_range( | ||
271 | (unsigned long) reboot_code_buffer, | ||
272 | (unsigned long) reboot_code_buffer + relocate_new_kernel_size); | ||
273 | |||
274 | setup_quasi_va_is_pa(); | ||
275 | |||
276 | /* now call it */ | ||
277 | rnk = reboot_code_buffer; | ||
278 | (*rnk)(image->head, reboot_code_buffer, image->start); | ||
279 | } | ||
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c new file mode 100644 index 000000000000..6d23ed271d10 --- /dev/null +++ b/arch/tile/kernel/messaging.c | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/percpu.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/hardirq.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <asm/hv_driver.h> | ||
20 | #include <asm/irq_regs.h> | ||
21 | #include <asm/traps.h> | ||
22 | #include <hv/hypervisor.h> | ||
23 | #include <arch/interrupts.h> | ||
24 | |||
25 | /* All messages are stored here */ | ||
26 | static DEFINE_PER_CPU(HV_MsgState, msg_state); | ||
27 | |||
28 | void __cpuinit init_messaging(void) | ||
29 | { | ||
30 | /* Allocate storage for messages in kernel space */ | ||
31 | HV_MsgState *state = &__get_cpu_var(msg_state); | ||
32 | int rc = hv_register_message_state(state); | ||
33 | if (rc != HV_OK) | ||
34 | panic("hv_register_message_state: error %d", rc); | ||
35 | |||
36 | /* Make sure downcall interrupts will be enabled. */ | ||
37 | raw_local_irq_unmask(INT_INTCTRL_1); | ||
38 | } | ||
39 | |||
40 | void hv_message_intr(struct pt_regs *regs, int intnum) | ||
41 | { | ||
42 | /* | ||
43 | * We enter with interrupts disabled and leave them disabled, | ||
44 | * to match expectations of called functions (e.g. | ||
45 | * do_ccupdate_local() in mm/slab.c). This is also consistent | ||
46 | * with normal call entry for device interrupts. | ||
47 | */ | ||
48 | |||
49 | int message[HV_MAX_MESSAGE_SIZE/sizeof(int)]; | ||
50 | HV_RcvMsgInfo rmi; | ||
51 | int nmsgs = 0; | ||
52 | |||
53 | /* Track time spent here in an interrupt context */ | ||
54 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
55 | irq_enter(); | ||
56 | |||
57 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | ||
58 | /* Debugging check for stack overflow: less than 1/8th stack free? */ | ||
59 | { | ||
60 | long sp = stack_pointer - (long) current_thread_info(); | ||
61 | if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { | ||
62 | pr_emerg("hv_message_intr: " | ||
63 | "stack overflow: %ld\n", | ||
64 | sp - sizeof(struct thread_info)); | ||
65 | dump_stack(); | ||
66 | } | ||
67 | } | ||
68 | #endif | ||
69 | |||
70 | while (1) { | ||
71 | rmi = hv_receive_message(__get_cpu_var(msg_state), | ||
72 | (HV_VirtAddr) message, | ||
73 | sizeof(message)); | ||
74 | if (rmi.msglen == 0) | ||
75 | break; | ||
76 | |||
77 | if (rmi.msglen < 0) | ||
78 | panic("hv_receive_message failed: %d", rmi.msglen); | ||
79 | |||
80 | ++nmsgs; | ||
81 | |||
82 | if (rmi.source == HV_MSG_TILE) { | ||
83 | int tag; | ||
84 | |||
85 | /* we just send tags for now */ | ||
86 | BUG_ON(rmi.msglen != sizeof(int)); | ||
87 | |||
88 | tag = message[0]; | ||
89 | #ifdef CONFIG_SMP | ||
90 | evaluate_message(message[0]); | ||
91 | #else | ||
92 | panic("Received IPI message %d in UP mode", tag); | ||
93 | #endif | ||
94 | } else if (rmi.source == HV_MSG_INTR) { | ||
95 | HV_IntrMsg *him = (HV_IntrMsg *)message; | ||
96 | struct hv_driver_cb *cb = | ||
97 | (struct hv_driver_cb *)him->intarg; | ||
98 | cb->callback(cb, him->intdata); | ||
99 | __get_cpu_var(irq_stat).irq_hv_msg_count++; | ||
100 | } | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * We shouldn't have gotten a message downcall with no | ||
105 | * messages available. | ||
106 | */ | ||
107 | if (nmsgs == 0) | ||
108 | panic("Message downcall invoked with no messages!"); | ||
109 | |||
110 | /* | ||
111 | * Track time spent against the current process again and | ||
112 | * process any softirqs if they are waiting. | ||
113 | */ | ||
114 | irq_exit(); | ||
115 | set_irq_regs(old_regs); | ||
116 | } | ||
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c new file mode 100644 index 000000000000..e2ab82b7c7e7 --- /dev/null +++ b/arch/tile/kernel/module.c | |||
@@ -0,0 +1,257 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Based on i386 version, copyright (C) 2001 Rusty Russell. | ||
15 | */ | ||
16 | |||
17 | #include <linux/moduleloader.h> | ||
18 | #include <linux/elf.h> | ||
19 | #include <linux/vmalloc.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <asm/opcode-tile.h> | ||
24 | #include <asm/pgtable.h> | ||
25 | |||
26 | #ifdef __tilegx__ | ||
27 | # define Elf_Rela Elf64_Rela | ||
28 | # define ELF_R_SYM ELF64_R_SYM | ||
29 | # define ELF_R_TYPE ELF64_R_TYPE | ||
30 | #else | ||
31 | # define Elf_Rela Elf32_Rela | ||
32 | # define ELF_R_SYM ELF32_R_SYM | ||
33 | # define ELF_R_TYPE ELF32_R_TYPE | ||
34 | #endif | ||
35 | |||
36 | #ifdef MODULE_DEBUG | ||
37 | #define DEBUGP printk | ||
38 | #else | ||
39 | #define DEBUGP(fmt...) | ||
40 | #endif | ||
41 | |||
42 | /* | ||
43 | * Allocate some address space in the range MEM_MODULE_START to | ||
44 | * MEM_MODULE_END and populate it with memory. | ||
45 | */ | ||
46 | void *module_alloc(unsigned long size) | ||
47 | { | ||
48 | struct page **pages; | ||
49 | pgprot_t prot_rwx = __pgprot(_PAGE_KERNEL | _PAGE_KERNEL_EXEC); | ||
50 | struct vm_struct *area; | ||
51 | int i = 0; | ||
52 | int npages; | ||
53 | |||
54 | if (size == 0) | ||
55 | return NULL; | ||
56 | npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; | ||
57 | pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL); | ||
58 | if (pages == NULL) | ||
59 | return NULL; | ||
60 | for (; i < npages; ++i) { | ||
61 | pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); | ||
62 | if (!pages[i]) | ||
63 | goto error; | ||
64 | } | ||
65 | |||
66 | area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END); | ||
67 | if (!area) | ||
68 | goto error; | ||
69 | |||
70 | if (map_vm_area(area, prot_rwx, &pages)) { | ||
71 | vunmap(area->addr); | ||
72 | goto error; | ||
73 | } | ||
74 | |||
75 | return area->addr; | ||
76 | |||
77 | error: | ||
78 | while (--i >= 0) | ||
79 | __free_page(pages[i]); | ||
80 | kfree(pages); | ||
81 | return NULL; | ||
82 | } | ||
83 | |||
84 | |||
85 | /* Free memory returned from module_alloc */ | ||
86 | void module_free(struct module *mod, void *module_region) | ||
87 | { | ||
88 | vfree(module_region); | ||
89 | /* | ||
90 | * FIXME: If module_region == mod->init_region, trim exception | ||
91 | * table entries. | ||
92 | */ | ||
93 | } | ||
94 | |||
95 | /* We don't need anything special. */ | ||
96 | int module_frob_arch_sections(Elf_Ehdr *hdr, | ||
97 | Elf_Shdr *sechdrs, | ||
98 | char *secstrings, | ||
99 | struct module *mod) | ||
100 | { | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | int apply_relocate(Elf_Shdr *sechdrs, | ||
105 | const char *strtab, | ||
106 | unsigned int symindex, | ||
107 | unsigned int relsec, | ||
108 | struct module *me) | ||
109 | { | ||
110 | pr_err("module %s: .rel relocation unsupported\n", me->name); | ||
111 | return -ENOEXEC; | ||
112 | } | ||
113 | |||
114 | #ifdef __tilegx__ | ||
115 | /* | ||
116 | * Validate that the high 16 bits of "value" is just the sign-extension of | ||
117 | * the low 48 bits. | ||
118 | */ | ||
119 | static int validate_hw2_last(long value, struct module *me) | ||
120 | { | ||
121 | if (((value << 16) >> 16) != value) { | ||
122 | pr_warning("module %s: Out of range HW2_LAST value %#lx\n", | ||
123 | me->name, value); | ||
124 | return 0; | ||
125 | } | ||
126 | return 1; | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * Validate that "value" isn't too big to hold in a JumpOff relocation. | ||
131 | */ | ||
132 | static int validate_jumpoff(long value) | ||
133 | { | ||
134 | /* Determine size of jump offset. */ | ||
135 | int shift = __builtin_clzl(get_JumpOff_X1(create_JumpOff_X1(-1))); | ||
136 | |||
137 | /* Check to see if it fits into the relocation slot. */ | ||
138 | long f = get_JumpOff_X1(create_JumpOff_X1(value)); | ||
139 | f = (f << shift) >> shift; | ||
140 | |||
141 | return f == value; | ||
142 | } | ||
143 | #endif | ||
144 | |||
145 | int apply_relocate_add(Elf_Shdr *sechdrs, | ||
146 | const char *strtab, | ||
147 | unsigned int symindex, | ||
148 | unsigned int relsec, | ||
149 | struct module *me) | ||
150 | { | ||
151 | unsigned int i; | ||
152 | Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr; | ||
153 | Elf_Sym *sym; | ||
154 | u64 *location; | ||
155 | unsigned long value; | ||
156 | |||
157 | DEBUGP("Applying relocate section %u to %u\n", relsec, | ||
158 | sechdrs[relsec].sh_info); | ||
159 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
160 | /* This is where to make the change */ | ||
161 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
162 | + rel[i].r_offset; | ||
163 | /* | ||
164 | * This is the symbol it is referring to. | ||
165 | * Note that all undefined symbols have been resolved. | ||
166 | */ | ||
167 | sym = (Elf_Sym *)sechdrs[symindex].sh_addr | ||
168 | + ELF_R_SYM(rel[i].r_info); | ||
169 | value = sym->st_value + rel[i].r_addend; | ||
170 | |||
171 | switch (ELF_R_TYPE(rel[i].r_info)) { | ||
172 | |||
173 | #define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value))) | ||
174 | |||
175 | #ifndef __tilegx__ | ||
176 | case R_TILE_32: | ||
177 | *(uint32_t *)location = value; | ||
178 | break; | ||
179 | case R_TILE_IMM16_X0_HA: | ||
180 | value = (value + 0x8000) >> 16; | ||
181 | /*FALLTHROUGH*/ | ||
182 | case R_TILE_IMM16_X0_LO: | ||
183 | MUNGE(create_Imm16_X0); | ||
184 | break; | ||
185 | case R_TILE_IMM16_X1_HA: | ||
186 | value = (value + 0x8000) >> 16; | ||
187 | /*FALLTHROUGH*/ | ||
188 | case R_TILE_IMM16_X1_LO: | ||
189 | MUNGE(create_Imm16_X1); | ||
190 | break; | ||
191 | case R_TILE_JOFFLONG_X1: | ||
192 | value -= (unsigned long) location; /* pc-relative */ | ||
193 | value = (long) value >> 3; /* count by instrs */ | ||
194 | MUNGE(create_JOffLong_X1); | ||
195 | break; | ||
196 | #else | ||
197 | case R_TILEGX_64: | ||
198 | *location = value; | ||
199 | break; | ||
200 | case R_TILEGX_IMM16_X0_HW2_LAST: | ||
201 | if (!validate_hw2_last(value, me)) | ||
202 | return -ENOEXEC; | ||
203 | value >>= 16; | ||
204 | /*FALLTHROUGH*/ | ||
205 | case R_TILEGX_IMM16_X0_HW1: | ||
206 | value >>= 16; | ||
207 | /*FALLTHROUGH*/ | ||
208 | case R_TILEGX_IMM16_X0_HW0: | ||
209 | MUNGE(create_Imm16_X0); | ||
210 | break; | ||
211 | case R_TILEGX_IMM16_X1_HW2_LAST: | ||
212 | if (!validate_hw2_last(value, me)) | ||
213 | return -ENOEXEC; | ||
214 | value >>= 16; | ||
215 | /*FALLTHROUGH*/ | ||
216 | case R_TILEGX_IMM16_X1_HW1: | ||
217 | value >>= 16; | ||
218 | /*FALLTHROUGH*/ | ||
219 | case R_TILEGX_IMM16_X1_HW0: | ||
220 | MUNGE(create_Imm16_X1); | ||
221 | break; | ||
222 | case R_TILEGX_JUMPOFF_X1: | ||
223 | value -= (unsigned long) location; /* pc-relative */ | ||
224 | value = (long) value >> 3; /* count by instrs */ | ||
225 | if (!validate_jumpoff(value)) { | ||
226 | pr_warning("module %s: Out of range jump to" | ||
227 | " %#llx at %#llx (%p)\n", me->name, | ||
228 | sym->st_value + rel[i].r_addend, | ||
229 | rel[i].r_offset, location); | ||
230 | return -ENOEXEC; | ||
231 | } | ||
232 | MUNGE(create_JumpOff_X1); | ||
233 | break; | ||
234 | #endif | ||
235 | |||
236 | #undef MUNGE | ||
237 | |||
238 | default: | ||
239 | pr_err("module %s: Unknown relocation: %d\n", | ||
240 | me->name, (int) ELF_R_TYPE(rel[i].r_info)); | ||
241 | return -ENOEXEC; | ||
242 | } | ||
243 | } | ||
244 | return 0; | ||
245 | } | ||
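To make the MUNGE() idiom above concrete: create_Imm16_X0(-1) yields a mask with every bit of the Imm16 field set, so for that field the macro expands to:

```c
/* clear the old immediate in the bundle, then OR in the new one */
*location = (*location & ~create_Imm16_X0(-1)) | create_Imm16_X0(value);
```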
246 | |||
247 | int module_finalize(const Elf_Ehdr *hdr, | ||
248 | const Elf_Shdr *sechdrs, | ||
249 | struct module *me) | ||
250 | { | ||
251 | /* FIXME: perhaps remove the "writable" bit from the TLB? */ | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | void module_arch_cleanup(struct module *mod) | ||
256 | { | ||
257 | } | ||
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c new file mode 100644 index 000000000000..5ad5e13b0fa6 --- /dev/null +++ b/arch/tile/kernel/pci-dma.c | |||
@@ -0,0 +1,251 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/mm.h> | ||
16 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/vmalloc.h> | ||
18 | #include <asm/tlbflush.h> | ||
19 | #include <asm/homecache.h> | ||
20 | |||
21 | /* Generic DMA mapping functions: */ | ||
22 | |||
23 | /* | ||
24 | * Allocate what Linux calls "coherent" memory, which for us just | ||
25 | * means uncached. | ||
26 | */ | ||
27 | void *dma_alloc_coherent(struct device *dev, | ||
28 | size_t size, | ||
29 | dma_addr_t *dma_handle, | ||
30 | gfp_t gfp) | ||
31 | { | ||
32 | u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32); | ||
33 | int node = dev_to_node(dev); | ||
34 | int order = get_order(size); | ||
35 | struct page *pg; | ||
36 | dma_addr_t addr; | ||
37 | |||
38 | gfp |= __GFP_ZERO; | ||
39 | |||
40 | /* | ||
41 | * By forcing NUMA node 0 for 32-bit masks we ensure that the | ||
42 | * high 32 bits of the resulting PA will be zero. If the mask | ||
43 | * size is, e.g., 24, we may still not be able to guarantee a | ||
44 | * suitable memory address, in which case we will return NULL. | ||
45 | * But such devices are uncommon. | ||
46 | */ | ||
47 | if (dma_mask <= DMA_BIT_MASK(32)) | ||
48 | node = 0; | ||
49 | |||
50 | pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED); | ||
51 | if (pg == NULL) | ||
52 | return NULL; | ||
53 | |||
54 | addr = page_to_phys(pg); | ||
55 | if (addr + size > dma_mask) { | ||
56 | homecache_free_pages(addr, order); | ||
57 | return NULL; | ||
58 | } | ||
59 | |||
60 | *dma_handle = addr; | ||
61 | return page_address(pg); | ||
62 | } | ||
63 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
64 | |||
65 | /* | ||
66 | * Free memory that was allocated with dma_alloc_coherent. | ||
67 | */ | ||
68 | void dma_free_coherent(struct device *dev, size_t size, | ||
69 | void *vaddr, dma_addr_t dma_handle) | ||
70 | { | ||
71 | homecache_free_pages((unsigned long)vaddr, get_order(size)); | ||
72 | } | ||
73 | EXPORT_SYMBOL(dma_free_coherent); | ||
74 | |||
75 | /* | ||
76 | * The map routines "map" the specified address range for DMA | ||
77 | * accesses. The memory belongs to the device after this call is | ||
78 | * issued, until it is unmapped with dma_unmap_single. | ||
79 | * | ||
80 | * We don't need to do any mapping, we just flush the address range | ||
81 | * out of the cache and return a DMA address. | ||
82 | * | ||
83 | * The unmap routines do whatever is necessary before the processor | ||
84 | * accesses the memory again, and must be called before the driver | ||
85 | * touches the memory. We can get away with a cache invalidate if we | ||
86 | * can count on nothing having been touched. | ||
87 | */ | ||
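A hypothetical driver's streaming-DMA sequence under this model; dev, buf, and len are illustrative:

```c
dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
/* ... point the hardware at "handle" and wait for the transfer ... */
dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
/* only now may the CPU touch buf again */
```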
88 | |||
89 | |||
90 | /* | ||
91 | * dma_map_single can be passed any memory address, and there appear | ||
92 | * to be no alignment constraints. | ||
93 | * | ||
94 | * There is a chance that the start of the buffer will share a cache | ||
95 | * line with some other data that has been touched in the meantime. | ||
96 | */ | ||
97 | dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | ||
98 | enum dma_data_direction direction) | ||
99 | { | ||
100 | struct page *page; | ||
101 | dma_addr_t dma_addr; | ||
102 | int thispage; | ||
103 | |||
104 | BUG_ON(!valid_dma_direction(direction)); | ||
105 | WARN_ON(size == 0); | ||
106 | |||
107 | dma_addr = __pa(ptr); | ||
108 | |||
109 | /* We might have been handed a buffer that crosses a page boundary */ | ||
110 | while ((int)size > 0) { | ||
111 | /* The amount to flush that's on this page */ | ||
112 | thispage = PAGE_SIZE - ((unsigned long)ptr & (PAGE_SIZE - 1)); | ||
113 | thispage = min((int)thispage, (int)size); | ||
114 | /* Is this valid for any page we could be handed? */ | ||
115 | page = pfn_to_page(kaddr_to_pfn(ptr)); | ||
116 | homecache_flush_cache(page, 0); | ||
117 | ptr += thispage; | ||
118 | size -= thispage; | ||
119 | } | ||
120 | |||
121 | return dma_addr; | ||
122 | } | ||
123 | EXPORT_SYMBOL(dma_map_single); | ||
124 | |||
125 | void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
126 | enum dma_data_direction direction) | ||
127 | { | ||
128 | BUG_ON(!valid_dma_direction(direction)); | ||
129 | } | ||
130 | EXPORT_SYMBOL(dma_unmap_single); | ||
131 | |||
132 | int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | ||
133 | enum dma_data_direction direction) | ||
134 | { | ||
135 | struct scatterlist *sg; | ||
136 | int i; | ||
137 | |||
138 | BUG_ON(!valid_dma_direction(direction)); | ||
139 | |||
140 | WARN_ON(nents == 0 || sglist->length == 0); | ||
141 | |||
142 | for_each_sg(sglist, sg, nents, i) { | ||
143 | struct page *page; | ||
144 | sg->dma_address = sg_phys(sg); | ||
145 | page = pfn_to_page(sg->dma_address >> PAGE_SHIFT); | ||
146 | homecache_flush_cache(page, 0); | ||
147 | } | ||
148 | |||
149 | return nents; | ||
150 | } | ||
151 | EXPORT_SYMBOL(dma_map_sg); | ||
152 | |||
153 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
154 | enum dma_data_direction direction) | ||
155 | { | ||
156 | BUG_ON(!valid_dma_direction(direction)); | ||
157 | } | ||
158 | EXPORT_SYMBOL(dma_unmap_sg); | ||
159 | |||
160 | dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
161 | unsigned long offset, size_t size, | ||
162 | enum dma_data_direction direction) | ||
163 | { | ||
164 | BUG_ON(!valid_dma_direction(direction)); | ||
165 | |||
166 | homecache_flush_cache(page, 0); | ||
167 | |||
168 | return page_to_pa(page) + offset; | ||
169 | } | ||
170 | EXPORT_SYMBOL(dma_map_page); | ||
171 | |||
172 | void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
173 | enum dma_data_direction direction) | ||
174 | { | ||
175 | BUG_ON(!valid_dma_direction(direction)); | ||
176 | } | ||
177 | EXPORT_SYMBOL(dma_unmap_page); | ||
178 | |||
179 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
180 | size_t size, enum dma_data_direction direction) | ||
181 | { | ||
182 | BUG_ON(!valid_dma_direction(direction)); | ||
183 | } | ||
184 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | ||
185 | |||
186 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||
187 | size_t size, enum dma_data_direction direction) | ||
188 | { | ||
189 | unsigned long start = PFN_DOWN(dma_handle); | ||
190 | unsigned long end = PFN_DOWN(dma_handle + size - 1); | ||
191 | unsigned long i; | ||
192 | |||
193 | BUG_ON(!valid_dma_direction(direction)); | ||
194 | for (i = start; i <= end; ++i) | ||
195 | homecache_flush_cache(pfn_to_page(i), 0); | ||
196 | } | ||
197 | EXPORT_SYMBOL(dma_sync_single_for_device); | ||
198 | |||
199 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
200 | enum dma_data_direction direction) | ||
201 | { | ||
202 | BUG_ON(!valid_dma_direction(direction)); | ||
203 | WARN_ON(nelems == 0 || sg[0].length == 0); | ||
204 | } | ||
205 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | ||
206 | |||
207 | /* | ||
208 | * Flush and invalidate cache for scatterlist. | ||
209 | */ | ||
210 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, | ||
211 | int nelems, enum dma_data_direction direction) | ||
212 | { | ||
213 | struct scatterlist *sg; | ||
214 | int i; | ||
215 | |||
216 | BUG_ON(!valid_dma_direction(direction)); | ||
217 | WARN_ON(nelems == 0 || sglist->length == 0); | ||
218 | |||
219 | for_each_sg(sglist, sg, nelems, i) { | ||
220 | dma_sync_single_for_device(dev, sg->dma_address, | ||
221 | sg_dma_len(sg), direction); | ||
222 | } | ||
223 | } | ||
224 | EXPORT_SYMBOL(dma_sync_sg_for_device); | ||
225 | |||
226 | void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
227 | unsigned long offset, size_t size, | ||
228 | enum dma_data_direction direction) | ||
229 | { | ||
230 | dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction); | ||
231 | } | ||
232 | EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | ||
233 | |||
234 | void dma_sync_single_range_for_device(struct device *dev, | ||
235 | dma_addr_t dma_handle, | ||
236 | unsigned long offset, size_t size, | ||
237 | enum dma_data_direction direction) | ||
238 | { | ||
239 | dma_sync_single_for_device(dev, dma_handle + offset, size, direction); | ||
240 | } | ||
241 | EXPORT_SYMBOL(dma_sync_single_range_for_device); | ||
242 | |||
243 | /* | ||
244 | * dma_alloc_noncoherent() returns non-cacheable memory, so there's no | ||
245 | * need to do any flushing here. | ||
246 | */ | ||
247 | void dma_cache_sync(void *vaddr, size_t size, | ||
248 | enum dma_data_direction direction) | ||
249 | { | ||
250 | } | ||
251 | EXPORT_SYMBOL(dma_cache_sync); | ||
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c new file mode 100644 index 000000000000..92ef925d2f8d --- /dev/null +++ b/arch/tile/kernel/proc.c | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/smp.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include <linux/threads.h> | ||
18 | #include <linux/cpumask.h> | ||
19 | #include <linux/timex.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/fs.h> | ||
22 | #include <linux/proc_fs.h> | ||
23 | #include <linux/sysctl.h> | ||
24 | #include <linux/hardirq.h> | ||
25 | #include <linux/mman.h> | ||
26 | #include <linux/smp.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/processor.h> | ||
29 | #include <asm/sections.h> | ||
30 | #include <asm/homecache.h> | ||
31 | #include <arch/chip.h> | ||
32 | |||
33 | |||
34 | /* | ||
35 | * Support /proc/cpuinfo | ||
36 | */ | ||
37 | |||
38 | #define cpu_to_ptr(n) ((void *)((long)(n)+1)) | ||
39 | #define ptr_to_cpu(p) ((long)(p) - 1) | ||
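The +1 bias matters because seq_file's iterator protocol treats a NULL return from start()/next() as end-of-sequence, so cpu 0 must not encode to a NULL pointer:

```c
/* cpu_to_ptr(0) == (void *)1, not NULL, so seq_file keeps iterating */
```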
40 | |||
41 | static int show_cpuinfo(struct seq_file *m, void *v) | ||
42 | { | ||
43 | int n = ptr_to_cpu(v); | ||
44 | |||
45 | if (n == 0) { | ||
46 | char buf[NR_CPUS*5]; | ||
47 | cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask); | ||
48 | seq_printf(m, "cpu count\t: %d\n", num_online_cpus()); | ||
49 | seq_printf(m, "cpu list\t: %s\n", buf); | ||
50 | seq_printf(m, "model name\t: %s\n", chip_model); | ||
51 | seq_printf(m, "flags\t\t:\n"); /* nothing for now */ | ||
52 | seq_printf(m, "cpu MHz\t\t: %llu.%06llu\n", | ||
53 | get_clock_rate() / 1000000, | ||
54 | (get_clock_rate() % 1000000)); | ||
55 | seq_printf(m, "bogomips\t: %lu.%02lu\n\n", | ||
56 | loops_per_jiffy/(500000/HZ), | ||
57 | (loops_per_jiffy/(5000/HZ)) % 100); | ||
58 | } | ||
59 | |||
60 | #ifdef CONFIG_SMP | ||
61 | if (!cpu_online(n)) | ||
62 | return 0; | ||
63 | #endif | ||
64 | |||
65 | seq_printf(m, "processor\t: %d\n", n); | ||
66 | |||
67 | /* Print only num_online_cpus() blank lines total. */ | ||
68 | if (cpumask_next(n, cpu_online_mask) < nr_cpu_ids) | ||
69 | seq_printf(m, "\n"); | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
75 | { | ||
76 | return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL; | ||
77 | } | ||
78 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
79 | { | ||
80 | ++*pos; | ||
81 | return c_start(m, pos); | ||
82 | } | ||
83 | static void c_stop(struct seq_file *m, void *v) | ||
84 | { | ||
85 | } | ||
86 | const struct seq_operations cpuinfo_op = { | ||
87 | .start = c_start, | ||
88 | .next = c_next, | ||
89 | .stop = c_stop, | ||
90 | .show = show_cpuinfo, | ||
91 | }; | ||
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c new file mode 100644 index 000000000000..ed590ad0acdc --- /dev/null +++ b/arch/tile/kernel/process.c | |||
@@ -0,0 +1,671 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/preempt.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/kprobes.h> | ||
20 | #include <linux/elfcore.h> | ||
21 | #include <linux/tick.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/compat.h> | ||
25 | #include <linux/hardirq.h> | ||
26 | #include <linux/syscalls.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <asm/system.h> | ||
29 | #include <asm/stack.h> | ||
30 | #include <asm/homecache.h> | ||
31 | #include <asm/syscalls.h> | ||
32 | #ifdef CONFIG_HARDWALL | ||
33 | #include <asm/hardwall.h> | ||
34 | #endif | ||
35 | #include <arch/chip.h> | ||
36 | #include <arch/abi.h> | ||
37 | |||
38 | |||
39 | /* | ||
40 | * Support the (x86-style) "idle=poll" boot option: prefer low wakeup | ||
41 | * latency over low power in the idle loop, e.g. if we have | ||
42 | * one thread per core and we want to get threads out of futex waits fast. | ||
43 | */ | ||
44 | static int no_idle_nap; | ||
45 | static int __init idle_setup(char *str) | ||
46 | { | ||
47 | if (!str) | ||
48 | return -EINVAL; | ||
49 | |||
50 | if (!strcmp(str, "poll")) { | ||
51 | pr_info("using polling idle threads.\n"); | ||
52 | no_idle_nap = 1; | ||
53 | } else if (!strcmp(str, "halt")) | ||
54 | no_idle_nap = 0; | ||
55 | else | ||
56 | return -1; | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | early_param("idle", idle_setup); | ||
61 | |||
62 | /* | ||
63 | * The idle thread. There's no useful work to be | ||
64 | * done, so just try to conserve power and have a | ||
65 | * low exit latency (i.e. sit in a loop waiting for | ||
66 | * somebody to say that they'd like to reschedule). | ||
67 | */ | ||
68 | void cpu_idle(void) | ||
69 | { | ||
70 | int cpu = smp_processor_id(); | ||
71 | |||
72 | |||
73 | current_thread_info()->status |= TS_POLLING; | ||
74 | |||
75 | if (no_idle_nap) { | ||
76 | while (1) { | ||
77 | while (!need_resched()) | ||
78 | cpu_relax(); | ||
79 | schedule(); | ||
80 | } | ||
81 | } | ||
82 | |||
83 | /* endless idle loop with no priority at all */ | ||
84 | while (1) { | ||
85 | tick_nohz_stop_sched_tick(1); | ||
86 | while (!need_resched()) { | ||
87 | if (cpu_is_offline(cpu)) | ||
88 | BUG(); /* no HOTPLUG_CPU */ | ||
89 | |||
90 | local_irq_disable(); | ||
91 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; | ||
92 | current_thread_info()->status &= ~TS_POLLING; | ||
93 | /* | ||
94 | * TS_POLLING-cleared state must be visible before we | ||
95 | * test NEED_RESCHED: | ||
96 | */ | ||
97 | smp_mb(); | ||
98 | |||
99 | if (!need_resched()) | ||
100 | _cpu_idle(); | ||
101 | else | ||
102 | local_irq_enable(); | ||
103 | current_thread_info()->status |= TS_POLLING; | ||
104 | } | ||
105 | tick_nohz_restart_sched_tick(); | ||
106 | preempt_enable_no_resched(); | ||
107 | schedule(); | ||
108 | preempt_disable(); | ||
109 | } | ||
110 | } | ||
111 | |||
112 | struct thread_info *alloc_thread_info(struct task_struct *task) | ||
113 | { | ||
114 | struct page *page; | ||
115 | gfp_t flags = GFP_KERNEL; | ||
116 | |||
117 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
118 | flags |= __GFP_ZERO; | ||
119 | #endif | ||
120 | |||
121 | page = alloc_pages(flags, THREAD_SIZE_ORDER); | ||
122 | if (!page) | ||
123 | return NULL; | ||
124 | |||
125 | return (struct thread_info *)page_address(page); | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Free a thread_info node, and all of its derivative | ||
130 | * data structures. | ||
131 | */ | ||
132 | void free_thread_info(struct thread_info *info) | ||
133 | { | ||
134 | struct single_step_state *step_state = info->step_state; | ||
135 | |||
136 | #ifdef CONFIG_HARDWALL | ||
137 | /* | ||
138 | * We free a thread_info from the context of the task that has | ||
139 | * been scheduled next, so the original task is already dead. | ||
140 | * Calling deactivate here just frees up the data structures. | ||
141 | * If the task we're freeing held the last reference to a | ||
142 | * hardwall fd, it would have been released prior to this point | ||
143 | * anyway via exit_files(), and "hardwall" would be NULL by now. | ||
144 | */ | ||
145 | if (info->task->thread.hardwall) | ||
146 | hardwall_deactivate(info->task); | ||
147 | #endif | ||
148 | |||
149 | if (step_state) { | ||
150 | |||
151 | /* | ||
152 | * FIXME: we don't munmap step_state->buffer | ||
153 | * because the mm_struct for this process (info->task->mm) | ||
154 | * has already been zeroed in exit_mm(). Keeping a | ||
155 | * reference to it here seems like a bad move, so this | ||
156 | * means we can't munmap() the buffer, and therefore if we | ||
157 | * ptrace multiple threads in a process, we will slowly | ||
158 | * leak user memory. (Note that as soon as the last | ||
159 | * thread in a process dies, we will reclaim all user | ||
160 | * memory including single-step buffers in the usual way.) | ||
161 | * We should either assign a kernel VA to this buffer | ||
162 | * somehow, or we should associate the buffer(s) with the | ||
163 | * mm itself so we can clean them up that way. | ||
164 | */ | ||
165 | kfree(step_state); | ||
166 | } | ||
167 | |||
168 | free_page((unsigned long)info); | ||
169 | } | ||
170 | |||
171 | static void save_arch_state(struct thread_struct *t); | ||
172 | |||
173 | int copy_thread(unsigned long clone_flags, unsigned long sp, | ||
174 | unsigned long stack_size, | ||
175 | struct task_struct *p, struct pt_regs *regs) | ||
176 | { | ||
177 | struct pt_regs *childregs; | ||
178 | unsigned long ksp; | ||
179 | |||
180 | /* | ||
181 | * When creating a new kernel thread we pass sp as zero. | ||
182 | * Assign it to a reasonable value now that we have the stack. | ||
183 | */ | ||
184 | if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0)) | ||
185 | sp = KSTK_TOP(p); | ||
186 | |||
187 | /* | ||
188 | * Do not clone step state from the parent; each thread | ||
189 | * must make its own lazily. | ||
190 | */ | ||
191 | task_thread_info(p)->step_state = NULL; | ||
192 | |||
193 | /* | ||
194 | * Start new thread in ret_from_fork so it schedules properly | ||
195 | * and then return from interrupt like the parent. | ||
196 | */ | ||
197 | p->thread.pc = (unsigned long) ret_from_fork; | ||
198 | |||
199 | /* Save user stack top pointer so we can ID the stack vm area later. */ | ||
200 | p->thread.usp0 = sp; | ||
201 | |||
202 | /* Record the pid of the process that created this one. */ | ||
203 | p->thread.creator_pid = current->pid; | ||
204 | |||
205 | /* | ||
206 | * Copy the registers onto the kernel stack so the | ||
207 | * return-from-interrupt code will reload it into registers. | ||
208 | */ | ||
209 | childregs = task_pt_regs(p); | ||
210 | *childregs = *regs; | ||
211 | childregs->regs[0] = 0; /* return value is zero */ | ||
212 | childregs->sp = sp; /* override with new user stack pointer */ | ||
213 | |||
214 | /* | ||
215 | * Copy the callee-saved registers from the passed pt_regs struct | ||
216 | * into the context-switch callee-saved registers area. | ||
217 | * We have to restore the callee-saved registers since we may | ||
218 | * be cloning a userspace task with userspace register state, | ||
219 | * and we won't be unwinding the same kernel frames to restore them. | ||
220 | * Zero out the C ABI save area to mark the top of the stack. | ||
221 | */ | ||
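	/* | ||
	 * Resulting kernel stack, from highest address down: | ||
	 *   pt_regs (childregs) | ||
	 *   C ABI save area for interrupt entry (two zeroed words) | ||
	 *   CALLEE_SAVED_REGS_COUNT callee-saved registers | ||
	 *   C ABI save area for __switch_to()  <--  p->thread.ksp | ||
	 */ | ||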
222 | ksp = (unsigned long) childregs; | ||
223 | ksp -= C_ABI_SAVE_AREA_SIZE; /* interrupt-entry save area */ | ||
224 | ((long *)ksp)[0] = ((long *)ksp)[1] = 0; | ||
225 | ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long); | ||
226 | memcpy((void *)ksp, ®s->regs[CALLEE_SAVED_FIRST_REG], | ||
227 | CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long)); | ||
228 | ksp -= C_ABI_SAVE_AREA_SIZE; /* __switch_to() save area */ | ||
229 | ((long *)ksp)[0] = ((long *)ksp)[1] = 0; | ||
230 | p->thread.ksp = ksp; | ||
231 | |||
232 | #if CHIP_HAS_TILE_DMA() | ||
233 | /* | ||
234 | * No DMA in the new thread. We model this on the fact that | ||
235 | * fork() clears the pending signals, alarms, and aio for the child. | ||
236 | */ | ||
237 | memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state)); | ||
238 | memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb)); | ||
239 | #endif | ||
240 | |||
241 | #if CHIP_HAS_SN_PROC() | ||
242 | /* Likewise, the new thread is not running static processor code. */ | ||
243 | p->thread.sn_proc_running = 0; | ||
244 | memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb)); | ||
245 | #endif | ||
246 | |||
247 | #if CHIP_HAS_PROC_STATUS_SPR() | ||
248 | /* New thread has its miscellaneous processor state bits clear. */ | ||
249 | p->thread.proc_status = 0; | ||
250 | #endif | ||
251 | |||
252 | #ifdef CONFIG_HARDWALL | ||
253 | /* New thread does not own any networks. */ | ||
254 | p->thread.hardwall = NULL; | ||
255 | #endif | ||
256 | |||
258 | /* | ||
259 | * Start the new thread with the current architecture state | ||
260 | * (user interrupt masks, etc.). | ||
261 | */ | ||
262 | save_arch_state(&p->thread); | ||
263 | |||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | /* | ||
268 | * Return "current" if it looks plausible, or else a pointer to a dummy. | ||
269 | * This can be helpful if we are just trying to emit a clean panic. | ||
270 | */ | ||
271 | struct task_struct *validate_current(void) | ||
272 | { | ||
273 | static struct task_struct corrupt = { .comm = "<corrupt>" }; | ||
274 | struct task_struct *tsk = current; | ||
275 | if (unlikely((unsigned long)tsk < PAGE_OFFSET || | ||
276 | (void *)tsk > high_memory || | ||
277 | ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) { | ||
278 | pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer); | ||
279 | tsk = &corrupt; | ||
280 | } | ||
281 | return tsk; | ||
282 | } | ||
283 | |||
284 | /* Take and return the pointer to the previous task, for schedule_tail(). */ | ||
285 | struct task_struct *sim_notify_fork(struct task_struct *prev) | ||
286 | { | ||
287 | struct task_struct *tsk = current; | ||
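	/* Notify the simulator of the parent pid and the new task's pid. */ | ||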
288 | __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT | | ||
289 | (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS)); | ||
290 | __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK | | ||
291 | (tsk->pid << _SIM_CONTROL_OPERATOR_BITS)); | ||
292 | return prev; | ||
293 | } | ||
294 | |||
295 | int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) | ||
296 | { | ||
297 | struct pt_regs *ptregs = task_pt_regs(tsk); | ||
298 | elf_core_copy_regs(regs, ptregs); | ||
299 | return 1; | ||
300 | } | ||
301 | |||
302 | #if CHIP_HAS_TILE_DMA() | ||
303 | |||
304 | /* Allow user processes to access the DMA SPRs */ | ||
305 | void grant_dma_mpls(void) | ||
306 | { | ||
307 | __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1); | ||
308 | __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1); | ||
309 | } | ||
310 | |||
311 | /* Forbid user processes from accessing the DMA SPRs */ | ||
312 | void restrict_dma_mpls(void) | ||
313 | { | ||
314 | __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1); | ||
315 | __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1); | ||
316 | } | ||
317 | |||
318 | /* Pause the DMA engine, then save off its state registers. */ | ||
319 | static void save_tile_dma_state(struct tile_dma_state *dma) | ||
320 | { | ||
321 | unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS); | ||
322 | unsigned long post_suspend_state; | ||
323 | |||
324 | /* If we're running, suspend the engine. */ | ||
325 | if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) | ||
326 | __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK); | ||
327 | |||
328 | /* | ||
329 | * Wait for the engine to idle, then save regs. Note that we | ||
330 | * want to record the "running" bit from before suspension, | ||
331 | * and the "done" bit from after, so that we can properly | ||
332 | * distinguish a case where the user suspended the engine from | ||
333 | * the case where the kernel suspended as part of the context | ||
334 | * swap. | ||
335 | */ | ||
336 | do { | ||
337 | post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS); | ||
338 | } while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK); | ||
339 | |||
340 | dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR); | ||
341 | dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR); | ||
342 | dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR); | ||
343 | dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR); | ||
344 | dma->strides = __insn_mfspr(SPR_DMA_STRIDE); | ||
345 | dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE); | ||
346 | dma->byte = __insn_mfspr(SPR_DMA_BYTE); | ||
347 | dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) | | ||
348 | (post_suspend_state & SPR_DMA_STATUS__DONE_MASK); | ||
349 | } | ||
350 | |||
351 | /* Restart a DMA that was running before we were context-switched out. */ | ||
352 | static void restore_tile_dma_state(struct thread_struct *t) | ||
353 | { | ||
354 | const struct tile_dma_state *dma = &t->tile_dma_state; | ||
355 | |||
356 | /* | ||
357 | * The only way to restore the done bit is to run a zero | ||
358 | * length transaction. | ||
359 | */ | ||
360 | if ((dma->status & SPR_DMA_STATUS__DONE_MASK) && | ||
361 | !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) { | ||
362 | __insn_mtspr(SPR_DMA_BYTE, 0); | ||
363 | __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK); | ||
364 | while (__insn_mfspr(SPR_DMA_USER_STATUS) & | ||
365 | SPR_DMA_STATUS__BUSY_MASK) | ||
366 | ; | ||
367 | } | ||
368 | |||
369 | __insn_mtspr(SPR_DMA_SRC_ADDR, dma->src); | ||
370 | __insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk); | ||
371 | __insn_mtspr(SPR_DMA_DST_ADDR, dma->dest); | ||
372 | __insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk); | ||
373 | __insn_mtspr(SPR_DMA_STRIDE, dma->strides); | ||
374 | __insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size); | ||
375 | __insn_mtspr(SPR_DMA_BYTE, dma->byte); | ||
376 | |||
377 | /* | ||
378 | * Restart the engine if we were running and not done. | ||
379 | * Clear any pending async DMA fault that we had been waiting to | ||
380 | * handle on return to user space, since we expect the DMA engine | ||
381 | * to regenerate those faults for us now. Note that we don't | ||
382 | * try to clear the TIF_ASYNC_TLB flag, since it's relatively | ||
383 | * harmless if set, and it covers both DMA and the SN processor. | ||
384 | */ | ||
385 | if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) { | ||
386 | t->dma_async_tlb.fault_num = 0; | ||
387 | __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK); | ||
388 | } | ||
389 | } | ||
390 | |||
391 | #endif | ||
392 | |||
393 | static void save_arch_state(struct thread_struct *t) | ||
394 | { | ||
395 | #if CHIP_HAS_SPLIT_INTR_MASK() | ||
396 | t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) | | ||
397 | ((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32); | ||
398 | #else | ||
399 | t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0); | ||
400 | #endif | ||
401 | t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0); | ||
402 | t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1); | ||
403 | t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0); | ||
404 | t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1); | ||
405 | t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2); | ||
406 | t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3); | ||
407 | t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS); | ||
408 | #if CHIP_HAS_PROC_STATUS_SPR() | ||
409 | t->proc_status = __insn_mfspr(SPR_PROC_STATUS); | ||
410 | #endif | ||
411 | } | ||
412 | |||
413 | static void restore_arch_state(const struct thread_struct *t) | ||
414 | { | ||
415 | #if CHIP_HAS_SPLIT_INTR_MASK() | ||
416 | __insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask); | ||
417 | __insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32); | ||
418 | #else | ||
419 | __insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask); | ||
420 | #endif | ||
421 | __insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]); | ||
422 | __insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]); | ||
423 | __insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]); | ||
424 | __insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]); | ||
425 | __insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]); | ||
426 | __insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]); | ||
427 | __insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0); | ||
428 | #if CHIP_HAS_PROC_STATUS_SPR() | ||
429 | __insn_mtspr(SPR_PROC_STATUS, t->proc_status); | ||
430 | #endif | ||
431 | #if CHIP_HAS_TILE_RTF_HWM() | ||
432 | /* | ||
433 | * Clear this whenever we switch back to a process in case | ||
434 | * the previous process was monkeying with it. Even if enabled | ||
435 | * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a | ||
436 | * performance hint, so isn't worth a full save/restore. | ||
437 | */ | ||
438 | __insn_mtspr(SPR_TILE_RTF_HWM, 0); | ||
439 | #endif | ||
440 | } | ||
441 | |||
443 | void _prepare_arch_switch(struct task_struct *next) | ||
444 | { | ||
445 | #if CHIP_HAS_SN_PROC() | ||
446 | int snctl; | ||
447 | #endif | ||
448 | #if CHIP_HAS_TILE_DMA() | ||
449 | struct tile_dma_state *dma = ¤t->thread.tile_dma_state; | ||
450 | if (dma->enabled) | ||
451 | save_tile_dma_state(dma); | ||
452 | #endif | ||
453 | #if CHIP_HAS_SN_PROC() | ||
454 | /* | ||
455 | * Suspend the static network processor if it was running. | ||
456 | * We do not suspend the fabric itself, just like we don't | ||
457 | * try to suspend the UDN. | ||
458 | */ | ||
459 | snctl = __insn_mfspr(SPR_SNCTL); | ||
460 | current->thread.sn_proc_running = | ||
461 | (snctl & SPR_SNCTL__FRZPROC_MASK) == 0; | ||
462 | if (current->thread.sn_proc_running) | ||
463 | __insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK); | ||
464 | #endif | ||
465 | } | ||
466 | |||
468 | struct task_struct *__sched _switch_to(struct task_struct *prev, | ||
469 | struct task_struct *next) | ||
470 | { | ||
471 | /* DMA state is already saved; save off other arch state. */ | ||
472 | save_arch_state(&prev->thread); | ||
473 | |||
474 | #if CHIP_HAS_TILE_DMA() | ||
475 | /* | ||
476 | * Restore DMA in new task if desired. | ||
477 | * Note that it is only safe to restart here since interrupts | ||
478 | * are disabled, so we can't take any DMATLB miss or access | ||
479 | * interrupts before we have finished switching stacks. | ||
480 | */ | ||
481 | if (next->thread.tile_dma_state.enabled) { | ||
482 | restore_tile_dma_state(&next->thread); | ||
483 | grant_dma_mpls(); | ||
484 | } else { | ||
485 | restrict_dma_mpls(); | ||
486 | } | ||
487 | #endif | ||
488 | |||
489 | /* Restore other arch state. */ | ||
490 | restore_arch_state(&next->thread); | ||
491 | |||
492 | #if CHIP_HAS_SN_PROC() | ||
493 | /* | ||
494 | * Restart static network processor in the new process | ||
495 | * if it was running before. | ||
496 | */ | ||
497 | if (next->thread.sn_proc_running) { | ||
498 | int snctl = __insn_mfspr(SPR_SNCTL); | ||
499 | __insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK); | ||
500 | } | ||
501 | #endif | ||
502 | |||
503 | #ifdef CONFIG_HARDWALL | ||
504 | /* Enable or disable access to the network registers appropriately. */ | ||
505 | if (prev->thread.hardwall != NULL) { | ||
506 | if (next->thread.hardwall == NULL) | ||
507 | restrict_network_mpls(); | ||
508 | } else if (next->thread.hardwall != NULL) { | ||
509 | grant_network_mpls(); | ||
510 | } | ||
511 | #endif | ||
512 | |||
513 | /* | ||
514 | * Switch kernel SP, PC, and callee-saved registers. | ||
515 | * In the context of the new task, return the old task pointer | ||
516 | * (i.e. the task that actually called __switch_to). | ||
517 | * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp. | ||
518 | */ | ||
519 | return __switch_to(prev, next, next_current_ksp0(next)); | ||
520 | } | ||
521 | |||
522 | long _sys_fork(struct pt_regs *regs) | ||
523 | { | ||
524 | return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); | ||
525 | } | ||
526 | |||
527 | long _sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
528 | void __user *parent_tidptr, void __user *child_tidptr, | ||
529 | struct pt_regs *regs) | ||
530 | { | ||
531 | if (!newsp) | ||
532 | newsp = regs->sp; | ||
533 | return do_fork(clone_flags, newsp, regs, 0, | ||
534 | parent_tidptr, child_tidptr); | ||
535 | } | ||
536 | |||
537 | long _sys_vfork(struct pt_regs *regs) | ||
538 | { | ||
539 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, | ||
540 | regs, 0, NULL, NULL); | ||
541 | } | ||
542 | |||
543 | /* | ||
544 | * sys_execve() executes a new program. | ||
545 | */ | ||
546 | long _sys_execve(char __user *path, char __user *__user *argv, | ||
547 | char __user *__user *envp, struct pt_regs *regs) | ||
548 | { | ||
549 | long error; | ||
550 | char *filename; | ||
551 | |||
552 | filename = getname(path); | ||
553 | error = PTR_ERR(filename); | ||
554 | if (IS_ERR(filename)) | ||
555 | goto out; | ||
556 | error = do_execve(filename, argv, envp, regs); | ||
557 | putname(filename); | ||
558 | out: | ||
559 | return error; | ||
560 | } | ||
561 | |||
562 | #ifdef CONFIG_COMPAT | ||
563 | long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, | ||
564 | compat_uptr_t __user *envp, struct pt_regs *regs) | ||
565 | { | ||
566 | long error; | ||
567 | char *filename; | ||
568 | |||
569 | filename = getname(path); | ||
570 | error = PTR_ERR(filename); | ||
571 | if (IS_ERR(filename)) | ||
572 | goto out; | ||
573 | error = compat_do_execve(filename, argv, envp, regs); | ||
574 | putname(filename); | ||
575 | out: | ||
576 | return error; | ||
577 | } | ||
578 | #endif | ||
579 | |||
580 | unsigned long get_wchan(struct task_struct *p) | ||
581 | { | ||
582 | struct KBacktraceIterator kbt; | ||
583 | |||
584 | if (!p || p == current || p->state == TASK_RUNNING) | ||
585 | return 0; | ||
586 | |||
587 | for (KBacktraceIterator_init(&kbt, p, NULL); | ||
588 | !KBacktraceIterator_end(&kbt); | ||
589 | KBacktraceIterator_next(&kbt)) { | ||
590 | if (!in_sched_functions(kbt.it.pc)) | ||
591 | return kbt.it.pc; | ||
592 | } | ||
593 | |||
594 | return 0; | ||
595 | } | ||
596 | |||
597 | /* | ||
598 | * We pass in lr as zero (cleared in kernel_thread), and the caller's | ||
599 | * part of the backtrace ABI on the stack is also zeroed (in copy_thread), | ||
600 | * so that backtraces will stop with this function. | ||
601 | * Note that we don't use r0, since copy_thread() clears it. | ||
602 | */ | ||
603 | static void start_kernel_thread(int dummy, int (*fn)(int), int arg) | ||
604 | { | ||
605 | do_exit(fn(arg)); | ||
606 | } | ||
607 | |||
608 | /* | ||
609 | * Create a kernel thread | ||
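 * (e.g. kernel_thread(fn, NULL, CLONE_FS | CLONE_FILES | SIGCHLD)) | ||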
610 | */ | ||
611 | int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) | ||
612 | { | ||
613 | struct pt_regs regs; | ||
614 | |||
615 | memset(®s, 0, sizeof(regs)); | ||
616 | regs.ex1 = PL_ICS_EX1(KERNEL_PL, 0); /* run at kernel PL, no ICS */ | ||
617 | regs.pc = (long) start_kernel_thread; | ||
618 | regs.flags = PT_FLAGS_CALLER_SAVES; /* need to restore r1 and r2 */ | ||
619 | regs.regs[1] = (long) fn; /* function pointer */ | ||
620 | regs.regs[2] = (long) arg; /* parameter register */ | ||
621 | |||
622 | /* OK, create the new process. */ | ||
623 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, | ||
624 | 0, NULL, NULL); | ||
625 | } | ||
626 | EXPORT_SYMBOL(kernel_thread); | ||
627 | |||
628 | /* Flush thread state. */ | ||
629 | void flush_thread(void) | ||
630 | { | ||
631 | /* Nothing */ | ||
632 | } | ||
633 | |||
634 | /* | ||
635 | * Free current thread data structures etc.. | ||
636 | */ | ||
637 | void exit_thread(void) | ||
638 | { | ||
639 | /* Nothing */ | ||
640 | } | ||
641 | |||
642 | void show_regs(struct pt_regs *regs) | ||
643 | { | ||
644 | struct task_struct *tsk = validate_current(); | ||
645 | int i; | ||
646 | |||
647 | pr_err("\n"); | ||
648 | pr_err(" Pid: %d, comm: %20s, CPU: %d\n", | ||
649 | tsk->pid, tsk->comm, smp_processor_id()); | ||
650 | #ifdef __tilegx__ | ||
651 | for (i = 0; i < 51; i += 3) | ||
652 | pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n", | ||
653 | i, regs->regs[i], i+1, regs->regs[i+1], | ||
654 | i+2, regs->regs[i+2]); | ||
655 | pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n", | ||
656 | regs->regs[51], regs->regs[52], regs->tp); | ||
657 | pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); | ||
658 | #else | ||
659 | for (i = 0; i < 52; i += 3) | ||
660 | pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT | ||
661 | " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", | ||
662 | i, regs->regs[i], i+1, regs->regs[i+1], | ||
663 | i+2, regs->regs[i+2], i+3, regs->regs[i+3]); | ||
664 | pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n", | ||
665 | regs->regs[52], regs->tp, regs->sp, regs->lr); | ||
666 | #endif | ||
667 | pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n", | ||
668 | regs->pc, regs->ex1, regs->faultnum); | ||
669 | |||
670 | dump_stack_regs(regs); | ||
671 | } | ||
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c new file mode 100644 index 000000000000..7161bd03d2fd --- /dev/null +++ b/arch/tile/kernel/ptrace.c | |||
@@ -0,0 +1,205 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Copied from i386: Ross Biro 1/23/92 | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <linux/kprobes.h> | ||
20 | #include <linux/compat.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <asm/traps.h> | ||
23 | |||
24 | void user_enable_single_step(struct task_struct *child) | ||
25 | { | ||
26 | set_tsk_thread_flag(child, TIF_SINGLESTEP); | ||
27 | } | ||
28 | |||
29 | void user_disable_single_step(struct task_struct *child) | ||
30 | { | ||
31 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); | ||
32 | } | ||
33 | |||
34 | /* | ||
35 | * This routine stores a word into the process's saved pt_regs area | ||
 * on its privileged stack; "addr" is a byte offset into struct pt_regs. | ||
36 | */ | ||
37 | static void putreg(struct task_struct *task, | ||
38 | unsigned long addr, unsigned long value) | ||
39 | { | ||
40 | unsigned int regno = addr / sizeof(unsigned long); | ||
41 | struct pt_regs *childregs = task_pt_regs(task); | ||
42 | childregs->regs[regno] = value; | ||
43 | childregs->flags |= PT_FLAGS_RESTORE_REGS; | ||
44 | } | ||
45 | |||
46 | static unsigned long getreg(struct task_struct *task, unsigned long addr) | ||
47 | { | ||
48 | unsigned int regno = addr / sizeof(unsigned long); | ||
49 | struct pt_regs *childregs = task_pt_regs(task); | ||
50 | return childregs->regs[regno]; | ||
51 | } | ||
52 | |||
53 | /* | ||
54 | * Called by kernel/ptrace.c when detaching. | ||
55 | */ | ||
56 | void ptrace_disable(struct task_struct *child) | ||
57 | { | ||
58 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); | ||
59 | |||
60 | /* | ||
61 | * This flag is currently unused, but will be set by arch_ptrace() | ||
62 | * and used in the syscall assembly when we do support syscall tracing. | ||
63 | */ | ||
64 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
65 | } | ||
66 | |||
67 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) | ||
68 | { | ||
69 | unsigned long __user *datap; | ||
70 | unsigned long tmp; | ||
71 | int i; | ||
72 | long ret = -EIO; | ||
73 | |||
74 | #ifdef CONFIG_COMPAT | ||
75 | if (task_thread_info(current)->status & TS_COMPAT) | ||
76 | data = (u32)data; | ||
77 | if (task_thread_info(child)->status & TS_COMPAT) | ||
78 | addr = (u32)addr; | ||
79 | #endif | ||
80 | datap = (unsigned long __user __force *)data; | ||
81 | |||
82 | switch (request) { | ||
83 | |||
84 | case PTRACE_PEEKUSR: /* Read register from pt_regs. */ | ||
85 | if (addr & (sizeof(data)-1)) | ||
86 | break; | ||
87 | if (addr < 0 || addr >= PTREGS_SIZE) | ||
88 | break; | ||
89 | tmp = getreg(child, addr); /* Read register */ | ||
90 | ret = put_user(tmp, datap); | ||
91 | break; | ||
92 | |||
93 | case PTRACE_POKEUSR: /* Write register in pt_regs. */ | ||
94 | if (addr & (sizeof(data)-1)) | ||
95 | break; | ||
96 | if (addr < 0 || addr >= PTREGS_SIZE) | ||
97 | break; | ||
98 | putreg(child, addr, data); /* Write register */ | ||
99 | ret = 0; | ||
100 | break; | ||
101 | |||
102 | case PTRACE_GETREGS: /* Get all registers from the child. */ | ||
103 | if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE)) | ||
104 | break; | ||
105 | for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) { | ||
106 | ret = __put_user(getreg(child, i), datap); | ||
107 | if (ret != 0) | ||
108 | break; | ||
109 | datap++; | ||
110 | } | ||
111 | break; | ||
112 | |||
113 | case PTRACE_SETREGS: /* Set all registers in the child. */ | ||
114 | if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE)) | ||
115 | break; | ||
116 | for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) { | ||
117 | ret = __get_user(tmp, datap); | ||
118 | if (ret != 0) | ||
119 | break; | ||
120 | putreg(child, i, tmp); | ||
121 | datap++; | ||
122 | } | ||
123 | break; | ||
124 | |||
125 | case PTRACE_GETFPREGS: /* Get the child FPU state. */ | ||
126 | case PTRACE_SETFPREGS: /* Set the child FPU state. */ | ||
127 | break; | ||
128 | |||
129 | case PTRACE_SETOPTIONS: | ||
130 | /* Support TILE-specific ptrace options. */ | ||
131 | child->ptrace &= ~PT_TRACE_MASK_TILE; | ||
132 | tmp = data & PTRACE_O_MASK_TILE; | ||
133 | data &= ~PTRACE_O_MASK_TILE; | ||
134 | ret = ptrace_request(child, request, addr, data); | ||
135 | if (tmp & PTRACE_O_TRACEMIGRATE) | ||
136 | child->ptrace |= PT_TRACE_MIGRATE; | ||
137 | break; | ||
138 | |||
139 | default: | ||
140 | #ifdef CONFIG_COMPAT | ||
141 | if (task_thread_info(current)->status & TS_COMPAT) { | ||
142 | ret = compat_ptrace_request(child, request, | ||
143 | addr, data); | ||
144 | break; | ||
145 | } | ||
146 | #endif | ||
147 | ret = ptrace_request(child, request, addr, data); | ||
148 | break; | ||
149 | } | ||
150 | |||
151 | return ret; | ||
152 | } | ||
153 | |||
154 | #ifdef CONFIG_COMPAT | ||
155 | /* Not used; we handle compat issues in arch_ptrace() directly. */ | ||
156 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | ||
157 | compat_ulong_t addr, compat_ulong_t data) | ||
158 | { | ||
159 | BUG(); | ||
160 | } | ||
161 | #endif | ||
162 | |||
163 | void do_syscall_trace(void) | ||
164 | { | ||
165 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | ||
166 | return; | ||
167 | |||
168 | if (!(current->ptrace & PT_PTRACED)) | ||
169 | return; | ||
170 | |||
171 | /* | ||
172 | * The 0x80 provides a way for the tracing parent to distinguish | ||
173 | * between a syscall stop and SIGTRAP delivery | ||
174 | */ | ||
175 | ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); | ||
176 | |||
177 | /* | ||
178 | * This isn't the same as continuing with a signal, but it will do | ||
179 | * for normal use. strace only continues with a signal if the | ||
180 | * stopping signal is not SIGTRAP. -brl | ||
181 | */ | ||
182 | if (current->exit_code) { | ||
183 | send_sig(current->exit_code, current, 1); | ||
184 | current->exit_code = 0; | ||
185 | } | ||
186 | } | ||
187 | |||
188 | void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) | ||
189 | { | ||
190 | struct siginfo info; | ||
191 | |||
192 | memset(&info, 0, sizeof(info)); | ||
193 | info.si_signo = SIGTRAP; | ||
194 | info.si_code = TRAP_BRKPT; | ||
195 | info.si_addr = (void __user *) regs->pc; | ||
196 | |||
197 | /* Send ourselves the synthetic SIGTRAP. */ | ||
198 | force_sig_info(SIGTRAP, &info, tsk); | ||
199 | } | ||
200 | |||
201 | /* Handle synthetic interrupt delivered only by the simulator. */ | ||
202 | void __kprobes do_breakpoint(struct pt_regs *regs, int fault_num) | ||
203 | { | ||
204 | send_sigtrap(current, regs, fault_num); | ||
205 | } | ||
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c new file mode 100644 index 000000000000..acd86d20beba --- /dev/null +++ b/arch/tile/kernel/reboot.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/stddef.h> | ||
16 | #include <linux/reboot.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/pm.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/setup.h> | ||
21 | #include <hv/hypervisor.h> | ||
22 | |||
23 | #ifndef CONFIG_SMP | ||
24 | #define smp_send_stop() | ||
25 | #endif | ||
26 | |||
27 | void machine_halt(void) | ||
28 | { | ||
29 | warn_early_printk(); | ||
30 | raw_local_irq_disable_all(); | ||
31 | smp_send_stop(); | ||
32 | hv_halt(); | ||
33 | } | ||
34 | |||
35 | void machine_power_off(void) | ||
36 | { | ||
37 | warn_early_printk(); | ||
38 | raw_local_irq_disable_all(); | ||
39 | smp_send_stop(); | ||
40 | hv_power_off(); | ||
41 | } | ||
42 | |||
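/* Ask the hypervisor to reboot, re-launching the "vmlinux" client image. */ | ||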
43 | void machine_restart(char *cmd) | ||
44 | { | ||
45 | raw_local_irq_disable_all(); | ||
46 | smp_send_stop(); | ||
47 | hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd); | ||
48 | } | ||
49 | |||
50 | /* No interesting distinction to be made here. */ | ||
51 | void (*pm_power_off)(void) = NULL; | ||
diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S new file mode 100644 index 000000000000..e88d6e122783 --- /dev/null +++ b/arch/tile/kernel/regs_32.S | |||
@@ -0,0 +1,145 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/linkage.h> | ||
16 | #include <asm/system.h> | ||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/asm-offsets.h> | ||
19 | #include <arch/spr_def.h> | ||
20 | #include <asm/processor.h> | ||
21 | |||
22 | /* | ||
23 | * See <asm/system.h>; called with prev and next task_struct pointers. | ||
24 | * "prev" is returned in r0 for _switch_to and also for ret_from_fork. | ||
25 | * | ||
26 | * We want to save pc/sp in "prev", and get the new pc/sp from "next". | ||
27 | * We also need to save all the callee-saved registers on the stack. | ||
28 | * | ||
29 | * Intel enables/disables access to the hardware cycle counter in | ||
30 | * seccomp (secure computing) environments if necessary, based on | ||
31 | * has_secure_computing(). We might want to do this at some point, | ||
32 | * though it would require virtualizing the other SPRs under WORLD_ACCESS. | ||
33 | * | ||
34 | * Since we're saving to the stack, we omit sp from this list. | ||
35 | * And to parallel other architectures, we save lr separately, | ||
36 | * in the thread_struct itself (as the "pc" field). | ||
37 | * | ||
38 | * This code also needs to be aligned with process.c copy_thread() | ||
39 | */ | ||
40 | |||
41 | #if CALLEE_SAVED_REGS_COUNT != 24 | ||
42 | # error Mismatch between <asm/system.h> and kernel/regs_32.S | ||
43 | #endif | ||
44 | #define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4) | ||
45 | |||
46 | #define SAVE_REG(r) { sw r12, r; addi r12, r12, 4 } | ||
47 | #define LOAD_REG(r) { lw r, r12; addi r12, r12, 4 } | ||
48 | #define FOR_EACH_CALLEE_SAVED_REG(f) \ | ||
49 | f(r30); f(r31); \ | ||
50 | f(r32); f(r33); f(r34); f(r35); f(r36); f(r37); f(r38); f(r39); \ | ||
51 | f(r40); f(r41); f(r42); f(r43); f(r44); f(r45); f(r46); f(r47); \ | ||
52 | f(r48); f(r49); f(r50); f(r51); f(r52); | ||
53 | |||
54 | STD_ENTRY_SECTION(__switch_to, .sched.text) | ||
55 | { | ||
56 | move r10, sp | ||
57 | sw sp, lr | ||
58 | addi sp, sp, -FRAME_SIZE | ||
59 | } | ||
60 | { | ||
61 | addi r11, sp, 4 | ||
62 | addi r12, sp, 8 | ||
63 | } | ||
64 | { | ||
65 | sw r11, r10 | ||
66 | addli r4, r1, TASK_STRUCT_THREAD_KSP_OFFSET | ||
67 | } | ||
68 | { | ||
69 | lw r13, r4 /* Load new sp to a temp register early. */ | ||
70 | addli r3, r0, TASK_STRUCT_THREAD_KSP_OFFSET | ||
71 | } | ||
72 | FOR_EACH_CALLEE_SAVED_REG(SAVE_REG) | ||
73 | { | ||
74 | sw r3, sp | ||
75 | addli r3, r0, TASK_STRUCT_THREAD_PC_OFFSET | ||
76 | } | ||
77 | { | ||
78 | sw r3, lr | ||
79 | addli r4, r1, TASK_STRUCT_THREAD_PC_OFFSET | ||
80 | } | ||
81 | { | ||
82 | lw lr, r4 | ||
83 | addi r12, r13, 8 | ||
84 | } | ||
85 | { | ||
86 | /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */ | ||
87 | move sp, r13 | ||
88 | mtspr SYSTEM_SAVE_1_0, r2 | ||
89 | } | ||
90 | FOR_EACH_CALLEE_SAVED_REG(LOAD_REG) | ||
91 | .L__switch_to_pc: | ||
92 | { | ||
93 | addi sp, sp, FRAME_SIZE | ||
94 | jrp lr /* r0 is still valid here, so return it */ | ||
95 | } | ||
96 | STD_ENDPROC(__switch_to) | ||
97 | |||
98 | /* Return a suitable address for the backtracer for suspended threads */ | ||
99 | STD_ENTRY_SECTION(get_switch_to_pc, .sched.text) | ||
100 | lnk r0 | ||
101 | { | ||
102 | addli r0, r0, .L__switch_to_pc - . | ||
103 | jrp lr | ||
104 | } | ||
105 | STD_ENDPROC(get_switch_to_pc) | ||
106 | |||
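/* | ||
 * Fill the pt_regs struct pointed to by r0 with a snapshot of the | ||
 * caller's registers: r0..r52, tp, sp, lr, and pc, with ex1 synthesized | ||
 * from the current ICS bit plus KERNEL_PL, and faultnum/orig_r0 cleared. | ||
 * Returns with r0 still pointing at the start of the struct. | ||
 */ | ||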
107 | STD_ENTRY(get_pt_regs) | ||
108 | .irp reg, r0, r1, r2, r3, r4, r5, r6, r7, \ | ||
109 | r8, r9, r10, r11, r12, r13, r14, r15, \ | ||
110 | r16, r17, r18, r19, r20, r21, r22, r23, \ | ||
111 | r24, r25, r26, r27, r28, r29, r30, r31, \ | ||
112 | r32, r33, r34, r35, r36, r37, r38, r39, \ | ||
113 | r40, r41, r42, r43, r44, r45, r46, r47, \ | ||
114 | r48, r49, r50, r51, r52, tp, sp | ||
115 | { | ||
116 | sw r0, \reg | ||
117 | addi r0, r0, 4 | ||
118 | } | ||
119 | .endr | ||
120 | { | ||
121 | sw r0, lr | ||
122 | addi r0, r0, PTREGS_OFFSET_PC - PTREGS_OFFSET_LR | ||
123 | } | ||
124 | lnk r1 | ||
125 | { | ||
126 | sw r0, r1 | ||
127 | addi r0, r0, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC | ||
128 | } | ||
129 | mfspr r1, INTERRUPT_CRITICAL_SECTION | ||
130 | shli r1, r1, SPR_EX_CONTEXT_1_1__ICS_SHIFT | ||
131 | ori r1, r1, KERNEL_PL | ||
132 | { | ||
133 | sw r0, r1 | ||
134 | addi r0, r0, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1 | ||
135 | } | ||
136 | { | ||
137 | sw r0, zero /* clear faultnum */ | ||
138 | addi r0, r0, PTREGS_OFFSET_ORIG_R0 - PTREGS_OFFSET_FAULTNUM | ||
139 | } | ||
140 | { | ||
141 | sw r0, zero /* clear orig_r0 */ | ||
142 | addli r0, r0, -PTREGS_OFFSET_ORIG_R0 /* restore r0 to base */ | ||
143 | } | ||
144 | jrp lr | ||
145 | STD_ENDPROC(get_pt_regs) | ||
diff --git a/arch/tile/kernel/relocate_kernel.S b/arch/tile/kernel/relocate_kernel.S new file mode 100644 index 000000000000..010b418515f8 --- /dev/null +++ b/arch/tile/kernel/relocate_kernel.S | |||
@@ -0,0 +1,280 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Copy the new kernel into place and then call hv_reexec. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/linkage.h> | ||
19 | #include <arch/chip.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <hv/hypervisor.h> | ||
22 | |||
23 | #define ___hvb (MEM_SV_INTRPT + HV_GLUE_START_CPA) | ||
24 | |||
25 | #define ___hv_dispatch(f) (___hvb + (HV_DISPATCH_ENTRY_SIZE * f)) | ||
26 | |||
27 | #define ___hv_console_putc ___hv_dispatch(HV_DISPATCH_CONSOLE_PUTC) | ||
28 | #define ___hv_halt ___hv_dispatch(HV_DISPATCH_HALT) | ||
29 | #define ___hv_reexec ___hv_dispatch(HV_DISPATCH_REEXEC) | ||
30 | #define ___hv_flush_remote ___hv_dispatch(HV_DISPATCH_FLUSH_REMOTE) | ||
31 | |||
32 | #undef RELOCATE_NEW_KERNEL_VERBOSE | ||
33 | |||
34 | STD_ENTRY(relocate_new_kernel) | ||
35 | |||
36 | move r30, r0 /* page list */ | ||
37 | move r31, r1 /* address of page we are on */ | ||
38 | move r32, r2 /* start address of new kernel */ | ||
39 | |||
40 | shri r1, r1, PAGE_SHIFT | ||
41 | addi r1, r1, 1 | ||
42 | shli sp, r1, PAGE_SHIFT | ||
43 | addi sp, sp, -8 | ||
44 | /* we now have a stack (whether we need one or not) */ | ||
45 | |||
46 | moveli r40, lo16(___hv_console_putc) | ||
47 | auli r40, r40, ha16(___hv_console_putc) | ||
48 | |||
49 | #ifdef RELOCATE_NEW_KERNEL_VERBOSE | ||
50 | moveli r0, 'r' | ||
51 | jalr r40 | ||
52 | |||
53 | moveli r0, '_' | ||
54 | jalr r40 | ||
55 | |||
56 | moveli r0, 'n' | ||
57 | jalr r40 | ||
58 | |||
59 | moveli r0, '_' | ||
60 | jalr r40 | ||
61 | |||
62 | moveli r0, 'k' | ||
63 | jalr r40 | ||
64 | |||
65 | moveli r0, '\n' | ||
66 | jalr r40 | ||
67 | #endif | ||
68 | |||
69 | /* | ||
70 | * Throughout this code r30 is pointer to the element of page | ||
71 | * list we are working on. | ||
72 | * | ||
73 | * Normally we get to the next element of the page list by | ||
74 | * incrementing r30 by four. The exception is if the element | ||
75 | * on the page list is an IND_INDIRECTION in which case we use | ||
76 | * the element with the low bits masked off as the new value | ||
77 | * of r30. | ||
78 | * | ||
79 | * To get this started, we need the value passed to us (which | ||
80 | * will always be an IND_INDIRECTION) in memory somewhere with | ||
81 | * r30 pointing at it. To do that, we push the value passed | ||
82 | * to us on the stack and make r30 point to it. | ||
83 | */ | ||
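/* | ||
 * Page-list entry types (the standard kexec encoding, in the low | ||
 * four bits of each word): | ||
 *   0x1 IND_DESTINATION: set the destination pointer (r33) | ||
 *   0x2 IND_INDIRECTION: continue with a new list page (r30) | ||
 *   0x4 IND_DONE: list complete; reexec into the new kernel | ||
 *   0x8 IND_SOURCE: copy one source page to the destination | ||
 */ | ||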
84 | |||
85 | sw sp, r30 | ||
86 | move r30, sp | ||
87 | addi sp, sp, -8 | ||
88 | |||
89 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
90 | /* | ||
91 | * On TILEPro, we need to flush all tiles' caches, since we may | ||
92 | * have been doing hash-for-home caching there. Note that we | ||
93 | * must do this _after_ we're completely done modifying any memory | ||
94 | * other than our output buffer (which we know is locally cached). | ||
95 | * We want the caches to be fully clean when we do the reexec, | ||
96 | * because the hypervisor is going to do this flush again at that | ||
97 | * point, and we don't want that second flush to overwrite any memory. | ||
98 | */ | ||
99 | { | ||
100 | move r0, zero /* cache_pa */ | ||
101 | move r1, zero | ||
102 | } | ||
103 | { | ||
104 | auli r2, zero, ha16(HV_FLUSH_EVICT_L2) /* cache_control */ | ||
105 | movei r3, -1 /* cache_cpumask; -1 means all client tiles */ | ||
106 | } | ||
107 | { | ||
108 | move r4, zero /* tlb_va */ | ||
109 | move r5, zero /* tlb_length */ | ||
110 | } | ||
111 | { | ||
112 | move r6, zero /* tlb_pgsize */ | ||
113 | move r7, zero /* tlb_cpumask */ | ||
114 | } | ||
115 | { | ||
116 | move r8, zero /* asids */ | ||
117 | moveli r20, lo16(___hv_flush_remote) | ||
118 | } | ||
119 | { | ||
120 | move r9, zero /* asidcount */ | ||
121 | auli r20, r20, ha16(___hv_flush_remote) | ||
122 | } | ||
123 | |||
124 | jalr r20 | ||
125 | #endif | ||
126 | |||
127 | /* r33 is destination pointer, default to zero */ | ||
128 | |||
129 | moveli r33, 0 | ||
130 | |||
131 | .Lloop: lw r10, r30 | ||
132 | |||
133 | andi r9, r10, 0xf /* low 4 bits tell us what type it is */ | ||
134 | xor r10, r10, r9 /* r10 is now value with low 4 bits stripped */ | ||
135 | |||
136 | seqi r0, r9, 0x1 /* IND_DESTINATION */ | ||
137 | bzt r0, .Ltry2 | ||
138 | |||
139 | move r33, r10 | ||
140 | |||
141 | #ifdef RELOCATE_NEW_KERNEL_VERBOSE | ||
142 | moveli r0, 'd' | ||
143 | jalr r40 | ||
144 | #endif | ||
145 | |||
146 | addi r30, r30, 4 | ||
147 | j .Lloop | ||
148 | |||
149 | .Ltry2: | ||
150 | seqi r0, r9, 0x2 /* IND_INDIRECTION */ | ||
151 | bzt r0, .Ltry4 | ||
152 | |||
153 | move r30, r10 | ||
154 | |||
155 | #ifdef RELOCATE_NEW_KERNEL_VERBOSE | ||
156 | moveli r0, 'i' | ||
157 | jalr r40 | ||
158 | #endif | ||
159 | |||
160 | j .Lloop | ||
161 | |||
162 | .Ltry4: | ||
163 | seqi r0, r9, 0x4 /* IND_DONE */ | ||
164 | bzt r0, .Ltry8 | ||
165 | |||
166 | mf | ||
167 | |||
168 | #ifdef RELOCATE_NEW_KERNEL_VERBOSE | ||
169 | moveli r0, 'D' | ||
170 | jalr r40 | ||
171 | moveli r0, '\n' | ||
172 | jalr r40 | ||
173 | #endif | ||
174 | |||
175 | move r0, r32 | ||
176 | moveli r1, 0 /* arg to hv_reexec is 64 bits */ | ||
177 | |||
178 | moveli r41, lo16(___hv_reexec) | ||
179 | auli r41, r41, ha16(___hv_reexec) | ||
180 | |||
181 | jalr r41 | ||
182 | |||
183 | /* we should not get here */ | ||
184 | |||
185 | moveli r0, '?' | ||
186 | jalr r40 | ||
187 | moveli r0, '\n' | ||
188 | jalr r40 | ||
189 | |||
190 | j .Lhalt | ||
191 | |||
192 | .Ltry8: seqi r0, r9, 0x8 /* IND_SOURCE */ | ||
193 | bz r0, .Lerr /* unknown type */ | ||
194 | |||
195 | /* copy page at r10 to page at r33 */ | ||
196 | |||
197 | move r11, r33 | ||
198 | |||
199 | moveli r0, lo16(PAGE_SIZE) | ||
200 | auli r0, r0, ha16(PAGE_SIZE) | ||
201 | add r33, r33, r0 | ||
202 | |||
203 | /* copy word at r10 to word at r11 until r11 equals r33 */ | ||
204 | |||
205 | /* We know the page size must be a multiple of 16 words, so we can | ||
206 | * unroll the word copy 16 times safely without any edge case checking. | ||
207 | * | ||
208 | * Issue a flush of the destination every 16 words to avoid | ||
209 | * incoherence when starting the new kernel. (Now this is | ||
210 | * just good paranoia because the hv_reexec call will also | ||
211 | * take care of this.) | ||
212 | */ | ||
213 | |||
214 | 1: | ||
215 | { lw r0, r10; addi r10, r10, 4 } | ||
216 | { sw r11, r0; addi r11, r11, 4 } | ||
217 | { lw r0, r10; addi r10, r10, 4 } | ||
218 | { sw r11, r0; addi r11, r11, 4 } | ||
219 | { lw r0, r10; addi r10, r10, 4 } | ||
220 | { sw r11, r0; addi r11, r11, 4 } | ||
221 | { lw r0, r10; addi r10, r10, 4 } | ||
222 | { sw r11, r0; addi r11, r11, 4 } | ||
223 | { lw r0, r10; addi r10, r10, 4 } | ||
224 | { sw r11, r0; addi r11, r11, 4 } | ||
225 | { lw r0, r10; addi r10, r10, 4 } | ||
226 | { sw r11, r0; addi r11, r11, 4 } | ||
227 | { lw r0, r10; addi r10, r10, 4 } | ||
228 | { sw r11, r0; addi r11, r11, 4 } | ||
229 | { lw r0, r10; addi r10, r10, 4 } | ||
230 | { sw r11, r0; addi r11, r11, 4 } | ||
231 | { lw r0, r10; addi r10, r10, 4 } | ||
232 | { sw r11, r0; addi r11, r11, 4 } | ||
233 | { lw r0, r10; addi r10, r10, 4 } | ||
234 | { sw r11, r0; addi r11, r11, 4 } | ||
235 | { lw r0, r10; addi r10, r10, 4 } | ||
236 | { sw r11, r0; addi r11, r11, 4 } | ||
237 | { lw r0, r10; addi r10, r10, 4 } | ||
238 | { sw r11, r0; addi r11, r11, 4 } | ||
239 | { lw r0, r10; addi r10, r10, 4 } | ||
240 | { sw r11, r0; addi r11, r11, 4 } | ||
241 | { lw r0, r10; addi r10, r10, 4 } | ||
242 | { sw r11, r0; addi r11, r11, 4 } | ||
243 | { lw r0, r10; addi r10, r10, 4 } | ||
244 | { sw r11, r0; addi r11, r11, 4 } | ||
245 | { lw r0, r10; addi r10, r10, 4 } | ||
246 | { sw r11, r0 } | ||
247 | { flush r11 ; addi r11, r11, 4 } | ||
248 | |||
249 | seq r0, r33, r11 | ||
250 | bzt r0, 1b | ||
251 | |||
252 | #ifdef RELOCATE_NEW_KERNEL_VERBOSE | ||
253 | moveli r0, 's' | ||
254 | jalr r40 | ||
255 | #endif | ||
256 | |||
257 | addi r30, r30, 4 | ||
258 | j .Lloop | ||
259 | |||
260 | |||
261 | .Lerr: moveli r0, 'e' | ||
262 | jalr r40 | ||
263 | moveli r0, 'r' | ||
264 | jalr r40 | ||
265 | moveli r0, 'r' | ||
266 | jalr r40 | ||
267 | moveli r0, '\n' | ||
268 | jalr r40 | ||
269 | .Lhalt: | ||
270 | moveli r41, lo16(___hv_halt) | ||
271 | auli r41, r41, ha16(___hv_halt) | ||
272 | |||
273 | jalr r41 | ||
274 | STD_ENDPROC(relocate_new_kernel) | ||
275 | |||
276 | .section .rodata,"a" | ||
277 | |||
278 | .globl relocate_new_kernel_size | ||
279 | relocate_new_kernel_size: | ||
280 | .long .Lend_relocate_new_kernel - relocate_new_kernel | ||
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c new file mode 100644 index 000000000000..4dd21c1e6d5e --- /dev/null +++ b/arch/tile/kernel/setup.c | |||
@@ -0,0 +1,1511 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/mmzone.h> | ||
18 | #include <linux/bootmem.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/node.h> | ||
21 | #include <linux/cpu.h> | ||
22 | #include <linux/ioport.h> | ||
23 | #include <linux/irq.h> | ||
24 | #include <linux/kexec.h> | ||
25 | #include <linux/pci.h> | ||
26 | #include <linux/initrd.h> | ||
27 | #include <linux/io.h> | ||
28 | #include <linux/highmem.h> | ||
29 | #include <linux/smp.h> | ||
30 | #include <linux/timex.h> | ||
31 | #include <asm/setup.h> | ||
32 | #include <asm/sections.h> | ||
33 | #include <asm/cacheflush.h> | ||
36 | #include <asm/pgalloc.h> | ||
37 | #include <asm/mmu_context.h> | ||
38 | #include <hv/hypervisor.h> | ||
39 | #include <arch/interrupts.h> | ||
40 | |||
41 | /* <linux/smp.h> doesn't provide this definition. */ | ||
42 | #ifndef CONFIG_SMP | ||
43 | #define setup_max_cpus 1 | ||
44 | #endif | ||
45 | |||
46 | static inline int ABS(int x) { return x >= 0 ? x : -x; } | ||
47 | |||
48 | /* Chip information */ | ||
49 | char chip_model[64] __write_once; | ||
50 | |||
51 | struct pglist_data node_data[MAX_NUMNODES] __read_mostly; | ||
52 | EXPORT_SYMBOL(node_data); | ||
53 | |||
54 | /* We only create bootmem data on node 0. */ | ||
55 | static bootmem_data_t __initdata node0_bdata; | ||
56 | |||
57 | /* Information on the NUMA nodes that we compute early */ | ||
58 | unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES]; | ||
59 | unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES]; | ||
60 | unsigned long __initdata node_memmap_pfn[MAX_NUMNODES]; | ||
61 | unsigned long __initdata node_percpu_pfn[MAX_NUMNODES]; | ||
62 | unsigned long __initdata node_free_pfn[MAX_NUMNODES]; | ||
63 | |||
64 | #ifdef CONFIG_HIGHMEM | ||
65 | /* Page frame index of end of lowmem on each controller. */ | ||
66 | unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES]; | ||
67 | |||
68 | /* Number of pages that can be mapped into lowmem. */ | ||
69 | static unsigned long __initdata mappable_physpages; | ||
70 | #endif | ||
71 | |||
72 | /* Data on which physical memory controller corresponds to which NUMA node */ | ||
73 | int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 }; | ||
74 | |||
75 | #ifdef CONFIG_HIGHMEM | ||
76 | /* Map information from VAs to PAs */ | ||
77 | unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)] | ||
78 | __write_once __attribute__((aligned(L2_CACHE_BYTES))); | ||
79 | EXPORT_SYMBOL(pbase_map); | ||
80 | |||
81 | /* Map information from PAs to VAs */ | ||
82 | void *vbase_map[NR_PA_HIGHBIT_VALUES] | ||
83 | __write_once __attribute__((aligned(L2_CACHE_BYTES))); | ||
84 | EXPORT_SYMBOL(vbase_map); | ||
85 | #endif | ||
86 | |||
87 | /* Node number as a function of the high PA bits */ | ||
88 | int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once; | ||
89 | EXPORT_SYMBOL(highbits_to_node); | ||
90 | |||
91 | static unsigned int __initdata maxmem_pfn = -1U; | ||
92 | static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = { | ||
93 | [0 ... MAX_NUMNODES-1] = -1U | ||
94 | }; | ||
95 | static nodemask_t __initdata isolnodes; | ||
96 | |||
97 | #ifdef CONFIG_PCI | ||
98 | enum { DEFAULT_PCI_RESERVE_MB = 64 }; | ||
99 | static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB; | ||
100 | unsigned long __initdata pci_reserve_start_pfn = -1U; | ||
101 | unsigned long __initdata pci_reserve_end_pfn = -1U; | ||
102 | #endif | ||
103 | |||
104 | static int __init setup_maxmem(char *str) | ||
105 | { | ||
106 | long maxmem_mb; | ||
107 | if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 || | ||
108 | maxmem_mb == 0) | ||
109 | return -EINVAL; | ||
110 | |||
111 | maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) << | ||
112 | (HPAGE_SHIFT - PAGE_SHIFT); | ||
113 | pr_info("Forcing RAM used to no more than %dMB\n", | ||
114 | maxmem_pfn >> (20 - PAGE_SHIFT)); | ||
115 | return 0; | ||
116 | } | ||
117 | early_param("maxmem", setup_maxmem); | ||
118 | |||
119 | static int __init setup_maxnodemem(char *str) | ||
120 | { | ||
121 | char *endp; | ||
122 | long maxnodemem_mb, node; | ||
123 | |||
124 | node = str ? simple_strtoul(str, &endp, 0) : INT_MAX; | ||
125 | if (node >= MAX_NUMNODES || *endp != ':' || | ||
126 | strict_strtol(endp+1, 0, &maxnodemem_mb) != 0) | ||
127 | return -EINVAL; | ||
128 | |||
129 | maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) << | ||
130 | (HPAGE_SHIFT - PAGE_SHIFT); | ||
131 | pr_info("Forcing RAM used on node %ld to no more than %dMB\n", | ||
132 | node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); | ||
133 | return 0; | ||
134 | } | ||
135 | early_param("maxnodemem", setup_maxnodemem); | ||
136 | |||
137 | static int __init setup_isolnodes(char *str) | ||
138 | { | ||
139 | char buf[MAX_NUMNODES * 5]; | ||
140 | if (str == NULL || nodelist_parse(str, isolnodes) != 0) | ||
141 | return -EINVAL; | ||
142 | |||
143 | nodelist_scnprintf(buf, sizeof(buf), isolnodes); | ||
144 | pr_info("Set isolnodes value to '%s'\n", buf); | ||
145 | return 0; | ||
146 | } | ||
147 | early_param("isolnodes", setup_isolnodes); | ||
148 | |||
149 | #ifdef CONFIG_PCI | ||
150 | static int __init setup_pci_reserve(char* str) | ||
151 | { | ||
152 | unsigned long mb; | ||
153 | |||
154 | if (str == NULL || strict_strtoul(str, 0, &mb) != 0 || | ||
155 | mb > 3 * 1024) | ||
156 | return -EINVAL; | ||
157 | |||
158 | pci_reserve_mb = mb; | ||
159 | pr_info("Reserving %dMB for PCIE root complex mappings\n", | ||
160 | pci_reserve_mb); | ||
161 | return 0; | ||
162 | } | ||
163 | early_param("pci_reserve", setup_pci_reserve); | ||
164 | #endif | ||
165 | |||
166 | #ifndef __tilegx__ | ||
167 | /* | ||
168 | * vmalloc=size forces the vmalloc area to be exactly 'size' bytes. | ||
169 | * This can be used to increase (or decrease) the vmalloc area. | ||
170 | */ | ||
171 | static int __init parse_vmalloc(char *arg) | ||
172 | { | ||
173 | if (!arg) | ||
174 | return -EINVAL; | ||
175 | |||
176 | VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK; | ||
177 | |||
178 | /* See validate_va() for more on this test. */ | ||
179 | if ((long)_VMALLOC_START >= 0) | ||
180 | early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n", | ||
181 | VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL); | ||
182 | |||
183 | return 0; | ||
184 | } | ||
185 | early_param("vmalloc", parse_vmalloc); | ||
186 | #endif | ||
187 | |||
188 | #ifdef CONFIG_HIGHMEM | ||
189 | /* | ||
190 | * Determine for each controller where its lowmem is mapped and how | ||
191 | * much of it is mapped there. On controller zero, the first few | ||
192 | * megabytes are mapped at 0xfd000000 as code, so in principle we | ||
193 | * could start our data mappings higher up, but for now we don't | ||
194 | * bother, to avoid additional confusion. | ||
195 | * | ||
196 | * One question is whether, on systems with more than 768 Mb and | ||
197 | * controllers of different sizes, to map in a proportionate amount of | ||
198 | * each one, or to try to map the same amount from each controller. | ||
199 | * (E.g. if we have three controllers with 256MB, 1GB, and 256MB | ||
200 | * respectively, do we map 256MB from each, or do we map 128 MB, 512 | ||
201 | * MB, and 128 MB respectively?) For now we use a proportionate | ||
202 | * solution like the latter. | ||
203 | * | ||
204 | * The VA/PA mapping demands that we align our decisions at 16 MB | ||
205 | * boundaries so that we can rapidly convert VA to PA. | ||
206 | */ | ||
207 | static void *__init setup_pa_va_mapping(void) | ||
208 | { | ||
209 | unsigned long curr_pages = 0; | ||
210 | unsigned long vaddr = PAGE_OFFSET; | ||
211 | nodemask_t highonlynodes = isolnodes; | ||
212 | int i, j; | ||
213 | |||
214 | memset(pbase_map, -1, sizeof(pbase_map)); | ||
215 | memset(vbase_map, -1, sizeof(vbase_map)); | ||
216 | |||
217 | /* Node zero cannot be isolated for LOWMEM purposes. */ | ||
218 | node_clear(0, highonlynodes); | ||
219 | |||
220 | /* Count up the number of pages on non-highonlynodes controllers. */ | ||
221 | mappable_physpages = 0; | ||
222 | for_each_online_node(i) { | ||
223 | if (!node_isset(i, highonlynodes)) | ||
224 | mappable_physpages += | ||
225 | node_end_pfn[i] - node_start_pfn[i]; | ||
226 | } | ||
227 | |||
228 | for_each_online_node(i) { | ||
229 | unsigned long start = node_start_pfn[i]; | ||
230 | unsigned long end = node_end_pfn[i]; | ||
231 | unsigned long size = end - start; | ||
232 | unsigned long vaddr_end; | ||
233 | |||
234 | if (node_isset(i, highonlynodes)) { | ||
235 | /* Mark this controller as having no lowmem. */ | ||
236 | node_lowmem_end_pfn[i] = start; | ||
237 | continue; | ||
238 | } | ||
239 | |||
240 | curr_pages += size; | ||
241 | if (mappable_physpages > MAXMEM_PFN) { | ||
242 | vaddr_end = PAGE_OFFSET + | ||
243 | (((u64)curr_pages * MAXMEM_PFN / | ||
244 | mappable_physpages) | ||
245 | << PAGE_SHIFT); | ||
246 | } else { | ||
247 | vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT); | ||
248 | } | ||
249 | for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) { | ||
250 | unsigned long this_pfn = | ||
251 | start + (j << HUGETLB_PAGE_ORDER); | ||
252 | pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn; | ||
253 | if (vbase_map[__pfn_to_highbits(this_pfn)] == | ||
254 | (void *)-1) | ||
255 | vbase_map[__pfn_to_highbits(this_pfn)] = | ||
256 | (void *)(vaddr & HPAGE_MASK); | ||
257 | } | ||
258 | node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER); | ||
259 | BUG_ON(node_lowmem_end_pfn[i] > end); | ||
260 | } | ||
261 | |||
262 | /* Return highest address of any mapped memory. */ | ||
263 | return (void *)vaddr; | ||
264 | } | ||
265 | #endif /* CONFIG_HIGHMEM */ | ||
266 | |||
267 | /* | ||
268 | * Register our most important memory mappings with the debug stub. | ||
269 | * | ||
270 | * This is up to 4 mappings for lowmem, one mapping per memory | ||
271 | * controller, plus one for our text segment. | ||
272 | */ | ||
273 | static void __cpuinit store_permanent_mappings(void) | ||
274 | { | ||
275 | int i; | ||
276 | |||
277 | for_each_online_node(i) { | ||
278 | HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT; | ||
279 | #ifdef CONFIG_HIGHMEM | ||
280 | HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i]; | ||
281 | #else | ||
282 | HV_PhysAddr high_mapped_pa = node_end_pfn[i]; | ||
283 | #endif | ||
284 | |||
285 | unsigned long pages = high_mapped_pa - node_start_pfn[i]; | ||
286 | HV_VirtAddr addr = (HV_VirtAddr) __va(pa); | ||
287 | hv_store_mapping(addr, pages << PAGE_SHIFT, pa); | ||
288 | } | ||
289 | |||
290 | hv_store_mapping((HV_VirtAddr)_stext, | ||
291 | (uint32_t)(_einittext - _stext), 0); | ||
292 | } | ||
293 | |||
294 | /* | ||
295 | * Use hv_inquire_physical() to populate node_{start,end}_pfn[] | ||
296 | * and node_online_map, doing suitable sanity-checking. | ||
297 | * Also set min_low_pfn, max_low_pfn, and max_pfn. | ||
298 | */ | ||
299 | static void __init setup_memory(void) | ||
300 | { | ||
301 | int i, j; | ||
302 | int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 }; | ||
303 | #ifdef CONFIG_HIGHMEM | ||
304 | long highmem_pages; | ||
305 | #endif | ||
306 | #ifndef __tilegx__ | ||
307 | int cap; | ||
308 | #endif | ||
309 | #if defined(CONFIG_HIGHMEM) || defined(__tilegx__) | ||
310 | long lowmem_pages; | ||
311 | #endif | ||
312 | |||
313 | /* We are using a char to hold the cpu_2_node[] mapping */ | ||
314 | BUG_ON(MAX_NUMNODES > 127); | ||
315 | |||
316 | /* Discover the ranges of memory available to us */ | ||
317 | for (i = 0; ; ++i) { | ||
318 | unsigned long start, size, end, highbits; | ||
319 | HV_PhysAddrRange range = hv_inquire_physical(i); | ||
320 | if (range.size == 0) | ||
321 | break; | ||
322 | #ifdef CONFIG_FLATMEM | ||
323 | if (i > 0) { | ||
324 | pr_err("Can't use discontiguous PAs: %#llx..%#llx\n", | ||
325 | range.start, range.start + range.size); | ||
326 | continue; | ||
327 | } | ||
328 | #endif | ||
329 | #ifndef __tilegx__ | ||
330 | if ((unsigned long)range.start) { | ||
331 | pr_err("Range not at 4GB multiple: %#llx..%#llx\n", | ||
332 | range.start, range.start + range.size); | ||
333 | continue; | ||
334 | } | ||
335 | #endif | ||
336 | if ((range.start & (HPAGE_SIZE-1)) != 0 || | ||
337 | (range.size & (HPAGE_SIZE-1)) != 0) { | ||
338 | unsigned long long start_pa = range.start; | ||
339 | unsigned long long orig_size = range.size; | ||
340 | range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK; | ||
341 | range.size -= (range.start - start_pa); | ||
342 | range.size &= HPAGE_MASK; | ||
343 | pr_err("Range not hugepage-aligned: %#llx..%#llx:" | ||
344 | " now %#llx..%#llx\n", | ||
345 | start_pa, start_pa + orig_size, | ||
346 | range.start, range.start + range.size); | ||
347 | } | ||
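/*
 * Worked example (hypothetical, with 16 MB huge pages): a range
 * 0x1800000..0x21800000 has its start rounded up to 0x2000000, its
 * size shrunk by the 0x800000 skipped, and the result truncated to
 * whole huge pages, so it becomes 0x2000000..0x21000000.
 */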
348 | highbits = __pa_to_highbits(range.start); | ||
349 | if (highbits >= NR_PA_HIGHBIT_VALUES) { | ||
350 | pr_err("PA high bits too high: %#llx..%#llx\n", | ||
351 | range.start, range.start + range.size); | ||
352 | continue; | ||
353 | } | ||
354 | if (highbits_seen[highbits]) { | ||
355 | pr_err("Range overlaps in high bits: %#llx..%#llx\n", | ||
356 | range.start, range.start + range.size); | ||
357 | continue; | ||
358 | } | ||
359 | highbits_seen[highbits] = 1; | ||
360 | if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) { | ||
361 | int max_size = maxnodemem_pfn[i]; | ||
362 | if (max_size > 0) { | ||
363 | pr_err("Maxnodemem reduced node %d to" | ||
364 | " %d pages\n", i, max_size); | ||
365 | range.size = PFN_PHYS(max_size); | ||
366 | } else { | ||
367 | pr_err("Maxnodemem disabled node %d\n", i); | ||
368 | continue; | ||
369 | } | ||
370 | } | ||
371 | if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) { | ||
372 | int max_size = maxmem_pfn - num_physpages; | ||
373 | if (max_size > 0) { | ||
374 | pr_err("Maxmem reduced node %d to %d pages\n", | ||
375 | i, max_size); | ||
376 | range.size = PFN_PHYS(max_size); | ||
377 | } else { | ||
378 | pr_err("Maxmem disabled node %d\n", i); | ||
379 | continue; | ||
380 | } | ||
381 | } | ||
382 | if (i >= MAX_NUMNODES) { | ||
383 | pr_err("Too many PA nodes (#%d): %#llx..%#llx\n", | ||
384 | i, range.start, range.start + range.size); | ||
385 | continue; | ||
386 | } | ||
387 | |||
388 | start = range.start >> PAGE_SHIFT; | ||
389 | size = range.size >> PAGE_SHIFT; | ||
390 | end = start + size; | ||
391 | |||
392 | #ifndef __tilegx__ | ||
393 | if (((HV_PhysAddr)end << PAGE_SHIFT) != | ||
394 | (range.start + range.size)) { | ||
395 | pr_err("PAs too high to represent: %#llx..%#llx\n", | ||
396 | range.start, range.start + range.size); | ||
397 | continue; | ||
398 | } | ||
399 | #endif | ||
400 | #ifdef CONFIG_PCI | ||
401 | /* | ||
402 | * Blocks that overlap the pci reserved region must | ||
403 | * have enough space to hold the maximum percpu data | ||
404 | * region at the top of the range. If there isn't | ||
405 | * enough space above the reserved region, just | ||
406 | * truncate the node. | ||
407 | */ | ||
408 | if (start <= pci_reserve_start_pfn && | ||
409 | end > pci_reserve_start_pfn) { | ||
410 | unsigned int per_cpu_size = | ||
411 | __per_cpu_end - __per_cpu_start; | ||
412 | unsigned int percpu_pages = | ||
413 | NR_CPUS * PFN_UP(per_cpu_size); | ||
414 | if (end < pci_reserve_end_pfn + percpu_pages) { | ||
415 | end = pci_reserve_start_pfn; | ||
416 | pr_err("PCI mapping region reduced node %d to" | ||
417 | " %ld pages\n", i, end - start); | ||
418 | } | ||
419 | } | ||
420 | #endif | ||
421 | |||
422 | for (j = __pfn_to_highbits(start); | ||
423 | j <= __pfn_to_highbits(end - 1); j++) | ||
424 | highbits_to_node[j] = i; | ||
425 | |||
426 | node_start_pfn[i] = start; | ||
427 | node_end_pfn[i] = end; | ||
428 | node_controller[i] = range.controller; | ||
429 | num_physpages += size; | ||
430 | max_pfn = end; | ||
431 | |||
432 | /* Mark node as online */ | ||
433 | node_set(i, node_online_map); | ||
434 | node_set(i, node_possible_map); | ||
435 | } | ||
436 | |||
437 | #ifndef __tilegx__ | ||
438 | /* | ||
439 | * For 4KB pages, mem_map "struct page" data is 1% of the size | ||
440 | * of the physical memory, so can be quite big (640 MB for | ||
441 | * four 16G zones). These structures must be mapped in | ||
442 | * lowmem, and since we currently cap out at about 768 MB, | ||
443 | * it's impractical to try to use this much address space. | ||
444 | * For now, arbitrarily cap the amount of physical memory | ||
445 | * we're willing to use at 8 million pages (32GB of 4KB pages). | ||
446 | */ | ||
447 | cap = 8 * 1024 * 1024; /* 8 million pages */ | ||
448 | if (num_physpages > cap) { | ||
449 | int num_nodes = num_online_nodes(); | ||
450 | int cap_each = cap / num_nodes; | ||
451 | unsigned long dropped_pages = 0; | ||
452 | for (i = 0; i < num_nodes; ++i) { | ||
453 | int size = node_end_pfn[i] - node_start_pfn[i]; | ||
454 | if (size > cap_each) { | ||
455 | dropped_pages += (size - cap_each); | ||
456 | node_end_pfn[i] = node_start_pfn[i] + cap_each; | ||
457 | } | ||
458 | } | ||
459 | num_physpages -= dropped_pages; | ||
460 | pr_warning("Only using %ldMB memory;" | ||
461 | " ignoring %ldMB.\n", | ||
462 | num_physpages >> (20 - PAGE_SHIFT), | ||
463 | dropped_pages >> (20 - PAGE_SHIFT)); | ||
464 | pr_warning("Consider using a larger page size.\n"); | ||
465 | } | ||
466 | #endif | ||
467 | |||
468 | /* Heap starts just above the last loaded address. */ | ||
469 | min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET); | ||
470 | |||
471 | #ifdef CONFIG_HIGHMEM | ||
472 | /* Find where we map lowmem from each controller. */ | ||
473 | high_memory = setup_pa_va_mapping(); | ||
474 | |||
475 | /* Set max_low_pfn based on what node 0 can directly address. */ | ||
476 | max_low_pfn = node_lowmem_end_pfn[0]; | ||
477 | |||
478 | lowmem_pages = (mappable_physpages > MAXMEM_PFN) ? | ||
479 | MAXMEM_PFN : mappable_physpages; | ||
480 | highmem_pages = (long) (num_physpages - lowmem_pages); | ||
481 | |||
482 | pr_notice("%ldMB HIGHMEM available.\n", | ||
483 | pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); | ||
484 | pr_notice("%ldMB LOWMEM available.\n", | ||
485 | pages_to_mb(lowmem_pages)); | ||
486 | #else | ||
487 | /* Set max_low_pfn based on what node 0 can directly address. */ | ||
488 | max_low_pfn = node_end_pfn[0]; | ||
489 | |||
490 | #ifndef __tilegx__ | ||
491 | if (node_end_pfn[0] > MAXMEM_PFN) { | ||
492 | pr_warning("Only using %ldMB LOWMEM.\n", | ||
493 | MAXMEM>>20); | ||
494 | pr_warning("Use a HIGHMEM enabled kernel.\n"); | ||
495 | max_low_pfn = MAXMEM_PFN; | ||
496 | max_pfn = MAXMEM_PFN; | ||
497 | num_physpages = MAXMEM_PFN; | ||
498 | node_end_pfn[0] = MAXMEM_PFN; | ||
499 | } else { | ||
500 | pr_notice("%ldMB memory available.\n", | ||
501 | pages_to_mb(node_end_pfn[0])); | ||
502 | } | ||
503 | for (i = 1; i < MAX_NUMNODES; ++i) { | ||
504 | node_start_pfn[i] = 0; | ||
505 | node_end_pfn[i] = 0; | ||
506 | } | ||
507 | high_memory = pfn_to_kaddr(node_end_pfn[0]); | ||
508 | #else | ||
509 | lowmem_pages = 0; | ||
510 | for (i = 0; i < MAX_NUMNODES; ++i) { | ||
511 | int pages = node_end_pfn[i] - node_start_pfn[i]; | ||
512 | lowmem_pages += pages; | ||
513 | if (pages) | ||
514 | high_memory = pfn_to_kaddr(node_end_pfn[i]); | ||
515 | } | ||
516 | pr_notice("%ldMB memory available.\n", | ||
517 | pages_to_mb(lowmem_pages)); | ||
518 | #endif | ||
519 | #endif | ||
520 | } | ||
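The 8-million-page cap applied in setup_memory() above trims each node to an equal share; the trimming arithmetic is simple enough to check standalone. A sketch with hypothetical node sizes (4 KB pages assumed):

    #include <stdio.h>

    int main(void)
    {
        long cap = 8 * 1024 * 1024;            /* 8M pages, as above */
        long node_pages[4] = { 4L << 20, 4L << 20, 1L << 20, 1L << 20 };
        long cap_each = cap / 4;               /* 2M pages per node */
        long dropped = 0;
        int i;

        for (i = 0; i < 4; i++) {
            if (node_pages[i] > cap_each) {
                dropped += node_pages[i] - cap_each;
                node_pages[i] = cap_each;
            }
        }
        /* nodes 0 and 1 each lose 2M pages: 16384 MB ignored in total */
        printf("ignoring %ldMB\n", dropped >> (20 - 12));
        return 0;
    }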
521 | |||
522 | static void __init setup_bootmem_allocator(void) | ||
523 | { | ||
524 | unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn; | ||
525 | |||
526 | /* Provide a node 0 bdata. */ | ||
527 | NODE_DATA(0)->bdata = &node0_bdata; | ||
528 | |||
529 | #ifdef CONFIG_PCI | ||
530 | /* Don't let boot memory alias the PCI region. */ | ||
531 | last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn); | ||
532 | #else | ||
533 | last_alloc_pfn = max_low_pfn; | ||
534 | #endif | ||
535 | |||
536 | /* | ||
537 | * Initialize the boot-time allocator (with low memory only): | ||
538 | * The first argument says where to put the bitmap, and the | ||
539 | * second says where the end of allocatable memory is. | ||
540 | */ | ||
541 | bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn); | ||
542 | |||
543 | /* | ||
544 | * Let the bootmem allocator use all the space we've given it | ||
545 | * except for its own bitmap. | ||
546 | */ | ||
547 | first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size); | ||
548 | if (first_alloc_pfn >= last_alloc_pfn) | ||
549 | early_panic("Not enough memory on controller 0 for bootmem\n"); | ||
550 | |||
551 | free_bootmem(PFN_PHYS(first_alloc_pfn), | ||
552 | PFN_PHYS(last_alloc_pfn - first_alloc_pfn)); | ||
553 | |||
554 | #ifdef CONFIG_KEXEC | ||
555 | if (crashk_res.start != crashk_res.end) | ||
556 | reserve_bootmem(crashk_res.start, | ||
557 | crashk_res.end - crashk_res.start + 1, 0); | ||
558 | #endif | ||
559 | |||
560 | } | ||
561 | |||
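/*
 * Note: alloc_remap() is the arch hook that alloc_node_mem_map() tries
 * before falling back to bootmem; we satisfy it from the per-controller
 * pages that zone_sizes_init() below sets aside in node_memmap_pfn[].
 */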
562 | void *__init alloc_remap(int nid, unsigned long size) | ||
563 | { | ||
564 | int pages = node_end_pfn[nid] - node_start_pfn[nid]; | ||
565 | void *map = pfn_to_kaddr(node_memmap_pfn[nid]); | ||
566 | BUG_ON(size != pages * sizeof(struct page)); | ||
567 | memset(map, 0, size); | ||
568 | return map; | ||
569 | } | ||
570 | |||
571 | static int __init percpu_size(void) | ||
572 | { | ||
573 | int size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE); | ||
574 | #ifdef CONFIG_MODULES | ||
575 | if (size < PERCPU_ENOUGH_ROOM) | ||
576 | size = PERCPU_ENOUGH_ROOM; | ||
577 | #endif | ||
578 | /* In several places we assume the per-cpu data fits on a huge page. */ | ||
579 | BUG_ON(kdata_huge && size > HPAGE_SIZE); | ||
580 | return size; | ||
581 | } | ||
582 | |||
583 | static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal) | ||
584 | { | ||
585 | void *kva = __alloc_bootmem(size, PAGE_SIZE, goal); | ||
586 | unsigned long pfn = kaddr_to_pfn(kva); | ||
587 | BUG_ON(goal && PFN_PHYS(pfn) != goal); | ||
588 | return pfn; | ||
589 | } | ||
590 | |||
591 | static void __init zone_sizes_init(void) | ||
592 | { | ||
593 | unsigned long zones_size[MAX_NR_ZONES] = { 0 }; | ||
594 | unsigned long node_percpu[MAX_NUMNODES] = { 0 }; | ||
595 | int size = percpu_size(); | ||
596 | int num_cpus = smp_height * smp_width; | ||
597 | int i; | ||
598 | |||
599 | for (i = 0; i < num_cpus; ++i) | ||
600 | node_percpu[cpu_to_node(i)] += size; | ||
601 | |||
602 | for_each_online_node(i) { | ||
603 | unsigned long start = node_start_pfn[i]; | ||
604 | unsigned long end = node_end_pfn[i]; | ||
605 | #ifdef CONFIG_HIGHMEM | ||
606 | unsigned long lowmem_end = node_lowmem_end_pfn[i]; | ||
607 | #else | ||
608 | unsigned long lowmem_end = end; | ||
609 | #endif | ||
610 | int memmap_size = (end - start) * sizeof(struct page); | ||
611 | node_free_pfn[i] = start; | ||
612 | |||
613 | /* | ||
614 | * Set aside pages for per-cpu data and the mem_map array. | ||
615 | * | ||
616 | * Since the per-cpu data requires special homecaching, | ||
617 | * if we are in kdata_huge mode, we put it at the end of | ||
618 | * the lowmem region. If we're not in kdata_huge mode, | ||
619 | * we take the per-cpu pages from the bottom of the | ||
620 | * controller, since that avoids fragmenting a huge page | ||
621 | * that users might want. We always take the memmap | ||
622 | * from the bottom of the controller, since with | ||
623 | * kdata_huge that lets it be under a huge TLB entry. | ||
624 | * | ||
625 | * If the user has requested isolnodes for a controller, | ||
626 | * though, there'll be no lowmem, so we just alloc_bootmem | ||
627 | * the memmap. There will be no percpu memory either. | ||
628 | */ | ||
629 | if (__pfn_to_highbits(start) == 0) { | ||
630 | /* In low PAs, allocate via bootmem. */ | ||
631 | unsigned long goal = 0; | ||
632 | node_memmap_pfn[i] = | ||
633 | alloc_bootmem_pfn(memmap_size, goal); | ||
634 | if (kdata_huge) | ||
635 | goal = PFN_PHYS(lowmem_end) - node_percpu[i]; | ||
636 | if (node_percpu[i]) | ||
637 | node_percpu_pfn[i] = | ||
638 | alloc_bootmem_pfn(node_percpu[i], goal); | ||
639 | } else if (node_isset(i, isolnodes)) { | ||
640 | node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0); | ||
641 | BUG_ON(node_percpu[i] != 0); | ||
642 | } else { | ||
643 | /* In high PAs, just reserve some pages. */ | ||
644 | node_memmap_pfn[i] = node_free_pfn[i]; | ||
645 | node_free_pfn[i] += PFN_UP(memmap_size); | ||
646 | if (!kdata_huge) { | ||
647 | node_percpu_pfn[i] = node_free_pfn[i]; | ||
648 | node_free_pfn[i] += PFN_UP(node_percpu[i]); | ||
649 | } else { | ||
650 | node_percpu_pfn[i] = | ||
651 | lowmem_end - PFN_UP(node_percpu[i]); | ||
652 | } | ||
653 | } | ||
654 | |||
655 | #ifdef CONFIG_HIGHMEM | ||
656 | if (start > lowmem_end) { | ||
657 | zones_size[ZONE_NORMAL] = 0; | ||
658 | zones_size[ZONE_HIGHMEM] = end - start; | ||
659 | } else { | ||
660 | zones_size[ZONE_NORMAL] = lowmem_end - start; | ||
661 | zones_size[ZONE_HIGHMEM] = end - lowmem_end; | ||
662 | } | ||
663 | #else | ||
664 | zones_size[ZONE_NORMAL] = end - start; | ||
665 | #endif | ||
666 | |||
667 | /* | ||
668 | * Everyone shares node 0's bootmem allocator, but | ||
669 | * we use alloc_remap(), above, to put the actual | ||
670 | * struct page array on the individual controllers, | ||
671 | * which is most of the data that we actually care about. | ||
672 | * We can't place bootmem allocators on the other | ||
673 | * controllers since the bootmem allocator can only | ||
674 | * operate on 32-bit physical addresses. | ||
675 | */ | ||
676 | NODE_DATA(i)->bdata = NODE_DATA(0)->bdata; | ||
677 | |||
678 | free_area_init_node(i, zones_size, start, NULL); | ||
679 | printk(KERN_DEBUG " DMA zone: %ld per-cpu pages\n", | ||
680 | PFN_UP(node_percpu[i])); | ||
681 | |||
682 | /* Track the type of memory on each node */ | ||
683 | if (zones_size[ZONE_NORMAL]) | ||
684 | node_set_state(i, N_NORMAL_MEMORY); | ||
685 | #ifdef CONFIG_HIGHMEM | ||
686 | if (end != start) | ||
687 | node_set_state(i, N_HIGH_MEMORY); | ||
688 | #endif | ||
689 | |||
690 | node_set_online(i); | ||
691 | } | ||
692 | } | ||
693 | |||
694 | #ifdef CONFIG_NUMA | ||
695 | |||
696 | /* which logical CPUs are on which nodes */ | ||
697 | struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once; | ||
698 | EXPORT_SYMBOL(node_2_cpu_mask); | ||
699 | |||
700 | /* which node each logical CPU is on */ | ||
701 | char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES))); | ||
702 | EXPORT_SYMBOL(cpu_2_node); | ||
703 | |||
704 | /* Return cpu_to_node() except for cpus not yet assigned, which return -1 */ | ||
705 | static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus) | ||
706 | { | ||
707 | if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus)) | ||
708 | return -1; | ||
709 | else | ||
710 | return cpu_to_node(cpu); | ||
711 | } | ||
712 | |||
713 | /* Return number of immediately-adjacent tiles sharing the same NUMA node. */ | ||
714 | static int __init node_neighbors(int node, int cpu, | ||
715 | struct cpumask *unbound_cpus) | ||
716 | { | ||
717 | int neighbors = 0; | ||
718 | int w = smp_width; | ||
719 | int h = smp_height; | ||
720 | int x = cpu % w; | ||
721 | int y = cpu / w; | ||
722 | if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node) | ||
723 | ++neighbors; | ||
724 | if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node) | ||
725 | ++neighbors; | ||
726 | if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node) | ||
727 | ++neighbors; | ||
728 | if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node) | ||
729 | ++neighbors; | ||
730 | return neighbors; | ||
731 | } | ||
732 | |||
733 | static void __init setup_numa_mapping(void) | ||
734 | { | ||
735 | int distance[MAX_NUMNODES][NR_CPUS]; | ||
736 | HV_Coord coord; | ||
737 | int cpu, node, cpus, i, x, y; | ||
738 | int num_nodes = num_online_nodes(); | ||
739 | struct cpumask unbound_cpus; | ||
740 | nodemask_t default_nodes; | ||
741 | |||
742 | cpumask_clear(&unbound_cpus); | ||
743 | |||
744 | /* Get set of nodes we will use for defaults */ | ||
745 | nodes_andnot(default_nodes, node_online_map, isolnodes); | ||
746 | if (nodes_empty(default_nodes)) { | ||
747 | BUG_ON(!node_isset(0, node_online_map)); | ||
748 | pr_err("Forcing NUMA node zero available as a default node\n"); | ||
749 | node_set(0, default_nodes); | ||
750 | } | ||
751 | |||
752 | /* Populate the distance[] array */ | ||
753 | memset(distance, -1, sizeof(distance)); | ||
754 | cpu = 0; | ||
755 | for (coord.y = 0; coord.y < smp_height; ++coord.y) { | ||
756 | for (coord.x = 0; coord.x < smp_width; | ||
757 | ++coord.x, ++cpu) { | ||
758 | BUG_ON(cpu >= nr_cpu_ids); | ||
759 | if (!cpu_possible(cpu)) { | ||
760 | cpu_2_node[cpu] = -1; | ||
761 | continue; | ||
762 | } | ||
763 | for_each_node_mask(node, default_nodes) { | ||
764 | HV_MemoryControllerInfo info = | ||
765 | hv_inquire_memory_controller( | ||
766 | coord, node_controller[node]); | ||
767 | distance[node][cpu] = | ||
768 | ABS(info.coord.x) + ABS(info.coord.y); | ||
769 | } | ||
770 | cpumask_set_cpu(cpu, &unbound_cpus); | ||
771 | } | ||
772 | } | ||
773 | cpus = cpu; | ||
774 | |||
775 | /* | ||
776 | * Round-robin through the NUMA nodes until all the cpus are | ||
777 | * assigned. We could be more clever here (e.g. create four | ||
778 | * sorted linked lists on the same set of cpu nodes, and pull | ||
779 | * off them in round-robin sequence, removing from all four | ||
780 | * lists each time) but given the relatively small numbers | ||
781 | * involved, O(n^2) seems OK for a one-time cost. | ||
782 | */ | ||
783 | node = first_node(default_nodes); | ||
784 | while (!cpumask_empty(&unbound_cpus)) { | ||
785 | int best_cpu = -1; | ||
786 | int best_distance = INT_MAX; | ||
787 | for (cpu = 0; cpu < cpus; ++cpu) { | ||
788 | if (cpumask_test_cpu(cpu, &unbound_cpus)) { | ||
789 | /* | ||
790 | * Compute metric, which is how much | ||
791 | * closer the cpu is to this memory | ||
792 | * controller than the others, shifted | ||
793 | * up, and then the number of | ||
794 | * neighbors already in the node as an | ||
795 | * epsilon adjustment to try to keep | ||
796 | * the nodes compact. | ||
797 | */ | ||
798 | int d = distance[node][cpu] * num_nodes; | ||
799 | for_each_node_mask(i, default_nodes) { | ||
800 | if (i != node) | ||
801 | d -= distance[i][cpu]; | ||
802 | } | ||
803 | d *= 8; /* allow space for epsilon */ | ||
804 | d -= node_neighbors(node, cpu, &unbound_cpus); | ||
805 | if (d < best_distance) { | ||
806 | best_cpu = cpu; | ||
807 | best_distance = d; | ||
808 | } | ||
809 | } | ||
810 | } | ||
811 | BUG_ON(best_cpu < 0); | ||
812 | cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]); | ||
813 | cpu_2_node[best_cpu] = node; | ||
814 | cpumask_clear_cpu(best_cpu, &unbound_cpus); | ||
815 | node = next_node(node, default_nodes); | ||
816 | if (node == MAX_NUMNODES) | ||
817 | node = first_node(default_nodes); | ||
818 | } | ||
819 | |||
820 | /* Print out node assignments and set defaults for disabled cpus */ | ||
821 | cpu = 0; | ||
822 | for (y = 0; y < smp_height; ++y) { | ||
823 | printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y); | ||
824 | for (x = 0; x < smp_width; ++x, ++cpu) { | ||
825 | if (cpu_to_node(cpu) < 0) { | ||
826 | pr_cont(" -"); | ||
827 | cpu_2_node[cpu] = first_node(default_nodes); | ||
828 | } else { | ||
829 | pr_cont(" %d", cpu_to_node(cpu)); | ||
830 | } | ||
831 | } | ||
832 | pr_cont("\n"); | ||
833 | } | ||
834 | } | ||
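The scoring step above can be pulled out into a self-contained sketch (all distances and counts hypothetical):

    #include <stdio.h>

    /* Lower is better: strongly prefer cpus much closer to this node's
     * controller than to the others, with a small compactness bonus per
     * already-assigned neighbor (the "epsilon" in the comment above). */
    static int metric(int dist_this, int dist_others_sum, int num_nodes,
                      int neighbors)
    {
        int d = dist_this * num_nodes - dist_others_sum;
        d *= 8;            /* allow space for epsilon */
        d -= neighbors;
        return d;
    }

    int main(void)
    {
        int a = metric(1, 3, 2, 0);  /* near node 0, isolated:    -8 */
        int b = metric(2, 2, 2, 3);  /* equidistant, 3 neighbors: 13 */
        printf("A=%d B=%d -> assign cpu A to this node first\n", a, b);
        return 0;
    }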
835 | |||
836 | static struct cpu cpu_devices[NR_CPUS]; | ||
837 | |||
838 | static int __init topology_init(void) | ||
839 | { | ||
840 | int i; | ||
841 | |||
842 | for_each_online_node(i) | ||
843 | register_one_node(i); | ||
844 | |||
845 | for_each_present_cpu(i) | ||
846 | register_cpu(&cpu_devices[i], i); | ||
847 | |||
848 | return 0; | ||
849 | } | ||
850 | |||
851 | subsys_initcall(topology_init); | ||
852 | |||
853 | #else /* !CONFIG_NUMA */ | ||
854 | |||
855 | #define setup_numa_mapping() do { } while (0) | ||
856 | |||
857 | #endif /* CONFIG_NUMA */ | ||
858 | |||
859 | /** | ||
860 | * setup_cpu() - Do all necessary per-cpu, tile-specific initialization. | ||
861 | * @boot: Is this the boot cpu? | ||
862 | * | ||
863 | * Called from setup_arch() on the boot cpu, or online_secondary(). | ||
864 | */ | ||
865 | void __cpuinit setup_cpu(int boot) | ||
866 | { | ||
867 | /* The boot cpu sets up its permanent mappings much earlier. */ | ||
868 | if (!boot) | ||
869 | store_permanent_mappings(); | ||
870 | |||
871 | /* Allow asynchronous TLB interrupts. */ | ||
872 | #if CHIP_HAS_TILE_DMA() | ||
873 | raw_local_irq_unmask(INT_DMATLB_MISS); | ||
874 | raw_local_irq_unmask(INT_DMATLB_ACCESS); | ||
875 | #endif | ||
876 | #if CHIP_HAS_SN_PROC() | ||
877 | raw_local_irq_unmask(INT_SNITLB_MISS); | ||
878 | #endif | ||
879 | |||
880 | /* | ||
881 | * Allow user access to many generic SPRs, like the cycle | ||
882 | * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc. | ||
883 | */ | ||
884 | __insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1); | ||
885 | |||
886 | #if CHIP_HAS_SN() | ||
887 | /* Static network is not restricted. */ | ||
888 | __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1); | ||
889 | #endif | ||
890 | #if CHIP_HAS_SN_PROC() | ||
891 | __insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1); | ||
892 | __insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1); | ||
893 | #endif | ||
894 | |||
895 | /* | ||
896 | * Set the MPL for interrupt control 0 to user level. | ||
897 | * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs, | ||
898 | * as well as the PL 0 interrupt mask. | ||
899 | */ | ||
900 | __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1); | ||
901 | |||
902 | /* Initialize IRQ support for this cpu. */ | ||
903 | setup_irq_regs(); | ||
904 | |||
905 | #ifdef CONFIG_HARDWALL | ||
906 | /* Reset the network state on this cpu. */ | ||
907 | reset_network_state(); | ||
908 | #endif | ||
909 | } | ||
910 | |||
911 | static int __initdata set_initramfs_file; | ||
912 | static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; | ||
913 | |||
914 | static int __init setup_initramfs_file(char *str) | ||
915 | { | ||
916 | if (str == NULL) | ||
917 | return -EINVAL; | ||
918 | strncpy(initramfs_file, str, sizeof(initramfs_file) - 1); | ||
919 | set_initramfs_file = 1; | ||
920 | |||
921 | return 0; | ||
922 | } | ||
923 | early_param("initramfs_file", setup_initramfs_file); | ||
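For illustration, a hypothetical boot argument overriding the default image name above:

    initramfs_file=my-initrd.cpio.gz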
924 | |||
925 | /* | ||
926 | * We look for an additional "initramfs.cpio.gz" file in the hvfs. | ||
927 | * If there is one, we allocate some memory for it and it will be | ||
928 | * unpacked to the initramfs after any built-in initramfs_data. | ||
929 | */ | ||
930 | static void __init load_hv_initrd(void) | ||
931 | { | ||
932 | HV_FS_StatInfo stat; | ||
933 | int fd, rc; | ||
934 | void *initrd; | ||
935 | |||
936 | fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); | ||
937 | if (fd == HV_ENOENT) { | ||
938 | if (set_initramfs_file) | ||
939 | pr_warning("No such hvfs initramfs file '%s'\n", | ||
940 | initramfs_file); | ||
941 | return; | ||
942 | } | ||
943 | BUG_ON(fd < 0); | ||
944 | stat = hv_fs_fstat(fd); | ||
945 | BUG_ON(stat.size < 0); | ||
946 | if (stat.flags & HV_FS_ISDIR) { | ||
947 | pr_warning("Ignoring hvfs file '%s': it's a directory.\n", | ||
948 | initramfs_file); | ||
949 | return; | ||
950 | } | ||
951 | initrd = alloc_bootmem_pages(stat.size); | ||
952 | rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0); | ||
953 | if (rc != stat.size) { | ||
954 | pr_err("Error reading %d bytes from hvfs file '%s': %d\n", | ||
955 | stat.size, initramfs_file, rc); | ||
956 | free_bootmem((unsigned long) initrd, stat.size); | ||
957 | return; | ||
958 | } | ||
959 | initrd_start = (unsigned long) initrd; | ||
960 | initrd_end = initrd_start + stat.size; | ||
961 | } | ||
962 | |||
963 | void __init free_initrd_mem(unsigned long begin, unsigned long end) | ||
964 | { | ||
965 | free_bootmem(begin, end - begin); | ||
966 | } | ||
967 | |||
968 | static void __init validate_hv(void) | ||
969 | { | ||
970 | /* | ||
971 | * It may already be too late, but let's check our built-in | ||
972 | * configuration against what the hypervisor is providing. | ||
973 | */ | ||
974 | unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE); | ||
975 | int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL); | ||
976 | int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE); | ||
977 | HV_ASIDRange asid_range; | ||
978 | |||
979 | #ifndef CONFIG_SMP | ||
980 | HV_Topology topology = hv_inquire_topology(); | ||
981 | BUG_ON(topology.coord.x != 0 || topology.coord.y != 0); | ||
982 | if (topology.width != 1 || topology.height != 1) { | ||
983 | pr_warning("Warning: booting UP kernel on %dx%d grid;" | ||
984 | " will ignore all but first tile.\n", | ||
985 | topology.width, topology.height); | ||
986 | } | ||
987 | #endif | ||
988 | |||
989 | if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text) | ||
990 | early_panic("Hypervisor glue size %ld is too big!\n", | ||
991 | glue_size); | ||
992 | if (hv_page_size != PAGE_SIZE) | ||
993 | early_panic("Hypervisor page size %#x != our %#lx\n", | ||
994 | hv_page_size, PAGE_SIZE); | ||
995 | if (hv_hpage_size != HPAGE_SIZE) | ||
996 | early_panic("Hypervisor huge page size %#x != our %#lx\n", | ||
997 | hv_hpage_size, HPAGE_SIZE); | ||
998 | |||
999 | #ifdef CONFIG_SMP | ||
1000 | /* | ||
1001 | * Some hypervisor APIs take a pointer to a bitmap array | ||
1002 | * whose size is at least the number of cpus on the chip. | ||
1003 | * We use a struct cpumask for this, so it must be big enough. | ||
1004 | */ | ||
1005 | if ((smp_height * smp_width) > nr_cpu_ids) | ||
1006 | early_panic("Hypervisor %d x %d grid too big for Linux" | ||
1007 | " NR_CPUS %d\n", smp_height, smp_width, | ||
1008 | nr_cpu_ids); | ||
1009 | #endif | ||
1010 | |||
1011 | /* | ||
1012 | * Check that we're using allowed ASIDs, and initialize the | ||
1013 | * various asid variables to their appropriate initial states. | ||
1014 | */ | ||
1015 | asid_range = hv_inquire_asid(0); | ||
1016 | __get_cpu_var(current_asid) = min_asid = asid_range.start; | ||
1017 | max_asid = asid_range.start + asid_range.size - 1; | ||
1018 | |||
1019 | if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model, | ||
1020 | sizeof(chip_model)) < 0) { | ||
1021 | pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n"); | ||
1022 | strlcpy(chip_model, "unknown", sizeof(chip_model)); | ||
1023 | } | ||
1024 | } | ||
1025 | |||
1026 | static void __init validate_va(void) | ||
1027 | { | ||
1028 | #ifndef __tilegx__ /* FIXME: GX: probably some validation relevant here */ | ||
1029 | /* | ||
1030 | * Similarly, make sure we're only using allowed VAs. | ||
1031 | * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT, | ||
1032 | * and 0 .. KERNEL_HIGH_VADDR. | ||
1033 | * In addition, make sure we CAN'T use the end of memory, since | ||
1034 | * we use the last chunk of each pgd for the pgd_list. | ||
1035 | */ | ||
1036 | int i, fc_fd_ok = 0; | ||
1037 | unsigned long max_va = 0; | ||
1038 | unsigned long list_va = | ||
1039 | ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT); | ||
1040 | |||
1041 | for (i = 0; ; ++i) { | ||
1042 | HV_VirtAddrRange range = hv_inquire_virtual(i); | ||
1043 | if (range.size == 0) | ||
1044 | break; | ||
1045 | if (range.start <= MEM_USER_INTRPT && | ||
1046 | range.start + range.size >= MEM_HV_INTRPT) | ||
1047 | fc_fd_ok = 1; | ||
1048 | if (range.start == 0) | ||
1049 | max_va = range.size; | ||
1050 | BUG_ON(range.start + range.size > list_va); | ||
1051 | } | ||
1052 | if (!fc_fd_ok) | ||
1053 | early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n"); | ||
1054 | if (max_va == 0) | ||
1055 | early_panic("Hypervisor not configured for low VAs\n"); | ||
1056 | if (max_va < KERNEL_HIGH_VADDR) | ||
1057 | early_panic("Hypervisor max VA %#lx smaller than %#lx\n", | ||
1058 | max_va, KERNEL_HIGH_VADDR); | ||
1059 | |||
1060 | /* Kernel PCs must have their high bit set; see intvec.S. */ | ||
1061 | if ((long)VMALLOC_START >= 0) | ||
1062 | early_panic( | ||
1063 | "Linux VMALLOC region below the 2GB line (%#lx)!\n" | ||
1064 | "Reconfigure the kernel with fewer NR_HUGE_VMAPS\n" | ||
1065 | "or smaller VMALLOC_RESERVE.\n", | ||
1066 | VMALLOC_START); | ||
1067 | #endif | ||
1068 | } | ||
1069 | |||
1070 | /* | ||
1071 | * cpu_lotar_map lists all the cpus that are valid for the supervisor | ||
1072 | * to cache data on at a page level, i.e. what cpus can be placed in | ||
1073 | * the LOTAR field of a PTE. It is equivalent to the set of possible | ||
1074 | * cpus plus any other cpus that are willing to share their cache. | ||
1075 | * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR). | ||
1076 | */ | ||
1077 | struct cpumask __write_once cpu_lotar_map; | ||
1078 | EXPORT_SYMBOL(cpu_lotar_map); | ||
1079 | |||
1080 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
1081 | /* | ||
1082 | * hash_for_home_map lists all the tiles that hash-for-home data | ||
1083 | * will be cached on. Note that this may include tiles that are not | ||
1084 | * valid for this supervisor to use otherwise (e.g. if a hypervisor | ||
1085 | * device is being shared between multiple supervisors). | ||
1086 | * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE). | ||
1087 | */ | ||
1088 | struct cpumask hash_for_home_map; | ||
1089 | EXPORT_SYMBOL(hash_for_home_map); | ||
1090 | #endif | ||
1091 | |||
1092 | /* | ||
1093 | * cpu_cacheable_map lists all the cpus whose caches the hypervisor can | ||
1094 | * flush on our behalf. It is set to cpu_possible_map OR'ed with | ||
1095 | * hash_for_home_map, and it is what should be passed to | ||
1096 | * hv_flush_remote() to flush all caches. Note that if there are | ||
1097 | * dedicated hypervisor driver tiles that have authorized use of their | ||
1098 | * cache, those tiles will only appear in cpu_lotar_map, NOT in | ||
1099 | * cpu_cacheable_map, as they are a special case. | ||
1100 | */ | ||
1101 | struct cpumask __write_once cpu_cacheable_map; | ||
1102 | EXPORT_SYMBOL(cpu_cacheable_map); | ||
1103 | |||
1104 | static __initdata struct cpumask disabled_map; | ||
1105 | |||
1106 | static int __init disabled_cpus(char *str) | ||
1107 | { | ||
1108 | int boot_cpu = smp_processor_id(); | ||
1109 | |||
1110 | if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0) | ||
1111 | return -EINVAL; | ||
1112 | if (cpumask_test_cpu(boot_cpu, &disabled_map)) { | ||
1113 | pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu); | ||
1114 | cpumask_clear_cpu(boot_cpu, &disabled_map); | ||
1115 | } | ||
1116 | return 0; | ||
1117 | } | ||
1118 | |||
1119 | early_param("disabled_cpus", disabled_cpus); | ||
1120 | |||
1121 | void __init print_disabled_cpus(void) | ||
1122 | { | ||
1123 | if (!cpumask_empty(&disabled_map)) { | ||
1124 | char buf[100]; | ||
1125 | cpulist_scnprintf(buf, sizeof(buf), &disabled_map); | ||
1126 | pr_info("CPUs not available for Linux: %s\n", buf); | ||
1127 | } | ||
1128 | } | ||
1129 | |||
1130 | static void __init setup_cpu_maps(void) | ||
1131 | { | ||
1132 | struct cpumask hv_disabled_map, cpu_possible_init; | ||
1133 | int boot_cpu = smp_processor_id(); | ||
1134 | int cpus, i, rc; | ||
1135 | |||
1136 | /* Learn which cpus are allowed by the hypervisor. */ | ||
1137 | rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL, | ||
1138 | (HV_VirtAddr) cpumask_bits(&cpu_possible_init), | ||
1139 | sizeof(cpu_cacheable_map)); | ||
1140 | if (rc < 0) | ||
1141 | early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc); | ||
1142 | if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init)) | ||
1143 | early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu); | ||
1144 | |||
1145 | /* Compute the cpus disabled by the hvconfig file. */ | ||
1146 | cpumask_complement(&hv_disabled_map, &cpu_possible_init); | ||
1147 | |||
1148 | /* Include them with the cpus disabled by "disabled_cpus". */ | ||
1149 | cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map); | ||
1150 | |||
1151 | /* | ||
1152 | * Disable every cpu after "setup_max_cpus". But don't mark | ||
1153 | * as disabled the cpus that are outside of our initial rectangle, | ||
1154 | * since that turns out to be confusing. | ||
1155 | */ | ||
1156 | cpus = 1; /* this cpu */ | ||
1157 | cpumask_set_cpu(boot_cpu, &disabled_map); /* ignore this cpu */ | ||
1158 | for (i = 0; cpus < setup_max_cpus; ++i) | ||
1159 | if (!cpumask_test_cpu(i, &disabled_map)) | ||
1160 | ++cpus; | ||
1161 | for (; i < smp_height * smp_width; ++i) | ||
1162 | cpumask_set_cpu(i, &disabled_map); | ||
1163 | cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */ | ||
1164 | for (i = smp_height * smp_width; i < NR_CPUS; ++i) | ||
1165 | cpumask_clear_cpu(i, &disabled_map); | ||
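/*
 * Illustration (hypothetical): on a 4x4 grid with setup_max_cpus=4 and
 * nothing else disabled, the first loop stops with cpus 0-3 enabled
 * (the boot cpu plus three others), the second marks cpus 4-15 as
 * disabled, and the final loop clears stale bits for cpus 16 and up,
 * which lie outside the grid entirely.
 */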
1166 | |||
1167 | /* | ||
1168 | * Setup cpu_possible map as every cpu allocated to us, minus | ||
1169 | * the results of any "disabled_cpus" settings. | ||
1170 | */ | ||
1171 | cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map); | ||
1172 | init_cpu_possible(&cpu_possible_init); | ||
1173 | |||
1174 | /* Learn which cpus are valid for LOTAR caching. */ | ||
1175 | rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR, | ||
1176 | (HV_VirtAddr) cpumask_bits(&cpu_lotar_map), | ||
1177 | sizeof(cpu_lotar_map)); | ||
1178 | if (rc < 0) { | ||
1179 | pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n"); | ||
1180 | cpu_lotar_map = cpu_possible_map; | ||
1181 | } | ||
1182 | |||
1183 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
1184 | /* Retrieve set of CPUs used for hash-for-home caching */ | ||
1185 | rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE, | ||
1186 | (HV_VirtAddr) hash_for_home_map.bits, | ||
1187 | sizeof(hash_for_home_map)); | ||
1188 | if (rc < 0) | ||
1189 | early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc); | ||
1190 | cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map); | ||
1191 | #else | ||
1192 | cpu_cacheable_map = cpu_possible_map; | ||
1193 | #endif | ||
1194 | } | ||
1195 | |||
1196 | |||
1197 | static int __init dataplane(char *str) | ||
1198 | { | ||
1199 | pr_warning("WARNING: dataplane support disabled in this kernel\n"); | ||
1200 | return 0; | ||
1201 | } | ||
1202 | |||
1203 | early_param("dataplane", dataplane); | ||
1204 | |||
1205 | #ifdef CONFIG_CMDLINE_BOOL | ||
1206 | static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; | ||
1207 | #endif | ||
1208 | |||
1209 | void __init setup_arch(char **cmdline_p) | ||
1210 | { | ||
1211 | int len; | ||
1212 | |||
1213 | #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE) | ||
1214 | len = hv_get_command_line((HV_VirtAddr) boot_command_line, | ||
1215 | COMMAND_LINE_SIZE); | ||
1216 | if (boot_command_line[0]) | ||
1217 | pr_warning("WARNING: ignoring dynamic command line \"%s\"\n", | ||
1218 | boot_command_line); | ||
1219 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); | ||
1220 | #else | ||
1221 | char *hv_cmdline; | ||
1222 | #if defined(CONFIG_CMDLINE_BOOL) | ||
1223 | if (builtin_cmdline[0]) { | ||
1224 | int builtin_len = strlcpy(boot_command_line, builtin_cmdline, | ||
1225 | COMMAND_LINE_SIZE); | ||
1226 | if (builtin_len < COMMAND_LINE_SIZE-1) | ||
1227 | boot_command_line[builtin_len++] = ' '; | ||
1228 | hv_cmdline = &boot_command_line[builtin_len]; | ||
1229 | len = COMMAND_LINE_SIZE - builtin_len; | ||
1230 | } else | ||
1231 | #endif | ||
1232 | { | ||
1233 | hv_cmdline = boot_command_line; | ||
1234 | len = COMMAND_LINE_SIZE; | ||
1235 | } | ||
1236 | len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len); | ||
1237 | if (len < 0 || len > COMMAND_LINE_SIZE) | ||
1238 | early_panic("hv_get_command_line failed: %d\n", len); | ||
1239 | #endif | ||
1240 | |||
1241 | *cmdline_p = boot_command_line; | ||
1242 | |||
1243 | /* Set disabled_map and setup_max_cpus very early */ | ||
1244 | parse_early_param(); | ||
1245 | |||
1246 | /* Make sure the kernel is compatible with the hypervisor. */ | ||
1247 | validate_hv(); | ||
1248 | validate_va(); | ||
1249 | |||
1250 | setup_cpu_maps(); | ||
1251 | |||
1252 | |||
1253 | #ifdef CONFIG_PCI | ||
1254 | /* | ||
1255 | * Initialize the PCI structures. This is done before memory | ||
1256 | * setup so that we know whether or not a pci_reserve region | ||
1257 | * is necessary. | ||
1258 | */ | ||
1259 | if (tile_pci_init() == 0) | ||
1260 | pci_reserve_mb = 0; | ||
1261 | |||
1262 | /* PCI systems reserve a region just below 4GB for mapping iomem. */ | ||
1263 | pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT)); | ||
1264 | pci_reserve_start_pfn = pci_reserve_end_pfn - | ||
1265 | (pci_reserve_mb << (20 - PAGE_SHIFT)); | ||
1266 | #endif | ||
1267 | |||
1268 | init_mm.start_code = (unsigned long) _text; | ||
1269 | init_mm.end_code = (unsigned long) _etext; | ||
1270 | init_mm.end_data = (unsigned long) _edata; | ||
1271 | init_mm.brk = (unsigned long) _end; | ||
1272 | |||
1273 | setup_memory(); | ||
1274 | store_permanent_mappings(); | ||
1275 | setup_bootmem_allocator(); | ||
1276 | |||
1277 | /* | ||
1278 | * NOTE: before this point _nobody_ is allowed to allocate | ||
1279 | * any memory using the bootmem allocator. | ||
1280 | */ | ||
1281 | |||
1282 | paging_init(); | ||
1283 | setup_numa_mapping(); | ||
1284 | zone_sizes_init(); | ||
1285 | set_page_homes(); | ||
1286 | setup_cpu(1); | ||
1287 | setup_clock(); | ||
1288 | load_hv_initrd(); | ||
1289 | } | ||
1290 | |||
1291 | |||
1292 | /* | ||
1293 | * Set up per-cpu memory. | ||
1294 | */ | ||
1295 | |||
1296 | unsigned long __per_cpu_offset[NR_CPUS] __write_once; | ||
1297 | EXPORT_SYMBOL(__per_cpu_offset); | ||
1298 | |||
1299 | static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 }; | ||
1300 | static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 }; | ||
1301 | |||
1302 | /* | ||
1303 | * As the percpu code allocates pages, we return the pages from the | ||
1304 | * end of the node for the specified cpu. | ||
1305 | */ | ||
1306 | static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) | ||
1307 | { | ||
1308 | int nid = cpu_to_node(cpu); | ||
1309 | unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid]; | ||
1310 | |||
1311 | BUG_ON(size % PAGE_SIZE != 0); | ||
1312 | pfn_offset[nid] += size / PAGE_SIZE; | ||
1313 | if (percpu_pfn[cpu] == 0) | ||
1314 | percpu_pfn[cpu] = pfn; | ||
1315 | return pfn_to_kaddr(pfn); | ||
1316 | } | ||
1317 | |||
1318 | /* | ||
1319 | * Pages reserved for percpu memory are not freeable, and in any case we are | ||
1320 | * on a short path to panic() in setup_per_cpu_areas() at this point anyway. | ||
1321 | */ | ||
1322 | static void __init pcpu_fc_free(void *ptr, size_t size) | ||
1323 | { | ||
1324 | } | ||
1325 | |||
1326 | /* | ||
1327 | * Set up vmalloc page tables using bootmem for the percpu code. | ||
1328 | */ | ||
1329 | static void __init pcpu_fc_populate_pte(unsigned long addr) | ||
1330 | { | ||
1331 | pgd_t *pgd; | ||
1332 | pud_t *pud; | ||
1333 | pmd_t *pmd; | ||
1334 | pte_t *pte; | ||
1335 | |||
1336 | BUG_ON(pgd_addr_invalid(addr)); | ||
1337 | |||
1338 | pgd = swapper_pg_dir + pgd_index(addr); | ||
1339 | pud = pud_offset(pgd, addr); | ||
1340 | BUG_ON(!pud_present(*pud)); | ||
1341 | pmd = pmd_offset(pud, addr); | ||
1342 | if (pmd_present(*pmd)) { | ||
1343 | BUG_ON(pmd_huge_page(*pmd)); | ||
1344 | } else { | ||
1345 | pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, | ||
1346 | HV_PAGE_TABLE_ALIGN, 0); | ||
1347 | pmd_populate_kernel(&init_mm, pmd, pte); | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1351 | void __init setup_per_cpu_areas(void) | ||
1352 | { | ||
1353 | struct page *pg; | ||
1354 | unsigned long delta, pfn, lowmem_va; | ||
1355 | unsigned long size = percpu_size(); | ||
1356 | char *ptr; | ||
1357 | int rc, cpu, i; | ||
1358 | |||
1359 | rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc, | ||
1360 | pcpu_fc_free, pcpu_fc_populate_pte); | ||
1361 | if (rc < 0) | ||
1362 | panic("Cannot initialize percpu area (err=%d)", rc); | ||
1363 | |||
1364 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; | ||
1365 | for_each_possible_cpu(cpu) { | ||
1366 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; | ||
1367 | |||
1368 | /* finv the copy out of cache so we can change homecache */ | ||
1369 | ptr = pcpu_base_addr + pcpu_unit_offsets[cpu]; | ||
1370 | __finv_buffer(ptr, size); | ||
1371 | pfn = percpu_pfn[cpu]; | ||
1372 | |||
1373 | /* Rewrite the page tables to cache on that cpu */ | ||
1374 | pg = pfn_to_page(pfn); | ||
1375 | for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) { | ||
1376 | |||
1377 | /* Update the vmalloc mapping and page home. */ | ||
1378 | pte_t *ptep = | ||
1379 | virt_to_pte(NULL, (unsigned long)ptr + i); | ||
1380 | pte_t pte = *ptep; | ||
1381 | BUG_ON(pfn != pte_pfn(pte)); | ||
1382 | pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3); | ||
1383 | pte = set_remote_cache_cpu(pte, cpu); | ||
1384 | set_pte(ptep, pte); | ||
1385 | |||
1386 | /* Update the lowmem mapping for consistency. */ | ||
1387 | lowmem_va = (unsigned long)pfn_to_kaddr(pfn); | ||
1388 | ptep = virt_to_pte(NULL, lowmem_va); | ||
1389 | if (pte_huge(*ptep)) { | ||
1390 | printk(KERN_DEBUG "early shatter of huge page" | ||
1391 | " at %#lx\n", lowmem_va); | ||
1392 | shatter_pmd((pmd_t *)ptep); | ||
1393 | ptep = virt_to_pte(NULL, lowmem_va); | ||
1394 | BUG_ON(pte_huge(*ptep)); | ||
1395 | } | ||
1396 | BUG_ON(pfn != pte_pfn(*ptep)); | ||
1397 | set_pte(ptep, pte); | ||
1398 | } | ||
1399 | } | ||
1400 | |||
1401 | /* Set our thread pointer appropriately. */ | ||
1402 | set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]); | ||
1403 | |||
1404 | /* Make sure the finv's have completed. */ | ||
1405 | mb_incoherent(); | ||
1406 | |||
1407 | /* Flush the TLB so we reference it properly from here on out. */ | ||
1408 | local_flush_tlb_all(); | ||
1409 | } | ||
1410 | |||
1411 | static struct resource data_resource = { | ||
1412 | .name = "Kernel data", | ||
1413 | .start = 0, | ||
1414 | .end = 0, | ||
1415 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | ||
1416 | }; | ||
1417 | |||
1418 | static struct resource code_resource = { | ||
1419 | .name = "Kernel code", | ||
1420 | .start = 0, | ||
1421 | .end = 0, | ||
1422 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | ||
1423 | }; | ||
1424 | |||
1425 | /* | ||
1426 | * We reserve all resources above 4GB so that PCI won't try to put | ||
1427 | * mappings above 4GB; the standard allows that for some devices but | ||
1428 | * the probing code truncates values to 32 bits. | ||
1429 | */ | ||
1430 | #ifdef CONFIG_PCI | ||
1431 | static struct resource* __init | ||
1432 | insert_non_bus_resource(void) | ||
1433 | { | ||
1434 | struct resource *res = | ||
1435 | kzalloc(sizeof(struct resource), GFP_ATOMIC); | ||
1436 | res->name = "Non-Bus Physical Address Space"; | ||
1437 | res->start = (1ULL << 32); | ||
1438 | res->end = -1LL; | ||
1439 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | ||
1440 | if (insert_resource(&iomem_resource, res)) { | ||
1441 | kfree(res); | ||
1442 | return NULL; | ||
1443 | } | ||
1444 | return res; | ||
1445 | } | ||
1446 | #endif | ||
1447 | |||
1448 | static struct resource* __init | ||
1449 | insert_ram_resource(u64 start_pfn, u64 end_pfn) | ||
1450 | { | ||
1451 | struct resource *res = | ||
1452 | kzalloc(sizeof(struct resource), GFP_ATOMIC); | ||
1453 | res->name = "System RAM"; | ||
1454 | res->start = start_pfn << PAGE_SHIFT; | ||
1455 | res->end = (end_pfn << PAGE_SHIFT) - 1; | ||
1456 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | ||
1457 | if (insert_resource(&iomem_resource, res)) { | ||
1458 | kfree(res); | ||
1459 | return NULL; | ||
1460 | } | ||
1461 | return res; | ||
1462 | } | ||
1463 | |||
1464 | /* | ||
1465 | * Request address space for all standard resources | ||
1466 | * | ||
1467 | * If the system includes PCI root complex drivers, we need to create | ||
1468 | * a window just below 4GB where PCI BARs can be mapped. | ||
1469 | */ | ||
1470 | static int __init request_standard_resources(void) | ||
1471 | { | ||
1472 | int i; | ||
1473 | enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET }; | ||
1474 | |||
1475 | iomem_resource.end = -1LL; | ||
1476 | #ifdef CONFIG_PCI | ||
1477 | insert_non_bus_resource(); | ||
1478 | #endif | ||
1479 | |||
1480 | for_each_online_node(i) { | ||
1481 | u64 start_pfn = node_start_pfn[i]; | ||
1482 | u64 end_pfn = node_end_pfn[i]; | ||
1483 | |||
1484 | #ifdef CONFIG_PCI | ||
1485 | if (start_pfn <= pci_reserve_start_pfn && | ||
1486 | end_pfn > pci_reserve_start_pfn) { | ||
1487 | if (end_pfn > pci_reserve_end_pfn) | ||
1488 | insert_ram_resource(pci_reserve_end_pfn, | ||
1489 | end_pfn); | ||
1490 | end_pfn = pci_reserve_start_pfn; | ||
1491 | } | ||
1492 | #endif | ||
1493 | insert_ram_resource(start_pfn, end_pfn); | ||
1494 | } | ||
1495 | |||
1496 | code_resource.start = __pa(_text - CODE_DELTA); | ||
1497 | code_resource.end = __pa(_etext - CODE_DELTA)-1; | ||
1498 | data_resource.start = __pa(_sdata); | ||
1499 | data_resource.end = __pa(_end)-1; | ||
1500 | |||
1501 | insert_resource(&iomem_resource, &code_resource); | ||
1502 | insert_resource(&iomem_resource, &data_resource); | ||
1503 | |||
1504 | #ifdef CONFIG_KEXEC | ||
1505 | insert_resource(&iomem_resource, &crashk_res); | ||
1506 | #endif | ||
1507 | |||
1508 | return 0; | ||
1509 | } | ||
1510 | |||
1511 | subsys_initcall(request_standard_resources); | ||
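For illustration only, the resources inserted above might render in /proc/iomem roughly as follows (all addresses invented; the names are the ones assigned in this file):

    00000000-3fffffff : System RAM
      00400000-005a7fff : Kernel code
      005c0000-0064efff : Kernel data
    40000000-7fffffff : System RAM
    100000000-ffffffffffffffff : Non-Bus Physical Address Space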
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c new file mode 100644 index 000000000000..45b66a3c991f --- /dev/null +++ b/arch/tile/kernel/signal.c | |||
@@ -0,0 +1,358 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/sched.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/signal.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/wait.h> | ||
24 | #include <linux/unistd.h> | ||
25 | #include <linux/stddef.h> | ||
26 | #include <linux/personality.h> | ||
27 | #include <linux/suspend.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | #include <linux/elf.h> | ||
30 | #include <linux/compat.h> | ||
31 | #include <linux/syscalls.h> | ||
32 | #include <linux/uaccess.h> | ||
33 | #include <asm/processor.h> | ||
34 | #include <asm/ucontext.h> | ||
35 | #include <asm/sigframe.h> | ||
36 | #include <asm/syscalls.h> | ||
37 | #include <arch/interrupts.h> | ||
38 | |||
39 | #define DEBUG_SIG 0 | ||
40 | |||
41 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
42 | |||
43 | |||
44 | long _sys_sigaltstack(const stack_t __user *uss, | ||
45 | stack_t __user *uoss, struct pt_regs *regs) | ||
46 | { | ||
47 | return do_sigaltstack(uss, uoss, regs->sp); | ||
48 | } | ||
49 | |||
50 | |||
51 | /* | ||
52 | * Do a signal return; undo the signal stack. | ||
53 | */ | ||
54 | |||
55 | int restore_sigcontext(struct pt_regs *regs, | ||
56 | struct sigcontext __user *sc, long *pr0) | ||
57 | { | ||
58 | int err = 0; | ||
59 | int i; | ||
60 | |||
61 | /* Always make any pending restarted system calls return -EINTR */ | ||
62 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
63 | |||
64 | for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) | ||
65 | err |= __get_user(((long *)regs)[i], | ||
66 | &((long __user *)(&sc->regs))[i]); | ||
67 | |||
68 | regs->faultnum = INT_SWINT_1_SIGRETURN; | ||
69 | |||
70 | err |= __get_user(*pr0, &sc->regs.regs[0]); | ||
71 | return err; | ||
72 | } | ||
73 | |||
74 | /* sigreturn() returns long since it restores r0 in the interrupted code. */ | ||
75 | long _sys_rt_sigreturn(struct pt_regs *regs) | ||
76 | { | ||
77 | struct rt_sigframe __user *frame = | ||
78 | (struct rt_sigframe __user *)(regs->sp); | ||
79 | sigset_t set; | ||
80 | long r0; | ||
81 | |||
82 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
83 | goto badframe; | ||
84 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | ||
85 | goto badframe; | ||
86 | |||
87 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
88 | spin_lock_irq(¤t->sighand->siglock); | ||
89 | current->blocked = set; | ||
90 | recalc_sigpending(); | ||
91 | spin_unlock_irq(¤t->sighand->siglock); | ||
92 | |||
93 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) | ||
94 | goto badframe; | ||
95 | |||
96 | if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) | ||
97 | goto badframe; | ||
98 | |||
99 | return r0; | ||
100 | |||
101 | badframe: | ||
102 | force_sig(SIGSEGV, current); | ||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Set up a signal frame. | ||
108 | */ | ||
109 | |||
110 | int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) | ||
111 | { | ||
112 | int i, err = 0; | ||
113 | |||
114 | for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) | ||
115 | err |= __put_user(((long *)regs)[i], | ||
116 | &((long __user *)(&sc->regs))[i]); | ||
117 | |||
118 | return err; | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * Determine which stack to use.. | ||
123 | */ | ||
124 | static inline void __user *get_sigframe(struct k_sigaction *ka, | ||
125 | struct pt_regs *regs, | ||
126 | size_t frame_size) | ||
127 | { | ||
128 | unsigned long sp; | ||
129 | |||
130 | /* Default to using normal stack */ | ||
131 | sp = regs->sp; | ||
132 | |||
133 | /* | ||
134 | * If we are on the alternate signal stack and would overflow | ||
135 | * it, don't. Return an always-bogus address instead so we | ||
136 | * will die with SIGSEGV. | ||
137 | */ | ||
138 | if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) | ||
139 | return (void __user __force *)-1UL; | ||
140 | |||
141 | /* This is the X/Open sanctioned signal stack switching. */ | ||
142 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
143 | if (sas_ss_flags(sp) == 0) | ||
144 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
145 | } | ||
146 | |||
147 | sp -= frame_size; | ||
148 | /* | ||
149 | * Align the stack pointer according to the TILE ABI, | ||
150 | * i.e. so that on function entry (sp & 15) == 0. | ||
151 | */ | ||
152 | sp &= -16UL; | ||
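/* e.g. a hypothetical sp of 0xbffff8f4 becomes 0xbffff8f0 */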
153 | return (void __user *) sp; | ||
154 | } | ||
155 | |||
156 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
157 | sigset_t *set, struct pt_regs *regs) | ||
158 | { | ||
159 | unsigned long restorer; | ||
160 | struct rt_sigframe __user *frame; | ||
161 | int err = 0; | ||
162 | int usig; | ||
163 | |||
164 | frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
165 | |||
166 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
167 | goto give_sigsegv; | ||
168 | |||
169 | usig = current_thread_info()->exec_domain | ||
170 | && current_thread_info()->exec_domain->signal_invmap | ||
171 | && sig < 32 | ||
172 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
173 | : sig; | ||
174 | |||
175 | /* Always write at least the signal number for the stack backtracer. */ | ||
176 | if (ka->sa.sa_flags & SA_SIGINFO) { | ||
177 | /* At sigreturn time, restore the callee-save registers too. */ | ||
178 | err |= copy_siginfo_to_user(&frame->info, info); | ||
179 | regs->flags |= PT_FLAGS_RESTORE_REGS; | ||
180 | } else { | ||
181 | err |= __put_user(info->si_signo, &frame->info.si_signo); | ||
182 | } | ||
183 | |||
184 | /* Create the ucontext. */ | ||
185 | err |= __clear_user(&frame->save_area, sizeof(frame->save_area)); | ||
186 | err |= __put_user(0, &frame->uc.uc_flags); | ||
187 | err |= __put_user(NULL, &frame->uc.uc_link); | ||
188 | err |= __put_user((void __user *)(current->sas_ss_sp), | ||
189 | &frame->uc.uc_stack.ss_sp); | ||
190 | err |= __put_user(sas_ss_flags(regs->sp), | ||
191 | &frame->uc.uc_stack.ss_flags); | ||
192 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
193 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs); | ||
194 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
195 | if (err) | ||
196 | goto give_sigsegv; | ||
197 | |||
198 | restorer = VDSO_BASE; | ||
199 | if (ka->sa.sa_flags & SA_RESTORER) | ||
200 | restorer = (unsigned long) ka->sa.sa_restorer; | ||
201 | |||
202 | /* | ||
203 | * Set up registers for signal handler. | ||
204 | * Registers that we don't modify keep the value they had from | ||
205 | * user-space at the time we took the signal. | ||
206 | */ | ||
207 | regs->pc = (unsigned long) ka->sa.sa_handler; | ||
208 | regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ | ||
209 | regs->sp = (unsigned long) frame; | ||
210 | regs->lr = restorer; | ||
211 | regs->regs[0] = (unsigned long) usig; | ||
212 | |||
213 | if (ka->sa.sa_flags & SA_SIGINFO) { | ||
214 | /* Need extra arguments, so mark to restore caller-saves. */ | ||
215 | regs->regs[1] = (unsigned long) &frame->info; | ||
216 | regs->regs[2] = (unsigned long) &frame->uc; | ||
217 | regs->flags |= PT_FLAGS_CALLER_SAVES; | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * Notify any tracer that was single-stepping it. | ||
222 | * The tracer may want to single-step inside the | ||
223 | * handler too. | ||
224 | */ | ||
225 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
226 | ptrace_notify(SIGTRAP); | ||
227 | |||
228 | return 0; | ||
229 | |||
230 | give_sigsegv: | ||
231 | force_sigsegv(sig, current); | ||
232 | return -EFAULT; | ||
233 | } | ||
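The register setup above (r0 = signal, r1 = siginfo, r2 = ucontext) is what lets an ordinary three-argument SA_SIGINFO handler work. A portable user-space sketch of the receiving side (nothing tile-specific here beyond the fact that r0..r2 carry these arguments; the handler body is illustrative only):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>

    /* These three arguments arrive in r0, r1 and r2 as set up above. */
    static void handler(int sig, siginfo_t *info, void *ucontext)
    {
        (void)ucontext;
        /* printf is not async-signal-safe; used here only to illustrate */
        printf("signal %d from pid %d\n", sig, (int)info->si_pid);
    }

    int main(void)
    {
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;   /* ask for the info/ucontext args */
        sigaction(SIGUSR2, &sa, NULL);
        raise(SIGUSR2);
        return 0;
    }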
234 | |||
235 | /* | ||
236 | * OK, we're invoking a handler | ||
237 | */ | ||
238 | |||
239 | static int handle_signal(unsigned long sig, siginfo_t *info, | ||
240 | struct k_sigaction *ka, sigset_t *oldset, | ||
241 | struct pt_regs *regs) | ||
242 | { | ||
243 | int ret; | ||
244 | |||
245 | |||
246 | /* Are we from a system call? */ | ||
247 | if (regs->faultnum == INT_SWINT_1) { | ||
248 | /* If so, check system call restarting.. */ | ||
249 | switch (regs->regs[0]) { | ||
250 | case -ERESTART_RESTARTBLOCK: | ||
251 | case -ERESTARTNOHAND: | ||
252 | regs->regs[0] = -EINTR; | ||
253 | break; | ||
254 | |||
255 | case -ERESTARTSYS: | ||
256 | if (!(ka->sa.sa_flags & SA_RESTART)) { | ||
257 | regs->regs[0] = -EINTR; | ||
258 | break; | ||
259 | } | ||
260 | /* fallthrough */ | ||
261 | case -ERESTARTNOINTR: | ||
262 | /* Reload caller-saves to restore r0..r5 and r10. */ | ||
263 | regs->flags |= PT_FLAGS_CALLER_SAVES; | ||
264 | regs->regs[0] = regs->orig_r0; | ||
265 | regs->pc -= 8; | ||
266 | } | ||
267 | } | ||
268 | |||
269 | /* Set up the stack frame */ | ||
270 | #ifdef CONFIG_COMPAT | ||
271 | if (is_compat_task()) | ||
272 | ret = compat_setup_rt_frame(sig, ka, info, oldset, regs); | ||
273 | else | ||
274 | #endif | ||
275 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | ||
276 | if (ret == 0) { | ||
277 | /* This code is only called from system calls or from | ||
278 | * the work_pending path in the return-to-user code, and | ||
279 | * either way we can re-enable interrupts unconditionally. | ||
280 | */ | ||
281 | spin_lock_irq(¤t->sighand->siglock); | ||
282 | sigorsets(¤t->blocked, | ||
283 | ¤t->blocked, &ka->sa.sa_mask); | ||
284 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
285 | sigaddset(¤t->blocked, sig); | ||
286 | recalc_sigpending(); | ||
287 | spin_unlock_irq(¤t->sighand->siglock); | ||
288 | } | ||
289 | |||
290 | return ret; | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
295 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
296 | * mistake. | ||
297 | */ | ||
298 | void do_signal(struct pt_regs *regs) | ||
299 | { | ||
300 | siginfo_t info; | ||
301 | int signr; | ||
302 | struct k_sigaction ka; | ||
303 | sigset_t *oldset; | ||
304 | |||
305 | /* | ||
306 | * i386 will check if we're coming from kernel mode and bail out | ||
307 | * here. In my experience this just turns weird crashes into | ||
308 | * weird spin-hangs. But if we find a case where this seems | ||
309 | * helpful, we can reinstate the check on "!user_mode(regs)". | ||
310 | */ | ||
311 | |||
312 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) | ||
313 | oldset = ¤t->saved_sigmask; | ||
314 | else | ||
315 | oldset = ¤t->blocked; | ||
316 | |||
317 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
318 | if (signr > 0) { | ||
319 | /* Whee! Actually deliver the signal. */ | ||
320 | if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { | ||
321 | /* | ||
322 | * A signal was successfully delivered; the saved | ||
323 | * sigmask will have been stored in the signal frame, | ||
324 | * and will be restored by sigreturn, so we can simply | ||
325 | * clear the TS_RESTORE_SIGMASK flag. | ||
326 | */ | ||
327 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
328 | } | ||
329 | |||
330 | return; | ||
331 | } | ||
332 | |||
333 | /* Did we come from a system call? */ | ||
334 | if (regs->faultnum == INT_SWINT_1) { | ||
335 | /* Restart the system call - no handlers present */ | ||
336 | switch (regs->regs[0]) { | ||
337 | case -ERESTARTNOHAND: | ||
338 | case -ERESTARTSYS: | ||
339 | case -ERESTARTNOINTR: | ||
340 | regs->flags |= PT_FLAGS_CALLER_SAVES; | ||
341 | regs->regs[0] = regs->orig_r0; | ||
342 | regs->pc -= 8; | ||
343 | break; | ||
344 | |||
345 | case -ERESTART_RESTARTBLOCK: | ||
346 | regs->flags |= PT_FLAGS_CALLER_SAVES; | ||
347 | regs->regs[TREG_SYSCALL_NR] = __NR_restart_syscall; | ||
348 | regs->pc -= 8; | ||
349 | break; | ||
350 | } | ||
351 | } | ||
352 | |||
353 | /* If there's no signal to deliver, just put the saved sigmask back. */ | ||
354 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) { | ||
355 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
356 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | ||
357 | } | ||
358 | } | ||
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c new file mode 100644 index 000000000000..5ec4b9c651f2 --- /dev/null +++ b/arch/tile/kernel/single_step.c | |||
@@ -0,0 +1,663 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * A code-rewriter that enables instruction single-stepping. | ||
15 | * Derived from iLib's single-stepping code. | ||
16 | */ | ||
17 | |||
18 | #ifndef __tilegx__ /* No support for single-step yet. */ | ||
19 | |||
20 | /* These functions are only used on the TILE platform */ | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/thread_info.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/mman.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/err.h> | ||
27 | #include <asm/cacheflush.h> | ||
28 | #include <asm/opcode-tile.h> | ||
29 | #include <asm/opcode_constants.h> | ||
30 | #include <arch/abi.h> | ||
31 | |||
32 | #define signExtend17(val) sign_extend((val), 17) | ||
33 | #define TILE_X1_MASK (0xffffffffULL << 31) | ||
34 | |||
35 | int unaligned_printk; | ||
36 | |||
37 | static int __init setup_unaligned_printk(char *str) | ||
38 | { | ||
39 | long val; | ||
40 | if (strict_strtol(str, 0, &val) != 0) | ||
41 | return 0; | ||
42 | unaligned_printk = val; | ||
43 | pr_info("Printk for each unaligned data accesses is %s\n", | ||
44 | unaligned_printk ? "enabled" : "disabled"); | ||
45 | return 1; | ||
46 | } | ||
47 | __setup("unaligned_printk=", setup_unaligned_printk); | ||
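
For reference, the __setup() hook above registers a kernel command-line parameter; booting with, e.g.:

    unaligned_printk=1

enables the per-access printk, matching the effect of later writing "1" to /proc/sys/tile/unaligned_fixup/printk as described in the messages below.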
48 | |||
49 | unsigned int unaligned_fixup_count; | ||
50 | |||
51 | enum mem_op { | ||
52 | MEMOP_NONE, | ||
53 | MEMOP_LOAD, | ||
54 | MEMOP_STORE, | ||
55 | MEMOP_LOAD_POSTINCR, | ||
56 | MEMOP_STORE_POSTINCR | ||
57 | }; | ||
58 | |||
59 | static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, int32_t offset) | ||
60 | { | ||
61 | tile_bundle_bits result; | ||
62 | |||
63 | /* mask out the old offset */ | ||
64 | tile_bundle_bits mask = create_BrOff_X1(-1); | ||
65 | result = n & (~mask); | ||
66 | |||
67 | /* or in the new offset */ | ||
68 | result |= create_BrOff_X1(offset); | ||
69 | |||
70 | return result; | ||
71 | } | ||
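
A standalone sketch of the field-splice idiom used by set_BrOff_X1() above (the function name splice_field is ours, for illustration only): create_BrOff_X1(-1) produces a value with all ones in the field's bit positions, so the same creator doubles as a mask for clearing the old field before OR-ing in the new one.

    #include <stdint.h>

    /* Replace one bit-field of an instruction bundle, leaving the rest
     * untouched. field_all_ones is what create_FIELD(-1) would return.
     */
    static uint64_t splice_field(uint64_t bundle, uint64_t field_all_ones,
                                 uint64_t new_field_bits)
    {
            bundle &= ~field_all_ones;      /* mask out the old value */
            bundle |= new_field_bits;       /* or in the new one */
            return bundle;
    }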
72 | |||
73 | static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src) | ||
74 | { | ||
75 | tile_bundle_bits result; | ||
76 | tile_bundle_bits op; | ||
77 | |||
78 | result = n & (~TILE_X1_MASK); | ||
79 | |||
80 | op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) | | ||
81 | create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) | | ||
82 | create_Dest_X1(dest) | | ||
83 | create_SrcB_X1(TREG_ZERO) | | ||
84 | create_SrcA_X1(src) ; | ||
85 | |||
86 | result |= op; | ||
87 | return result; | ||
88 | } | ||
89 | |||
90 | static inline tile_bundle_bits nop_X1(tile_bundle_bits n) | ||
91 | { | ||
92 | return move_X1(n, TREG_ZERO, TREG_ZERO); | ||
93 | } | ||
94 | |||
95 | static inline tile_bundle_bits addi_X1( | ||
96 | tile_bundle_bits n, int dest, int src, int imm) | ||
97 | { | ||
98 | n &= ~TILE_X1_MASK; | ||
99 | |||
100 | n |= (create_SrcA_X1(src) | | ||
101 | create_Dest_X1(dest) | | ||
102 | create_Imm8_X1(imm) | | ||
103 | create_S_X1(0) | | ||
104 | create_Opcode_X1(IMM_0_OPCODE_X1) | | ||
105 | create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1)); | ||
106 | |||
107 | return n; | ||
108 | } | ||
109 | |||
110 | static tile_bundle_bits rewrite_load_store_unaligned( | ||
111 | struct single_step_state *state, | ||
112 | tile_bundle_bits bundle, | ||
113 | struct pt_regs *regs, | ||
114 | enum mem_op mem_op, | ||
115 | int size, int sign_ext) | ||
116 | { | ||
117 | unsigned char __user *addr; | ||
118 | int val_reg, addr_reg, err, val; | ||
119 | |||
120 | /* Get address and value registers */ | ||
121 | if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) { | ||
122 | addr_reg = get_SrcA_Y2(bundle); | ||
123 | val_reg = get_SrcBDest_Y2(bundle); | ||
124 | } else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) { | ||
125 | addr_reg = get_SrcA_X1(bundle); | ||
126 | val_reg = get_Dest_X1(bundle); | ||
127 | } else { | ||
128 | addr_reg = get_SrcA_X1(bundle); | ||
129 | val_reg = get_SrcB_X1(bundle); | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * If either register is not a GPR, don't try to handle it. | ||
134 | * | ||
135 | * FIXME: we could handle non-GPR loads by getting the real value | ||
136 | * from memory, writing it to the single step buffer, using a | ||
137 | * temp_reg to hold a pointer to that memory, then executing that | ||
138 | * instruction and resetting temp_reg. For non-GPR stores, it's a | ||
139 | * little trickier; we could use the single step buffer for that | ||
140 | * too, but we'd have to add some more state bits so that we could | ||
141 | * call back in here to copy that value to the real target. For | ||
142 | * now, we just handle the simple case. | ||
143 | */ | ||
144 | if ((val_reg >= PTREGS_NR_GPRS && | ||
145 | (val_reg != TREG_ZERO || | ||
146 | mem_op == MEMOP_LOAD || | ||
147 | mem_op == MEMOP_LOAD_POSTINCR)) || | ||
148 | addr_reg >= PTREGS_NR_GPRS) | ||
149 | return bundle; | ||
150 | |||
151 | /* If it's aligned, don't handle it specially */ | ||
152 | addr = (void __user *)regs->regs[addr_reg]; | ||
153 | if (((unsigned long)addr % size) == 0) | ||
154 | return bundle; | ||
155 | |||
156 | #ifndef __LITTLE_ENDIAN | ||
157 | # error We assume little-endian representation with copy_xx_user size 2 here | ||
158 | #endif | ||
159 | /* Handle unaligned load/store */ | ||
160 | if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) { | ||
161 | unsigned short val_16; | ||
162 | switch (size) { | ||
163 | case 2: | ||
164 | err = copy_from_user(&val_16, addr, sizeof(val_16)); | ||
165 | val = sign_ext ? ((short)val_16) : val_16; | ||
166 | break; | ||
167 | case 4: | ||
168 | err = copy_from_user(&val, addr, sizeof(val)); | ||
169 | break; | ||
170 | default: | ||
171 | BUG(); | ||
172 | } | ||
173 | if (err == 0) { | ||
174 | state->update_reg = val_reg; | ||
175 | state->update_value = val; | ||
176 | state->update = 1; | ||
177 | } | ||
178 | } else { | ||
179 | val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg]; | ||
180 | err = copy_to_user(addr, &val, size); | ||
181 | } | ||
182 | |||
183 | if (err) { | ||
184 | siginfo_t info = { | ||
185 | .si_signo = SIGSEGV, | ||
186 | .si_code = SEGV_MAPERR, | ||
187 | .si_addr = addr | ||
188 | }; | ||
189 | force_sig_info(info.si_signo, &info, current); | ||
190 | return (tile_bundle_bits) 0; | ||
191 | } | ||
192 | |||
193 | if (unaligned_fixup == 0) { | ||
194 | siginfo_t info = { | ||
195 | .si_signo = SIGBUS, | ||
196 | .si_code = BUS_ADRALN, | ||
197 | .si_addr = addr | ||
198 | }; | ||
199 | force_sig_info(info.si_signo, &info, current); | ||
200 | return (tile_bundle_bits) 0; | ||
201 | } | ||
202 | |||
203 | if (unaligned_printk || unaligned_fixup_count == 0) { | ||
204 | pr_info("Process %d/%s: PC %#lx: Fixup of" | ||
205 | " unaligned %s at %#lx.\n", | ||
206 | current->pid, current->comm, regs->pc, | ||
207 | (mem_op == MEMOP_LOAD || | ||
208 | mem_op == MEMOP_LOAD_POSTINCR) ? | ||
209 | "load" : "store", | ||
210 | (unsigned long)addr); | ||
211 | if (!unaligned_printk) { | ||
212 | #define P pr_info | ||
213 | P("\n"); | ||
214 | P("Unaligned fixups in the kernel will slow your application considerably.\n"); | ||
215 | P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n"); | ||
216 | P("which requests the kernel show all unaligned fixups, or write a \"0\"\n"); | ||
217 | P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n"); | ||
218 | P("access will become a SIGBUS you can debug. No further warnings will be\n"); | ||
219 | P("shown so as to avoid additional slowdown, but you can track the number\n"); | ||
220 | P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n"); | ||
221 | P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n"); | ||
222 | P("\n"); | ||
223 | #undef P | ||
224 | } | ||
225 | } | ||
226 | ++unaligned_fixup_count; | ||
227 | |||
228 | if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) { | ||
229 | /* Convert the Y2 instruction to a prefetch. */ | ||
230 | bundle &= ~(create_SrcBDest_Y2(-1) | | ||
231 | create_Opcode_Y2(-1)); | ||
232 | bundle |= (create_SrcBDest_Y2(TREG_ZERO) | | ||
233 | create_Opcode_Y2(LW_OPCODE_Y2)); | ||
234 | /* Replace the load postincr with an addi */ | ||
235 | } else if (mem_op == MEMOP_LOAD_POSTINCR) { | ||
236 | bundle = addi_X1(bundle, addr_reg, addr_reg, | ||
237 | get_Imm8_X1(bundle)); | ||
238 | /* Replace the store postincr with an addi */ | ||
239 | } else if (mem_op == MEMOP_STORE_POSTINCR) { | ||
240 | bundle = addi_X1(bundle, addr_reg, addr_reg, | ||
241 | get_Dest_Imm8_X1(bundle)); | ||
242 | } else { | ||
243 | /* Convert the X1 instruction to a nop. */ | ||
244 | bundle &= ~(create_Opcode_X1(-1) | | ||
245 | create_UnShOpcodeExtension_X1(-1) | | ||
246 | create_UnOpcodeExtension_X1(-1)); | ||
247 | bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) | | ||
248 | create_UnShOpcodeExtension_X1( | ||
249 | UN_0_SHUN_0_OPCODE_X1) | | ||
250 | create_UnOpcodeExtension_X1( | ||
251 | NOP_UN_0_SHUN_0_OPCODE_X1)); | ||
252 | } | ||
253 | |||
254 | return bundle; | ||
255 | } | ||
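
A userspace-style sketch of the load half of the fixup above (assuming little-endian, as the #error above enforces): the unaligned access is emulated with a byte-safe copy instead of a single word load.

    #include <stdint.h>
    #include <string.h>

    static int32_t emulate_unaligned_lw(const unsigned char *addr)
    {
            int32_t val;
            memcpy(&val, addr, sizeof(val)); /* legal at any alignment */
            return val;
    }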
256 | |||
257 | /** | ||
258 | * single_step_once() - entry point when single stepping has been triggered. | ||
259 | * @regs: The machine register state | ||
260 | * | ||
261 | * When we arrive at this routine via a trampoline, the single step | ||
262 | * engine copies the executing bundle to the single step buffer. | ||
263 | * If the instruction is a conditional branch, then the target is | ||
264 | * reset to one past the next instruction. If the instruction | ||
265 | * sets the lr, then that is noted. If the instruction is a jump | ||
266 | * or call, then the new target pc is preserved and the current | ||
267 | * bundle is rewritten as a nop. | ||
268 | * | ||
269 | * The necessary post-single-step rewriting information is stored in | ||
270 | * single_step_state. We use data segment values because the | ||
271 | * stack will be rewound when we run the rewritten single-stepped | ||
272 | * instruction. | ||
273 | */ | ||
274 | void single_step_once(struct pt_regs *regs) | ||
275 | { | ||
276 | extern tile_bundle_bits __single_step_ill_insn; | ||
277 | extern tile_bundle_bits __single_step_j_insn; | ||
278 | extern tile_bundle_bits __single_step_addli_insn; | ||
279 | extern tile_bundle_bits __single_step_auli_insn; | ||
280 | struct thread_info *info = (void *)current_thread_info(); | ||
281 | struct single_step_state *state = info->step_state; | ||
282 | int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP); | ||
283 | tile_bundle_bits __user *buffer, *pc; | ||
284 | tile_bundle_bits bundle; | ||
285 | int temp_reg; | ||
286 | int target_reg = TREG_LR; | ||
287 | int err; | ||
288 | enum mem_op mem_op = MEMOP_NONE; | ||
289 | int size = 0, sign_ext = 0; /* happy compiler */ | ||
290 | |||
291 | asm( | ||
292 | " .pushsection .rodata.single_step\n" | ||
293 | " .align 8\n" | ||
294 | " .globl __single_step_ill_insn\n" | ||
295 | "__single_step_ill_insn:\n" | ||
296 | " ill\n" | ||
297 | " .globl __single_step_addli_insn\n" | ||
298 | "__single_step_addli_insn:\n" | ||
299 | " { nop; addli r0, zero, 0 }\n" | ||
300 | " .globl __single_step_auli_insn\n" | ||
301 | "__single_step_auli_insn:\n" | ||
302 | " { nop; auli r0, r0, 0 }\n" | ||
303 | " .globl __single_step_j_insn\n" | ||
304 | "__single_step_j_insn:\n" | ||
305 | " j .\n" | ||
306 | " .popsection\n" | ||
307 | ); | ||
308 | |||
309 | if (state == NULL) { | ||
310 | /* allocate the per-thread single-step state structure */ | ||
311 | state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL); | ||
312 | if (state == NULL) { | ||
313 | pr_err("Out of kernel memory trying to single-step\n"); | ||
314 | return; | ||
315 | } | ||
316 | |||
317 | /* allocate a cache line of writable, executable memory */ | ||
318 | down_write(¤t->mm->mmap_sem); | ||
319 | buffer = (void __user *) do_mmap(NULL, 0, 64, | ||
320 | PROT_EXEC | PROT_READ | PROT_WRITE, | ||
321 | MAP_PRIVATE | MAP_ANONYMOUS, | ||
322 | 0); | ||
323 | up_write(¤t->mm->mmap_sem); | ||
324 | |||
325 | if (IS_ERR((void __force *)buffer)) { | ||
326 | kfree(state); | ||
327 | pr_err("Out of kernel pages trying to single-step\n"); | ||
328 | return; | ||
329 | } | ||
330 | |||
331 | state->buffer = buffer; | ||
332 | state->is_enabled = 0; | ||
333 | |||
334 | info->step_state = state; | ||
335 | |||
336 | /* Validate our stored instruction patterns */ | ||
337 | BUG_ON(get_Opcode_X1(__single_step_addli_insn) != | ||
338 | ADDLI_OPCODE_X1); | ||
339 | BUG_ON(get_Opcode_X1(__single_step_auli_insn) != | ||
340 | AULI_OPCODE_X1); | ||
341 | BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO); | ||
342 | BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0); | ||
343 | BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0); | ||
344 | } | ||
345 | |||
346 | /* | ||
347 | * If we are returning from a syscall, we still haven't hit the | ||
348 | * "ill" for the swint1 instruction. So back the PC up to be | ||
349 | * pointing at the swint1, but we'll actually return directly | ||
350 | * back to the "ill" so we come back in via SIGILL as if we | ||
351 | * had "executed" the swint1 without ever being in kernel space. | ||
352 | */ | ||
353 | if (regs->faultnum == INT_SWINT_1) | ||
354 | regs->pc -= 8; | ||
355 | |||
356 | pc = (tile_bundle_bits __user *)(regs->pc); | ||
357 | if (get_user(bundle, pc) != 0) { | ||
358 | pr_err("Couldn't read instruction at %p trying to step\n", pc); | ||
359 | return; | ||
360 | } | ||
361 | |||
362 | /* We'll follow the instruction with 2 ill op bundles */ | ||
363 | state->orig_pc = (unsigned long)pc; | ||
364 | state->next_pc = (unsigned long)(pc + 1); | ||
365 | state->branch_next_pc = 0; | ||
366 | state->update = 0; | ||
367 | |||
368 | if (!(bundle & TILE_BUNDLE_Y_ENCODING_MASK)) { | ||
369 | /* two wide, check for control flow */ | ||
370 | int opcode = get_Opcode_X1(bundle); | ||
371 | |||
372 | switch (opcode) { | ||
373 | /* branches */ | ||
374 | case BRANCH_OPCODE_X1: | ||
375 | { | ||
376 | int32_t offset = signExtend17(get_BrOff_X1(bundle)); | ||
377 | |||
378 | /* | ||
379 | * For branches, we use a rewriting trick to let the | ||
380 | * hardware evaluate whether the branch is taken or | ||
381 | * untaken. We record the target offset and then | ||
382 | * rewrite the branch instruction to target 1 insn | ||
383 | * ahead if the branch is taken. We then follow the | ||
384 | * rewritten branch with two bundles, each containing | ||
385 | * an "ill" instruction. The supervisor examines the | ||
386 | * pc after the single step code is executed, and if | ||
387 | * the pc is the first ill instruction, then the | ||
388 | * branch (if any) was not taken. If the pc is the | ||
389 | * second ill instruction, then the branch was | ||
390 | * taken. The new pc is computed for these cases, and | ||
391 | * inserted into the registers for the thread. If | ||
392 | * the pc is the start of the single step code, then | ||
393 | * an exception or interrupt was taken before the | ||
394 | * code started processing, and the same "original" | ||
395 | * pc is restored. This change, different from the | ||
396 | * original implementation, has the advantage of | ||
397 | * executing a single user instruction. | ||
398 | */ | ||
399 | state->branch_next_pc = (unsigned long)(pc + offset); | ||
400 | |||
401 | /* rewrite branch offset to go forward one bundle */ | ||
402 | bundle = set_BrOff_X1(bundle, 2); | ||
403 | } | ||
404 | break; | ||
405 | |||
406 | /* jumps */ | ||
407 | case JALB_OPCODE_X1: | ||
408 | case JALF_OPCODE_X1: | ||
409 | state->update = 1; | ||
410 | state->next_pc = | ||
411 | (unsigned long) (pc + get_JOffLong_X1(bundle)); | ||
412 | break; | ||
413 | |||
414 | case JB_OPCODE_X1: | ||
415 | case JF_OPCODE_X1: | ||
416 | state->next_pc = | ||
417 | (unsigned long) (pc + get_JOffLong_X1(bundle)); | ||
418 | bundle = nop_X1(bundle); | ||
419 | break; | ||
420 | |||
421 | case SPECIAL_0_OPCODE_X1: | ||
422 | switch (get_RRROpcodeExtension_X1(bundle)) { | ||
423 | /* jump-register */ | ||
424 | case JALRP_SPECIAL_0_OPCODE_X1: | ||
425 | case JALR_SPECIAL_0_OPCODE_X1: | ||
426 | state->update = 1; | ||
427 | state->next_pc = | ||
428 | regs->regs[get_SrcA_X1(bundle)]; | ||
429 | break; | ||
430 | |||
431 | case JRP_SPECIAL_0_OPCODE_X1: | ||
432 | case JR_SPECIAL_0_OPCODE_X1: | ||
433 | state->next_pc = | ||
434 | regs->regs[get_SrcA_X1(bundle)]; | ||
435 | bundle = nop_X1(bundle); | ||
436 | break; | ||
437 | |||
438 | case LNK_SPECIAL_0_OPCODE_X1: | ||
439 | state->update = 1; | ||
440 | target_reg = get_Dest_X1(bundle); | ||
441 | break; | ||
442 | |||
443 | /* stores */ | ||
444 | case SH_SPECIAL_0_OPCODE_X1: | ||
445 | mem_op = MEMOP_STORE; | ||
446 | size = 2; | ||
447 | break; | ||
448 | |||
449 | case SW_SPECIAL_0_OPCODE_X1: | ||
450 | mem_op = MEMOP_STORE; | ||
451 | size = 4; | ||
452 | break; | ||
453 | } | ||
454 | break; | ||
455 | |||
456 | /* loads and iret */ | ||
457 | case SHUN_0_OPCODE_X1: | ||
458 | if (get_UnShOpcodeExtension_X1(bundle) == | ||
459 | UN_0_SHUN_0_OPCODE_X1) { | ||
460 | switch (get_UnOpcodeExtension_X1(bundle)) { | ||
461 | case LH_UN_0_SHUN_0_OPCODE_X1: | ||
462 | mem_op = MEMOP_LOAD; | ||
463 | size = 2; | ||
464 | sign_ext = 1; | ||
465 | break; | ||
466 | |||
467 | case LH_U_UN_0_SHUN_0_OPCODE_X1: | ||
468 | mem_op = MEMOP_LOAD; | ||
469 | size = 2; | ||
470 | sign_ext = 0; | ||
471 | break; | ||
472 | |||
473 | case LW_UN_0_SHUN_0_OPCODE_X1: | ||
474 | mem_op = MEMOP_LOAD; | ||
475 | size = 4; | ||
476 | break; | ||
477 | |||
478 | case IRET_UN_0_SHUN_0_OPCODE_X1: | ||
479 | { | ||
480 | unsigned long ex0_0 = __insn_mfspr( | ||
481 | SPR_EX_CONTEXT_0_0); | ||
482 | unsigned long ex0_1 = __insn_mfspr( | ||
483 | SPR_EX_CONTEXT_0_1); | ||
484 | /* | ||
485 | * Special-case it if we're iret'ing | ||
486 | * to PL0 again. Otherwise just let | ||
487 | * it run and it will generate SIGILL. | ||
488 | */ | ||
489 | if (EX1_PL(ex0_1) == USER_PL) { | ||
490 | state->next_pc = ex0_0; | ||
491 | regs->ex1 = ex0_1; | ||
492 | bundle = nop_X1(bundle); | ||
493 | } | ||
494 | } | ||
495 | } | ||
496 | } | ||
497 | break; | ||
498 | |||
499 | #if CHIP_HAS_WH64() | ||
500 | /* postincrement operations */ | ||
501 | case IMM_0_OPCODE_X1: | ||
502 | switch (get_ImmOpcodeExtension_X1(bundle)) { | ||
503 | case LWADD_IMM_0_OPCODE_X1: | ||
504 | mem_op = MEMOP_LOAD_POSTINCR; | ||
505 | size = 4; | ||
506 | break; | ||
507 | |||
508 | case LHADD_IMM_0_OPCODE_X1: | ||
509 | mem_op = MEMOP_LOAD_POSTINCR; | ||
510 | size = 2; | ||
511 | sign_ext = 1; | ||
512 | break; | ||
513 | |||
514 | case LHADD_U_IMM_0_OPCODE_X1: | ||
515 | mem_op = MEMOP_LOAD_POSTINCR; | ||
516 | size = 2; | ||
517 | sign_ext = 0; | ||
518 | break; | ||
519 | |||
520 | case SWADD_IMM_0_OPCODE_X1: | ||
521 | mem_op = MEMOP_STORE_POSTINCR; | ||
522 | size = 4; | ||
523 | break; | ||
524 | |||
525 | case SHADD_IMM_0_OPCODE_X1: | ||
526 | mem_op = MEMOP_STORE_POSTINCR; | ||
527 | size = 2; | ||
528 | break; | ||
529 | |||
530 | default: | ||
531 | break; | ||
532 | } | ||
533 | break; | ||
534 | #endif /* CHIP_HAS_WH64() */ | ||
535 | } | ||
536 | |||
537 | if (state->update) { | ||
538 | /* | ||
539 | * Get an available register. We start with a | ||
540 | * bitmask with 1's for available registers. | ||
541 | * We truncate to the low 32 registers since | ||
542 | * we are guaranteed to have set bits in the | ||
543 | * low 32 bits, then use ctz to pick the first. | ||
544 | */ | ||
545 | u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) | | ||
546 | (1ULL << get_SrcA_X0(bundle)) | | ||
547 | (1ULL << get_SrcB_X0(bundle)) | | ||
548 | (1ULL << target_reg)); | ||
549 | temp_reg = __builtin_ctz(mask); | ||
550 | state->update_reg = temp_reg; | ||
551 | state->update_value = regs->regs[temp_reg]; | ||
552 | regs->regs[temp_reg] = (unsigned long) (pc+1); | ||
553 | regs->flags |= PT_FLAGS_RESTORE_REGS; | ||
554 | bundle = move_X1(bundle, target_reg, temp_reg); | ||
555 | } | ||
556 | } else { | ||
557 | int opcode = get_Opcode_Y2(bundle); | ||
558 | |||
559 | switch (opcode) { | ||
560 | /* loads */ | ||
561 | case LH_OPCODE_Y2: | ||
562 | mem_op = MEMOP_LOAD; | ||
563 | size = 2; | ||
564 | sign_ext = 1; | ||
565 | break; | ||
566 | |||
567 | case LH_U_OPCODE_Y2: | ||
568 | mem_op = MEMOP_LOAD; | ||
569 | size = 2; | ||
570 | sign_ext = 0; | ||
571 | break; | ||
572 | |||
573 | case LW_OPCODE_Y2: | ||
574 | mem_op = MEMOP_LOAD; | ||
575 | size = 4; | ||
576 | break; | ||
577 | |||
578 | /* stores */ | ||
579 | case SH_OPCODE_Y2: | ||
580 | mem_op = MEMOP_STORE; | ||
581 | size = 2; | ||
582 | break; | ||
583 | |||
584 | case SW_OPCODE_Y2: | ||
585 | mem_op = MEMOP_STORE; | ||
586 | size = 4; | ||
587 | break; | ||
588 | } | ||
589 | } | ||
590 | |||
591 | /* | ||
592 | * Check if we need to rewrite an unaligned load/store. | ||
593 | * Returning zero is a special value meaning we need to SIGSEGV. | ||
594 | */ | ||
595 | if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) { | ||
596 | bundle = rewrite_load_store_unaligned(state, bundle, regs, | ||
597 | mem_op, size, sign_ext); | ||
598 | if (bundle == 0) | ||
599 | return; | ||
600 | } | ||
601 | |||
602 | /* write the bundle to our execution area */ | ||
603 | buffer = state->buffer; | ||
604 | err = __put_user(bundle, buffer++); | ||
605 | |||
606 | /* | ||
607 | * If we're really single-stepping, we take an INT_ILL after. | ||
608 | * If we're just handling an unaligned access, we can just | ||
609 | * jump directly back to where we were in user code. | ||
610 | */ | ||
611 | if (is_single_step) { | ||
612 | err |= __put_user(__single_step_ill_insn, buffer++); | ||
613 | err |= __put_user(__single_step_ill_insn, buffer++); | ||
614 | } else { | ||
615 | long delta; | ||
616 | |||
617 | if (state->update) { | ||
618 | /* We have some state to update; do it inline */ | ||
619 | int ha16; | ||
620 | bundle = __single_step_addli_insn; | ||
621 | bundle |= create_Dest_X1(state->update_reg); | ||
622 | bundle |= create_Imm16_X1(state->update_value); | ||
623 | err |= __put_user(bundle, buffer++); | ||
624 | bundle = __single_step_auli_insn; | ||
625 | bundle |= create_Dest_X1(state->update_reg); | ||
626 | bundle |= create_SrcA_X1(state->update_reg); | ||
627 | ha16 = (state->update_value + 0x8000) >> 16; | ||
628 | bundle |= create_Imm16_X1(ha16); | ||
629 | err |= __put_user(bundle, buffer++); | ||
630 | state->update = 0; | ||
631 | } | ||
632 | |||
633 | /* End with a jump back to the next instruction */ | ||
634 | delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) - | ||
635 | (unsigned long)buffer) >> | ||
636 | TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES; | ||
637 | bundle = __single_step_j_insn; | ||
638 | bundle |= create_JOffLong_X1(delta); | ||
639 | err |= __put_user(bundle, buffer++); | ||
640 | } | ||
641 | |||
642 | if (err) { | ||
643 | pr_err("Fault when writing to single-step buffer\n"); | ||
644 | return; | ||
645 | } | ||
646 | |||
647 | /* | ||
648 | * Flush the buffer. | ||
649 | * We do a local flush only, since this is a thread-specific buffer. | ||
650 | */ | ||
651 | __flush_icache_range((unsigned long)state->buffer, | ||
652 | (unsigned long)buffer); | ||
653 | |||
654 | /* Indicate enabled */ | ||
655 | state->is_enabled = is_single_step; | ||
656 | regs->pc = (unsigned long)state->buffer; | ||
657 | |||
658 | /* Fault immediately if we are coming back from a syscall. */ | ||
659 | if (regs->faultnum == INT_SWINT_1) | ||
660 | regs->pc += 8; | ||
661 | } | ||
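
The addli/auli pair written into the buffer above splits a 32-bit value into a sign-extended low half and a "high-adjusted" upper half (the (value + 0x8000) >> 16 computation). A standalone check of that split, under the assumption the code above relies on, namely that addli sign-extends its 16-bit immediate:

    #include <assert.h>
    #include <stdint.h>

    static void check_ha16_split(uint32_t value)
    {
            int16_t  lo   = (int16_t)(value & 0xffff);           /* addli imm */
            uint16_t ha16 = (uint16_t)((value + 0x8000) >> 16);  /* auli imm */
            /* auli adds ha16 << 16 to the sign-extended low half */
            uint32_t rebuilt = ((uint32_t)ha16 << 16) + (uint32_t)(int32_t)lo;
            assert(rebuilt == value);
    }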
662 | |||
663 | #endif /* !__tilegx__ */ | ||
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c new file mode 100644 index 000000000000..1cb5ec79de04 --- /dev/null +++ b/arch/tile/kernel/smp.c | |||
@@ -0,0 +1,256 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * TILE SMP support routines. | ||
15 | */ | ||
16 | |||
17 | #include <linux/smp.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/io.h> | ||
20 | #include <linux/irq.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <asm/cacheflush.h> | ||
23 | |||
24 | HV_Topology smp_topology __write_once; | ||
25 | EXPORT_SYMBOL(smp_topology); | ||
26 | |||
27 | #if CHIP_HAS_IPI() | ||
28 | static unsigned long __iomem *ipi_mappings[NR_CPUS]; | ||
29 | #endif | ||
30 | |||
31 | |||
32 | /* | ||
33 | * Top-level send_IPI*() functions to send messages to other cpus. | ||
34 | */ | ||
35 | |||
36 | /* Set by smp_send_stop() to avoid recursive panics. */ | ||
37 | static int stopping_cpus; | ||
38 | |||
39 | void send_IPI_single(int cpu, int tag) | ||
40 | { | ||
41 | HV_Recipient recip = { | ||
42 | .y = cpu / smp_width, | ||
43 | .x = cpu % smp_width, | ||
44 | .state = HV_TO_BE_SENT | ||
45 | }; | ||
46 | int rc = hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag)); | ||
47 | BUG_ON(rc <= 0); | ||
48 | } | ||
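
The y/x computation above is the linear-cpu-to-mesh mapping used throughout this file: cpus are numbered row-major across an smp_width-wide grid. A small standalone sketch (struct and function names are ours):

    struct mesh_coord { int x, y; };

    static struct mesh_coord cpu_to_mesh(int cpu, int width)
    {
            /* row-major: x is the column, y is the row */
            struct mesh_coord c = { .x = cpu % width, .y = cpu / width };
            return c;
    }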
49 | |||
50 | void send_IPI_many(const struct cpumask *mask, int tag) | ||
51 | { | ||
52 | HV_Recipient recip[NR_CPUS]; | ||
53 | int cpu, sent; | ||
54 | int nrecip = 0; | ||
55 | int my_cpu = smp_processor_id(); | ||
56 | for_each_cpu(cpu, mask) { | ||
57 | HV_Recipient *r; | ||
58 | BUG_ON(cpu == my_cpu); | ||
59 | r = &recip[nrecip++]; | ||
60 | r->y = cpu / smp_width; | ||
61 | r->x = cpu % smp_width; | ||
62 | r->state = HV_TO_BE_SENT; | ||
63 | } | ||
64 | sent = 0; | ||
65 | while (sent < nrecip) { | ||
66 | int rc = hv_send_message(recip, nrecip, | ||
67 | (HV_VirtAddr)&tag, sizeof(tag)); | ||
68 | if (rc <= 0) { | ||
69 | if (!stopping_cpus) /* avoid recursive panic */ | ||
70 | panic("hv_send_message returned %d", rc); | ||
71 | break; | ||
72 | } | ||
73 | sent += rc; | ||
74 | } | ||
75 | } | ||
76 | |||
77 | void send_IPI_allbutself(int tag) | ||
78 | { | ||
79 | struct cpumask mask; | ||
80 | cpumask_copy(&mask, cpu_online_mask); | ||
81 | cpumask_clear_cpu(smp_processor_id(), &mask); | ||
82 | send_IPI_many(&mask, tag); | ||
83 | } | ||
84 | |||
85 | |||
86 | /* | ||
87 | * Provide smp_call_function_mask(), but also run the function | ||
88 | * locally if the current cpu is present in the mask. | ||
89 | */ | ||
90 | void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *), | ||
91 | void *info, bool wait) | ||
92 | { | ||
93 | int cpu = get_cpu(); | ||
94 | smp_call_function_many(mask, func, info, wait); | ||
95 | if (cpumask_test_cpu(cpu, mask)) { | ||
96 | local_irq_disable(); | ||
97 | func(info); | ||
98 | local_irq_enable(); | ||
99 | } | ||
100 | put_cpu(); | ||
101 | } | ||
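
A hypothetical caller of on_each_cpu_mask() as defined above; note that the local invocation runs with interrupts disabled, so the callback must not sleep.

    static void poke_cpu(void *info)
    {
            /* per-cpu work; runs in IPI or irqs-off context */
    }

    static void poke_all_online(void)
    {
            on_each_cpu_mask(cpu_online_mask, poke_cpu, NULL, 1);
    }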
102 | |||
103 | |||
104 | /* | ||
105 | * Functions related to starting/stopping cpus. | ||
106 | */ | ||
107 | |||
108 | /* Handler to start the current cpu. */ | ||
109 | static void smp_start_cpu_interrupt(void) | ||
110 | { | ||
111 | get_irq_regs()->pc = start_cpu_function_addr; | ||
112 | } | ||
113 | |||
114 | /* Handler to stop the current cpu. */ | ||
115 | static void smp_stop_cpu_interrupt(void) | ||
116 | { | ||
117 | set_cpu_online(smp_processor_id(), 0); | ||
118 | raw_local_irq_disable_all(); | ||
119 | for (;;) | ||
120 | asm("nap"); | ||
121 | } | ||
122 | |||
123 | /* This function calls the 'stop' function on all other CPUs in the system. */ | ||
124 | void smp_send_stop(void) | ||
125 | { | ||
126 | stopping_cpus = 1; | ||
127 | send_IPI_allbutself(MSG_TAG_STOP_CPU); | ||
128 | } | ||
129 | |||
130 | |||
131 | /* | ||
132 | * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages. | ||
133 | */ | ||
134 | void evaluate_message(int tag) | ||
135 | { | ||
136 | switch (tag) { | ||
137 | case MSG_TAG_START_CPU: /* Start up a cpu */ | ||
138 | smp_start_cpu_interrupt(); | ||
139 | break; | ||
140 | |||
141 | case MSG_TAG_STOP_CPU: /* Sent to shut down slave CPUs */ | ||
142 | smp_stop_cpu_interrupt(); | ||
143 | break; | ||
144 | |||
145 | case MSG_TAG_CALL_FUNCTION_MANY: /* Call function on cpumask */ | ||
146 | generic_smp_call_function_interrupt(); | ||
147 | break; | ||
148 | |||
149 | case MSG_TAG_CALL_FUNCTION_SINGLE: /* Call function on one other CPU */ | ||
150 | generic_smp_call_function_single_interrupt(); | ||
151 | break; | ||
152 | |||
153 | default: | ||
154 | panic("Unknown IPI message tag %d", tag); | ||
155 | break; | ||
156 | } | ||
157 | } | ||
158 | |||
159 | |||
160 | /* | ||
161 | * flush_icache_range() code uses on_each_cpu(). | ||
162 | */ | ||
163 | |||
164 | struct ipi_flush { | ||
165 | unsigned long start; | ||
166 | unsigned long end; | ||
167 | }; | ||
168 | |||
169 | static void ipi_flush_icache_range(void *info) | ||
170 | { | ||
171 | struct ipi_flush *flush = (struct ipi_flush *) info; | ||
172 | __flush_icache_range(flush->start, flush->end); | ||
173 | } | ||
174 | |||
175 | void flush_icache_range(unsigned long start, unsigned long end) | ||
176 | { | ||
177 | struct ipi_flush flush = { start, end }; | ||
178 | preempt_disable(); | ||
179 | on_each_cpu(ipi_flush_icache_range, &flush, 1); | ||
180 | preempt_enable(); | ||
181 | } | ||
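
An illustrative (hypothetical) caller: after patching instructions, the flush_icache_range() above broadcasts the flush to every cpu, in contrast to the local-only __flush_icache_range() used for the per-thread single-step buffer in single_step.c above.

    static void publish_patched_code(unsigned long addr, unsigned long len)
    {
            flush_icache_range(addr, addr + len);
    }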
182 | |||
183 | |||
184 | /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */ | ||
185 | static irqreturn_t handle_reschedule_ipi(int irq, void *token) | ||
186 | { | ||
187 | /* | ||
188 | * Nothing to do here; when we return from interrupt, the | ||
189 | * rescheduling will occur there. But do bump the interrupt | ||
190 | * profiler count in the meantime. | ||
191 | */ | ||
192 | __get_cpu_var(irq_stat).irq_resched_count++; | ||
193 | |||
194 | return IRQ_HANDLED; | ||
195 | } | ||
196 | |||
197 | static struct irqaction resched_action = { | ||
198 | .handler = handle_reschedule_ipi, | ||
199 | .name = "resched", | ||
200 | .dev_id = handle_reschedule_ipi /* unique token */, | ||
201 | }; | ||
202 | |||
203 | void __init ipi_init(void) | ||
204 | { | ||
205 | #if CHIP_HAS_IPI() | ||
206 | int cpu; | ||
207 | /* Map IPI trigger MMIO addresses. */ | ||
208 | for_each_possible_cpu(cpu) { | ||
209 | HV_Coord tile; | ||
210 | HV_PTE pte; | ||
211 | unsigned long offset; | ||
212 | |||
213 | tile.x = cpu_x(cpu); | ||
214 | tile.y = cpu_y(cpu); | ||
215 | if (hv_get_ipi_pte(tile, 1, &pte) != 0) | ||
216 | panic("Failed to initialize IPI for cpu %d\n", cpu); | ||
217 | |||
218 | offset = hv_pte_get_pfn(pte) << PAGE_SHIFT; | ||
219 | ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte); | ||
220 | } | ||
221 | #endif | ||
222 | |||
223 | /* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */ | ||
224 | tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU); | ||
225 | BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action)); | ||
226 | } | ||
227 | |||
228 | #if CHIP_HAS_IPI() | ||
229 | |||
230 | void smp_send_reschedule(int cpu) | ||
231 | { | ||
232 | WARN_ON(cpu_is_offline(cpu)); | ||
233 | |||
234 | /* | ||
235 | * We just want to do an MMIO store. The traditional writeq() | ||
236 | * functions aren't really correct here, since they're always | ||
237 | * directed at the PCI shim. For now, just do a raw store, | ||
238 | * casting away the __iomem attribute. | ||
239 | */ | ||
240 | ((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0; | ||
241 | } | ||
242 | |||
243 | #else | ||
244 | |||
245 | void smp_send_reschedule(int cpu) | ||
246 | { | ||
247 | HV_Coord coord; | ||
248 | |||
249 | WARN_ON(cpu_is_offline(cpu)); | ||
250 | |||
251 | coord.y = cpu_y(cpu); | ||
252 | coord.x = cpu_x(cpu); | ||
253 | hv_trigger_ipi(coord, IRQ_RESCHEDULE); | ||
254 | } | ||
255 | |||
256 | #endif /* CHIP_HAS_IPI() */ | ||
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c new file mode 100644 index 000000000000..74d62d098edf --- /dev/null +++ b/arch/tile/kernel/smpboot.c | |||
@@ -0,0 +1,278 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/kernel_stat.h> | ||
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/bootmem.h> | ||
23 | #include <linux/notifier.h> | ||
24 | #include <linux/cpu.h> | ||
25 | #include <linux/percpu.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/err.h> | ||
28 | #include <linux/irq.h> | ||
29 | #include <asm/mmu_context.h> | ||
30 | #include <asm/tlbflush.h> | ||
31 | #include <asm/sections.h> | ||
32 | |||
33 | /* State of each CPU. */ | ||
34 | static DEFINE_PER_CPU(int, cpu_state) = { 0 }; | ||
35 | |||
36 | /* The messaging code jumps to this pointer during boot-up */ | ||
37 | unsigned long start_cpu_function_addr; | ||
38 | |||
39 | /* Called very early during startup to mark boot cpu as online */ | ||
40 | void __init smp_prepare_boot_cpu(void) | ||
41 | { | ||
42 | int cpu = smp_processor_id(); | ||
43 | set_cpu_online(cpu, 1); | ||
44 | set_cpu_present(cpu, 1); | ||
45 | __get_cpu_var(cpu_state) = CPU_ONLINE; | ||
46 | |||
47 | init_messaging(); | ||
48 | } | ||
49 | |||
50 | static void start_secondary(void); | ||
51 | |||
52 | /* | ||
53 | * Called at the top of init() to launch all the other CPUs. | ||
54 | * They run free to complete their initialization and then wait | ||
55 | * until they get an IPI from the boot cpu to come online. | ||
56 | */ | ||
57 | void __init smp_prepare_cpus(unsigned int max_cpus) | ||
58 | { | ||
59 | long rc; | ||
60 | int cpu, cpu_count; | ||
61 | int boot_cpu = smp_processor_id(); | ||
62 | |||
63 | current_thread_info()->cpu = boot_cpu; | ||
64 | |||
65 | /* | ||
66 | * Pin this task to the boot CPU while we bring up the others, | ||
67 | * just to make sure we don't uselessly migrate as they come up. | ||
68 | */ | ||
69 | rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu)); | ||
70 | if (rc != 0) | ||
71 | pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc); | ||
72 | |||
73 | /* Print information about disabled and dataplane cpus. */ | ||
74 | print_disabled_cpus(); | ||
75 | |||
76 | /* | ||
77 | * Tell the messaging subsystem how to respond to the | ||
78 | * startup message. We use a level of indirection to avoid | ||
79 | * confusing the linker with the fact that the messaging | ||
80 | * subsystem is calling __init code. | ||
81 | */ | ||
82 | start_cpu_function_addr = (unsigned long) &online_secondary; | ||
83 | |||
84 | /* Set up thread context for all new processors. */ | ||
85 | cpu_count = 1; | ||
86 | for (cpu = 0; cpu < NR_CPUS; ++cpu) { | ||
87 | struct task_struct *idle; | ||
88 | |||
89 | if (cpu == boot_cpu) | ||
90 | continue; | ||
91 | |||
92 | if (!cpu_possible(cpu)) { | ||
93 | /* | ||
94 | * Make this processor do nothing on boot. | ||
95 | * Note that we don't give the boot_pc function | ||
96 | * a stack, so it has to be assembly code. | ||
97 | */ | ||
98 | per_cpu(boot_sp, cpu) = 0; | ||
99 | per_cpu(boot_pc, cpu) = (unsigned long) smp_nap; | ||
100 | continue; | ||
101 | } | ||
102 | |||
103 | /* Create a new idle thread to run start_secondary() */ | ||
104 | idle = fork_idle(cpu); | ||
105 | if (IS_ERR(idle)) | ||
106 | panic("failed fork for CPU %d", cpu); | ||
107 | idle->thread.pc = (unsigned long) start_secondary; | ||
108 | |||
109 | /* Make this thread the boot thread for this processor */ | ||
110 | per_cpu(boot_sp, cpu) = task_ksp0(idle); | ||
111 | per_cpu(boot_pc, cpu) = idle->thread.pc; | ||
112 | |||
113 | ++cpu_count; | ||
114 | } | ||
115 | BUG_ON(cpu_count > (max_cpus ? max_cpus : 1)); | ||
116 | |||
117 | /* Fire up the other tiles, if any */ | ||
118 | init_cpu_present(cpu_possible_mask); | ||
119 | if (cpumask_weight(cpu_present_mask) > 1) { | ||
120 | mb(); /* make sure all data is visible to new processors */ | ||
121 | hv_start_all_tiles(); | ||
122 | } | ||
123 | } | ||
124 | |||
125 | static __initdata struct cpumask init_affinity; | ||
126 | |||
127 | static __init int reset_init_affinity(void) | ||
128 | { | ||
129 | long rc = sched_setaffinity(current->pid, &init_affinity); | ||
130 | if (rc != 0) | ||
131 | pr_warning("couldn't reset init affinity (%ld)\n", | ||
132 | rc); | ||
133 | return 0; | ||
134 | } | ||
135 | late_initcall(reset_init_affinity); | ||
136 | |||
137 | static struct cpumask cpu_started __cpuinitdata; | ||
138 | |||
139 | /* | ||
140 | * Activate a secondary processor. Very minimal; don't add anything | ||
141 | * to this path without knowing what you're doing, since SMP booting | ||
142 | * is pretty fragile. | ||
143 | */ | ||
144 | static void __cpuinit start_secondary(void) | ||
145 | { | ||
146 | int cpuid = smp_processor_id(); | ||
147 | |||
148 | /* Set our thread pointer appropriately. */ | ||
149 | set_my_cpu_offset(__per_cpu_offset[cpuid]); | ||
150 | |||
151 | preempt_disable(); | ||
152 | |||
153 | /* | ||
154 | * On large machines even this will slow us down, since we | ||
155 | * will be contending for the printk spinlock. | ||
156 | */ | ||
157 | /* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */ | ||
158 | |||
159 | /* Initialize the current asid for our first page table. */ | ||
160 | __get_cpu_var(current_asid) = min_asid; | ||
161 | |||
162 | /* Set up this thread as another owner of the init_mm */ | ||
163 | atomic_inc(&init_mm.mm_count); | ||
164 | current->active_mm = &init_mm; | ||
165 | if (current->mm) | ||
166 | BUG(); | ||
167 | enter_lazy_tlb(&init_mm, current); | ||
168 | |||
169 | /* Allow hypervisor messages to be received */ | ||
170 | init_messaging(); | ||
171 | local_irq_enable(); | ||
172 | |||
173 | /* Indicate that we're ready to come up. */ | ||
174 | /* Must not do this before we're ready to receive messages */ | ||
175 | if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) { | ||
176 | pr_warning("CPU#%d already started!\n", cpuid); | ||
177 | for (;;) | ||
178 | local_irq_enable(); | ||
179 | } | ||
180 | |||
181 | smp_nap(); | ||
182 | } | ||
183 | |||
184 | /* | ||
185 | * Bring a secondary processor online. | ||
186 | */ | ||
187 | void __cpuinit online_secondary(void) | ||
188 | { | ||
189 | /* | ||
190 | * The low-memory mappings have been cleared; flush them from | ||
191 | * the local TLBs too. | ||
192 | */ | ||
193 | local_flush_tlb(); | ||
194 | |||
195 | BUG_ON(in_interrupt()); | ||
196 | |||
197 | /* This must be done before setting cpu_online_mask */ | ||
198 | wmb(); | ||
199 | |||
200 | /* | ||
201 | * We need to hold call_lock, so there is no inconsistency | ||
202 | * between the time smp_call_function() determines number of | ||
203 | * IPI recipients, and the time when the determination is made | ||
204 | * for which cpus receive the IPI. Holding this | ||
205 | * lock keeps this cpu from being included in an in-progress | ||
206 | * smp_call_function(). | ||
207 | */ | ||
208 | ipi_call_lock(); | ||
209 | set_cpu_online(smp_processor_id(), 1); | ||
210 | ipi_call_unlock(); | ||
211 | __get_cpu_var(cpu_state) = CPU_ONLINE; | ||
212 | |||
213 | /* Set up tile-specific state for this cpu. */ | ||
214 | setup_cpu(0); | ||
215 | |||
216 | /* Set up tile-timer clock-event device on this cpu */ | ||
217 | setup_tile_timer(); | ||
218 | |||
219 | preempt_enable(); | ||
220 | |||
221 | cpu_idle(); | ||
222 | } | ||
223 | |||
224 | int __cpuinit __cpu_up(unsigned int cpu) | ||
225 | { | ||
226 | /* Wait 5s total for all CPUs to come online */ | ||
227 | static int timeout; | ||
228 | for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) { | ||
229 | if (timeout >= 50000) { | ||
230 | pr_info("skipping unresponsive cpu%d\n", cpu); | ||
231 | local_irq_enable(); | ||
232 | return -EIO; | ||
233 | } | ||
234 | udelay(100); | ||
235 | } | ||
236 | |||
237 | local_irq_enable(); | ||
238 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | ||
239 | |||
240 | /* Unleash the CPU! */ | ||
241 | send_IPI_single(cpu, MSG_TAG_START_CPU); | ||
242 | while (!cpumask_test_cpu(cpu, cpu_online_mask)) | ||
243 | cpu_relax(); | ||
244 | return 0; | ||
245 | } | ||
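
The polling loop above budgets 50000 iterations of udelay(100), i.e. roughly 5 seconds, and the static counter deliberately shares that budget across all cpus being brought up. A generic sketch of the same bounded-poll pattern (names are ours; a kernel context is assumed for udelay() and -EIO):

    static int poll_until_set(volatile int *flag, int *budget)
    {
            for (; !*flag; (*budget)++) {
                    if (*budget >= 50000)
                            return -EIO;     /* shared budget exhausted */
                    udelay(100);             /* 50000 * 100us == ~5s */
            }
            return 0;
    }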
246 | |||
247 | static void panic_start_cpu(void) | ||
248 | { | ||
249 | panic("Received a MSG_START_CPU IPI after boot finished."); | ||
250 | } | ||
251 | |||
252 | void __init smp_cpus_done(unsigned int max_cpus) | ||
253 | { | ||
254 | int cpu, next, rc; | ||
255 | |||
256 | /* Reset the response to a (now illegal) MSG_START_CPU IPI. */ | ||
257 | start_cpu_function_addr = (unsigned long) &panic_start_cpu; | ||
258 | |||
259 | cpumask_copy(&init_affinity, cpu_online_mask); | ||
260 | |||
261 | /* | ||
262 | * Pin ourselves to a single cpu in the initial affinity set | ||
263 | * so that kernel mappings for the rootfs are not in the dataplane, | ||
264 | * if set, and to avoid unnecessary migration during bringup. | ||
265 | * Use the last cpu just in case the whole chip has been | ||
266 | * isolated from the scheduler, to keep init away from likely | ||
267 | * more useful user code. This also ensures that work scheduled | ||
268 | * via schedule_delayed_work() in the init routines will land | ||
269 | * on this cpu. | ||
270 | */ | ||
271 | for (cpu = cpumask_first(&init_affinity); | ||
272 | (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids; | ||
273 | cpu = next) | ||
274 | ; | ||
275 | rc = sched_setaffinity(current->pid, cpumask_of(cpu)); | ||
276 | if (rc != 0) | ||
277 | pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc); | ||
278 | } | ||
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c new file mode 100644 index 000000000000..b6268d3ae869 --- /dev/null +++ b/arch/tile/kernel/stack.c | |||
@@ -0,0 +1,486 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/kprobes.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/pfn.h> | ||
20 | #include <linux/kallsyms.h> | ||
21 | #include <linux/stacktrace.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/mmzone.h> | ||
24 | #include <asm/backtrace.h> | ||
25 | #include <asm/page.h> | ||
26 | #include <asm/tlbflush.h> | ||
27 | #include <asm/ucontext.h> | ||
28 | #include <asm/sigframe.h> | ||
29 | #include <asm/stack.h> | ||
30 | #include <arch/abi.h> | ||
31 | #include <arch/interrupts.h> | ||
32 | |||
33 | |||
34 | /* Is address on the specified kernel stack? */ | ||
35 | static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp) | ||
36 | { | ||
37 | ulong kstack_base = (ulong) kbt->task->stack; | ||
38 | if (kstack_base == 0) /* corrupt task pointer; just follow stack... */ | ||
39 | return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory; | ||
40 | return sp >= kstack_base && sp < kstack_base + THREAD_SIZE; | ||
41 | } | ||
42 | |||
43 | /* Is address in the specified kernel code? */ | ||
44 | static int in_kernel_text(VirtualAddress address) | ||
45 | { | ||
46 | return (address >= MEM_SV_INTRPT && | ||
47 | address < MEM_SV_INTRPT + HPAGE_SIZE); | ||
48 | } | ||
49 | |||
50 | /* Is address valid for reading? */ | ||
51 | static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address) | ||
52 | { | ||
53 | HV_PTE *l1_pgtable = kbt->pgtable; | ||
54 | HV_PTE *l2_pgtable; | ||
55 | unsigned long pfn; | ||
56 | HV_PTE pte; | ||
57 | struct page *page; | ||
58 | |||
59 | if (l1_pgtable == NULL) | ||
60 | return 0; /* can't read user space in other tasks */ | ||
61 | |||
62 | pte = l1_pgtable[HV_L1_INDEX(address)]; | ||
63 | if (!hv_pte_get_present(pte)) | ||
64 | return 0; | ||
65 | pfn = hv_pte_get_pfn(pte); | ||
66 | if (pte_huge(pte)) { | ||
67 | if (!pfn_valid(pfn)) { | ||
68 | pr_err("huge page has bad pfn %#lx\n", pfn); | ||
69 | return 0; | ||
70 | } | ||
71 | return hv_pte_get_present(pte) && hv_pte_get_readable(pte); | ||
72 | } | ||
73 | |||
74 | page = pfn_to_page(pfn); | ||
75 | if (PageHighMem(page)) { | ||
76 | pr_err("L2 page table not in LOWMEM (%#llx)\n", | ||
77 | HV_PFN_TO_CPA(pfn)); | ||
78 | return 0; | ||
79 | } | ||
80 | l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn); | ||
81 | pte = l2_pgtable[HV_L2_INDEX(address)]; | ||
82 | return hv_pte_get_present(pte) && hv_pte_get_readable(pte); | ||
83 | } | ||
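
For orientation, a standalone sketch of the two-level index arithmetic the walk above relies on. The shift and mask values below are placeholders of ours; the real HV_L1_INDEX()/HV_L2_INDEX() come from the hypervisor headers.

    #define EX_L1_SHIFT 24      /* placeholder, not TILE's real geometry */
    #define EX_L2_SHIFT 12      /* placeholder */
    #define EX_L2_MASK  0xfffUL /* placeholder */

    static unsigned long ex_l1_index(unsigned long va)
    {
            return va >> EX_L1_SHIFT;        /* top bits pick the L1 entry */
    }

    static unsigned long ex_l2_index(unsigned long va)
    {
            return (va >> EX_L2_SHIFT) & EX_L2_MASK; /* middle bits, L2 */
    }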
84 | |||
85 | /* Callback for backtracer; basically a glorified memcpy */ | ||
86 | static bool read_memory_func(void *result, VirtualAddress address, | ||
87 | unsigned int size, void *vkbt) | ||
88 | { | ||
89 | int retval; | ||
90 | struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt; | ||
91 | if (in_kernel_text(address)) { | ||
92 | /* OK to read kernel code. */ | ||
93 | } else if (address >= PAGE_OFFSET) { | ||
94 | /* We only tolerate kernel-space reads of this task's stack */ | ||
95 | if (!in_kernel_stack(kbt, address)) | ||
96 | return 0; | ||
97 | } else if (!valid_address(kbt, address)) { | ||
98 | return 0; /* invalid user-space address */ | ||
99 | } | ||
100 | pagefault_disable(); | ||
101 | retval = __copy_from_user_inatomic(result, | ||
102 | (void __user __force *)address, | ||
103 | size); | ||
104 | pagefault_enable(); | ||
105 | return (retval == 0); | ||
106 | } | ||
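
A minimal sketch of the nonfaulting-read pattern used above (helper name ours, kernel context assumed): the pagefault_disable()/pagefault_enable() bracket makes a faulting access return nonzero instead of sleeping in the page-fault handler, so the read is safe in atomic context.

    static int read_word_nofault(unsigned long address, unsigned long *out)
    {
            int ret;

            pagefault_disable();
            ret = __copy_from_user_inatomic(out,
                            (void __user __force *)address, sizeof(*out));
            pagefault_enable();
            return ret == 0;    /* 1 on success, 0 on fault */
    }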
107 | |||
108 | /* Return a pt_regs pointer for a valid fault handler frame */ | ||
109 | static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) | ||
110 | { | ||
111 | #ifndef __tilegx__ | ||
112 | const char *fault = NULL; /* happy compiler */ | ||
113 | char fault_buf[64]; | ||
114 | VirtualAddress sp = kbt->it.sp; | ||
115 | struct pt_regs *p; | ||
116 | |||
117 | if (!in_kernel_stack(kbt, sp)) | ||
118 | return NULL; | ||
119 | if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1)) | ||
120 | return NULL; | ||
121 | p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE); | ||
122 | if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN) | ||
123 | fault = "syscall"; | ||
124 | else { | ||
125 | if (kbt->verbose) { /* else we aren't going to use it */ | ||
126 | snprintf(fault_buf, sizeof(fault_buf), | ||
127 | "interrupt %ld", p->faultnum); | ||
128 | fault = fault_buf; | ||
129 | } | ||
130 | } | ||
131 | if (EX1_PL(p->ex1) == KERNEL_PL && | ||
132 | in_kernel_text(p->pc) && | ||
133 | in_kernel_stack(kbt, p->sp) && | ||
134 | p->sp >= sp) { | ||
135 | if (kbt->verbose) | ||
136 | pr_err(" <%s while in kernel mode>\n", fault); | ||
137 | } else if (EX1_PL(p->ex1) == USER_PL && | ||
138 | p->pc < PAGE_OFFSET && | ||
139 | p->sp < PAGE_OFFSET) { | ||
140 | if (kbt->verbose) | ||
141 | pr_err(" <%s while in user mode>\n", fault); | ||
142 | } else if (kbt->verbose) { | ||
143 | pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n", | ||
144 | p->pc, p->sp, p->ex1); | ||
145 | p = NULL; | ||
146 | } | ||
147 | if (p == NULL || !kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) | ||
148 | return p; | ||
149 | #endif | ||
150 | return NULL; | ||
151 | } | ||
152 | |||
153 | /* Is the pc pointing to a sigreturn trampoline? */ | ||
154 | static int is_sigreturn(VirtualAddress pc) | ||
155 | { | ||
156 | return (pc == VDSO_BASE); | ||
157 | } | ||
158 | |||
159 | /* Return a pt_regs pointer for a valid signal handler frame */ | ||
160 | static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt) | ||
161 | { | ||
162 | BacktraceIterator *b = &kbt->it; | ||
163 | |||
164 | if (b->pc == VDSO_BASE) { | ||
165 | struct rt_sigframe *frame; | ||
166 | unsigned long sigframe_top = | ||
167 | b->sp + sizeof(struct rt_sigframe) - 1; | ||
168 | if (!valid_address(kbt, b->sp) || | ||
169 | !valid_address(kbt, sigframe_top)) { | ||
170 | if (kbt->verbose) | ||
171 | pr_err(" (odd signal: sp %#lx?)\n", | ||
172 | (unsigned long)(b->sp)); | ||
173 | return NULL; | ||
174 | } | ||
175 | frame = (struct rt_sigframe *)b->sp; | ||
176 | if (kbt->verbose) { | ||
177 | pr_err(" <received signal %d>\n", | ||
178 | frame->info.si_signo); | ||
179 | } | ||
180 | return &frame->uc.uc_mcontext.regs; | ||
181 | } | ||
182 | return NULL; | ||
183 | } | ||
184 | |||
185 | static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt) | ||
186 | { | ||
187 | return is_sigreturn(kbt->it.pc); | ||
188 | } | ||
189 | |||
190 | static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt) | ||
191 | { | ||
192 | struct pt_regs *p; | ||
193 | |||
194 | p = valid_fault_handler(kbt); | ||
195 | if (p == NULL) | ||
196 | p = valid_sigframe(kbt); | ||
197 | if (p == NULL) | ||
198 | return 0; | ||
199 | backtrace_init(&kbt->it, read_memory_func, kbt, | ||
200 | p->pc, p->lr, p->sp, p->regs[52]); | ||
201 | kbt->new_context = 1; | ||
202 | return 1; | ||
203 | } | ||
204 | |||
205 | /* Find a frame that isn't a sigreturn, if there is one. */ | ||
206 | static int KBacktraceIterator_next_item_inclusive( | ||
207 | struct KBacktraceIterator *kbt) | ||
208 | { | ||
209 | for (;;) { | ||
210 | do { | ||
211 | if (!KBacktraceIterator_is_sigreturn(kbt)) | ||
212 | return 1; | ||
213 | } while (backtrace_next(&kbt->it)); | ||
214 | |||
215 | if (!KBacktraceIterator_restart(kbt)) | ||
216 | return 0; | ||
217 | } | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * If the current sp is on a page different from what we recorded | ||
222 | * as the top-of-kernel-stack last time we context switched, we have | ||
223 | * probably blown the stack, and nothing is going to work out well. | ||
224 | * If we can at least get out a warning, that may help debugging, | ||
225 | * though we probably won't be able to backtrace into the code that | ||
226 | * actually did the recursive damage. | ||
227 | */ | ||
228 | static void validate_stack(struct pt_regs *regs) | ||
229 | { | ||
230 | int cpu = smp_processor_id(); | ||
231 | unsigned long ksp0 = get_current_ksp0(); | ||
232 | unsigned long ksp0_base = ksp0 - THREAD_SIZE; | ||
233 | unsigned long sp = stack_pointer; | ||
234 | |||
235 | if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) { | ||
236 | pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n" | ||
237 | " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", | ||
238 | cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); | ||
239 | } | ||
240 | |||
241 | else if (sp < ksp0_base + sizeof(struct thread_info)) { | ||
242 | pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n" | ||
243 | " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", | ||
244 | cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); | ||
245 | } | ||
246 | } | ||
247 | |||
248 | void KBacktraceIterator_init(struct KBacktraceIterator *kbt, | ||
249 | struct task_struct *t, struct pt_regs *regs) | ||
250 | { | ||
251 | VirtualAddress pc, lr, sp, r52; | ||
252 | int is_current; | ||
253 | |||
254 | /* | ||
255 | * Set up callback information. We grab the kernel stack base | ||
256 | * so we will allow reads of that address range, and if we're | ||
257 | * asking about the current process we grab the page table | ||
258 | * so we can check user accesses before trying to read them. | ||
259 | * We flush the TLB to avoid any weird skew issues. | ||
260 | */ | ||
261 | is_current = (t == NULL); | ||
262 | kbt->is_current = is_current; | ||
263 | if (is_current) | ||
264 | t = validate_current(); | ||
265 | kbt->task = t; | ||
266 | kbt->pgtable = NULL; | ||
267 | kbt->verbose = 0; /* override in caller if desired */ | ||
268 | kbt->profile = 0; /* override in caller if desired */ | ||
269 | kbt->end = 0; | ||
270 | kbt->new_context = 0; | ||
271 | if (is_current) { | ||
272 | HV_PhysAddr pgdir_pa = hv_inquire_context().page_table; | ||
273 | if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) { | ||
274 | /* | ||
275 | * Not just an optimization: this also allows | ||
276 | * this to work at all before va/pa mappings | ||
277 | * are set up. | ||
278 | */ | ||
279 | kbt->pgtable = swapper_pg_dir; | ||
280 | } else { | ||
281 | struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa)); | ||
282 | if (!PageHighMem(page)) | ||
283 | kbt->pgtable = __va(pgdir_pa); | ||
284 | else | ||
285 | pr_err("page table not in LOWMEM" | ||
286 | " (%#llx)\n", pgdir_pa); | ||
287 | } | ||
288 | local_flush_tlb_all(); | ||
289 | validate_stack(regs); | ||
290 | } | ||
291 | |||
292 | if (regs == NULL) { | ||
293 | if (is_current || t->state == TASK_RUNNING) { | ||
294 | /* Can't do this; we need registers */ | ||
295 | kbt->end = 1; | ||
296 | return; | ||
297 | } | ||
298 | pc = get_switch_to_pc(); | ||
299 | lr = t->thread.pc; | ||
300 | sp = t->thread.ksp; | ||
301 | r52 = 0; | ||
302 | } else { | ||
303 | pc = regs->pc; | ||
304 | lr = regs->lr; | ||
305 | sp = regs->sp; | ||
306 | r52 = regs->regs[52]; | ||
307 | } | ||
308 | |||
309 | backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52); | ||
310 | kbt->end = !KBacktraceIterator_next_item_inclusive(kbt); | ||
311 | } | ||
312 | EXPORT_SYMBOL(KBacktraceIterator_init); | ||
313 | |||
314 | int KBacktraceIterator_end(struct KBacktraceIterator *kbt) | ||
315 | { | ||
316 | return kbt->end; | ||
317 | } | ||
318 | EXPORT_SYMBOL(KBacktraceIterator_end); | ||
319 | |||
320 | void KBacktraceIterator_next(struct KBacktraceIterator *kbt) | ||
321 | { | ||
322 | kbt->new_context = 0; | ||
323 | if (!backtrace_next(&kbt->it) && | ||
324 | !KBacktraceIterator_restart(kbt)) { | ||
325 | kbt->end = 1; | ||
326 | return; | ||
327 | } | ||
328 | |||
329 | kbt->end = !KBacktraceIterator_next_item_inclusive(kbt); | ||
330 | } | ||
331 | EXPORT_SYMBOL(KBacktraceIterator_next); | ||
332 | |||
333 | /* | ||
334 | * This method wraps the backtracer's more generic support. | ||
335 | * It is only invoked from the architecture-specific code; show_stack() | ||
336 | * and dump_stack() (in entry.S) are architecture-independent entry points. | ||
337 | */ | ||
338 | void tile_show_stack(struct KBacktraceIterator *kbt, int headers) | ||
339 | { | ||
340 | int i; | ||
341 | |||
342 | if (headers) { | ||
343 | /* | ||
344 | * Add a blank line since, if we are called from panic(), | ||
345 | * bust_spinlocks() will have spit out a space in front of us, | ||
346 | * and it would mess up our KERN_ERR. | ||
347 | */ | ||
348 | pr_err("\n"); | ||
349 | pr_err("Starting stack dump of tid %d, pid %d (%s)" | ||
350 | " on cpu %d at cycle %lld\n", | ||
351 | kbt->task->pid, kbt->task->tgid, kbt->task->comm, | ||
352 | smp_processor_id(), get_cycles()); | ||
353 | } | ||
354 | #ifdef __tilegx__ | ||
355 | if (kbt->is_current) { | ||
356 | __insn_mtspr(SPR_SIM_CONTROL, | ||
357 | SIM_DUMP_SPR_ARG(SIM_DUMP_BACKTRACE)); | ||
358 | } | ||
359 | #endif | ||
360 | kbt->verbose = 1; | ||
361 | i = 0; | ||
362 | for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) { | ||
363 | char *modname; | ||
364 | const char *name; | ||
365 | unsigned long address = kbt->it.pc; | ||
366 | unsigned long offset, size; | ||
367 | char namebuf[KSYM_NAME_LEN+100]; | ||
368 | |||
369 | if (address >= PAGE_OFFSET) | ||
370 | name = kallsyms_lookup(address, &size, &offset, | ||
371 | &modname, namebuf); | ||
372 | else | ||
373 | name = NULL; | ||
374 | |||
375 | if (!name) | ||
376 | namebuf[0] = '\0'; | ||
377 | else { | ||
378 | size_t namelen = strlen(namebuf); | ||
379 | size_t remaining = (sizeof(namebuf) - 1) - namelen; | ||
380 | char *p = namebuf + namelen; | ||
381 | int rc = snprintf(p, remaining, "+%#lx/%#lx ", | ||
382 | offset, size); | ||
383 | if (modname && rc < remaining) | ||
384 | snprintf(p + rc, remaining - rc, | ||
385 | "[%s] ", modname); | ||
386 | namebuf[sizeof(namebuf)-1] = '\0'; | ||
387 | } | ||
388 | |||
389 | pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n", | ||
390 | i++, address, namebuf, (unsigned long)(kbt->it.sp)); | ||
391 | |||
392 | if (i >= 100) { | ||
393 | pr_err("Stack dump truncated" | ||
394 | " (%d frames)\n", i); | ||
395 | break; | ||
396 | } | ||
397 | } | ||
398 | if (headers) | ||
399 | pr_err("Stack dump complete\n"); | ||
400 | } | ||
401 | EXPORT_SYMBOL(tile_show_stack); | ||
402 | |||
403 | |||
404 | /* This is called from show_regs() and _dump_stack() */ | ||
405 | void dump_stack_regs(struct pt_regs *regs) | ||
406 | { | ||
407 | struct KBacktraceIterator kbt; | ||
408 | KBacktraceIterator_init(&kbt, NULL, regs); | ||
409 | tile_show_stack(&kbt, 1); | ||
410 | } | ||
411 | EXPORT_SYMBOL(dump_stack_regs); | ||
412 | |||
413 | static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs, | ||
414 | ulong pc, ulong lr, ulong sp, ulong r52) | ||
415 | { | ||
416 | memset(regs, 0, sizeof(struct pt_regs)); | ||
417 | regs->pc = pc; | ||
418 | regs->lr = lr; | ||
419 | regs->sp = sp; | ||
420 | regs->regs[52] = r52; | ||
421 | return regs; | ||
422 | } | ||
423 | |||
424 | /* This is called from dump_stack() and just converts to pt_regs */ | ||
425 | void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52) | ||
426 | { | ||
427 | struct pt_regs regs; | ||
428 | dump_stack_regs(regs_to_pt_regs(®s, pc, lr, sp, r52)); | ||
429 | } | ||
430 | |||
431 | /* This is called from KBacktraceIterator_init_current() */ | ||
432 | void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc, | ||
433 | ulong lr, ulong sp, ulong r52) | ||
434 | { | ||
435 | struct pt_regs regs; | ||
436 | KBacktraceIterator_init(kbt, NULL, | ||
437 | regs_to_pt_regs(®s, pc, lr, sp, r52)); | ||
438 | } | ||
439 | |||
440 | /* This is called only from kernel/sched.c, with esp == NULL */ | ||
441 | void show_stack(struct task_struct *task, unsigned long *esp) | ||
442 | { | ||
443 | struct KBacktraceIterator kbt; | ||
444 | if (task == NULL || task == current) | ||
445 | KBacktraceIterator_init_current(&kbt); | ||
446 | else | ||
447 | KBacktraceIterator_init(&kbt, task, NULL); | ||
448 | tile_show_stack(&kbt, 0); | ||
449 | } | ||
450 | |||
451 | #ifdef CONFIG_STACKTRACE | ||
452 | |||
453 | /* Support generic Linux stack API too */ | ||
454 | |||
455 | void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace) | ||
456 | { | ||
457 | struct KBacktraceIterator kbt; | ||
458 | int skip = trace->skip; | ||
459 | int i = 0; | ||
460 | |||
461 | if (task == NULL || task == current) | ||
462 | KBacktraceIterator_init_current(&kbt); | ||
463 | else | ||
464 | KBacktraceIterator_init(&kbt, task, NULL); | ||
465 | for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) { | ||
466 | if (skip) { | ||
467 | --skip; | ||
468 | continue; | ||
469 | } | ||
470 | if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET) | ||
471 | break; | ||
472 | trace->entries[i++] = kbt.it.pc; | ||
473 | } | ||
474 | trace->nr_entries = i; | ||
475 | } | ||
476 | EXPORT_SYMBOL(save_stack_trace_tsk); | ||
477 | |||
478 | void save_stack_trace(struct stack_trace *trace) | ||
479 | { | ||
480 | save_stack_trace_tsk(NULL, trace); | ||
481 | } | ||
482 | |||
483 | #endif | ||
484 | |||
485 | /* In entry.S */ | ||
486 | EXPORT_SYMBOL(KBacktraceIterator_init_current); | ||
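[Editor's note: a hedged sketch of how a kernel-side caller consumes the iterator API this file exports; it mirrors the loop already used by save_stack_trace_tsk() above, and the wrapper function name is invented for illustration.]

/* Assumes kernel context; not part of the patch. */
static void example_walk_current_stack(void)
{
	struct KBacktraceIterator kbt;

	KBacktraceIterator_init_current(&kbt);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
		pr_err(" pc %#lx sp %#lx\n",
		       (unsigned long)kbt.it.pc, (unsigned long)kbt.it.sp);
}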
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c new file mode 100644 index 000000000000..f0f87eab8c39 --- /dev/null +++ b/arch/tile/kernel/sys.c | |||
@@ -0,0 +1,120 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This file contains various random system calls that | ||
15 | * have a non-standard calling sequence on the Linux/TILE | ||
16 | * platform. | ||
17 | */ | ||
18 | |||
19 | #include <linux/errno.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/smp.h> | ||
23 | #include <linux/smp_lock.h> | ||
24 | #include <linux/syscalls.h> | ||
25 | #include <linux/mman.h> | ||
26 | #include <linux/file.h> | ||
27 | #include <linux/mempolicy.h> | ||
28 | #include <linux/binfmts.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/compat.h> | ||
31 | #include <linux/uaccess.h> | ||
32 | #include <linux/signal.h> | ||
33 | #include <asm/syscalls.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/homecache.h> | ||
36 | #include <arch/chip.h> | ||
37 | |||
38 | SYSCALL_DEFINE0(flush_cache) | ||
39 | { | ||
40 | homecache_evict(cpumask_of(smp_processor_id())); | ||
41 | return 0; | ||
42 | } | ||
43 | |||
44 | /* | ||
45 | * Syscalls that pass 64-bit values on 32-bit systems normally | ||
46 | * pass them as a (low, high) word pair packed into immediately | ||
47 | * adjacent registers. If the low word naturally falls on an even | ||
48 | * register, our ABI makes it work correctly; if not, we adjust it here. | ||
49 | * Handling it here means we don't have to fix uclibc AND glibc AND | ||
50 | * any other standard libcs we want to support. | ||
51 | */ | ||
52 | |||
53 | #if !defined(__tilegx__) || defined(CONFIG_COMPAT) | ||
54 | |||
55 | ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count) | ||
56 | { | ||
57 | return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count); | ||
58 | } | ||
59 | |||
60 | long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi, | ||
61 | u32 len, int advice) | ||
62 | { | ||
63 | return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo, | ||
64 | len, advice); | ||
65 | } | ||
66 | |||
67 | int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi, | ||
68 | u32 len_lo, u32 len_hi, int advice) | ||
69 | { | ||
70 | return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo, | ||
71 | ((loff_t)len_hi << 32) | len_lo, advice); | ||
72 | } | ||
73 | |||
74 | #endif /* 32-bit syscall wrappers */ | ||
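[Editor's note: a small userspace sketch, not from the kernel tree, showing why the wrappers above are lossless: splitting a 64-bit offset into the (low, high) word pair a 32-bit caller passes, then reassembling it the way sys32_readahead() does, round-trips exactly.]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset = 0x123456789abcdef0ULL;

	/* What a 32-bit caller would load into the two argument registers. */
	uint32_t offset_lo = (uint32_t)offset;
	uint32_t offset_hi = (uint32_t)(offset >> 32);

	/* What the kernel wrapper reassembles. */
	uint64_t rebuilt = ((uint64_t)offset_hi << 32) | offset_lo;

	assert(rebuilt == offset);
	printf("%#llx -> lo %#x hi %#x -> %#llx\n",
	       (unsigned long long)offset, offset_lo, offset_hi,
	       (unsigned long long)rebuilt);
	return 0;
}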
75 | |||
76 | /* Note: used by the compat code even in 64-bit Linux. */ | ||
77 | SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, | ||
78 | unsigned long, prot, unsigned long, flags, | ||
79 | unsigned long, fd, unsigned long, off_4k) | ||
80 | { | ||
81 | #define PAGE_ADJUST (PAGE_SHIFT - 12) | ||
82 | if (off_4k & ((1 << PAGE_ADJUST) - 1)) | ||
83 | return -EINVAL; | ||
84 | return sys_mmap_pgoff(addr, len, prot, flags, fd, | ||
85 | off_4k >> PAGE_ADJUST); | ||
86 | } | ||
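[Editor's note: a worked example of the off_4k conversion above, under the assumption of a hypothetical PAGE_SHIFT of 16 (64KB pages), so PAGE_ADJUST is 4 and off_4k must be a multiple of 16.]

#include <stdio.h>

#define PAGE_SHIFT  16                 /* assumption for illustration */
#define PAGE_ADJUST (PAGE_SHIFT - 12)  /* off_4k is in 4KB units */

int main(void)
{
	unsigned long off_4k = 0x30;   /* 0x30 * 4KB = 0x30000 bytes */

	if (off_4k & ((1 << PAGE_ADJUST) - 1)) {
		puts("-EINVAL: offset not aligned to a kernel page");
		return 1;
	}
	/* 0x30 >> 4 == 0x3: byte offset 0x30000 is page 3 of 64KB pages. */
	printf("pgoff = %#lx\n", off_4k >> PAGE_ADJUST);
	return 0;
}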
87 | |||
88 | #ifdef __tilegx__ | ||
89 | SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, | ||
90 | unsigned long, prot, unsigned long, flags, | ||
91 | unsigned long, fd, off_t, offset) | ||
92 | { | ||
93 | if (offset & ((1 << PAGE_SHIFT) - 1)) | ||
94 | return -EINVAL; | ||
95 | return sys_mmap_pgoff(addr, len, prot, flags, fd, | ||
96 | offset >> PAGE_SHIFT); | ||
97 | } | ||
98 | #endif | ||
99 | |||
100 | |||
101 | /* Provide the actual syscall-number-to-call mapping. */ | ||
102 | #undef __SYSCALL | ||
103 | #define __SYSCALL(nr, call) [nr] = (call), | ||
104 | |||
105 | #ifndef __tilegx__ | ||
106 | /* See comments at the top of the file. */ | ||
107 | #define sys_fadvise64 sys32_fadvise64 | ||
108 | #define sys_fadvise64_64 sys32_fadvise64_64 | ||
109 | #define sys_readahead sys32_readahead | ||
110 | #define sys_sync_file_range sys_sync_file_range2 | ||
111 | #endif | ||
112 | |||
113 | /* | ||
114 | * Note that we can't include <linux/unistd.h> here since the header | ||
115 | * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well. | ||
116 | */ | ||
117 | void *sys_call_table[__NR_syscalls] = { | ||
118 | [0 ... __NR_syscalls-1] = sys_ni_syscall, | ||
119 | #include <asm/unistd.h> | ||
120 | }; | ||
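[Editor's note: a hedged miniature of the table-building trick used above. GCC's designated range initializer fills every slot with a default handler, and the later __SYSCALL entries override the slots that are actually implemented; the syscall numbers and handlers here are invented for illustration.]

#include <stdio.h>

static long sys_ni(void)   { return -38; /* mimic -ENOSYS */ }
static long my_read(void)  { return 0; }
static long my_write(void) { return 1; }

#define NR_CALLS 8
#define __SYSCALL(nr, call) [nr] = (call),

static long (*call_table[NR_CALLS])(void) = {
	[0 ... NR_CALLS - 1] = sys_ni,	/* default every slot first */
	__SYSCALL(3, my_read)		/* then override the known numbers */
	__SYSCALL(4, my_write)
};

int main(void)
{
	printf("%ld %ld %ld\n",
	       call_table[0](), call_table[3](), call_table[4]());
	return 0;
}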
diff --git a/arch/tile/kernel/tile-desc_32.c b/arch/tile/kernel/tile-desc_32.c new file mode 100644 index 000000000000..69af0e150f78 --- /dev/null +++ b/arch/tile/kernel/tile-desc_32.c | |||
@@ -0,0 +1,2498 @@ | |||
1 | /* This define is BFD_RELOC_##x for real bfd, or -1 for everyone else. */ | ||
2 | #define BFD_RELOC(x) -1 | ||
3 | |||
4 | /* Special registers. */ | ||
5 | #define TREG_LR 55 | ||
6 | #define TREG_SN 56 | ||
7 | #define TREG_ZERO 63 | ||
8 | |||
9 | /* FIXME: Rename this. */ | ||
10 | #include <asm/opcode-tile.h> | ||
11 | |||
12 | #include <linux/stddef.h> | ||
13 | |||
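[Editor's note: a hedged key for reading the rows below; the field meanings are inferred from the initializers and from the TREG defines above, not stated in this file.]

/*
 * Inferred layout of one entry, using the "jal" row below as the
 * example (a reading aid, not an authoritative spec):
 *
 *   { "jal", TILE_OPC_JAL, 0x2, 1, TREG_LR, 1,
 *     { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } } },
 *
 *   "jal"          mnemonic
 *   TILE_OPC_JAL   opcode enum value
 *   0x2            bitmask of issue slots the opcode may occupy
 *   1              number of explicit operands
 *   TREG_LR        register written implicitly (here, the link register)
 *   1              whether the opcode can be bundled with others
 *   { ... }        per-slot lists of operand-descriptor indices
 */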
14 | const struct tile_opcode tile_opcodes[395] = | ||
15 | { | ||
16 | { "bpt", TILE_OPC_BPT, 0x2, 0, TREG_ZERO, 0, | ||
17 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
18 | }, | ||
19 | { "info", TILE_OPC_INFO, 0xf, 1, TREG_ZERO, 1, | ||
20 | { { 0 }, { 1 }, { 2 }, { 3 }, { 0, } }, | ||
21 | }, | ||
22 | { "infol", TILE_OPC_INFOL, 0x3, 1, TREG_ZERO, 1, | ||
23 | { { 4 }, { 5 }, { 0, }, { 0, }, { 0, } }, | ||
24 | }, | ||
25 | { "j", TILE_OPC_J, 0x2, 1, TREG_ZERO, 1, | ||
26 | { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } }, | ||
27 | }, | ||
28 | { "jal", TILE_OPC_JAL, 0x2, 1, TREG_LR, 1, | ||
29 | { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } }, | ||
30 | }, | ||
31 | { "move", TILE_OPC_MOVE, 0xf, 2, TREG_ZERO, 1, | ||
32 | { { 7, 8 }, { 9, 10 }, { 11, 12 }, { 13, 14 }, { 0, } }, | ||
33 | }, | ||
34 | { "move.sn", TILE_OPC_MOVE_SN, 0x3, 2, TREG_SN, 1, | ||
35 | { { 7, 8 }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
36 | }, | ||
37 | { "movei", TILE_OPC_MOVEI, 0xf, 2, TREG_ZERO, 1, | ||
38 | { { 7, 0 }, { 9, 1 }, { 11, 2 }, { 13, 3 }, { 0, } }, | ||
39 | }, | ||
40 | { "movei.sn", TILE_OPC_MOVEI_SN, 0x3, 2, TREG_SN, 1, | ||
41 | { { 7, 0 }, { 9, 1 }, { 0, }, { 0, }, { 0, } }, | ||
42 | }, | ||
43 | { "moveli", TILE_OPC_MOVELI, 0x3, 2, TREG_ZERO, 1, | ||
44 | { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } }, | ||
45 | }, | ||
46 | { "moveli.sn", TILE_OPC_MOVELI_SN, 0x3, 2, TREG_SN, 1, | ||
47 | { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } }, | ||
48 | }, | ||
49 | { "movelis", TILE_OPC_MOVELIS, 0x3, 2, TREG_SN, 1, | ||
50 | { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } }, | ||
51 | }, | ||
52 | { "prefetch", TILE_OPC_PREFETCH, 0x12, 1, TREG_ZERO, 1, | ||
53 | { { 0, }, { 10 }, { 0, }, { 0, }, { 15 } }, | ||
54 | }, | ||
55 | { "raise", TILE_OPC_RAISE, 0x2, 0, TREG_ZERO, 1, | ||
56 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
57 | }, | ||
58 | { "add", TILE_OPC_ADD, 0xf, 3, TREG_ZERO, 1, | ||
59 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
60 | }, | ||
61 | { "add.sn", TILE_OPC_ADD_SN, 0x3, 3, TREG_SN, 1, | ||
62 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
63 | }, | ||
64 | { "addb", TILE_OPC_ADDB, 0x3, 3, TREG_ZERO, 1, | ||
65 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
66 | }, | ||
67 | { "addb.sn", TILE_OPC_ADDB_SN, 0x3, 3, TREG_SN, 1, | ||
68 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
69 | }, | ||
70 | { "addbs_u", TILE_OPC_ADDBS_U, 0x3, 3, TREG_ZERO, 1, | ||
71 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
72 | }, | ||
73 | { "addbs_u.sn", TILE_OPC_ADDBS_U_SN, 0x3, 3, TREG_SN, 1, | ||
74 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
75 | }, | ||
76 | { "addh", TILE_OPC_ADDH, 0x3, 3, TREG_ZERO, 1, | ||
77 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
78 | }, | ||
79 | { "addh.sn", TILE_OPC_ADDH_SN, 0x3, 3, TREG_SN, 1, | ||
80 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
81 | }, | ||
82 | { "addhs", TILE_OPC_ADDHS, 0x3, 3, TREG_ZERO, 1, | ||
83 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
84 | }, | ||
85 | { "addhs.sn", TILE_OPC_ADDHS_SN, 0x3, 3, TREG_SN, 1, | ||
86 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
87 | }, | ||
88 | { "addi", TILE_OPC_ADDI, 0xf, 3, TREG_ZERO, 1, | ||
89 | { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, | ||
90 | }, | ||
91 | { "addi.sn", TILE_OPC_ADDI_SN, 0x3, 3, TREG_SN, 1, | ||
92 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
93 | }, | ||
94 | { "addib", TILE_OPC_ADDIB, 0x3, 3, TREG_ZERO, 1, | ||
95 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
96 | }, | ||
97 | { "addib.sn", TILE_OPC_ADDIB_SN, 0x3, 3, TREG_SN, 1, | ||
98 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
99 | }, | ||
100 | { "addih", TILE_OPC_ADDIH, 0x3, 3, TREG_ZERO, 1, | ||
101 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
102 | }, | ||
103 | { "addih.sn", TILE_OPC_ADDIH_SN, 0x3, 3, TREG_SN, 1, | ||
104 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
105 | }, | ||
106 | { "addli", TILE_OPC_ADDLI, 0x3, 3, TREG_ZERO, 1, | ||
107 | { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, | ||
108 | }, | ||
109 | { "addli.sn", TILE_OPC_ADDLI_SN, 0x3, 3, TREG_SN, 1, | ||
110 | { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, | ||
111 | }, | ||
112 | { "addlis", TILE_OPC_ADDLIS, 0x3, 3, TREG_SN, 1, | ||
113 | { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, | ||
114 | }, | ||
115 | { "adds", TILE_OPC_ADDS, 0x3, 3, TREG_ZERO, 1, | ||
116 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
117 | }, | ||
118 | { "adds.sn", TILE_OPC_ADDS_SN, 0x3, 3, TREG_SN, 1, | ||
119 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
120 | }, | ||
121 | { "adiffb_u", TILE_OPC_ADIFFB_U, 0x1, 3, TREG_ZERO, 1, | ||
122 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
123 | }, | ||
124 | { "adiffb_u.sn", TILE_OPC_ADIFFB_U_SN, 0x1, 3, TREG_SN, 1, | ||
125 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
126 | }, | ||
127 | { "adiffh", TILE_OPC_ADIFFH, 0x1, 3, TREG_ZERO, 1, | ||
128 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
129 | }, | ||
130 | { "adiffh.sn", TILE_OPC_ADIFFH_SN, 0x1, 3, TREG_SN, 1, | ||
131 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
132 | }, | ||
133 | { "and", TILE_OPC_AND, 0xf, 3, TREG_ZERO, 1, | ||
134 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
135 | }, | ||
136 | { "and.sn", TILE_OPC_AND_SN, 0x3, 3, TREG_SN, 1, | ||
137 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
138 | }, | ||
139 | { "andi", TILE_OPC_ANDI, 0xf, 3, TREG_ZERO, 1, | ||
140 | { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, | ||
141 | }, | ||
142 | { "andi.sn", TILE_OPC_ANDI_SN, 0x3, 3, TREG_SN, 1, | ||
143 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
144 | }, | ||
145 | { "auli", TILE_OPC_AULI, 0x3, 3, TREG_ZERO, 1, | ||
146 | { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, | ||
147 | }, | ||
148 | { "avgb_u", TILE_OPC_AVGB_U, 0x1, 3, TREG_ZERO, 1, | ||
149 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
150 | }, | ||
151 | { "avgb_u.sn", TILE_OPC_AVGB_U_SN, 0x1, 3, TREG_SN, 1, | ||
152 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
153 | }, | ||
154 | { "avgh", TILE_OPC_AVGH, 0x1, 3, TREG_ZERO, 1, | ||
155 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
156 | }, | ||
157 | { "avgh.sn", TILE_OPC_AVGH_SN, 0x1, 3, TREG_SN, 1, | ||
158 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
159 | }, | ||
160 | { "bbns", TILE_OPC_BBNS, 0x2, 2, TREG_ZERO, 1, | ||
161 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
162 | }, | ||
163 | { "bbns.sn", TILE_OPC_BBNS_SN, 0x2, 2, TREG_SN, 1, | ||
164 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
165 | }, | ||
166 | { "bbnst", TILE_OPC_BBNST, 0x2, 2, TREG_ZERO, 1, | ||
167 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
168 | }, | ||
169 | { "bbnst.sn", TILE_OPC_BBNST_SN, 0x2, 2, TREG_SN, 1, | ||
170 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
171 | }, | ||
172 | { "bbs", TILE_OPC_BBS, 0x2, 2, TREG_ZERO, 1, | ||
173 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
174 | }, | ||
175 | { "bbs.sn", TILE_OPC_BBS_SN, 0x2, 2, TREG_SN, 1, | ||
176 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
177 | }, | ||
178 | { "bbst", TILE_OPC_BBST, 0x2, 2, TREG_ZERO, 1, | ||
179 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
180 | }, | ||
181 | { "bbst.sn", TILE_OPC_BBST_SN, 0x2, 2, TREG_SN, 1, | ||
182 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
183 | }, | ||
184 | { "bgez", TILE_OPC_BGEZ, 0x2, 2, TREG_ZERO, 1, | ||
185 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
186 | }, | ||
187 | { "bgez.sn", TILE_OPC_BGEZ_SN, 0x2, 2, TREG_SN, 1, | ||
188 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
189 | }, | ||
190 | { "bgezt", TILE_OPC_BGEZT, 0x2, 2, TREG_ZERO, 1, | ||
191 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
192 | }, | ||
193 | { "bgezt.sn", TILE_OPC_BGEZT_SN, 0x2, 2, TREG_SN, 1, | ||
194 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
195 | }, | ||
196 | { "bgz", TILE_OPC_BGZ, 0x2, 2, TREG_ZERO, 1, | ||
197 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
198 | }, | ||
199 | { "bgz.sn", TILE_OPC_BGZ_SN, 0x2, 2, TREG_SN, 1, | ||
200 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
201 | }, | ||
202 | { "bgzt", TILE_OPC_BGZT, 0x2, 2, TREG_ZERO, 1, | ||
203 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
204 | }, | ||
205 | { "bgzt.sn", TILE_OPC_BGZT_SN, 0x2, 2, TREG_SN, 1, | ||
206 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
207 | }, | ||
208 | { "bitx", TILE_OPC_BITX, 0x5, 2, TREG_ZERO, 1, | ||
209 | { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, | ||
210 | }, | ||
211 | { "bitx.sn", TILE_OPC_BITX_SN, 0x1, 2, TREG_SN, 1, | ||
212 | { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
213 | }, | ||
214 | { "blez", TILE_OPC_BLEZ, 0x2, 2, TREG_ZERO, 1, | ||
215 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
216 | }, | ||
217 | { "blez.sn", TILE_OPC_BLEZ_SN, 0x2, 2, TREG_SN, 1, | ||
218 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
219 | }, | ||
220 | { "blezt", TILE_OPC_BLEZT, 0x2, 2, TREG_ZERO, 1, | ||
221 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
222 | }, | ||
223 | { "blezt.sn", TILE_OPC_BLEZT_SN, 0x2, 2, TREG_SN, 1, | ||
224 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
225 | }, | ||
226 | { "blz", TILE_OPC_BLZ, 0x2, 2, TREG_ZERO, 1, | ||
227 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
228 | }, | ||
229 | { "blz.sn", TILE_OPC_BLZ_SN, 0x2, 2, TREG_SN, 1, | ||
230 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
231 | }, | ||
232 | { "blzt", TILE_OPC_BLZT, 0x2, 2, TREG_ZERO, 1, | ||
233 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
234 | }, | ||
235 | { "blzt.sn", TILE_OPC_BLZT_SN, 0x2, 2, TREG_SN, 1, | ||
236 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
237 | }, | ||
238 | { "bnz", TILE_OPC_BNZ, 0x2, 2, TREG_ZERO, 1, | ||
239 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
240 | }, | ||
241 | { "bnz.sn", TILE_OPC_BNZ_SN, 0x2, 2, TREG_SN, 1, | ||
242 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
243 | }, | ||
244 | { "bnzt", TILE_OPC_BNZT, 0x2, 2, TREG_ZERO, 1, | ||
245 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
246 | }, | ||
247 | { "bnzt.sn", TILE_OPC_BNZT_SN, 0x2, 2, TREG_SN, 1, | ||
248 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
249 | }, | ||
250 | { "bytex", TILE_OPC_BYTEX, 0x5, 2, TREG_ZERO, 1, | ||
251 | { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, | ||
252 | }, | ||
253 | { "bytex.sn", TILE_OPC_BYTEX_SN, 0x1, 2, TREG_SN, 1, | ||
254 | { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
255 | }, | ||
256 | { "bz", TILE_OPC_BZ, 0x2, 2, TREG_ZERO, 1, | ||
257 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
258 | }, | ||
259 | { "bz.sn", TILE_OPC_BZ_SN, 0x2, 2, TREG_SN, 1, | ||
260 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
261 | }, | ||
262 | { "bzt", TILE_OPC_BZT, 0x2, 2, TREG_ZERO, 1, | ||
263 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
264 | }, | ||
265 | { "bzt.sn", TILE_OPC_BZT_SN, 0x2, 2, TREG_SN, 1, | ||
266 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
267 | }, | ||
268 | { "clz", TILE_OPC_CLZ, 0x5, 2, TREG_ZERO, 1, | ||
269 | { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, | ||
270 | }, | ||
271 | { "clz.sn", TILE_OPC_CLZ_SN, 0x1, 2, TREG_SN, 1, | ||
272 | { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
273 | }, | ||
274 | { "crc32_32", TILE_OPC_CRC32_32, 0x1, 3, TREG_ZERO, 1, | ||
275 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
276 | }, | ||
277 | { "crc32_32.sn", TILE_OPC_CRC32_32_SN, 0x1, 3, TREG_SN, 1, | ||
278 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
279 | }, | ||
280 | { "crc32_8", TILE_OPC_CRC32_8, 0x1, 3, TREG_ZERO, 1, | ||
281 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
282 | }, | ||
283 | { "crc32_8.sn", TILE_OPC_CRC32_8_SN, 0x1, 3, TREG_SN, 1, | ||
284 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
285 | }, | ||
286 | { "ctz", TILE_OPC_CTZ, 0x5, 2, TREG_ZERO, 1, | ||
287 | { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, | ||
288 | }, | ||
289 | { "ctz.sn", TILE_OPC_CTZ_SN, 0x1, 2, TREG_SN, 1, | ||
290 | { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
291 | }, | ||
292 | { "drain", TILE_OPC_DRAIN, 0x2, 0, TREG_ZERO, 0, | ||
293 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
294 | }, | ||
295 | { "dtlbpr", TILE_OPC_DTLBPR, 0x2, 1, TREG_ZERO, 1, | ||
296 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
297 | }, | ||
298 | { "dword_align", TILE_OPC_DWORD_ALIGN, 0x1, 3, TREG_ZERO, 1, | ||
299 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
300 | }, | ||
301 | { "dword_align.sn", TILE_OPC_DWORD_ALIGN_SN, 0x1, 3, TREG_SN, 1, | ||
302 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
303 | }, | ||
304 | { "finv", TILE_OPC_FINV, 0x2, 1, TREG_ZERO, 1, | ||
305 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
306 | }, | ||
307 | { "flush", TILE_OPC_FLUSH, 0x2, 1, TREG_ZERO, 1, | ||
308 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
309 | }, | ||
310 | { "fnop", TILE_OPC_FNOP, 0xf, 0, TREG_ZERO, 1, | ||
311 | { { }, { }, { }, { }, { 0, } }, | ||
312 | }, | ||
313 | { "icoh", TILE_OPC_ICOH, 0x2, 1, TREG_ZERO, 1, | ||
314 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
315 | }, | ||
316 | { "ill", TILE_OPC_ILL, 0xa, 0, TREG_ZERO, 1, | ||
317 | { { 0, }, { }, { 0, }, { }, { 0, } }, | ||
318 | }, | ||
319 | { "inthb", TILE_OPC_INTHB, 0x3, 3, TREG_ZERO, 1, | ||
320 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
321 | }, | ||
322 | { "inthb.sn", TILE_OPC_INTHB_SN, 0x3, 3, TREG_SN, 1, | ||
323 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
324 | }, | ||
325 | { "inthh", TILE_OPC_INTHH, 0x3, 3, TREG_ZERO, 1, | ||
326 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
327 | }, | ||
328 | { "inthh.sn", TILE_OPC_INTHH_SN, 0x3, 3, TREG_SN, 1, | ||
329 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
330 | }, | ||
331 | { "intlb", TILE_OPC_INTLB, 0x3, 3, TREG_ZERO, 1, | ||
332 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
333 | }, | ||
334 | { "intlb.sn", TILE_OPC_INTLB_SN, 0x3, 3, TREG_SN, 1, | ||
335 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
336 | }, | ||
337 | { "intlh", TILE_OPC_INTLH, 0x3, 3, TREG_ZERO, 1, | ||
338 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
339 | }, | ||
340 | { "intlh.sn", TILE_OPC_INTLH_SN, 0x3, 3, TREG_SN, 1, | ||
341 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
342 | }, | ||
343 | { "inv", TILE_OPC_INV, 0x2, 1, TREG_ZERO, 1, | ||
344 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
345 | }, | ||
346 | { "iret", TILE_OPC_IRET, 0x2, 0, TREG_ZERO, 1, | ||
347 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
348 | }, | ||
349 | { "jalb", TILE_OPC_JALB, 0x2, 1, TREG_LR, 1, | ||
350 | { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, | ||
351 | }, | ||
352 | { "jalf", TILE_OPC_JALF, 0x2, 1, TREG_LR, 1, | ||
353 | { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, | ||
354 | }, | ||
355 | { "jalr", TILE_OPC_JALR, 0x2, 1, TREG_LR, 1, | ||
356 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
357 | }, | ||
358 | { "jalrp", TILE_OPC_JALRP, 0x2, 1, TREG_LR, 1, | ||
359 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
360 | }, | ||
361 | { "jb", TILE_OPC_JB, 0x2, 1, TREG_ZERO, 1, | ||
362 | { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, | ||
363 | }, | ||
364 | { "jf", TILE_OPC_JF, 0x2, 1, TREG_ZERO, 1, | ||
365 | { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, | ||
366 | }, | ||
367 | { "jr", TILE_OPC_JR, 0x2, 1, TREG_ZERO, 1, | ||
368 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
369 | }, | ||
370 | { "jrp", TILE_OPC_JRP, 0x2, 1, TREG_ZERO, 1, | ||
371 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
372 | }, | ||
373 | { "lb", TILE_OPC_LB, 0x12, 2, TREG_ZERO, 1, | ||
374 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, | ||
375 | }, | ||
376 | { "lb.sn", TILE_OPC_LB_SN, 0x2, 2, TREG_SN, 1, | ||
377 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
378 | }, | ||
379 | { "lb_u", TILE_OPC_LB_U, 0x12, 2, TREG_ZERO, 1, | ||
380 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, | ||
381 | }, | ||
382 | { "lb_u.sn", TILE_OPC_LB_U_SN, 0x2, 2, TREG_SN, 1, | ||
383 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
384 | }, | ||
385 | { "lbadd", TILE_OPC_LBADD, 0x2, 3, TREG_ZERO, 1, | ||
386 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
387 | }, | ||
388 | { "lbadd.sn", TILE_OPC_LBADD_SN, 0x2, 3, TREG_SN, 1, | ||
389 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
390 | }, | ||
391 | { "lbadd_u", TILE_OPC_LBADD_U, 0x2, 3, TREG_ZERO, 1, | ||
392 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
393 | }, | ||
394 | { "lbadd_u.sn", TILE_OPC_LBADD_U_SN, 0x2, 3, TREG_SN, 1, | ||
395 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
396 | }, | ||
397 | { "lh", TILE_OPC_LH, 0x12, 2, TREG_ZERO, 1, | ||
398 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, | ||
399 | }, | ||
400 | { "lh.sn", TILE_OPC_LH_SN, 0x2, 2, TREG_SN, 1, | ||
401 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
402 | }, | ||
403 | { "lh_u", TILE_OPC_LH_U, 0x12, 2, TREG_ZERO, 1, | ||
404 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, | ||
405 | }, | ||
406 | { "lh_u.sn", TILE_OPC_LH_U_SN, 0x2, 2, TREG_SN, 1, | ||
407 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
408 | }, | ||
409 | { "lhadd", TILE_OPC_LHADD, 0x2, 3, TREG_ZERO, 1, | ||
410 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
411 | }, | ||
412 | { "lhadd.sn", TILE_OPC_LHADD_SN, 0x2, 3, TREG_SN, 1, | ||
413 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
414 | }, | ||
415 | { "lhadd_u", TILE_OPC_LHADD_U, 0x2, 3, TREG_ZERO, 1, | ||
416 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
417 | }, | ||
418 | { "lhadd_u.sn", TILE_OPC_LHADD_U_SN, 0x2, 3, TREG_SN, 1, | ||
419 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
420 | }, | ||
421 | { "lnk", TILE_OPC_LNK, 0x2, 1, TREG_ZERO, 1, | ||
422 | { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, | ||
423 | }, | ||
424 | { "lnk.sn", TILE_OPC_LNK_SN, 0x2, 1, TREG_SN, 1, | ||
425 | { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, | ||
426 | }, | ||
427 | { "lw", TILE_OPC_LW, 0x12, 2, TREG_ZERO, 1, | ||
428 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, | ||
429 | }, | ||
430 | { "lw.sn", TILE_OPC_LW_SN, 0x2, 2, TREG_SN, 1, | ||
431 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
432 | }, | ||
433 | { "lw_na", TILE_OPC_LW_NA, 0x2, 2, TREG_ZERO, 1, | ||
434 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
435 | }, | ||
436 | { "lw_na.sn", TILE_OPC_LW_NA_SN, 0x2, 2, TREG_SN, 1, | ||
437 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
438 | }, | ||
439 | { "lwadd", TILE_OPC_LWADD, 0x2, 3, TREG_ZERO, 1, | ||
440 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
441 | }, | ||
442 | { "lwadd.sn", TILE_OPC_LWADD_SN, 0x2, 3, TREG_SN, 1, | ||
443 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
444 | }, | ||
445 | { "lwadd_na", TILE_OPC_LWADD_NA, 0x2, 3, TREG_ZERO, 1, | ||
446 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
447 | }, | ||
448 | { "lwadd_na.sn", TILE_OPC_LWADD_NA_SN, 0x2, 3, TREG_SN, 1, | ||
449 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
450 | }, | ||
451 | { "maxb_u", TILE_OPC_MAXB_U, 0x3, 3, TREG_ZERO, 1, | ||
452 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
453 | }, | ||
454 | { "maxb_u.sn", TILE_OPC_MAXB_U_SN, 0x3, 3, TREG_SN, 1, | ||
455 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
456 | }, | ||
457 | { "maxh", TILE_OPC_MAXH, 0x3, 3, TREG_ZERO, 1, | ||
458 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
459 | }, | ||
460 | { "maxh.sn", TILE_OPC_MAXH_SN, 0x3, 3, TREG_SN, 1, | ||
461 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
462 | }, | ||
463 | { "maxib_u", TILE_OPC_MAXIB_U, 0x3, 3, TREG_ZERO, 1, | ||
464 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
465 | }, | ||
466 | { "maxib_u.sn", TILE_OPC_MAXIB_U_SN, 0x3, 3, TREG_SN, 1, | ||
467 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
468 | }, | ||
469 | { "maxih", TILE_OPC_MAXIH, 0x3, 3, TREG_ZERO, 1, | ||
470 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
471 | }, | ||
472 | { "maxih.sn", TILE_OPC_MAXIH_SN, 0x3, 3, TREG_SN, 1, | ||
473 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
474 | }, | ||
475 | { "mf", TILE_OPC_MF, 0x2, 0, TREG_ZERO, 1, | ||
476 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
477 | }, | ||
478 | { "mfspr", TILE_OPC_MFSPR, 0x2, 2, TREG_ZERO, 1, | ||
479 | { { 0, }, { 9, 25 }, { 0, }, { 0, }, { 0, } }, | ||
480 | }, | ||
481 | { "minb_u", TILE_OPC_MINB_U, 0x3, 3, TREG_ZERO, 1, | ||
482 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
483 | }, | ||
484 | { "minb_u.sn", TILE_OPC_MINB_U_SN, 0x3, 3, TREG_SN, 1, | ||
485 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
486 | }, | ||
487 | { "minh", TILE_OPC_MINH, 0x3, 3, TREG_ZERO, 1, | ||
488 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
489 | }, | ||
490 | { "minh.sn", TILE_OPC_MINH_SN, 0x3, 3, TREG_SN, 1, | ||
491 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
492 | }, | ||
493 | { "minib_u", TILE_OPC_MINIB_U, 0x3, 3, TREG_ZERO, 1, | ||
494 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
495 | }, | ||
496 | { "minib_u.sn", TILE_OPC_MINIB_U_SN, 0x3, 3, TREG_SN, 1, | ||
497 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
498 | }, | ||
499 | { "minih", TILE_OPC_MINIH, 0x3, 3, TREG_ZERO, 1, | ||
500 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
501 | }, | ||
502 | { "minih.sn", TILE_OPC_MINIH_SN, 0x3, 3, TREG_SN, 1, | ||
503 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
504 | }, | ||
505 | { "mm", TILE_OPC_MM, 0x3, 5, TREG_ZERO, 1, | ||
506 | { { 7, 8, 16, 26, 27 }, { 9, 10, 17, 28, 29 }, { 0, }, { 0, }, { 0, } }, | ||
507 | }, | ||
508 | { "mnz", TILE_OPC_MNZ, 0xf, 3, TREG_ZERO, 1, | ||
509 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
510 | }, | ||
511 | { "mnz.sn", TILE_OPC_MNZ_SN, 0x3, 3, TREG_SN, 1, | ||
512 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
513 | }, | ||
514 | { "mnzb", TILE_OPC_MNZB, 0x3, 3, TREG_ZERO, 1, | ||
515 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
516 | }, | ||
517 | { "mnzb.sn", TILE_OPC_MNZB_SN, 0x3, 3, TREG_SN, 1, | ||
518 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
519 | }, | ||
520 | { "mnzh", TILE_OPC_MNZH, 0x3, 3, TREG_ZERO, 1, | ||
521 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
522 | }, | ||
523 | { "mnzh.sn", TILE_OPC_MNZH_SN, 0x3, 3, TREG_SN, 1, | ||
524 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
525 | }, | ||
526 | { "mtspr", TILE_OPC_MTSPR, 0x2, 2, TREG_ZERO, 1, | ||
527 | { { 0, }, { 30, 10 }, { 0, }, { 0, }, { 0, } }, | ||
528 | }, | ||
529 | { "mulhh_ss", TILE_OPC_MULHH_SS, 0x5, 3, TREG_ZERO, 1, | ||
530 | { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, | ||
531 | }, | ||
532 | { "mulhh_ss.sn", TILE_OPC_MULHH_SS_SN, 0x1, 3, TREG_SN, 1, | ||
533 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
534 | }, | ||
535 | { "mulhh_su", TILE_OPC_MULHH_SU, 0x1, 3, TREG_ZERO, 1, | ||
536 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
537 | }, | ||
538 | { "mulhh_su.sn", TILE_OPC_MULHH_SU_SN, 0x1, 3, TREG_SN, 1, | ||
539 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
540 | }, | ||
541 | { "mulhh_uu", TILE_OPC_MULHH_UU, 0x5, 3, TREG_ZERO, 1, | ||
542 | { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, | ||
543 | }, | ||
544 | { "mulhh_uu.sn", TILE_OPC_MULHH_UU_SN, 0x1, 3, TREG_SN, 1, | ||
545 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
546 | }, | ||
547 | { "mulhha_ss", TILE_OPC_MULHHA_SS, 0x5, 3, TREG_ZERO, 1, | ||
548 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
549 | }, | ||
550 | { "mulhha_ss.sn", TILE_OPC_MULHHA_SS_SN, 0x1, 3, TREG_SN, 1, | ||
551 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
552 | }, | ||
553 | { "mulhha_su", TILE_OPC_MULHHA_SU, 0x1, 3, TREG_ZERO, 1, | ||
554 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
555 | }, | ||
556 | { "mulhha_su.sn", TILE_OPC_MULHHA_SU_SN, 0x1, 3, TREG_SN, 1, | ||
557 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
558 | }, | ||
559 | { "mulhha_uu", TILE_OPC_MULHHA_UU, 0x5, 3, TREG_ZERO, 1, | ||
560 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
561 | }, | ||
562 | { "mulhha_uu.sn", TILE_OPC_MULHHA_UU_SN, 0x1, 3, TREG_SN, 1, | ||
563 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
564 | }, | ||
565 | { "mulhhsa_uu", TILE_OPC_MULHHSA_UU, 0x1, 3, TREG_ZERO, 1, | ||
566 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
567 | }, | ||
568 | { "mulhhsa_uu.sn", TILE_OPC_MULHHSA_UU_SN, 0x1, 3, TREG_SN, 1, | ||
569 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
570 | }, | ||
571 | { "mulhl_ss", TILE_OPC_MULHL_SS, 0x1, 3, TREG_ZERO, 1, | ||
572 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
573 | }, | ||
574 | { "mulhl_ss.sn", TILE_OPC_MULHL_SS_SN, 0x1, 3, TREG_SN, 1, | ||
575 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
576 | }, | ||
577 | { "mulhl_su", TILE_OPC_MULHL_SU, 0x1, 3, TREG_ZERO, 1, | ||
578 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
579 | }, | ||
580 | { "mulhl_su.sn", TILE_OPC_MULHL_SU_SN, 0x1, 3, TREG_SN, 1, | ||
581 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
582 | }, | ||
583 | { "mulhl_us", TILE_OPC_MULHL_US, 0x1, 3, TREG_ZERO, 1, | ||
584 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
585 | }, | ||
586 | { "mulhl_us.sn", TILE_OPC_MULHL_US_SN, 0x1, 3, TREG_SN, 1, | ||
587 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
588 | }, | ||
589 | { "mulhl_uu", TILE_OPC_MULHL_UU, 0x1, 3, TREG_ZERO, 1, | ||
590 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
591 | }, | ||
592 | { "mulhl_uu.sn", TILE_OPC_MULHL_UU_SN, 0x1, 3, TREG_SN, 1, | ||
593 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
594 | }, | ||
595 | { "mulhla_ss", TILE_OPC_MULHLA_SS, 0x1, 3, TREG_ZERO, 1, | ||
596 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
597 | }, | ||
598 | { "mulhla_ss.sn", TILE_OPC_MULHLA_SS_SN, 0x1, 3, TREG_SN, 1, | ||
599 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
600 | }, | ||
601 | { "mulhla_su", TILE_OPC_MULHLA_SU, 0x1, 3, TREG_ZERO, 1, | ||
602 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
603 | }, | ||
604 | { "mulhla_su.sn", TILE_OPC_MULHLA_SU_SN, 0x1, 3, TREG_SN, 1, | ||
605 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
606 | }, | ||
607 | { "mulhla_us", TILE_OPC_MULHLA_US, 0x1, 3, TREG_ZERO, 1, | ||
608 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
609 | }, | ||
610 | { "mulhla_us.sn", TILE_OPC_MULHLA_US_SN, 0x1, 3, TREG_SN, 1, | ||
611 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
612 | }, | ||
613 | { "mulhla_uu", TILE_OPC_MULHLA_UU, 0x1, 3, TREG_ZERO, 1, | ||
614 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
615 | }, | ||
616 | { "mulhla_uu.sn", TILE_OPC_MULHLA_UU_SN, 0x1, 3, TREG_SN, 1, | ||
617 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
618 | }, | ||
619 | { "mulhlsa_uu", TILE_OPC_MULHLSA_UU, 0x5, 3, TREG_ZERO, 1, | ||
620 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
621 | }, | ||
622 | { "mulhlsa_uu.sn", TILE_OPC_MULHLSA_UU_SN, 0x1, 3, TREG_SN, 1, | ||
623 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
624 | }, | ||
625 | { "mulll_ss", TILE_OPC_MULLL_SS, 0x5, 3, TREG_ZERO, 1, | ||
626 | { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, | ||
627 | }, | ||
628 | { "mulll_ss.sn", TILE_OPC_MULLL_SS_SN, 0x1, 3, TREG_SN, 1, | ||
629 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
630 | }, | ||
631 | { "mulll_su", TILE_OPC_MULLL_SU, 0x1, 3, TREG_ZERO, 1, | ||
632 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
633 | }, | ||
634 | { "mulll_su.sn", TILE_OPC_MULLL_SU_SN, 0x1, 3, TREG_SN, 1, | ||
635 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
636 | }, | ||
637 | { "mulll_uu", TILE_OPC_MULLL_UU, 0x5, 3, TREG_ZERO, 1, | ||
638 | { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, | ||
639 | }, | ||
640 | { "mulll_uu.sn", TILE_OPC_MULLL_UU_SN, 0x1, 3, TREG_SN, 1, | ||
641 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
642 | }, | ||
643 | { "mullla_ss", TILE_OPC_MULLLA_SS, 0x5, 3, TREG_ZERO, 1, | ||
644 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
645 | }, | ||
646 | { "mullla_ss.sn", TILE_OPC_MULLLA_SS_SN, 0x1, 3, TREG_SN, 1, | ||
647 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
648 | }, | ||
649 | { "mullla_su", TILE_OPC_MULLLA_SU, 0x1, 3, TREG_ZERO, 1, | ||
650 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
651 | }, | ||
652 | { "mullla_su.sn", TILE_OPC_MULLLA_SU_SN, 0x1, 3, TREG_SN, 1, | ||
653 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
654 | }, | ||
655 | { "mullla_uu", TILE_OPC_MULLLA_UU, 0x5, 3, TREG_ZERO, 1, | ||
656 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
657 | }, | ||
658 | { "mullla_uu.sn", TILE_OPC_MULLLA_UU_SN, 0x1, 3, TREG_SN, 1, | ||
659 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
660 | }, | ||
661 | { "mulllsa_uu", TILE_OPC_MULLLSA_UU, 0x1, 3, TREG_ZERO, 1, | ||
662 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
663 | }, | ||
664 | { "mulllsa_uu.sn", TILE_OPC_MULLLSA_UU_SN, 0x1, 3, TREG_SN, 1, | ||
665 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
666 | }, | ||
667 | { "mvnz", TILE_OPC_MVNZ, 0x5, 3, TREG_ZERO, 1, | ||
668 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
669 | }, | ||
670 | { "mvnz.sn", TILE_OPC_MVNZ_SN, 0x1, 3, TREG_SN, 1, | ||
671 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
672 | }, | ||
673 | { "mvz", TILE_OPC_MVZ, 0x5, 3, TREG_ZERO, 1, | ||
674 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
675 | }, | ||
676 | { "mvz.sn", TILE_OPC_MVZ_SN, 0x1, 3, TREG_SN, 1, | ||
677 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
678 | }, | ||
679 | { "mz", TILE_OPC_MZ, 0xf, 3, TREG_ZERO, 1, | ||
680 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
681 | }, | ||
682 | { "mz.sn", TILE_OPC_MZ_SN, 0x3, 3, TREG_SN, 1, | ||
683 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
684 | }, | ||
685 | { "mzb", TILE_OPC_MZB, 0x3, 3, TREG_ZERO, 1, | ||
686 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
687 | }, | ||
688 | { "mzb.sn", TILE_OPC_MZB_SN, 0x3, 3, TREG_SN, 1, | ||
689 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
690 | }, | ||
691 | { "mzh", TILE_OPC_MZH, 0x3, 3, TREG_ZERO, 1, | ||
692 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
693 | }, | ||
694 | { "mzh.sn", TILE_OPC_MZH_SN, 0x3, 3, TREG_SN, 1, | ||
695 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
696 | }, | ||
697 | { "nap", TILE_OPC_NAP, 0x2, 0, TREG_ZERO, 0, | ||
698 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
699 | }, | ||
700 | { "nop", TILE_OPC_NOP, 0xf, 0, TREG_ZERO, 1, | ||
701 | { { }, { }, { }, { }, { 0, } }, | ||
702 | }, | ||
703 | { "nor", TILE_OPC_NOR, 0xf, 3, TREG_ZERO, 1, | ||
704 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
705 | }, | ||
706 | { "nor.sn", TILE_OPC_NOR_SN, 0x3, 3, TREG_SN, 1, | ||
707 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
708 | }, | ||
709 | { "or", TILE_OPC_OR, 0xf, 3, TREG_ZERO, 1, | ||
710 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
711 | }, | ||
712 | { "or.sn", TILE_OPC_OR_SN, 0x3, 3, TREG_SN, 1, | ||
713 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
714 | }, | ||
715 | { "ori", TILE_OPC_ORI, 0xf, 3, TREG_ZERO, 1, | ||
716 | { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, | ||
717 | }, | ||
718 | { "ori.sn", TILE_OPC_ORI_SN, 0x3, 3, TREG_SN, 1, | ||
719 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
720 | }, | ||
721 | { "packbs_u", TILE_OPC_PACKBS_U, 0x3, 3, TREG_ZERO, 1, | ||
722 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
723 | }, | ||
724 | { "packbs_u.sn", TILE_OPC_PACKBS_U_SN, 0x3, 3, TREG_SN, 1, | ||
725 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
726 | }, | ||
727 | { "packhb", TILE_OPC_PACKHB, 0x3, 3, TREG_ZERO, 1, | ||
728 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
729 | }, | ||
730 | { "packhb.sn", TILE_OPC_PACKHB_SN, 0x3, 3, TREG_SN, 1, | ||
731 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
732 | }, | ||
733 | { "packhs", TILE_OPC_PACKHS, 0x3, 3, TREG_ZERO, 1, | ||
734 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
735 | }, | ||
736 | { "packhs.sn", TILE_OPC_PACKHS_SN, 0x3, 3, TREG_SN, 1, | ||
737 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
738 | }, | ||
739 | { "packlb", TILE_OPC_PACKLB, 0x3, 3, TREG_ZERO, 1, | ||
740 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
741 | }, | ||
742 | { "packlb.sn", TILE_OPC_PACKLB_SN, 0x3, 3, TREG_SN, 1, | ||
743 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
744 | }, | ||
745 | { "pcnt", TILE_OPC_PCNT, 0x5, 2, TREG_ZERO, 1, | ||
746 | { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, | ||
747 | }, | ||
748 | { "pcnt.sn", TILE_OPC_PCNT_SN, 0x1, 2, TREG_SN, 1, | ||
749 | { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
750 | }, | ||
751 | { "rl", TILE_OPC_RL, 0xf, 3, TREG_ZERO, 1, | ||
752 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
753 | }, | ||
754 | { "rl.sn", TILE_OPC_RL_SN, 0x3, 3, TREG_SN, 1, | ||
755 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
756 | }, | ||
757 | { "rli", TILE_OPC_RLI, 0xf, 3, TREG_ZERO, 1, | ||
758 | { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, | ||
759 | }, | ||
760 | { "rli.sn", TILE_OPC_RLI_SN, 0x3, 3, TREG_SN, 1, | ||
761 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
762 | }, | ||
763 | { "s1a", TILE_OPC_S1A, 0xf, 3, TREG_ZERO, 1, | ||
764 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
765 | }, | ||
766 | { "s1a.sn", TILE_OPC_S1A_SN, 0x3, 3, TREG_SN, 1, | ||
767 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
768 | }, | ||
769 | { "s2a", TILE_OPC_S2A, 0xf, 3, TREG_ZERO, 1, | ||
770 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
771 | }, | ||
772 | { "s2a.sn", TILE_OPC_S2A_SN, 0x3, 3, TREG_SN, 1, | ||
773 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
774 | }, | ||
775 | { "s3a", TILE_OPC_S3A, 0xf, 3, TREG_ZERO, 1, | ||
776 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
777 | }, | ||
778 | { "s3a.sn", TILE_OPC_S3A_SN, 0x3, 3, TREG_SN, 1, | ||
779 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
780 | }, | ||
781 | { "sadab_u", TILE_OPC_SADAB_U, 0x1, 3, TREG_ZERO, 1, | ||
782 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
783 | }, | ||
784 | { "sadab_u.sn", TILE_OPC_SADAB_U_SN, 0x1, 3, TREG_SN, 1, | ||
785 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
786 | }, | ||
787 | { "sadah", TILE_OPC_SADAH, 0x1, 3, TREG_ZERO, 1, | ||
788 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
789 | }, | ||
790 | { "sadah.sn", TILE_OPC_SADAH_SN, 0x1, 3, TREG_SN, 1, | ||
791 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
792 | }, | ||
793 | { "sadah_u", TILE_OPC_SADAH_U, 0x1, 3, TREG_ZERO, 1, | ||
794 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
795 | }, | ||
796 | { "sadah_u.sn", TILE_OPC_SADAH_U_SN, 0x1, 3, TREG_SN, 1, | ||
797 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
798 | }, | ||
799 | { "sadb_u", TILE_OPC_SADB_U, 0x1, 3, TREG_ZERO, 1, | ||
800 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
801 | }, | ||
802 | { "sadb_u.sn", TILE_OPC_SADB_U_SN, 0x1, 3, TREG_SN, 1, | ||
803 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
804 | }, | ||
805 | { "sadh", TILE_OPC_SADH, 0x1, 3, TREG_ZERO, 1, | ||
806 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
807 | }, | ||
808 | { "sadh.sn", TILE_OPC_SADH_SN, 0x1, 3, TREG_SN, 1, | ||
809 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
810 | }, | ||
811 | { "sadh_u", TILE_OPC_SADH_U, 0x1, 3, TREG_ZERO, 1, | ||
812 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
813 | }, | ||
814 | { "sadh_u.sn", TILE_OPC_SADH_U_SN, 0x1, 3, TREG_SN, 1, | ||
815 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
816 | }, | ||
817 | { "sb", TILE_OPC_SB, 0x12, 2, TREG_ZERO, 1, | ||
818 | { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } }, | ||
819 | }, | ||
820 | { "sbadd", TILE_OPC_SBADD, 0x2, 3, TREG_ZERO, 1, | ||
821 | { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } }, | ||
822 | }, | ||
823 | { "seq", TILE_OPC_SEQ, 0xf, 3, TREG_ZERO, 1, | ||
824 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
825 | }, | ||
826 | { "seq.sn", TILE_OPC_SEQ_SN, 0x3, 3, TREG_SN, 1, | ||
827 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
828 | }, | ||
829 | { "seqb", TILE_OPC_SEQB, 0x3, 3, TREG_ZERO, 1, | ||
830 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
831 | }, | ||
832 | { "seqb.sn", TILE_OPC_SEQB_SN, 0x3, 3, TREG_SN, 1, | ||
833 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
834 | }, | ||
835 | { "seqh", TILE_OPC_SEQH, 0x3, 3, TREG_ZERO, 1, | ||
836 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
837 | }, | ||
838 | { "seqh.sn", TILE_OPC_SEQH_SN, 0x3, 3, TREG_SN, 1, | ||
839 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
840 | }, | ||
841 | { "seqi", TILE_OPC_SEQI, 0xf, 3, TREG_ZERO, 1, | ||
842 | { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, | ||
843 | }, | ||
844 | { "seqi.sn", TILE_OPC_SEQI_SN, 0x3, 3, TREG_SN, 1, | ||
845 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
846 | }, | ||
847 | { "seqib", TILE_OPC_SEQIB, 0x3, 3, TREG_ZERO, 1, | ||
848 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
849 | }, | ||
850 | { "seqib.sn", TILE_OPC_SEQIB_SN, 0x3, 3, TREG_SN, 1, | ||
851 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
852 | }, | ||
853 | { "seqih", TILE_OPC_SEQIH, 0x3, 3, TREG_ZERO, 1, | ||
854 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
855 | }, | ||
856 | { "seqih.sn", TILE_OPC_SEQIH_SN, 0x3, 3, TREG_SN, 1, | ||
857 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
858 | }, | ||
859 | { "sh", TILE_OPC_SH, 0x12, 2, TREG_ZERO, 1, | ||
860 | { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } }, | ||
861 | }, | ||
862 | { "shadd", TILE_OPC_SHADD, 0x2, 3, TREG_ZERO, 1, | ||
863 | { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } }, | ||
864 | }, | ||
865 | { "shl", TILE_OPC_SHL, 0xf, 3, TREG_ZERO, 1, | ||
866 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
867 | }, | ||
868 | { "shl.sn", TILE_OPC_SHL_SN, 0x3, 3, TREG_SN, 1, | ||
869 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
870 | }, | ||
871 | { "shlb", TILE_OPC_SHLB, 0x3, 3, TREG_ZERO, 1, | ||
872 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
873 | }, | ||
874 | { "shlb.sn", TILE_OPC_SHLB_SN, 0x3, 3, TREG_SN, 1, | ||
875 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
876 | }, | ||
877 | { "shlh", TILE_OPC_SHLH, 0x3, 3, TREG_ZERO, 1, | ||
878 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
879 | }, | ||
880 | { "shlh.sn", TILE_OPC_SHLH_SN, 0x3, 3, TREG_SN, 1, | ||
881 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
882 | }, | ||
883 | { "shli", TILE_OPC_SHLI, 0xf, 3, TREG_ZERO, 1, | ||
884 | { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, | ||
885 | }, | ||
886 | { "shli.sn", TILE_OPC_SHLI_SN, 0x3, 3, TREG_SN, 1, | ||
887 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
888 | }, | ||
889 | { "shlib", TILE_OPC_SHLIB, 0x3, 3, TREG_ZERO, 1, | ||
890 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
891 | }, | ||
892 | { "shlib.sn", TILE_OPC_SHLIB_SN, 0x3, 3, TREG_SN, 1, | ||
893 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
894 | }, | ||
895 | { "shlih", TILE_OPC_SHLIH, 0x3, 3, TREG_ZERO, 1, | ||
896 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
897 | }, | ||
898 | { "shlih.sn", TILE_OPC_SHLIH_SN, 0x3, 3, TREG_SN, 1, | ||
899 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
900 | }, | ||
901 | { "shr", TILE_OPC_SHR, 0xf, 3, TREG_ZERO, 1, | ||
902 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
903 | }, | ||
904 | { "shr.sn", TILE_OPC_SHR_SN, 0x3, 3, TREG_SN, 1, | ||
905 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
906 | }, | ||
907 | { "shrb", TILE_OPC_SHRB, 0x3, 3, TREG_ZERO, 1, | ||
908 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
909 | }, | ||
910 | { "shrb.sn", TILE_OPC_SHRB_SN, 0x3, 3, TREG_SN, 1, | ||
911 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
912 | }, | ||
913 | { "shrh", TILE_OPC_SHRH, 0x3, 3, TREG_ZERO, 1, | ||
914 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
915 | }, | ||
916 | { "shrh.sn", TILE_OPC_SHRH_SN, 0x3, 3, TREG_SN, 1, | ||
917 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
918 | }, | ||
919 | { "shri", TILE_OPC_SHRI, 0xf, 3, TREG_ZERO, 1, | ||
920 | { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, | ||
921 | }, | ||
922 | { "shri.sn", TILE_OPC_SHRI_SN, 0x3, 3, TREG_SN, 1, | ||
923 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
924 | }, | ||
925 | { "shrib", TILE_OPC_SHRIB, 0x3, 3, TREG_ZERO, 1, | ||
926 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
927 | }, | ||
928 | { "shrib.sn", TILE_OPC_SHRIB_SN, 0x3, 3, TREG_SN, 1, | ||
929 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
930 | }, | ||
931 | { "shrih", TILE_OPC_SHRIH, 0x3, 3, TREG_ZERO, 1, | ||
932 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
933 | }, | ||
934 | { "shrih.sn", TILE_OPC_SHRIH_SN, 0x3, 3, TREG_SN, 1, | ||
935 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
936 | }, | ||
937 | { "slt", TILE_OPC_SLT, 0xf, 3, TREG_ZERO, 1, | ||
938 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
939 | }, | ||
940 | { "slt.sn", TILE_OPC_SLT_SN, 0x3, 3, TREG_SN, 1, | ||
941 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
942 | }, | ||
943 | { "slt_u", TILE_OPC_SLT_U, 0xf, 3, TREG_ZERO, 1, | ||
944 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
945 | }, | ||
946 | { "slt_u.sn", TILE_OPC_SLT_U_SN, 0x3, 3, TREG_SN, 1, | ||
947 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
948 | }, | ||
949 | { "sltb", TILE_OPC_SLTB, 0x3, 3, TREG_ZERO, 1, | ||
950 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
951 | }, | ||
952 | { "sltb.sn", TILE_OPC_SLTB_SN, 0x3, 3, TREG_SN, 1, | ||
953 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
954 | }, | ||
955 | { "sltb_u", TILE_OPC_SLTB_U, 0x3, 3, TREG_ZERO, 1, | ||
956 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
957 | }, | ||
958 | { "sltb_u.sn", TILE_OPC_SLTB_U_SN, 0x3, 3, TREG_SN, 1, | ||
959 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
960 | }, | ||
961 | { "slte", TILE_OPC_SLTE, 0xf, 3, TREG_ZERO, 1, | ||
962 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
963 | }, | ||
964 | { "slte.sn", TILE_OPC_SLTE_SN, 0x3, 3, TREG_SN, 1, | ||
965 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
966 | }, | ||
967 | { "slte_u", TILE_OPC_SLTE_U, 0xf, 3, TREG_ZERO, 1, | ||
968 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
969 | }, | ||
970 | { "slte_u.sn", TILE_OPC_SLTE_U_SN, 0x3, 3, TREG_SN, 1, | ||
971 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
972 | }, | ||
973 | { "slteb", TILE_OPC_SLTEB, 0x3, 3, TREG_ZERO, 1, | ||
974 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
975 | }, | ||
976 | { "slteb.sn", TILE_OPC_SLTEB_SN, 0x3, 3, TREG_SN, 1, | ||
977 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
978 | }, | ||
979 | { "slteb_u", TILE_OPC_SLTEB_U, 0x3, 3, TREG_ZERO, 1, | ||
980 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
981 | }, | ||
982 | { "slteb_u.sn", TILE_OPC_SLTEB_U_SN, 0x3, 3, TREG_SN, 1, | ||
983 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
984 | }, | ||
985 | { "slteh", TILE_OPC_SLTEH, 0x3, 3, TREG_ZERO, 1, | ||
986 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
987 | }, | ||
988 | { "slteh.sn", TILE_OPC_SLTEH_SN, 0x3, 3, TREG_SN, 1, | ||
989 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
990 | }, | ||
991 | { "slteh_u", TILE_OPC_SLTEH_U, 0x3, 3, TREG_ZERO, 1, | ||
992 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
993 | }, | ||
994 | { "slteh_u.sn", TILE_OPC_SLTEH_U_SN, 0x3, 3, TREG_SN, 1, | ||
995 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
996 | }, | ||
997 | { "slth", TILE_OPC_SLTH, 0x3, 3, TREG_ZERO, 1, | ||
998 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
999 | }, | ||
1000 | { "slth.sn", TILE_OPC_SLTH_SN, 0x3, 3, TREG_SN, 1, | ||
1001 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1002 | }, | ||
1003 | { "slth_u", TILE_OPC_SLTH_U, 0x3, 3, TREG_ZERO, 1, | ||
1004 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1005 | }, | ||
1006 | { "slth_u.sn", TILE_OPC_SLTH_U_SN, 0x3, 3, TREG_SN, 1, | ||
1007 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1008 | }, | ||
1009 | { "slti", TILE_OPC_SLTI, 0xf, 3, TREG_ZERO, 1, | ||
1010 | { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, | ||
1011 | }, | ||
1012 | { "slti.sn", TILE_OPC_SLTI_SN, 0x3, 3, TREG_SN, 1, | ||
1013 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1014 | }, | ||
1015 | { "slti_u", TILE_OPC_SLTI_U, 0xf, 3, TREG_ZERO, 1, | ||
1016 | { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, | ||
1017 | }, | ||
1018 | { "slti_u.sn", TILE_OPC_SLTI_U_SN, 0x3, 3, TREG_SN, 1, | ||
1019 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1020 | }, | ||
1021 | { "sltib", TILE_OPC_SLTIB, 0x3, 3, TREG_ZERO, 1, | ||
1022 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1023 | }, | ||
1024 | { "sltib.sn", TILE_OPC_SLTIB_SN, 0x3, 3, TREG_SN, 1, | ||
1025 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1026 | }, | ||
1027 | { "sltib_u", TILE_OPC_SLTIB_U, 0x3, 3, TREG_ZERO, 1, | ||
1028 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1029 | }, | ||
1030 | { "sltib_u.sn", TILE_OPC_SLTIB_U_SN, 0x3, 3, TREG_SN, 1, | ||
1031 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1032 | }, | ||
1033 | { "sltih", TILE_OPC_SLTIH, 0x3, 3, TREG_ZERO, 1, | ||
1034 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1035 | }, | ||
1036 | { "sltih.sn", TILE_OPC_SLTIH_SN, 0x3, 3, TREG_SN, 1, | ||
1037 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1038 | }, | ||
1039 | { "sltih_u", TILE_OPC_SLTIH_U, 0x3, 3, TREG_ZERO, 1, | ||
1040 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1041 | }, | ||
1042 | { "sltih_u.sn", TILE_OPC_SLTIH_U_SN, 0x3, 3, TREG_SN, 1, | ||
1043 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1044 | }, | ||
1045 | { "sne", TILE_OPC_SNE, 0xf, 3, TREG_ZERO, 1, | ||
1046 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
1047 | }, | ||
1048 | { "sne.sn", TILE_OPC_SNE_SN, 0x3, 3, TREG_SN, 1, | ||
1049 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1050 | }, | ||
1051 | { "sneb", TILE_OPC_SNEB, 0x3, 3, TREG_ZERO, 1, | ||
1052 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1053 | }, | ||
1054 | { "sneb.sn", TILE_OPC_SNEB_SN, 0x3, 3, TREG_SN, 1, | ||
1055 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1056 | }, | ||
1057 | { "sneh", TILE_OPC_SNEH, 0x3, 3, TREG_ZERO, 1, | ||
1058 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1059 | }, | ||
1060 | { "sneh.sn", TILE_OPC_SNEH_SN, 0x3, 3, TREG_SN, 1, | ||
1061 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1062 | }, | ||
1063 | { "sra", TILE_OPC_SRA, 0xf, 3, TREG_ZERO, 1, | ||
1064 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
1065 | }, | ||
1066 | { "sra.sn", TILE_OPC_SRA_SN, 0x3, 3, TREG_SN, 1, | ||
1067 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1068 | }, | ||
1069 | { "srab", TILE_OPC_SRAB, 0x3, 3, TREG_ZERO, 1, | ||
1070 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1071 | }, | ||
1072 | { "srab.sn", TILE_OPC_SRAB_SN, 0x3, 3, TREG_SN, 1, | ||
1073 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1074 | }, | ||
1075 | { "srah", TILE_OPC_SRAH, 0x3, 3, TREG_ZERO, 1, | ||
1076 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1077 | }, | ||
1078 | { "srah.sn", TILE_OPC_SRAH_SN, 0x3, 3, TREG_SN, 1, | ||
1079 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1080 | }, | ||
1081 | { "srai", TILE_OPC_SRAI, 0xf, 3, TREG_ZERO, 1, | ||
1082 | { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, | ||
1083 | }, | ||
1084 | { "srai.sn", TILE_OPC_SRAI_SN, 0x3, 3, TREG_SN, 1, | ||
1085 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
1086 | }, | ||
1087 | { "sraib", TILE_OPC_SRAIB, 0x3, 3, TREG_ZERO, 1, | ||
1088 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
1089 | }, | ||
1090 | { "sraib.sn", TILE_OPC_SRAIB_SN, 0x3, 3, TREG_SN, 1, | ||
1091 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
1092 | }, | ||
1093 | { "sraih", TILE_OPC_SRAIH, 0x3, 3, TREG_ZERO, 1, | ||
1094 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
1095 | }, | ||
1096 | { "sraih.sn", TILE_OPC_SRAIH_SN, 0x3, 3, TREG_SN, 1, | ||
1097 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
1098 | }, | ||
1099 | { "sub", TILE_OPC_SUB, 0xf, 3, TREG_ZERO, 1, | ||
1100 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
1101 | }, | ||
1102 | { "sub.sn", TILE_OPC_SUB_SN, 0x3, 3, TREG_SN, 1, | ||
1103 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1104 | }, | ||
1105 | { "subb", TILE_OPC_SUBB, 0x3, 3, TREG_ZERO, 1, | ||
1106 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1107 | }, | ||
1108 | { "subb.sn", TILE_OPC_SUBB_SN, 0x3, 3, TREG_SN, 1, | ||
1109 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1110 | }, | ||
1111 | { "subbs_u", TILE_OPC_SUBBS_U, 0x3, 3, TREG_ZERO, 1, | ||
1112 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1113 | }, | ||
1114 | { "subbs_u.sn", TILE_OPC_SUBBS_U_SN, 0x3, 3, TREG_SN, 1, | ||
1115 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1116 | }, | ||
1117 | { "subh", TILE_OPC_SUBH, 0x3, 3, TREG_ZERO, 1, | ||
1118 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1119 | }, | ||
1120 | { "subh.sn", TILE_OPC_SUBH_SN, 0x3, 3, TREG_SN, 1, | ||
1121 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1122 | }, | ||
1123 | { "subhs", TILE_OPC_SUBHS, 0x3, 3, TREG_ZERO, 1, | ||
1124 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1125 | }, | ||
1126 | { "subhs.sn", TILE_OPC_SUBHS_SN, 0x3, 3, TREG_SN, 1, | ||
1127 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1128 | }, | ||
1129 | { "subs", TILE_OPC_SUBS, 0x3, 3, TREG_ZERO, 1, | ||
1130 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1131 | }, | ||
1132 | { "subs.sn", TILE_OPC_SUBS_SN, 0x3, 3, TREG_SN, 1, | ||
1133 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1134 | }, | ||
1135 | { "sw", TILE_OPC_SW, 0x12, 2, TREG_ZERO, 1, | ||
1136 | { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } }, | ||
1137 | }, | ||
1138 | { "swadd", TILE_OPC_SWADD, 0x2, 3, TREG_ZERO, 1, | ||
1139 | { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } }, | ||
1140 | }, | ||
1141 | { "swint0", TILE_OPC_SWINT0, 0x2, 0, TREG_ZERO, 0, | ||
1142 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
1143 | }, | ||
1144 | { "swint1", TILE_OPC_SWINT1, 0x2, 0, TREG_ZERO, 0, | ||
1145 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
1146 | }, | ||
1147 | { "swint2", TILE_OPC_SWINT2, 0x2, 0, TREG_ZERO, 0, | ||
1148 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
1149 | }, | ||
1150 | { "swint3", TILE_OPC_SWINT3, 0x2, 0, TREG_ZERO, 0, | ||
1151 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
1152 | }, | ||
1153 | { "tblidxb0", TILE_OPC_TBLIDXB0, 0x5, 2, TREG_ZERO, 1, | ||
1154 | { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, | ||
1155 | }, | ||
1156 | { "tblidxb0.sn", TILE_OPC_TBLIDXB0_SN, 0x1, 2, TREG_SN, 1, | ||
1157 | { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
1158 | }, | ||
1159 | { "tblidxb1", TILE_OPC_TBLIDXB1, 0x5, 2, TREG_ZERO, 1, | ||
1160 | { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, | ||
1161 | }, | ||
1162 | { "tblidxb1.sn", TILE_OPC_TBLIDXB1_SN, 0x1, 2, TREG_SN, 1, | ||
1163 | { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
1164 | }, | ||
1165 | { "tblidxb2", TILE_OPC_TBLIDXB2, 0x5, 2, TREG_ZERO, 1, | ||
1166 | { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, | ||
1167 | }, | ||
1168 | { "tblidxb2.sn", TILE_OPC_TBLIDXB2_SN, 0x1, 2, TREG_SN, 1, | ||
1169 | { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
1170 | }, | ||
1171 | { "tblidxb3", TILE_OPC_TBLIDXB3, 0x5, 2, TREG_ZERO, 1, | ||
1172 | { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, | ||
1173 | }, | ||
1174 | { "tblidxb3.sn", TILE_OPC_TBLIDXB3_SN, 0x1, 2, TREG_SN, 1, | ||
1175 | { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
1176 | }, | ||
1177 | { "tns", TILE_OPC_TNS, 0x2, 2, TREG_ZERO, 1, | ||
1178 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
1179 | }, | ||
1180 | { "tns.sn", TILE_OPC_TNS_SN, 0x2, 2, TREG_SN, 1, | ||
1181 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
1182 | }, | ||
1183 | { "wh64", TILE_OPC_WH64, 0x2, 1, TREG_ZERO, 1, | ||
1184 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
1185 | }, | ||
1186 | { "xor", TILE_OPC_XOR, 0xf, 3, TREG_ZERO, 1, | ||
1187 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
1188 | }, | ||
1189 | { "xor.sn", TILE_OPC_XOR_SN, 0x3, 3, TREG_SN, 1, | ||
1190 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1191 | }, | ||
1192 | { "xori", TILE_OPC_XORI, 0x3, 3, TREG_ZERO, 1, | ||
1193 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1194 | }, | ||
1195 | { "xori.sn", TILE_OPC_XORI_SN, 0x3, 3, TREG_SN, 1, | ||
1196 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1197 | }, | ||
1198 | { NULL, TILE_OPC_NONE, 0, 0, TREG_ZERO, 0, { { 0, } }, | ||
1199 | } | ||
1200 | }; | ||
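The rows above initialize what is evidently the tile_opcodes[] table: mnemonic string, mnemonic enum, a bitmask of the pipelines that may execute the instruction, the operand count, the implicitly written register (TREG_SN for the .sn variants, which also update the status/network flag), a can-bundle flag, and one operand-index array per pipeline encoding. A minimal sketch of reading one row follows; the field meanings beyond what the initializers show, and the companion tile_operands[] table, are assumptions based on the tile_opcode declaration in the matching header:

    /* Hedged sketch: interpreting the "sw" row above. */
    const struct tile_opcode *op = &tile_opcodes[TILE_OPC_SW];

    /*
     * op->pipes == 0x12 has bits 1 and 4 set, matching the two
     * non-empty operand arrays in the row: "sw" issues only on
     * pipelines X1 and Y2.
     */

    /*
     * op->num_operands == 2; the X1 encoding stores indices 10 and
     * 17, which select tile_operands[] entries describing where the
     * address and value registers sit in the bundle, while the Y2
     * encoding uses entries 15 and 36.
     */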
1201 | #define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6)) | ||
1202 | #define CHILD(array_index) (TILE_OPC_NONE + (array_index)) | ||
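BITFIELD() packs a field extractor into a single 16-bit FSM cell: the low six bits give the starting bit position and the upper bits give the value mask, so each table node means "shift right by start, mask, then use the result to pick a successor". CHILD() biases sub-table indices past TILE_OPC_NONE, the largest mnemonic value (it terminates the opcode table above), so one comparison distinguishes a terminal mnemonic from a link. Below is a minimal walker in the spirit of the find_opcode() routine this file pairs with these tables; the function name, the tile_bundle_bits typedef, and the per-slot table selection are assumptions here:

    /* Hedged sketch: walking one decoder FSM for one pipeline slot. */
    static const struct tile_opcode *
    decode_slot(tile_bundle_bits bits, const unsigned short *table)
    {
            int index = 0;

            for (;;) {
                    /* Low 6 bits: field start; upper bits: field mask. */
                    unsigned short bitspec = table[index];
                    unsigned int field =
                            ((unsigned int)(bits >> (bitspec & 63))) &
                            (bitspec >> 6);

                    /* The extracted value indexes the cells that follow. */
                    unsigned short next = table[index + 1 + field];
                    if (next <= TILE_OPC_NONE)
                            return &tile_opcodes[next]; /* terminal mnemonic */

                    index = next - TILE_OPC_NONE; /* follow a CHILD() link */
            }
    }

The root cell of decode_X0_fsm below, BITFIELD(22, 9), accordingly dispatches on a nine-bit opcode field at bits 22..30, and its 512 successor cells run exactly up to the sub-table at /* index 513 */.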
1203 | |||
1204 | static const unsigned short decode_X0_fsm[1153] = | ||
1205 | { | ||
1206 | BITFIELD(22, 9) /* index 0 */, | ||
1207 | CHILD(513), CHILD(530), CHILD(547), CHILD(564), CHILD(596), CHILD(613), | ||
1208 | CHILD(630), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1209 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1210 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1211 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1212 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1213 | TILE_OPC_NONE, CHILD(663), CHILD(680), CHILD(697), CHILD(714), CHILD(746), | ||
1214 | CHILD(763), CHILD(780), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1215 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1216 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1217 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1218 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1219 | TILE_OPC_NONE, TILE_OPC_NONE, CHILD(813), CHILD(813), CHILD(813), | ||
1220 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1221 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1222 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1223 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1224 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1225 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1226 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1227 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1228 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1229 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1230 | CHILD(813), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1231 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1232 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1233 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1234 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1235 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1236 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1237 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1238 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1239 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1240 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(843), | ||
1241 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1242 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1243 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1244 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1245 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1246 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1247 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1248 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1249 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1250 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1251 | CHILD(843), CHILD(843), CHILD(843), CHILD(873), CHILD(878), CHILD(883), | ||
1252 | CHILD(903), CHILD(908), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1253 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1254 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1255 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1256 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1257 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(913), | ||
1258 | CHILD(918), CHILD(923), CHILD(943), CHILD(948), TILE_OPC_NONE, | ||
1259 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1260 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1261 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1262 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1263 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1264 | TILE_OPC_NONE, CHILD(953), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1265 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1266 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1267 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1268 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1269 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1270 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(988), TILE_OPC_NONE, | ||
1271 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1272 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1273 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1274 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1275 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1276 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1277 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1278 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1279 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1280 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1281 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1282 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1283 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1284 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1285 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1286 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1287 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1288 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1289 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, CHILD(993), | ||
1290 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1291 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1292 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1293 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1294 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1295 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1296 | TILE_OPC_NONE, CHILD(1076), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1297 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1298 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1299 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1300 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1301 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1302 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1303 | BITFIELD(18, 4) /* index 513 */, | ||
1304 | TILE_OPC_NONE, TILE_OPC_ADDB, TILE_OPC_ADDH, TILE_OPC_ADD, | ||
1305 | TILE_OPC_ADIFFB_U, TILE_OPC_ADIFFH, TILE_OPC_AND, TILE_OPC_AVGB_U, | ||
1306 | TILE_OPC_AVGH, TILE_OPC_CRC32_32, TILE_OPC_CRC32_8, TILE_OPC_INTHB, | ||
1307 | TILE_OPC_INTHH, TILE_OPC_INTLB, TILE_OPC_INTLH, TILE_OPC_MAXB_U, | ||
1308 | BITFIELD(18, 4) /* index 530 */, | ||
1309 | TILE_OPC_MAXH, TILE_OPC_MINB_U, TILE_OPC_MINH, TILE_OPC_MNZB, TILE_OPC_MNZH, | ||
1310 | TILE_OPC_MNZ, TILE_OPC_MULHHA_SS, TILE_OPC_MULHHA_SU, TILE_OPC_MULHHA_UU, | ||
1311 | TILE_OPC_MULHHSA_UU, TILE_OPC_MULHH_SS, TILE_OPC_MULHH_SU, | ||
1312 | TILE_OPC_MULHH_UU, TILE_OPC_MULHLA_SS, TILE_OPC_MULHLA_SU, | ||
1313 | TILE_OPC_MULHLA_US, | ||
1314 | BITFIELD(18, 4) /* index 547 */, | ||
1315 | TILE_OPC_MULHLA_UU, TILE_OPC_MULHLSA_UU, TILE_OPC_MULHL_SS, | ||
1316 | TILE_OPC_MULHL_SU, TILE_OPC_MULHL_US, TILE_OPC_MULHL_UU, TILE_OPC_MULLLA_SS, | ||
1317 | TILE_OPC_MULLLA_SU, TILE_OPC_MULLLA_UU, TILE_OPC_MULLLSA_UU, | ||
1318 | TILE_OPC_MULLL_SS, TILE_OPC_MULLL_SU, TILE_OPC_MULLL_UU, TILE_OPC_MVNZ, | ||
1319 | TILE_OPC_MVZ, TILE_OPC_MZB, | ||
1320 | BITFIELD(18, 4) /* index 564 */, | ||
1321 | TILE_OPC_MZH, TILE_OPC_MZ, TILE_OPC_NOR, CHILD(581), TILE_OPC_PACKHB, | ||
1322 | TILE_OPC_PACKLB, TILE_OPC_RL, TILE_OPC_S1A, TILE_OPC_S2A, TILE_OPC_S3A, | ||
1323 | TILE_OPC_SADAB_U, TILE_OPC_SADAH, TILE_OPC_SADAH_U, TILE_OPC_SADB_U, | ||
1324 | TILE_OPC_SADH, TILE_OPC_SADH_U, | ||
1325 | BITFIELD(12, 2) /* index 581 */, | ||
1326 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(586), | ||
1327 | BITFIELD(14, 2) /* index 586 */, | ||
1328 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(591), | ||
1329 | BITFIELD(16, 2) /* index 591 */, | ||
1330 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_MOVE, | ||
1331 | BITFIELD(18, 4) /* index 596 */, | ||
1332 | TILE_OPC_SEQB, TILE_OPC_SEQH, TILE_OPC_SEQ, TILE_OPC_SHLB, TILE_OPC_SHLH, | ||
1333 | TILE_OPC_SHL, TILE_OPC_SHRB, TILE_OPC_SHRH, TILE_OPC_SHR, TILE_OPC_SLTB, | ||
1334 | TILE_OPC_SLTB_U, TILE_OPC_SLTEB, TILE_OPC_SLTEB_U, TILE_OPC_SLTEH, | ||
1335 | TILE_OPC_SLTEH_U, TILE_OPC_SLTE, | ||
1336 | BITFIELD(18, 4) /* index 613 */, | ||
1337 | TILE_OPC_SLTE_U, TILE_OPC_SLTH, TILE_OPC_SLTH_U, TILE_OPC_SLT, | ||
1338 | TILE_OPC_SLT_U, TILE_OPC_SNEB, TILE_OPC_SNEH, TILE_OPC_SNE, TILE_OPC_SRAB, | ||
1339 | TILE_OPC_SRAH, TILE_OPC_SRA, TILE_OPC_SUBB, TILE_OPC_SUBH, TILE_OPC_SUB, | ||
1340 | TILE_OPC_XOR, TILE_OPC_DWORD_ALIGN, | ||
1341 | BITFIELD(18, 3) /* index 630 */, | ||
1342 | CHILD(639), CHILD(642), CHILD(645), CHILD(648), CHILD(651), CHILD(654), | ||
1343 | CHILD(657), CHILD(660), | ||
1344 | BITFIELD(21, 1) /* index 639 */, | ||
1345 | TILE_OPC_ADDS, TILE_OPC_NONE, | ||
1346 | BITFIELD(21, 1) /* index 642 */, | ||
1347 | TILE_OPC_SUBS, TILE_OPC_NONE, | ||
1348 | BITFIELD(21, 1) /* index 645 */, | ||
1349 | TILE_OPC_ADDBS_U, TILE_OPC_NONE, | ||
1350 | BITFIELD(21, 1) /* index 648 */, | ||
1351 | TILE_OPC_ADDHS, TILE_OPC_NONE, | ||
1352 | BITFIELD(21, 1) /* index 651 */, | ||
1353 | TILE_OPC_SUBBS_U, TILE_OPC_NONE, | ||
1354 | BITFIELD(21, 1) /* index 654 */, | ||
1355 | TILE_OPC_SUBHS, TILE_OPC_NONE, | ||
1356 | BITFIELD(21, 1) /* index 657 */, | ||
1357 | TILE_OPC_PACKHS, TILE_OPC_NONE, | ||
1358 | BITFIELD(21, 1) /* index 660 */, | ||
1359 | TILE_OPC_PACKBS_U, TILE_OPC_NONE, | ||
1360 | BITFIELD(18, 4) /* index 663 */, | ||
1361 | TILE_OPC_NONE, TILE_OPC_ADDB_SN, TILE_OPC_ADDH_SN, TILE_OPC_ADD_SN, | ||
1362 | TILE_OPC_ADIFFB_U_SN, TILE_OPC_ADIFFH_SN, TILE_OPC_AND_SN, | ||
1363 | TILE_OPC_AVGB_U_SN, TILE_OPC_AVGH_SN, TILE_OPC_CRC32_32_SN, | ||
1364 | TILE_OPC_CRC32_8_SN, TILE_OPC_INTHB_SN, TILE_OPC_INTHH_SN, | ||
1365 | TILE_OPC_INTLB_SN, TILE_OPC_INTLH_SN, TILE_OPC_MAXB_U_SN, | ||
1366 | BITFIELD(18, 4) /* index 680 */, | ||
1367 | TILE_OPC_MAXH_SN, TILE_OPC_MINB_U_SN, TILE_OPC_MINH_SN, TILE_OPC_MNZB_SN, | ||
1368 | TILE_OPC_MNZH_SN, TILE_OPC_MNZ_SN, TILE_OPC_MULHHA_SS_SN, | ||
1369 | TILE_OPC_MULHHA_SU_SN, TILE_OPC_MULHHA_UU_SN, TILE_OPC_MULHHSA_UU_SN, | ||
1370 | TILE_OPC_MULHH_SS_SN, TILE_OPC_MULHH_SU_SN, TILE_OPC_MULHH_UU_SN, | ||
1371 | TILE_OPC_MULHLA_SS_SN, TILE_OPC_MULHLA_SU_SN, TILE_OPC_MULHLA_US_SN, | ||
1372 | BITFIELD(18, 4) /* index 697 */, | ||
1373 | TILE_OPC_MULHLA_UU_SN, TILE_OPC_MULHLSA_UU_SN, TILE_OPC_MULHL_SS_SN, | ||
1374 | TILE_OPC_MULHL_SU_SN, TILE_OPC_MULHL_US_SN, TILE_OPC_MULHL_UU_SN, | ||
1375 | TILE_OPC_MULLLA_SS_SN, TILE_OPC_MULLLA_SU_SN, TILE_OPC_MULLLA_UU_SN, | ||
1376 | TILE_OPC_MULLLSA_UU_SN, TILE_OPC_MULLL_SS_SN, TILE_OPC_MULLL_SU_SN, | ||
1377 | TILE_OPC_MULLL_UU_SN, TILE_OPC_MVNZ_SN, TILE_OPC_MVZ_SN, TILE_OPC_MZB_SN, | ||
1378 | BITFIELD(18, 4) /* index 714 */, | ||
1379 | TILE_OPC_MZH_SN, TILE_OPC_MZ_SN, TILE_OPC_NOR_SN, CHILD(731), | ||
1380 | TILE_OPC_PACKHB_SN, TILE_OPC_PACKLB_SN, TILE_OPC_RL_SN, TILE_OPC_S1A_SN, | ||
1381 | TILE_OPC_S2A_SN, TILE_OPC_S3A_SN, TILE_OPC_SADAB_U_SN, TILE_OPC_SADAH_SN, | ||
1382 | TILE_OPC_SADAH_U_SN, TILE_OPC_SADB_U_SN, TILE_OPC_SADH_SN, | ||
1383 | TILE_OPC_SADH_U_SN, | ||
1384 | BITFIELD(12, 2) /* index 731 */, | ||
1385 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, CHILD(736), | ||
1386 | BITFIELD(14, 2) /* index 736 */, | ||
1387 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, CHILD(741), | ||
1388 | BITFIELD(16, 2) /* index 741 */, | ||
1389 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_MOVE_SN, | ||
1390 | BITFIELD(18, 4) /* index 746 */, | ||
1391 | TILE_OPC_SEQB_SN, TILE_OPC_SEQH_SN, TILE_OPC_SEQ_SN, TILE_OPC_SHLB_SN, | ||
1392 | TILE_OPC_SHLH_SN, TILE_OPC_SHL_SN, TILE_OPC_SHRB_SN, TILE_OPC_SHRH_SN, | ||
1393 | TILE_OPC_SHR_SN, TILE_OPC_SLTB_SN, TILE_OPC_SLTB_U_SN, TILE_OPC_SLTEB_SN, | ||
1394 | TILE_OPC_SLTEB_U_SN, TILE_OPC_SLTEH_SN, TILE_OPC_SLTEH_U_SN, | ||
1395 | TILE_OPC_SLTE_SN, | ||
1396 | BITFIELD(18, 4) /* index 763 */, | ||
1397 | TILE_OPC_SLTE_U_SN, TILE_OPC_SLTH_SN, TILE_OPC_SLTH_U_SN, TILE_OPC_SLT_SN, | ||
1398 | TILE_OPC_SLT_U_SN, TILE_OPC_SNEB_SN, TILE_OPC_SNEH_SN, TILE_OPC_SNE_SN, | ||
1399 | TILE_OPC_SRAB_SN, TILE_OPC_SRAH_SN, TILE_OPC_SRA_SN, TILE_OPC_SUBB_SN, | ||
1400 | TILE_OPC_SUBH_SN, TILE_OPC_SUB_SN, TILE_OPC_XOR_SN, TILE_OPC_DWORD_ALIGN_SN, | ||
1401 | BITFIELD(18, 3) /* index 780 */, | ||
1402 | CHILD(789), CHILD(792), CHILD(795), CHILD(798), CHILD(801), CHILD(804), | ||
1403 | CHILD(807), CHILD(810), | ||
1404 | BITFIELD(21, 1) /* index 789 */, | ||
1405 | TILE_OPC_ADDS_SN, TILE_OPC_NONE, | ||
1406 | BITFIELD(21, 1) /* index 792 */, | ||
1407 | TILE_OPC_SUBS_SN, TILE_OPC_NONE, | ||
1408 | BITFIELD(21, 1) /* index 795 */, | ||
1409 | TILE_OPC_ADDBS_U_SN, TILE_OPC_NONE, | ||
1410 | BITFIELD(21, 1) /* index 798 */, | ||
1411 | TILE_OPC_ADDHS_SN, TILE_OPC_NONE, | ||
1412 | BITFIELD(21, 1) /* index 801 */, | ||
1413 | TILE_OPC_SUBBS_U_SN, TILE_OPC_NONE, | ||
1414 | BITFIELD(21, 1) /* index 804 */, | ||
1415 | TILE_OPC_SUBHS_SN, TILE_OPC_NONE, | ||
1416 | BITFIELD(21, 1) /* index 807 */, | ||
1417 | TILE_OPC_PACKHS_SN, TILE_OPC_NONE, | ||
1418 | BITFIELD(21, 1) /* index 810 */, | ||
1419 | TILE_OPC_PACKBS_U_SN, TILE_OPC_NONE, | ||
1420 | BITFIELD(6, 2) /* index 813 */, | ||
1421 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, CHILD(818), | ||
1422 | BITFIELD(8, 2) /* index 818 */, | ||
1423 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, CHILD(823), | ||
1424 | BITFIELD(10, 2) /* index 823 */, | ||
1425 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_MOVELI_SN, | ||
1426 | BITFIELD(6, 2) /* index 828 */, | ||
1427 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, CHILD(833), | ||
1428 | BITFIELD(8, 2) /* index 833 */, | ||
1429 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, CHILD(838), | ||
1430 | BITFIELD(10, 2) /* index 838 */, | ||
1431 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_MOVELI, | ||
1432 | BITFIELD(0, 2) /* index 843 */, | ||
1433 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(848), | ||
1434 | BITFIELD(2, 2) /* index 848 */, | ||
1435 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(853), | ||
1436 | BITFIELD(4, 2) /* index 853 */, | ||
1437 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(858), | ||
1438 | BITFIELD(6, 2) /* index 858 */, | ||
1439 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(863), | ||
1440 | BITFIELD(8, 2) /* index 863 */, | ||
1441 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(868), | ||
1442 | BITFIELD(10, 2) /* index 868 */, | ||
1443 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_INFOL, | ||
1444 | BITFIELD(20, 2) /* index 873 */, | ||
1445 | TILE_OPC_NONE, TILE_OPC_ADDIB, TILE_OPC_ADDIH, TILE_OPC_ADDI, | ||
1446 | BITFIELD(20, 2) /* index 878 */, | ||
1447 | TILE_OPC_MAXIB_U, TILE_OPC_MAXIH, TILE_OPC_MINIB_U, TILE_OPC_MINIH, | ||
1448 | BITFIELD(20, 2) /* index 883 */, | ||
1449 | CHILD(888), TILE_OPC_SEQIB, TILE_OPC_SEQIH, TILE_OPC_SEQI, | ||
1450 | BITFIELD(6, 2) /* index 888 */, | ||
1451 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(893), | ||
1452 | BITFIELD(8, 2) /* index 893 */, | ||
1453 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(898), | ||
1454 | BITFIELD(10, 2) /* index 898 */, | ||
1455 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_MOVEI, | ||
1456 | BITFIELD(20, 2) /* index 903 */, | ||
1457 | TILE_OPC_SLTIB, TILE_OPC_SLTIB_U, TILE_OPC_SLTIH, TILE_OPC_SLTIH_U, | ||
1458 | BITFIELD(20, 2) /* index 908 */, | ||
1459 | TILE_OPC_SLTI, TILE_OPC_SLTI_U, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1460 | BITFIELD(20, 2) /* index 913 */, | ||
1461 | TILE_OPC_NONE, TILE_OPC_ADDIB_SN, TILE_OPC_ADDIH_SN, TILE_OPC_ADDI_SN, | ||
1462 | BITFIELD(20, 2) /* index 918 */, | ||
1463 | TILE_OPC_MAXIB_U_SN, TILE_OPC_MAXIH_SN, TILE_OPC_MINIB_U_SN, | ||
1464 | TILE_OPC_MINIH_SN, | ||
1465 | BITFIELD(20, 2) /* index 923 */, | ||
1466 | CHILD(928), TILE_OPC_SEQIB_SN, TILE_OPC_SEQIH_SN, TILE_OPC_SEQI_SN, | ||
1467 | BITFIELD(6, 2) /* index 928 */, | ||
1468 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, CHILD(933), | ||
1469 | BITFIELD(8, 2) /* index 933 */, | ||
1470 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, CHILD(938), | ||
1471 | BITFIELD(10, 2) /* index 938 */, | ||
1472 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_MOVEI_SN, | ||
1473 | BITFIELD(20, 2) /* index 943 */, | ||
1474 | TILE_OPC_SLTIB_SN, TILE_OPC_SLTIB_U_SN, TILE_OPC_SLTIH_SN, | ||
1475 | TILE_OPC_SLTIH_U_SN, | ||
1476 | BITFIELD(20, 2) /* index 948 */, | ||
1477 | TILE_OPC_SLTI_SN, TILE_OPC_SLTI_U_SN, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1478 | BITFIELD(20, 2) /* index 953 */, | ||
1479 | TILE_OPC_NONE, CHILD(958), TILE_OPC_XORI, TILE_OPC_NONE, | ||
1480 | BITFIELD(0, 2) /* index 958 */, | ||
1481 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(963), | ||
1482 | BITFIELD(2, 2) /* index 963 */, | ||
1483 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(968), | ||
1484 | BITFIELD(4, 2) /* index 968 */, | ||
1485 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(973), | ||
1486 | BITFIELD(6, 2) /* index 973 */, | ||
1487 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(978), | ||
1488 | BITFIELD(8, 2) /* index 978 */, | ||
1489 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(983), | ||
1490 | BITFIELD(10, 2) /* index 983 */, | ||
1491 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_INFO, | ||
1492 | BITFIELD(20, 2) /* index 988 */, | ||
1493 | TILE_OPC_NONE, TILE_OPC_ANDI_SN, TILE_OPC_XORI_SN, TILE_OPC_NONE, | ||
1494 | BITFIELD(17, 5) /* index 993 */, | ||
1495 | TILE_OPC_NONE, TILE_OPC_RLI, TILE_OPC_SHLIB, TILE_OPC_SHLIH, TILE_OPC_SHLI, | ||
1496 | TILE_OPC_SHRIB, TILE_OPC_SHRIH, TILE_OPC_SHRI, TILE_OPC_SRAIB, | ||
1497 | TILE_OPC_SRAIH, TILE_OPC_SRAI, CHILD(1026), TILE_OPC_NONE, TILE_OPC_NONE, | ||
1498 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1499 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1500 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1501 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1502 | BITFIELD(12, 4) /* index 1026 */, | ||
1503 | TILE_OPC_NONE, CHILD(1043), CHILD(1046), CHILD(1049), CHILD(1052), | ||
1504 | CHILD(1055), CHILD(1058), CHILD(1061), CHILD(1064), CHILD(1067), | ||
1505 | CHILD(1070), CHILD(1073), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1506 | TILE_OPC_NONE, | ||
1507 | BITFIELD(16, 1) /* index 1043 */, | ||
1508 | TILE_OPC_BITX, TILE_OPC_NONE, | ||
1509 | BITFIELD(16, 1) /* index 1046 */, | ||
1510 | TILE_OPC_BYTEX, TILE_OPC_NONE, | ||
1511 | BITFIELD(16, 1) /* index 1049 */, | ||
1512 | TILE_OPC_CLZ, TILE_OPC_NONE, | ||
1513 | BITFIELD(16, 1) /* index 1052 */, | ||
1514 | TILE_OPC_CTZ, TILE_OPC_NONE, | ||
1515 | BITFIELD(16, 1) /* index 1055 */, | ||
1516 | TILE_OPC_FNOP, TILE_OPC_NONE, | ||
1517 | BITFIELD(16, 1) /* index 1058 */, | ||
1518 | TILE_OPC_NOP, TILE_OPC_NONE, | ||
1519 | BITFIELD(16, 1) /* index 1061 */, | ||
1520 | TILE_OPC_PCNT, TILE_OPC_NONE, | ||
1521 | BITFIELD(16, 1) /* index 1064 */, | ||
1522 | TILE_OPC_TBLIDXB0, TILE_OPC_NONE, | ||
1523 | BITFIELD(16, 1) /* index 1067 */, | ||
1524 | TILE_OPC_TBLIDXB1, TILE_OPC_NONE, | ||
1525 | BITFIELD(16, 1) /* index 1070 */, | ||
1526 | TILE_OPC_TBLIDXB2, TILE_OPC_NONE, | ||
1527 | BITFIELD(16, 1) /* index 1073 */, | ||
1528 | TILE_OPC_TBLIDXB3, TILE_OPC_NONE, | ||
1529 | BITFIELD(17, 5) /* index 1076 */, | ||
1530 | TILE_OPC_NONE, TILE_OPC_RLI_SN, TILE_OPC_SHLIB_SN, TILE_OPC_SHLIH_SN, | ||
1531 | TILE_OPC_SHLI_SN, TILE_OPC_SHRIB_SN, TILE_OPC_SHRIH_SN, TILE_OPC_SHRI_SN, | ||
1532 | TILE_OPC_SRAIB_SN, TILE_OPC_SRAIH_SN, TILE_OPC_SRAI_SN, CHILD(1109), | ||
1533 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1534 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1535 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1536 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1537 | BITFIELD(12, 4) /* index 1109 */, | ||
1538 | TILE_OPC_NONE, CHILD(1126), CHILD(1129), CHILD(1132), CHILD(1135), | ||
1539 | CHILD(1055), CHILD(1058), CHILD(1138), CHILD(1141), CHILD(1144), | ||
1540 | CHILD(1147), CHILD(1150), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1541 | TILE_OPC_NONE, | ||
1542 | BITFIELD(16, 1) /* index 1126 */, | ||
1543 | TILE_OPC_BITX_SN, TILE_OPC_NONE, | ||
1544 | BITFIELD(16, 1) /* index 1129 */, | ||
1545 | TILE_OPC_BYTEX_SN, TILE_OPC_NONE, | ||
1546 | BITFIELD(16, 1) /* index 1132 */, | ||
1547 | TILE_OPC_CLZ_SN, TILE_OPC_NONE, | ||
1548 | BITFIELD(16, 1) /* index 1135 */, | ||
1549 | TILE_OPC_CTZ_SN, TILE_OPC_NONE, | ||
1550 | BITFIELD(16, 1) /* index 1138 */, | ||
1551 | TILE_OPC_PCNT_SN, TILE_OPC_NONE, | ||
1552 | BITFIELD(16, 1) /* index 1141 */, | ||
1553 | TILE_OPC_TBLIDXB0_SN, TILE_OPC_NONE, | ||
1554 | BITFIELD(16, 1) /* index 1144 */, | ||
1555 | TILE_OPC_TBLIDXB1_SN, TILE_OPC_NONE, | ||
1556 | BITFIELD(16, 1) /* index 1147 */, | ||
1557 | TILE_OPC_TBLIDXB2_SN, TILE_OPC_NONE, | ||
1558 | BITFIELD(16, 1) /* index 1150 */, | ||
1559 | TILE_OPC_TBLIDXB3_SN, TILE_OPC_NONE, | ||
1560 | }; | ||
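Two details of decode_X0_fsm carry over to decode_X1_fsm below. First, the or/move chains at indices 581, 586 and 591 (with .sn twins at 731, 736 and 741) walk the six-bit second-source field two bits at a time and reach TILE_OPC_MOVE only when bits 12..17 are all ones, i.e. when that operand is register 63; the addli/moveli, ori/movei, auli/infol and andi/info chains apply the same trick to other fields. (That register 63 is the hardwired zero register, making "or rd, ra, zero" a move, is an inference from this encoding rather than something the tables state.) Second, decode_X1_fsm opens with BITFIELD(54, 9) where decode_X0_fsm opened with BITFIELD(22, 9): the same nine-bit opcode dispatch, read from the X1 slot's position in the upper half of the 64-bit bundle, which is why all of its field offsets are shifted correspondingly.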
1561 | |||
1562 | static const unsigned short decode_X1_fsm[1540] = | ||
1563 | { | ||
1564 | BITFIELD(54, 9) /* index 0 */, | ||
1565 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1566 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1567 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1568 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1569 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1570 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1571 | TILE_OPC_NONE, TILE_OPC_NONE, CHILD(513), CHILD(561), CHILD(594), | ||
1572 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1573 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1574 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(641), CHILD(689), | ||
1575 | CHILD(722), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1576 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1577 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(766), | ||
1578 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
1579 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
1580 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
1581 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
1582 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
1583 | CHILD(766), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
1584 | CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
1585 | CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
1586 | CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
1587 | CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
1588 | CHILD(781), CHILD(781), CHILD(781), CHILD(796), CHILD(796), CHILD(796), | ||
1589 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), | ||
1590 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), | ||
1591 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), | ||
1592 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), | ||
1593 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(826), | ||
1594 | CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), | ||
1595 | CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), | ||
1596 | CHILD(826), CHILD(826), CHILD(826), CHILD(843), CHILD(843), CHILD(843), | ||
1597 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1598 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1599 | CHILD(843), CHILD(860), CHILD(899), CHILD(923), CHILD(932), TILE_OPC_NONE, | ||
1600 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1601 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1602 | TILE_OPC_NONE, CHILD(941), CHILD(950), CHILD(974), CHILD(983), | ||
1603 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1604 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1605 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1606 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1607 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1608 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1609 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1610 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1611 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, CHILD(992), | ||
1612 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1613 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1614 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1615 | CHILD(1334), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1616 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1617 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1618 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1619 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1620 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1621 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1622 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1623 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1624 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_J, TILE_OPC_J, | ||
1625 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1626 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1627 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1628 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1629 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1630 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1631 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1632 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1633 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1634 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1635 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1636 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1637 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1638 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1639 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1640 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1641 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1642 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1643 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1644 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1645 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1646 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1647 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1648 | TILE_OPC_JAL, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1649 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1650 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1651 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1652 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1653 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1654 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1655 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1656 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1657 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1658 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1659 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1660 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1661 | BITFIELD(49, 5) /* index 513 */, | ||
1662 | TILE_OPC_NONE, TILE_OPC_ADDB, TILE_OPC_ADDH, TILE_OPC_ADD, TILE_OPC_AND, | ||
1663 | TILE_OPC_INTHB, TILE_OPC_INTHH, TILE_OPC_INTLB, TILE_OPC_INTLH, | ||
1664 | TILE_OPC_JALRP, TILE_OPC_JALR, TILE_OPC_JRP, TILE_OPC_JR, TILE_OPC_LNK, | ||
1665 | TILE_OPC_MAXB_U, TILE_OPC_MAXH, TILE_OPC_MINB_U, TILE_OPC_MINH, | ||
1666 | TILE_OPC_MNZB, TILE_OPC_MNZH, TILE_OPC_MNZ, TILE_OPC_MZB, TILE_OPC_MZH, | ||
1667 | TILE_OPC_MZ, TILE_OPC_NOR, CHILD(546), TILE_OPC_PACKHB, TILE_OPC_PACKLB, | ||
1668 | TILE_OPC_RL, TILE_OPC_S1A, TILE_OPC_S2A, TILE_OPC_S3A, | ||
1669 | BITFIELD(43, 2) /* index 546 */, | ||
1670 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(551), | ||
1671 | BITFIELD(45, 2) /* index 551 */, | ||
1672 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(556), | ||
1673 | BITFIELD(47, 2) /* index 556 */, | ||
1674 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_MOVE, | ||
1675 | BITFIELD(49, 5) /* index 561 */, | ||
1676 | TILE_OPC_SB, TILE_OPC_SEQB, TILE_OPC_SEQH, TILE_OPC_SEQ, TILE_OPC_SHLB, | ||
1677 | TILE_OPC_SHLH, TILE_OPC_SHL, TILE_OPC_SHRB, TILE_OPC_SHRH, TILE_OPC_SHR, | ||
1678 | TILE_OPC_SH, TILE_OPC_SLTB, TILE_OPC_SLTB_U, TILE_OPC_SLTEB, | ||
1679 | TILE_OPC_SLTEB_U, TILE_OPC_SLTEH, TILE_OPC_SLTEH_U, TILE_OPC_SLTE, | ||
1680 | TILE_OPC_SLTE_U, TILE_OPC_SLTH, TILE_OPC_SLTH_U, TILE_OPC_SLT, | ||
1681 | TILE_OPC_SLT_U, TILE_OPC_SNEB, TILE_OPC_SNEH, TILE_OPC_SNE, TILE_OPC_SRAB, | ||
1682 | TILE_OPC_SRAH, TILE_OPC_SRA, TILE_OPC_SUBB, TILE_OPC_SUBH, TILE_OPC_SUB, | ||
1683 | BITFIELD(49, 4) /* index 594 */, | ||
1684 | CHILD(611), CHILD(614), CHILD(617), CHILD(620), CHILD(623), CHILD(626), | ||
1685 | CHILD(629), CHILD(632), CHILD(635), CHILD(638), TILE_OPC_NONE, | ||
1686 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1687 | BITFIELD(53, 1) /* index 611 */, | ||
1688 | TILE_OPC_SW, TILE_OPC_NONE, | ||
1689 | BITFIELD(53, 1) /* index 614 */, | ||
1690 | TILE_OPC_XOR, TILE_OPC_NONE, | ||
1691 | BITFIELD(53, 1) /* index 617 */, | ||
1692 | TILE_OPC_ADDS, TILE_OPC_NONE, | ||
1693 | BITFIELD(53, 1) /* index 620 */, | ||
1694 | TILE_OPC_SUBS, TILE_OPC_NONE, | ||
1695 | BITFIELD(53, 1) /* index 623 */, | ||
1696 | TILE_OPC_ADDBS_U, TILE_OPC_NONE, | ||
1697 | BITFIELD(53, 1) /* index 626 */, | ||
1698 | TILE_OPC_ADDHS, TILE_OPC_NONE, | ||
1699 | BITFIELD(53, 1) /* index 629 */, | ||
1700 | TILE_OPC_SUBBS_U, TILE_OPC_NONE, | ||
1701 | BITFIELD(53, 1) /* index 632 */, | ||
1702 | TILE_OPC_SUBHS, TILE_OPC_NONE, | ||
1703 | BITFIELD(53, 1) /* index 635 */, | ||
1704 | TILE_OPC_PACKHS, TILE_OPC_NONE, | ||
1705 | BITFIELD(53, 1) /* index 638 */, | ||
1706 | TILE_OPC_PACKBS_U, TILE_OPC_NONE, | ||
1707 | BITFIELD(49, 5) /* index 641 */, | ||
1708 | TILE_OPC_NONE, TILE_OPC_ADDB_SN, TILE_OPC_ADDH_SN, TILE_OPC_ADD_SN, | ||
1709 | TILE_OPC_AND_SN, TILE_OPC_INTHB_SN, TILE_OPC_INTHH_SN, TILE_OPC_INTLB_SN, | ||
1710 | TILE_OPC_INTLH_SN, TILE_OPC_JALRP, TILE_OPC_JALR, TILE_OPC_JRP, TILE_OPC_JR, | ||
1711 | TILE_OPC_LNK_SN, TILE_OPC_MAXB_U_SN, TILE_OPC_MAXH_SN, TILE_OPC_MINB_U_SN, | ||
1712 | TILE_OPC_MINH_SN, TILE_OPC_MNZB_SN, TILE_OPC_MNZH_SN, TILE_OPC_MNZ_SN, | ||
1713 | TILE_OPC_MZB_SN, TILE_OPC_MZH_SN, TILE_OPC_MZ_SN, TILE_OPC_NOR_SN, | ||
1714 | CHILD(674), TILE_OPC_PACKHB_SN, TILE_OPC_PACKLB_SN, TILE_OPC_RL_SN, | ||
1715 | TILE_OPC_S1A_SN, TILE_OPC_S2A_SN, TILE_OPC_S3A_SN, | ||
1716 | BITFIELD(43, 2) /* index 674 */, | ||
1717 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, CHILD(679), | ||
1718 | BITFIELD(45, 2) /* index 679 */, | ||
1719 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, CHILD(684), | ||
1720 | BITFIELD(47, 2) /* index 684 */, | ||
1721 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_MOVE_SN, | ||
1722 | BITFIELD(49, 5) /* index 689 */, | ||
1723 | TILE_OPC_SB, TILE_OPC_SEQB_SN, TILE_OPC_SEQH_SN, TILE_OPC_SEQ_SN, | ||
1724 | TILE_OPC_SHLB_SN, TILE_OPC_SHLH_SN, TILE_OPC_SHL_SN, TILE_OPC_SHRB_SN, | ||
1725 | TILE_OPC_SHRH_SN, TILE_OPC_SHR_SN, TILE_OPC_SH, TILE_OPC_SLTB_SN, | ||
1726 | TILE_OPC_SLTB_U_SN, TILE_OPC_SLTEB_SN, TILE_OPC_SLTEB_U_SN, | ||
1727 | TILE_OPC_SLTEH_SN, TILE_OPC_SLTEH_U_SN, TILE_OPC_SLTE_SN, | ||
1728 | TILE_OPC_SLTE_U_SN, TILE_OPC_SLTH_SN, TILE_OPC_SLTH_U_SN, TILE_OPC_SLT_SN, | ||
1729 | TILE_OPC_SLT_U_SN, TILE_OPC_SNEB_SN, TILE_OPC_SNEH_SN, TILE_OPC_SNE_SN, | ||
1730 | TILE_OPC_SRAB_SN, TILE_OPC_SRAH_SN, TILE_OPC_SRA_SN, TILE_OPC_SUBB_SN, | ||
1731 | TILE_OPC_SUBH_SN, TILE_OPC_SUB_SN, | ||
1732 | BITFIELD(49, 4) /* index 722 */, | ||
1733 | CHILD(611), CHILD(739), CHILD(742), CHILD(745), CHILD(748), CHILD(751), | ||
1734 | CHILD(754), CHILD(757), CHILD(760), CHILD(763), TILE_OPC_NONE, | ||
1735 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1736 | BITFIELD(53, 1) /* index 739 */, | ||
1737 | TILE_OPC_XOR_SN, TILE_OPC_NONE, | ||
1738 | BITFIELD(53, 1) /* index 742 */, | ||
1739 | TILE_OPC_ADDS_SN, TILE_OPC_NONE, | ||
1740 | BITFIELD(53, 1) /* index 745 */, | ||
1741 | TILE_OPC_SUBS_SN, TILE_OPC_NONE, | ||
1742 | BITFIELD(53, 1) /* index 748 */, | ||
1743 | TILE_OPC_ADDBS_U_SN, TILE_OPC_NONE, | ||
1744 | BITFIELD(53, 1) /* index 751 */, | ||
1745 | TILE_OPC_ADDHS_SN, TILE_OPC_NONE, | ||
1746 | BITFIELD(53, 1) /* index 754 */, | ||
1747 | TILE_OPC_SUBBS_U_SN, TILE_OPC_NONE, | ||
1748 | BITFIELD(53, 1) /* index 757 */, | ||
1749 | TILE_OPC_SUBHS_SN, TILE_OPC_NONE, | ||
1750 | BITFIELD(53, 1) /* index 760 */, | ||
1751 | TILE_OPC_PACKHS_SN, TILE_OPC_NONE, | ||
1752 | BITFIELD(53, 1) /* index 763 */, | ||
1753 | TILE_OPC_PACKBS_U_SN, TILE_OPC_NONE, | ||
1754 | BITFIELD(37, 2) /* index 766 */, | ||
1755 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, CHILD(771), | ||
1756 | BITFIELD(39, 2) /* index 771 */, | ||
1757 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, CHILD(776), | ||
1758 | BITFIELD(41, 2) /* index 776 */, | ||
1759 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_MOVELI_SN, | ||
1760 | BITFIELD(37, 2) /* index 781 */, | ||
1761 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, CHILD(786), | ||
1762 | BITFIELD(39, 2) /* index 786 */, | ||
1763 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, CHILD(791), | ||
1764 | BITFIELD(41, 2) /* index 791 */, | ||
1765 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_MOVELI, | ||
1766 | BITFIELD(31, 2) /* index 796 */, | ||
1767 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(801), | ||
1768 | BITFIELD(33, 2) /* index 801 */, | ||
1769 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(806), | ||
1770 | BITFIELD(35, 2) /* index 806 */, | ||
1771 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(811), | ||
1772 | BITFIELD(37, 2) /* index 811 */, | ||
1773 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(816), | ||
1774 | BITFIELD(39, 2) /* index 816 */, | ||
1775 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(821), | ||
1776 | BITFIELD(41, 2) /* index 821 */, | ||
1777 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_INFOL, | ||
1778 | BITFIELD(31, 4) /* index 826 */, | ||
1779 | TILE_OPC_BZ, TILE_OPC_BZT, TILE_OPC_BNZ, TILE_OPC_BNZT, TILE_OPC_BGZ, | ||
1780 | TILE_OPC_BGZT, TILE_OPC_BGEZ, TILE_OPC_BGEZT, TILE_OPC_BLZ, TILE_OPC_BLZT, | ||
1781 | TILE_OPC_BLEZ, TILE_OPC_BLEZT, TILE_OPC_BBS, TILE_OPC_BBST, TILE_OPC_BBNS, | ||
1782 | TILE_OPC_BBNST, | ||
1783 | BITFIELD(31, 4) /* index 843 */, | ||
1784 | TILE_OPC_BZ_SN, TILE_OPC_BZT_SN, TILE_OPC_BNZ_SN, TILE_OPC_BNZT_SN, | ||
1785 | TILE_OPC_BGZ_SN, TILE_OPC_BGZT_SN, TILE_OPC_BGEZ_SN, TILE_OPC_BGEZT_SN, | ||
1786 | TILE_OPC_BLZ_SN, TILE_OPC_BLZT_SN, TILE_OPC_BLEZ_SN, TILE_OPC_BLEZT_SN, | ||
1787 | TILE_OPC_BBS_SN, TILE_OPC_BBST_SN, TILE_OPC_BBNS_SN, TILE_OPC_BBNST_SN, | ||
1788 | BITFIELD(51, 3) /* index 860 */, | ||
1789 | TILE_OPC_NONE, TILE_OPC_ADDIB, TILE_OPC_ADDIH, TILE_OPC_ADDI, CHILD(869), | ||
1790 | TILE_OPC_MAXIB_U, TILE_OPC_MAXIH, TILE_OPC_MFSPR, | ||
1791 | BITFIELD(31, 2) /* index 869 */, | ||
1792 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(874), | ||
1793 | BITFIELD(33, 2) /* index 874 */, | ||
1794 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(879), | ||
1795 | BITFIELD(35, 2) /* index 879 */, | ||
1796 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(884), | ||
1797 | BITFIELD(37, 2) /* index 884 */, | ||
1798 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(889), | ||
1799 | BITFIELD(39, 2) /* index 889 */, | ||
1800 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(894), | ||
1801 | BITFIELD(41, 2) /* index 894 */, | ||
1802 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_INFO, | ||
1803 | BITFIELD(51, 3) /* index 899 */, | ||
1804 | TILE_OPC_MINIB_U, TILE_OPC_MINIH, TILE_OPC_MTSPR, CHILD(908), | ||
1805 | TILE_OPC_SEQIB, TILE_OPC_SEQIH, TILE_OPC_SEQI, TILE_OPC_SLTIB, | ||
1806 | BITFIELD(37, 2) /* index 908 */, | ||
1807 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(913), | ||
1808 | BITFIELD(39, 2) /* index 913 */, | ||
1809 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(918), | ||
1810 | BITFIELD(41, 2) /* index 918 */, | ||
1811 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_MOVEI, | ||
1812 | BITFIELD(51, 3) /* index 923 */, | ||
1813 | TILE_OPC_SLTIB_U, TILE_OPC_SLTIH, TILE_OPC_SLTIH_U, TILE_OPC_SLTI, | ||
1814 | TILE_OPC_SLTI_U, TILE_OPC_XORI, TILE_OPC_LBADD, TILE_OPC_LBADD_U, | ||
1815 | BITFIELD(51, 3) /* index 932 */, | ||
1816 | TILE_OPC_LHADD, TILE_OPC_LHADD_U, TILE_OPC_LWADD, TILE_OPC_LWADD_NA, | ||
1817 | TILE_OPC_SBADD, TILE_OPC_SHADD, TILE_OPC_SWADD, TILE_OPC_NONE, | ||
1818 | BITFIELD(51, 3) /* index 941 */, | ||
1819 | TILE_OPC_NONE, TILE_OPC_ADDIB_SN, TILE_OPC_ADDIH_SN, TILE_OPC_ADDI_SN, | ||
1820 | TILE_OPC_ANDI_SN, TILE_OPC_MAXIB_U_SN, TILE_OPC_MAXIH_SN, TILE_OPC_MFSPR, | ||
1821 | BITFIELD(51, 3) /* index 950 */, | ||
1822 | TILE_OPC_MINIB_U_SN, TILE_OPC_MINIH_SN, TILE_OPC_MTSPR, CHILD(959), | ||
1823 | TILE_OPC_SEQIB_SN, TILE_OPC_SEQIH_SN, TILE_OPC_SEQI_SN, TILE_OPC_SLTIB_SN, | ||
1824 | BITFIELD(37, 2) /* index 959 */, | ||
1825 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, CHILD(964), | ||
1826 | BITFIELD(39, 2) /* index 964 */, | ||
1827 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, CHILD(969), | ||
1828 | BITFIELD(41, 2) /* index 969 */, | ||
1829 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_MOVEI_SN, | ||
1830 | BITFIELD(51, 3) /* index 974 */, | ||
1831 | TILE_OPC_SLTIB_U_SN, TILE_OPC_SLTIH_SN, TILE_OPC_SLTIH_U_SN, | ||
1832 | TILE_OPC_SLTI_SN, TILE_OPC_SLTI_U_SN, TILE_OPC_XORI_SN, TILE_OPC_LBADD_SN, | ||
1833 | TILE_OPC_LBADD_U_SN, | ||
1834 | BITFIELD(51, 3) /* index 983 */, | ||
1835 | TILE_OPC_LHADD_SN, TILE_OPC_LHADD_U_SN, TILE_OPC_LWADD_SN, | ||
1836 | TILE_OPC_LWADD_NA_SN, TILE_OPC_SBADD, TILE_OPC_SHADD, TILE_OPC_SWADD, | ||
1837 | TILE_OPC_NONE, | ||
1838 | BITFIELD(46, 7) /* index 992 */, | ||
1839 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(1121), | ||
1840 | CHILD(1121), CHILD(1121), CHILD(1121), CHILD(1124), CHILD(1124), | ||
1841 | CHILD(1124), CHILD(1124), CHILD(1127), CHILD(1127), CHILD(1127), | ||
1842 | CHILD(1127), CHILD(1130), CHILD(1130), CHILD(1130), CHILD(1130), | ||
1843 | CHILD(1133), CHILD(1133), CHILD(1133), CHILD(1133), CHILD(1136), | ||
1844 | CHILD(1136), CHILD(1136), CHILD(1136), CHILD(1139), CHILD(1139), | ||
1845 | CHILD(1139), CHILD(1139), CHILD(1142), CHILD(1142), CHILD(1142), | ||
1846 | CHILD(1142), CHILD(1145), CHILD(1145), CHILD(1145), CHILD(1145), | ||
1847 | CHILD(1148), CHILD(1148), CHILD(1148), CHILD(1148), CHILD(1151), | ||
1848 | CHILD(1242), CHILD(1290), CHILD(1323), TILE_OPC_NONE, TILE_OPC_NONE, | ||
1849 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1850 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1851 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1852 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1853 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1854 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1855 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1856 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1857 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1858 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1859 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1860 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1861 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1862 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1863 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1864 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1865 | BITFIELD(53, 1) /* index 1121 */, | ||
1866 | TILE_OPC_RLI, TILE_OPC_NONE, | ||
1867 | BITFIELD(53, 1) /* index 1124 */, | ||
1868 | TILE_OPC_SHLIB, TILE_OPC_NONE, | ||
1869 | BITFIELD(53, 1) /* index 1127 */, | ||
1870 | TILE_OPC_SHLIH, TILE_OPC_NONE, | ||
1871 | BITFIELD(53, 1) /* index 1130 */, | ||
1872 | TILE_OPC_SHLI, TILE_OPC_NONE, | ||
1873 | BITFIELD(53, 1) /* index 1133 */, | ||
1874 | TILE_OPC_SHRIB, TILE_OPC_NONE, | ||
1875 | BITFIELD(53, 1) /* index 1136 */, | ||
1876 | TILE_OPC_SHRIH, TILE_OPC_NONE, | ||
1877 | BITFIELD(53, 1) /* index 1139 */, | ||
1878 | TILE_OPC_SHRI, TILE_OPC_NONE, | ||
1879 | BITFIELD(53, 1) /* index 1142 */, | ||
1880 | TILE_OPC_SRAIB, TILE_OPC_NONE, | ||
1881 | BITFIELD(53, 1) /* index 1145 */, | ||
1882 | TILE_OPC_SRAIH, TILE_OPC_NONE, | ||
1883 | BITFIELD(53, 1) /* index 1148 */, | ||
1884 | TILE_OPC_SRAI, TILE_OPC_NONE, | ||
1885 | BITFIELD(43, 3) /* index 1151 */, | ||
1886 | TILE_OPC_NONE, CHILD(1160), CHILD(1163), CHILD(1166), CHILD(1169), | ||
1887 | CHILD(1172), CHILD(1175), CHILD(1178), | ||
1888 | BITFIELD(53, 1) /* index 1160 */, | ||
1889 | TILE_OPC_DRAIN, TILE_OPC_NONE, | ||
1890 | BITFIELD(53, 1) /* index 1163 */, | ||
1891 | TILE_OPC_DTLBPR, TILE_OPC_NONE, | ||
1892 | BITFIELD(53, 1) /* index 1166 */, | ||
1893 | TILE_OPC_FINV, TILE_OPC_NONE, | ||
1894 | BITFIELD(53, 1) /* index 1169 */, | ||
1895 | TILE_OPC_FLUSH, TILE_OPC_NONE, | ||
1896 | BITFIELD(53, 1) /* index 1172 */, | ||
1897 | TILE_OPC_FNOP, TILE_OPC_NONE, | ||
1898 | BITFIELD(53, 1) /* index 1175 */, | ||
1899 | TILE_OPC_ICOH, TILE_OPC_NONE, | ||
1900 | BITFIELD(31, 2) /* index 1178 */, | ||
1901 | CHILD(1183), CHILD(1211), CHILD(1239), CHILD(1239), | ||
1902 | BITFIELD(53, 1) /* index 1183 */, | ||
1903 | CHILD(1186), TILE_OPC_NONE, | ||
1904 | BITFIELD(33, 2) /* index 1186 */, | ||
1905 | TILE_OPC_ILL, TILE_OPC_ILL, TILE_OPC_ILL, CHILD(1191), | ||
1906 | BITFIELD(35, 2) /* index 1191 */, | ||
1907 | TILE_OPC_ILL, CHILD(1196), TILE_OPC_ILL, TILE_OPC_ILL, | ||
1908 | BITFIELD(37, 2) /* index 1196 */, | ||
1909 | TILE_OPC_ILL, CHILD(1201), TILE_OPC_ILL, TILE_OPC_ILL, | ||
1910 | BITFIELD(39, 2) /* index 1201 */, | ||
1911 | TILE_OPC_ILL, CHILD(1206), TILE_OPC_ILL, TILE_OPC_ILL, | ||
1912 | BITFIELD(41, 2) /* index 1206 */, | ||
1913 | TILE_OPC_ILL, TILE_OPC_ILL, TILE_OPC_BPT, TILE_OPC_ILL, | ||
1914 | BITFIELD(53, 1) /* index 1211 */, | ||
1915 | CHILD(1214), TILE_OPC_NONE, | ||
1916 | BITFIELD(33, 2) /* index 1214 */, | ||
1917 | TILE_OPC_ILL, TILE_OPC_ILL, TILE_OPC_ILL, CHILD(1219), | ||
1918 | BITFIELD(35, 2) /* index 1219 */, | ||
1919 | TILE_OPC_ILL, CHILD(1224), TILE_OPC_ILL, TILE_OPC_ILL, | ||
1920 | BITFIELD(37, 2) /* index 1224 */, | ||
1921 | TILE_OPC_ILL, CHILD(1229), TILE_OPC_ILL, TILE_OPC_ILL, | ||
1922 | BITFIELD(39, 2) /* index 1229 */, | ||
1923 | TILE_OPC_ILL, CHILD(1234), TILE_OPC_ILL, TILE_OPC_ILL, | ||
1924 | BITFIELD(41, 2) /* index 1234 */, | ||
1925 | TILE_OPC_ILL, TILE_OPC_ILL, TILE_OPC_RAISE, TILE_OPC_ILL, | ||
1926 | BITFIELD(53, 1) /* index 1239 */, | ||
1927 | TILE_OPC_ILL, TILE_OPC_NONE, | ||
1928 | BITFIELD(43, 3) /* index 1242 */, | ||
1929 | CHILD(1251), CHILD(1254), CHILD(1257), CHILD(1275), CHILD(1278), | ||
1930 | CHILD(1281), CHILD(1284), CHILD(1287), | ||
1931 | BITFIELD(53, 1) /* index 1251 */, | ||
1932 | TILE_OPC_INV, TILE_OPC_NONE, | ||
1933 | BITFIELD(53, 1) /* index 1254 */, | ||
1934 | TILE_OPC_IRET, TILE_OPC_NONE, | ||
1935 | BITFIELD(53, 1) /* index 1257 */, | ||
1936 | CHILD(1260), TILE_OPC_NONE, | ||
1937 | BITFIELD(31, 2) /* index 1260 */, | ||
1938 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, CHILD(1265), | ||
1939 | BITFIELD(33, 2) /* index 1265 */, | ||
1940 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, CHILD(1270), | ||
1941 | BITFIELD(35, 2) /* index 1270 */, | ||
1942 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_PREFETCH, | ||
1943 | BITFIELD(53, 1) /* index 1275 */, | ||
1944 | TILE_OPC_LB_U, TILE_OPC_NONE, | ||
1945 | BITFIELD(53, 1) /* index 1278 */, | ||
1946 | TILE_OPC_LH, TILE_OPC_NONE, | ||
1947 | BITFIELD(53, 1) /* index 1281 */, | ||
1948 | TILE_OPC_LH_U, TILE_OPC_NONE, | ||
1949 | BITFIELD(53, 1) /* index 1284 */, | ||
1950 | TILE_OPC_LW, TILE_OPC_NONE, | ||
1951 | BITFIELD(53, 1) /* index 1287 */, | ||
1952 | TILE_OPC_MF, TILE_OPC_NONE, | ||
1953 | BITFIELD(43, 3) /* index 1290 */, | ||
1954 | CHILD(1299), CHILD(1302), CHILD(1305), CHILD(1308), CHILD(1311), | ||
1955 | CHILD(1314), CHILD(1317), CHILD(1320), | ||
1956 | BITFIELD(53, 1) /* index 1299 */, | ||
1957 | TILE_OPC_NAP, TILE_OPC_NONE, | ||
1958 | BITFIELD(53, 1) /* index 1302 */, | ||
1959 | TILE_OPC_NOP, TILE_OPC_NONE, | ||
1960 | BITFIELD(53, 1) /* index 1305 */, | ||
1961 | TILE_OPC_SWINT0, TILE_OPC_NONE, | ||
1962 | BITFIELD(53, 1) /* index 1308 */, | ||
1963 | TILE_OPC_SWINT1, TILE_OPC_NONE, | ||
1964 | BITFIELD(53, 1) /* index 1311 */, | ||
1965 | TILE_OPC_SWINT2, TILE_OPC_NONE, | ||
1966 | BITFIELD(53, 1) /* index 1314 */, | ||
1967 | TILE_OPC_SWINT3, TILE_OPC_NONE, | ||
1968 | BITFIELD(53, 1) /* index 1317 */, | ||
1969 | TILE_OPC_TNS, TILE_OPC_NONE, | ||
1970 | BITFIELD(53, 1) /* index 1320 */, | ||
1971 | TILE_OPC_WH64, TILE_OPC_NONE, | ||
1972 | BITFIELD(43, 2) /* index 1323 */, | ||
1973 | CHILD(1328), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1974 | BITFIELD(45, 1) /* index 1328 */, | ||
1975 | CHILD(1331), TILE_OPC_NONE, | ||
1976 | BITFIELD(53, 1) /* index 1331 */, | ||
1977 | TILE_OPC_LW_NA, TILE_OPC_NONE, | ||
1978 | BITFIELD(46, 7) /* index 1334 */, | ||
1979 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(1463), | ||
1980 | CHILD(1463), CHILD(1463), CHILD(1463), CHILD(1466), CHILD(1466), | ||
1981 | CHILD(1466), CHILD(1466), CHILD(1469), CHILD(1469), CHILD(1469), | ||
1982 | CHILD(1469), CHILD(1472), CHILD(1472), CHILD(1472), CHILD(1472), | ||
1983 | CHILD(1475), CHILD(1475), CHILD(1475), CHILD(1475), CHILD(1478), | ||
1984 | CHILD(1478), CHILD(1478), CHILD(1478), CHILD(1481), CHILD(1481), | ||
1985 | CHILD(1481), CHILD(1481), CHILD(1484), CHILD(1484), CHILD(1484), | ||
1986 | CHILD(1484), CHILD(1487), CHILD(1487), CHILD(1487), CHILD(1487), | ||
1987 | CHILD(1490), CHILD(1490), CHILD(1490), CHILD(1490), CHILD(1151), | ||
1988 | CHILD(1493), CHILD(1517), CHILD(1529), TILE_OPC_NONE, TILE_OPC_NONE, | ||
1989 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1990 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1991 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1992 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1993 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1994 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1995 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1996 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1997 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1998 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1999 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2000 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2001 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2002 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2003 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2004 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2005 | BITFIELD(53, 1) /* index 1463 */, | ||
2006 | TILE_OPC_RLI_SN, TILE_OPC_NONE, | ||
2007 | BITFIELD(53, 1) /* index 1466 */, | ||
2008 | TILE_OPC_SHLIB_SN, TILE_OPC_NONE, | ||
2009 | BITFIELD(53, 1) /* index 1469 */, | ||
2010 | TILE_OPC_SHLIH_SN, TILE_OPC_NONE, | ||
2011 | BITFIELD(53, 1) /* index 1472 */, | ||
2012 | TILE_OPC_SHLI_SN, TILE_OPC_NONE, | ||
2013 | BITFIELD(53, 1) /* index 1475 */, | ||
2014 | TILE_OPC_SHRIB_SN, TILE_OPC_NONE, | ||
2015 | BITFIELD(53, 1) /* index 1478 */, | ||
2016 | TILE_OPC_SHRIH_SN, TILE_OPC_NONE, | ||
2017 | BITFIELD(53, 1) /* index 1481 */, | ||
2018 | TILE_OPC_SHRI_SN, TILE_OPC_NONE, | ||
2019 | BITFIELD(53, 1) /* index 1484 */, | ||
2020 | TILE_OPC_SRAIB_SN, TILE_OPC_NONE, | ||
2021 | BITFIELD(53, 1) /* index 1487 */, | ||
2022 | TILE_OPC_SRAIH_SN, TILE_OPC_NONE, | ||
2023 | BITFIELD(53, 1) /* index 1490 */, | ||
2024 | TILE_OPC_SRAI_SN, TILE_OPC_NONE, | ||
2025 | BITFIELD(43, 3) /* index 1493 */, | ||
2026 | CHILD(1251), CHILD(1254), CHILD(1502), CHILD(1505), CHILD(1508), | ||
2027 | CHILD(1511), CHILD(1514), CHILD(1287), | ||
2028 | BITFIELD(53, 1) /* index 1502 */, | ||
2029 | TILE_OPC_LB_SN, TILE_OPC_NONE, | ||
2030 | BITFIELD(53, 1) /* index 1505 */, | ||
2031 | TILE_OPC_LB_U_SN, TILE_OPC_NONE, | ||
2032 | BITFIELD(53, 1) /* index 1508 */, | ||
2033 | TILE_OPC_LH_SN, TILE_OPC_NONE, | ||
2034 | BITFIELD(53, 1) /* index 1511 */, | ||
2035 | TILE_OPC_LH_U_SN, TILE_OPC_NONE, | ||
2036 | BITFIELD(53, 1) /* index 1514 */, | ||
2037 | TILE_OPC_LW_SN, TILE_OPC_NONE, | ||
2038 | BITFIELD(43, 3) /* index 1517 */, | ||
2039 | CHILD(1299), CHILD(1302), CHILD(1305), CHILD(1308), CHILD(1311), | ||
2040 | CHILD(1314), CHILD(1526), CHILD(1320), | ||
2041 | BITFIELD(53, 1) /* index 1526 */, | ||
2042 | TILE_OPC_TNS_SN, TILE_OPC_NONE, | ||
2043 | BITFIELD(43, 2) /* index 1529 */, | ||
2044 | CHILD(1534), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2045 | BITFIELD(45, 1) /* index 1534 */, | ||
2046 | CHILD(1537), TILE_OPC_NONE, | ||
2047 | BITFIELD(53, 1) /* index 1537 */, | ||
2048 | TILE_OPC_LW_NA_SN, TILE_OPC_NONE, | ||
2049 | }; | ||
2050 | |||
2051 | static const unsigned short decode_Y0_fsm[168] = | ||
2052 | { | ||
2053 | BITFIELD(27, 4) /* index 0 */, | ||
2054 | TILE_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52), | ||
2055 | CHILD(57), CHILD(62), CHILD(67), TILE_OPC_ADDI, CHILD(72), CHILD(102), | ||
2056 | TILE_OPC_SEQI, CHILD(117), TILE_OPC_SLTI, TILE_OPC_SLTI_U, | ||
2057 | BITFIELD(18, 2) /* index 17 */, | ||
2058 | TILE_OPC_ADD, TILE_OPC_S1A, TILE_OPC_S2A, TILE_OPC_SUB, | ||
2059 | BITFIELD(18, 2) /* index 22 */, | ||
2060 | TILE_OPC_MNZ, TILE_OPC_MVNZ, TILE_OPC_MVZ, TILE_OPC_MZ, | ||
2061 | BITFIELD(18, 2) /* index 27 */, | ||
2062 | TILE_OPC_AND, TILE_OPC_NOR, CHILD(32), TILE_OPC_XOR, | ||
2063 | BITFIELD(12, 2) /* index 32 */, | ||
2064 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(37), | ||
2065 | BITFIELD(14, 2) /* index 37 */, | ||
2066 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(42), | ||
2067 | BITFIELD(16, 2) /* index 42 */, | ||
2068 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_MOVE, | ||
2069 | BITFIELD(18, 2) /* index 47 */, | ||
2070 | TILE_OPC_RL, TILE_OPC_SHL, TILE_OPC_SHR, TILE_OPC_SRA, | ||
2071 | BITFIELD(18, 2) /* index 52 */, | ||
2072 | TILE_OPC_SLTE, TILE_OPC_SLTE_U, TILE_OPC_SLT, TILE_OPC_SLT_U, | ||
2073 | BITFIELD(18, 2) /* index 57 */, | ||
2074 | TILE_OPC_MULHLSA_UU, TILE_OPC_S3A, TILE_OPC_SEQ, TILE_OPC_SNE, | ||
2075 | BITFIELD(18, 2) /* index 62 */, | ||
2076 | TILE_OPC_MULHH_SS, TILE_OPC_MULHH_UU, TILE_OPC_MULLL_SS, TILE_OPC_MULLL_UU, | ||
2077 | BITFIELD(18, 2) /* index 67 */, | ||
2078 | TILE_OPC_MULHHA_SS, TILE_OPC_MULHHA_UU, TILE_OPC_MULLLA_SS, | ||
2079 | TILE_OPC_MULLLA_UU, | ||
2080 | BITFIELD(0, 2) /* index 72 */, | ||
2081 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(77), | ||
2082 | BITFIELD(2, 2) /* index 77 */, | ||
2083 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(82), | ||
2084 | BITFIELD(4, 2) /* index 82 */, | ||
2085 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(87), | ||
2086 | BITFIELD(6, 2) /* index 87 */, | ||
2087 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(92), | ||
2088 | BITFIELD(8, 2) /* index 92 */, | ||
2089 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(97), | ||
2090 | BITFIELD(10, 2) /* index 97 */, | ||
2091 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_INFO, | ||
2092 | BITFIELD(6, 2) /* index 102 */, | ||
2093 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(107), | ||
2094 | BITFIELD(8, 2) /* index 107 */, | ||
2095 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(112), | ||
2096 | BITFIELD(10, 2) /* index 112 */, | ||
2097 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_MOVEI, | ||
2098 | BITFIELD(15, 5) /* index 117 */, | ||
2099 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_RLI, | ||
2100 | TILE_OPC_RLI, TILE_OPC_RLI, TILE_OPC_RLI, TILE_OPC_SHLI, TILE_OPC_SHLI, | ||
2101 | TILE_OPC_SHLI, TILE_OPC_SHLI, TILE_OPC_SHRI, TILE_OPC_SHRI, TILE_OPC_SHRI, | ||
2102 | TILE_OPC_SHRI, TILE_OPC_SRAI, TILE_OPC_SRAI, TILE_OPC_SRAI, TILE_OPC_SRAI, | ||
2103 | CHILD(150), CHILD(159), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2104 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2105 | TILE_OPC_NONE, TILE_OPC_NONE, | ||
2106 | BITFIELD(12, 3) /* index 150 */, | ||
2107 | TILE_OPC_NONE, TILE_OPC_BITX, TILE_OPC_BYTEX, TILE_OPC_CLZ, TILE_OPC_CTZ, | ||
2108 | TILE_OPC_FNOP, TILE_OPC_NOP, TILE_OPC_PCNT, | ||
2109 | BITFIELD(12, 3) /* index 159 */, | ||
2110 | TILE_OPC_TBLIDXB0, TILE_OPC_TBLIDXB1, TILE_OPC_TBLIDXB2, TILE_OPC_TBLIDXB3, | ||
2111 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2112 | }; | ||
2113 | |||
2114 | static const unsigned short decode_Y1_fsm[140] = | ||
2115 | { | ||
2116 | BITFIELD(59, 4) /* index 0 */, | ||
2117 | TILE_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52), | ||
2118 | CHILD(57), TILE_OPC_ADDI, CHILD(62), CHILD(92), TILE_OPC_SEQI, CHILD(107), | ||
2119 | TILE_OPC_SLTI, TILE_OPC_SLTI_U, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2120 | BITFIELD(49, 2) /* index 17 */, | ||
2121 | TILE_OPC_ADD, TILE_OPC_S1A, TILE_OPC_S2A, TILE_OPC_SUB, | ||
2122 | BITFIELD(49, 2) /* index 22 */, | ||
2123 | TILE_OPC_NONE, TILE_OPC_MNZ, TILE_OPC_MZ, TILE_OPC_NONE, | ||
2124 | BITFIELD(49, 2) /* index 27 */, | ||
2125 | TILE_OPC_AND, TILE_OPC_NOR, CHILD(32), TILE_OPC_XOR, | ||
2126 | BITFIELD(43, 2) /* index 32 */, | ||
2127 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(37), | ||
2128 | BITFIELD(45, 2) /* index 37 */, | ||
2129 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(42), | ||
2130 | BITFIELD(47, 2) /* index 42 */, | ||
2131 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_MOVE, | ||
2132 | BITFIELD(49, 2) /* index 47 */, | ||
2133 | TILE_OPC_RL, TILE_OPC_SHL, TILE_OPC_SHR, TILE_OPC_SRA, | ||
2134 | BITFIELD(49, 2) /* index 52 */, | ||
2135 | TILE_OPC_SLTE, TILE_OPC_SLTE_U, TILE_OPC_SLT, TILE_OPC_SLT_U, | ||
2136 | BITFIELD(49, 2) /* index 57 */, | ||
2137 | TILE_OPC_NONE, TILE_OPC_S3A, TILE_OPC_SEQ, TILE_OPC_SNE, | ||
2138 | BITFIELD(31, 2) /* index 62 */, | ||
2139 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(67), | ||
2140 | BITFIELD(33, 2) /* index 67 */, | ||
2141 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(72), | ||
2142 | BITFIELD(35, 2) /* index 72 */, | ||
2143 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(77), | ||
2144 | BITFIELD(37, 2) /* index 77 */, | ||
2145 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(82), | ||
2146 | BITFIELD(39, 2) /* index 82 */, | ||
2147 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(87), | ||
2148 | BITFIELD(41, 2) /* index 87 */, | ||
2149 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_INFO, | ||
2150 | BITFIELD(37, 2) /* index 92 */, | ||
2151 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(97), | ||
2152 | BITFIELD(39, 2) /* index 97 */, | ||
2153 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(102), | ||
2154 | BITFIELD(41, 2) /* index 102 */, | ||
2155 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_MOVEI, | ||
2156 | BITFIELD(48, 3) /* index 107 */, | ||
2157 | TILE_OPC_NONE, TILE_OPC_RLI, TILE_OPC_SHLI, TILE_OPC_SHRI, TILE_OPC_SRAI, | ||
2158 | CHILD(116), TILE_OPC_NONE, TILE_OPC_NONE, | ||
2159 | BITFIELD(43, 3) /* index 116 */, | ||
2160 | TILE_OPC_NONE, CHILD(125), CHILD(130), CHILD(135), TILE_OPC_NONE, | ||
2161 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2162 | BITFIELD(46, 2) /* index 125 */, | ||
2163 | TILE_OPC_FNOP, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2164 | BITFIELD(46, 2) /* index 130 */, | ||
2165 | TILE_OPC_ILL, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2166 | BITFIELD(46, 2) /* index 135 */, | ||
2167 | TILE_OPC_NOP, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2168 | }; | ||
2169 | |||
2170 | static const unsigned short decode_Y2_fsm[24] = | ||
2171 | { | ||
2172 | BITFIELD(56, 3) /* index 0 */, | ||
2173 | CHILD(9), TILE_OPC_LB_U, TILE_OPC_LH, TILE_OPC_LH_U, TILE_OPC_LW, | ||
2174 | TILE_OPC_SB, TILE_OPC_SH, TILE_OPC_SW, | ||
2175 | BITFIELD(20, 2) /* index 9 */, | ||
2176 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, CHILD(14), | ||
2177 | BITFIELD(22, 2) /* index 14 */, | ||
2178 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, CHILD(19), | ||
2179 | BITFIELD(24, 2) /* index 19 */, | ||
2180 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_PREFETCH, | ||
2181 | }; | ||
2182 | |||
2183 | #undef BITFIELD | ||
2184 | #undef CHILD | ||
2185 | const unsigned short * const | ||
2186 | tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS] = | ||
2187 | { | ||
2188 | decode_X0_fsm, | ||
2189 | decode_X1_fsm, | ||
2190 | decode_Y0_fsm, | ||
2191 | decode_Y1_fsm, | ||
2192 | decode_Y2_fsm | ||
2193 | }; | ||
2194 | const struct tile_operand tile_operands[43] = | ||
2195 | { | ||
2196 | { | ||
2197 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_IMM8_X0), | ||
2198 | 8, 1, 0, 0, 0, 0, | ||
2199 | create_Imm8_X0, get_Imm8_X0 | ||
2200 | }, | ||
2201 | { | ||
2202 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_IMM8_X1), | ||
2203 | 8, 1, 0, 0, 0, 0, | ||
2204 | create_Imm8_X1, get_Imm8_X1 | ||
2205 | }, | ||
2206 | { | ||
2207 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_IMM8_Y0), | ||
2208 | 8, 1, 0, 0, 0, 0, | ||
2209 | create_Imm8_Y0, get_Imm8_Y0 | ||
2210 | }, | ||
2211 | { | ||
2212 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_IMM8_Y1), | ||
2213 | 8, 1, 0, 0, 0, 0, | ||
2214 | create_Imm8_Y1, get_Imm8_Y1 | ||
2215 | }, | ||
2216 | { | ||
2217 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_IMM16_X0), | ||
2218 | 16, 1, 0, 0, 0, 0, | ||
2219 | create_Imm16_X0, get_Imm16_X0 | ||
2220 | }, | ||
2221 | { | ||
2222 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_IMM16_X1), | ||
2223 | 16, 1, 0, 0, 0, 0, | ||
2224 | create_Imm16_X1, get_Imm16_X1 | ||
2225 | }, | ||
2226 | { | ||
2227 | TILE_OP_TYPE_ADDRESS, BFD_RELOC(TILE_JOFFLONG_X1), | ||
2228 | 29, 1, 0, 0, 1, TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, | ||
2229 | create_JOffLong_X1, get_JOffLong_X1 | ||
2230 | }, | ||
2231 | { | ||
2232 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2233 | 6, 0, 0, 1, 0, 0, | ||
2234 | create_Dest_X0, get_Dest_X0 | ||
2235 | }, | ||
2236 | { | ||
2237 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2238 | 6, 0, 1, 0, 0, 0, | ||
2239 | create_SrcA_X0, get_SrcA_X0 | ||
2240 | }, | ||
2241 | { | ||
2242 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2243 | 6, 0, 0, 1, 0, 0, | ||
2244 | create_Dest_X1, get_Dest_X1 | ||
2245 | }, | ||
2246 | { | ||
2247 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2248 | 6, 0, 1, 0, 0, 0, | ||
2249 | create_SrcA_X1, get_SrcA_X1 | ||
2250 | }, | ||
2251 | { | ||
2252 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2253 | 6, 0, 0, 1, 0, 0, | ||
2254 | create_Dest_Y0, get_Dest_Y0 | ||
2255 | }, | ||
2256 | { | ||
2257 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2258 | 6, 0, 1, 0, 0, 0, | ||
2259 | create_SrcA_Y0, get_SrcA_Y0 | ||
2260 | }, | ||
2261 | { | ||
2262 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2263 | 6, 0, 0, 1, 0, 0, | ||
2264 | create_Dest_Y1, get_Dest_Y1 | ||
2265 | }, | ||
2266 | { | ||
2267 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2268 | 6, 0, 1, 0, 0, 0, | ||
2269 | create_SrcA_Y1, get_SrcA_Y1 | ||
2270 | }, | ||
2271 | { | ||
2272 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2273 | 6, 0, 1, 0, 0, 0, | ||
2274 | create_SrcA_Y2, get_SrcA_Y2 | ||
2275 | }, | ||
2276 | { | ||
2277 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2278 | 6, 0, 1, 0, 0, 0, | ||
2279 | create_SrcB_X0, get_SrcB_X0 | ||
2280 | }, | ||
2281 | { | ||
2282 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2283 | 6, 0, 1, 0, 0, 0, | ||
2284 | create_SrcB_X1, get_SrcB_X1 | ||
2285 | }, | ||
2286 | { | ||
2287 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2288 | 6, 0, 1, 0, 0, 0, | ||
2289 | create_SrcB_Y0, get_SrcB_Y0 | ||
2290 | }, | ||
2291 | { | ||
2292 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2293 | 6, 0, 1, 0, 0, 0, | ||
2294 | create_SrcB_Y1, get_SrcB_Y1 | ||
2295 | }, | ||
2296 | { | ||
2297 | TILE_OP_TYPE_ADDRESS, BFD_RELOC(TILE_BROFF_X1), | ||
2298 | 17, 1, 0, 0, 1, TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, | ||
2299 | create_BrOff_X1, get_BrOff_X1 | ||
2300 | }, | ||
2301 | { | ||
2302 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2303 | 6, 0, 1, 1, 0, 0, | ||
2304 | create_Dest_X0, get_Dest_X0 | ||
2305 | }, | ||
2306 | { | ||
2307 | TILE_OP_TYPE_ADDRESS, BFD_RELOC(NONE), | ||
2308 | 28, 1, 0, 0, 1, TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, | ||
2309 | create_JOff_X1, get_JOff_X1 | ||
2310 | }, | ||
2311 | { | ||
2312 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2313 | 6, 0, 0, 1, 0, 0, | ||
2314 | create_SrcBDest_Y2, get_SrcBDest_Y2 | ||
2315 | }, | ||
2316 | { | ||
2317 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2318 | 6, 0, 1, 1, 0, 0, | ||
2319 | create_SrcA_X1, get_SrcA_X1 | ||
2320 | }, | ||
2321 | { | ||
2322 | TILE_OP_TYPE_SPR, BFD_RELOC(TILE_MF_IMM15_X1), | ||
2323 | 15, 0, 0, 0, 0, 0, | ||
2324 | create_MF_Imm15_X1, get_MF_Imm15_X1 | ||
2325 | }, | ||
2326 | { | ||
2327 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_MMSTART_X0), | ||
2328 | 5, 0, 0, 0, 0, 0, | ||
2329 | create_MMStart_X0, get_MMStart_X0 | ||
2330 | }, | ||
2331 | { | ||
2332 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_MMEND_X0), | ||
2333 | 5, 0, 0, 0, 0, 0, | ||
2334 | create_MMEnd_X0, get_MMEnd_X0 | ||
2335 | }, | ||
2336 | { | ||
2337 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_MMSTART_X1), | ||
2338 | 5, 0, 0, 0, 0, 0, | ||
2339 | create_MMStart_X1, get_MMStart_X1 | ||
2340 | }, | ||
2341 | { | ||
2342 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_MMEND_X1), | ||
2343 | 5, 0, 0, 0, 0, 0, | ||
2344 | create_MMEnd_X1, get_MMEnd_X1 | ||
2345 | }, | ||
2346 | { | ||
2347 | TILE_OP_TYPE_SPR, BFD_RELOC(TILE_MT_IMM15_X1), | ||
2348 | 15, 0, 0, 0, 0, 0, | ||
2349 | create_MT_Imm15_X1, get_MT_Imm15_X1 | ||
2350 | }, | ||
2351 | { | ||
2352 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2353 | 6, 0, 1, 1, 0, 0, | ||
2354 | create_Dest_Y0, get_Dest_Y0 | ||
2355 | }, | ||
2356 | { | ||
2357 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_SHAMT_X0), | ||
2358 | 5, 0, 0, 0, 0, 0, | ||
2359 | create_ShAmt_X0, get_ShAmt_X0 | ||
2360 | }, | ||
2361 | { | ||
2362 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_SHAMT_X1), | ||
2363 | 5, 0, 0, 0, 0, 0, | ||
2364 | create_ShAmt_X1, get_ShAmt_X1 | ||
2365 | }, | ||
2366 | { | ||
2367 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_SHAMT_Y0), | ||
2368 | 5, 0, 0, 0, 0, 0, | ||
2369 | create_ShAmt_Y0, get_ShAmt_Y0 | ||
2370 | }, | ||
2371 | { | ||
2372 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_SHAMT_Y1), | ||
2373 | 5, 0, 0, 0, 0, 0, | ||
2374 | create_ShAmt_Y1, get_ShAmt_Y1 | ||
2375 | }, | ||
2376 | { | ||
2377 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2378 | 6, 0, 1, 0, 0, 0, | ||
2379 | create_SrcBDest_Y2, get_SrcBDest_Y2 | ||
2380 | }, | ||
2381 | { | ||
2382 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE), | ||
2383 | 8, 1, 0, 0, 0, 0, | ||
2384 | create_Dest_Imm8_X1, get_Dest_Imm8_X1 | ||
2385 | }, | ||
2386 | { | ||
2387 | TILE_OP_TYPE_ADDRESS, BFD_RELOC(TILE_SN_BROFF), | ||
2388 | 10, 1, 0, 0, 1, TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES, | ||
2389 | create_BrOff_SN, get_BrOff_SN | ||
2390 | }, | ||
2391 | { | ||
2392 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_SN_UIMM8), | ||
2393 | 8, 0, 0, 0, 0, 0, | ||
2394 | create_Imm8_SN, get_Imm8_SN | ||
2395 | }, | ||
2396 | { | ||
2397 | TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_SN_IMM8), | ||
2398 | 8, 1, 0, 0, 0, 0, | ||
2399 | create_Imm8_SN, get_Imm8_SN | ||
2400 | }, | ||
2401 | { | ||
2402 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2403 | 2, 0, 0, 1, 0, 0, | ||
2404 | create_Dest_SN, get_Dest_SN | ||
2405 | }, | ||
2406 | { | ||
2407 | TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE), | ||
2408 | 2, 0, 1, 0, 0, 0, | ||
2409 | create_Src_SN, get_Src_SN | ||
2410 | } | ||
2411 | }; | ||
2412 | |||
2413 | |||
2414 | |||
2415 | |||
2416 | /* Given a set of bundle bits and the lookup FSM for a specific pipe, | ||
2417 | * returns which instruction the bundle contains in that pipe. | ||
2418 | */ | ||
2419 | static const struct tile_opcode * | ||
2420 | find_opcode(tile_bundle_bits bits, const unsigned short *table) | ||
2421 | { | ||
2422 | int index = 0; | ||
2423 | |||
2424 | while (1) | ||
2425 | { | ||
2426 | unsigned short bitspec = table[index]; | ||
2427 | unsigned int bitfield = | ||
2428 | ((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6); | ||
2429 | |||
2430 | unsigned short next = table[index + 1 + bitfield]; | ||
2431 | if (next <= TILE_OPC_NONE) | ||
2432 | return &tile_opcodes[next]; | ||
2433 | |||
2434 | index = next - TILE_OPC_NONE; | ||
2435 | } | ||
2436 | } | ||
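/*
 * Illustrative sketch of the walk above (the BITFIELD/CHILD macros are
 * #undef'd below, and are presumably defined near the top of this file
 * as something like):
 *
 *   #define BITFIELD(start, size)  ((start) | (((1 << (size)) - 1) << 6))
 *   #define CHILD(array_index)     (TILE_OPC_NONE + (array_index))
 *
 * So for a node entry bitspec = table[index]:
 *
 *   start = bitspec & 63;            // bit position within the bundle
 *   mask  = bitspec >> 6;            // (1 << size) - 1
 *   slot  = (bits >> start) & mask;  // which of the 2^size children
 *
 * table[index + 1 + slot] is then either a real opcode (values up to
 * TILE_OPC_NONE terminate the walk) or CHILD(n) == TILE_OPC_NONE + n,
 * which continues the walk at index n of the same table.
 */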
2437 | |||
2438 | |||
2439 | int | ||
2440 | parse_insn_tile(tile_bundle_bits bits, | ||
2441 | unsigned int pc, | ||
2442 | struct tile_decoded_instruction | ||
2443 | decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]) | ||
2444 | { | ||
2445 | int num_instructions = 0; | ||
2446 | int pipe; | ||
2447 | |||
2448 | int min_pipe, max_pipe; | ||
2449 | if ((bits & TILE_BUNDLE_Y_ENCODING_MASK) == 0) | ||
2450 | { | ||
2451 | min_pipe = TILE_PIPELINE_X0; | ||
2452 | max_pipe = TILE_PIPELINE_X1; | ||
2453 | } | ||
2454 | else | ||
2455 | { | ||
2456 | min_pipe = TILE_PIPELINE_Y0; | ||
2457 | max_pipe = TILE_PIPELINE_Y2; | ||
2458 | } | ||
2459 | |||
2460 | /* For each pipe, find an instruction that fits. */ | ||
2461 | for (pipe = min_pipe; pipe <= max_pipe; pipe++) | ||
2462 | { | ||
2463 | const struct tile_opcode *opc; | ||
2464 | struct tile_decoded_instruction *d; | ||
2465 | int i; | ||
2466 | |||
2467 | d = &decoded[num_instructions++]; | ||
2468 | opc = find_opcode (bits, tile_bundle_decoder_fsms[pipe]); | ||
2469 | d->opcode = opc; | ||
2470 | |||
2471 | /* Decode each operand, sign extending, etc. as appropriate. */ | ||
2472 | for (i = 0; i < opc->num_operands; i++) | ||
2473 | { | ||
2474 | const struct tile_operand *op = | ||
2475 | &tile_operands[opc->operands[pipe][i]]; | ||
2476 | int opval = op->extract (bits); | ||
2477 | if (op->is_signed) | ||
2478 | { | ||
2479 | /* Sign-extend the operand. */ | ||
2480 | int shift = (int)((sizeof(int) * 8) - op->num_bits); | ||
2481 | opval = (opval << shift) >> shift; | ||
2482 | } | ||
2483 | |||
2484 | /* Adjust PC-relative scaled branch offsets. */ | ||
2485 | if (op->type == TILE_OP_TYPE_ADDRESS) | ||
2486 | { | ||
2487 | opval *= TILE_BUNDLE_SIZE_IN_BYTES; | ||
2488 | opval += (int)pc; | ||
2489 | } | ||
2490 | |||
2491 | /* Record the final value. */ | ||
2492 | d->operands[i] = op; | ||
2493 | d->operand_values[i] = opval; | ||
2494 | } | ||
2495 | } | ||
2496 | |||
2497 | return num_instructions; | ||
2498 | } | ||
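/*
 * Worked example of the sign-extension idiom in parse_insn_tile() (a
 * sketch assuming a 32-bit int): for an 8-bit signed operand extracted
 * as opval = 0xff, shift = 32 - 8 = 24, and (0xff << 24) >> 24 yields
 * -1, the arithmetic right shift replicating the sign bit back down.
 * A raw 0x7f stays 127, since its top bit is clear.
 */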
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c new file mode 100644 index 000000000000..b9ab25a889b5 --- /dev/null +++ b/arch/tile/kernel/time.c | |||
@@ -0,0 +1,221 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Support the cycle counter clocksource and tile timer clock event device. | ||
15 | */ | ||
16 | |||
17 | #include <linux/time.h> | ||
18 | #include <linux/timex.h> | ||
19 | #include <linux/clocksource.h> | ||
20 | #include <linux/clockchips.h> | ||
21 | #include <linux/hardirq.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <asm/irq_regs.h> | ||
26 | #include <asm/traps.h> | ||
27 | #include <hv/hypervisor.h> | ||
28 | #include <arch/interrupts.h> | ||
29 | #include <arch/spr_def.h> | ||
30 | |||
31 | |||
32 | /* | ||
33 | * Define the cycle counter clock source. | ||
34 | */ | ||
35 | |||
36 | /* How many cycles per second we are running at. */ | ||
37 | static cycles_t cycles_per_sec __write_once; | ||
38 | |||
39 | /* | ||
40 | * We set up shift and multiply values with a minsec of five seconds, | ||
41 | * since our timer counter counts down 31 bits at a frequency of | ||
42 | * no less than 500 MHz. See @minsec for clocks_calc_mult_shift(). | ||
43 | * We could use a different value for the 64-bit free-running | ||
44 | * cycle counter, but we use the same one for consistency; we | ||
45 | * will be reasonably precise with this value anyway. | ||
46 | */ | ||
47 | #define TILE_MINSEC 5 | ||
48 | |||
49 | cycles_t get_clock_rate(void) | ||
50 | { | ||
51 | return cycles_per_sec; | ||
52 | } | ||
53 | |||
54 | #if CHIP_HAS_SPLIT_CYCLE() | ||
55 | cycles_t get_cycles(void) | ||
56 | { | ||
57 | unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH); | ||
58 | unsigned int low = __insn_mfspr(SPR_CYCLE_LOW); | ||
59 | unsigned int high2 = __insn_mfspr(SPR_CYCLE_HIGH); | ||
60 | |||
61 | while (unlikely(high != high2)) { | ||
62 | low = __insn_mfspr(SPR_CYCLE_LOW); | ||
63 | high = high2; | ||
64 | high2 = __insn_mfspr(SPR_CYCLE_HIGH); | ||
65 | } | ||
66 | |||
67 | return (((cycles_t)high) << 32) | low; | ||
68 | } | ||
69 | #endif | ||
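/*
 * The high/low/high2 sequence above is the usual lock-free read of a
 * counter split across two 32-bit SPRs: if the two CYCLE_HIGH reads
 * disagree, CYCLE_LOW wrapped in between, so the low word is re-read
 * until the high word is stable.  Illustrative race: pairing a
 * pre-wrap high word with a post-wrap low of 0 would report a value
 * roughly 2^32 cycles too small; the retry loop rules that out.
 */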
70 | |||
71 | static cycles_t clocksource_get_cycles(struct clocksource *cs) | ||
72 | { | ||
73 | return get_cycles(); | ||
74 | } | ||
75 | |||
76 | static struct clocksource cycle_counter_cs = { | ||
77 | .name = "cycle counter", | ||
78 | .rating = 300, | ||
79 | .read = clocksource_get_cycles, | ||
80 | .mask = CLOCKSOURCE_MASK(64), | ||
81 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
82 | }; | ||
83 | |||
84 | /* | ||
85 | * Called very early from setup_arch() to set cycles_per_sec. | ||
86 | * We initialize it early so we can use it to set up loops_per_jiffy. | ||
87 | */ | ||
88 | void __init setup_clock(void) | ||
89 | { | ||
90 | cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED); | ||
91 | clocksource_calc_mult_shift(&cycle_counter_cs, cycles_per_sec, | ||
92 | TILE_MINSEC); | ||
93 | } | ||
94 | |||
95 | void __init calibrate_delay(void) | ||
96 | { | ||
97 | loops_per_jiffy = get_clock_rate() / HZ; | ||
98 | pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n", | ||
99 | loops_per_jiffy/(500000/HZ), | ||
100 | (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy); | ||
101 | } | ||
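/*
 * Illustrative arithmetic for the message above: BogoMIPS here is
 * loops_per_jiffy * HZ / 500000.  With assumed numbers -- a 750 MHz
 * clock and HZ = 100 -- loops_per_jiffy = 7500000 and the line prints
 * "1500.00 BogoMIPS (lpj=7500000)": 7500000 / (500000/100) == 1500.
 */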
102 | |||
103 | /* Called fairly late in init/main.c, but before we go smp. */ | ||
104 | void __init time_init(void) | ||
105 | { | ||
106 | /* Initialize and register the clock source. */ | ||
107 | clocksource_register(&cycle_counter_cs); | ||
108 | |||
109 | /* Start up the tile-timer interrupt source on the boot cpu. */ | ||
110 | setup_tile_timer(); | ||
111 | } | ||
112 | |||
113 | |||
114 | /* | ||
115 | * Define the tile timer clock event device. The timer is driven by | ||
116 | * the TILE_TIMER_CONTROL register, which consists of a 31-bit down | ||
117 | * counter, plus bit 31, which signifies that the counter has wrapped | ||
118 | * from zero to (2**31) - 1. The INT_TILE_TIMER interrupt will be | ||
119 | * raised as long as bit 31 is set. | ||
120 | */ | ||
121 | |||
122 | #define MAX_TICK 0x7fffffff /* we have 31 bits of countdown timer */ | ||
123 | |||
124 | static int tile_timer_set_next_event(unsigned long ticks, | ||
125 | struct clock_event_device *evt) | ||
126 | { | ||
127 | BUG_ON(ticks > MAX_TICK); | ||
128 | __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks); | ||
129 | raw_local_irq_unmask_now(INT_TILE_TIMER); | ||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | /* | ||
134 | * Whenever anyone tries to change modes, we just mask interrupts | ||
135 | * and wait for the next event to get set. | ||
136 | */ | ||
137 | static void tile_timer_set_mode(enum clock_event_mode mode, | ||
138 | struct clock_event_device *evt) | ||
139 | { | ||
140 | raw_local_irq_mask_now(INT_TILE_TIMER); | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * Set min_delta_ns to 1 microsecond, since it takes about | ||
145 | * that long to fire the interrupt. | ||
146 | */ | ||
147 | static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = { | ||
148 | .name = "tile timer", | ||
149 | .features = CLOCK_EVT_FEAT_ONESHOT, | ||
150 | .min_delta_ns = 1000, | ||
151 | .rating = 100, | ||
152 | .irq = -1, | ||
153 | .set_next_event = tile_timer_set_next_event, | ||
154 | .set_mode = tile_timer_set_mode, | ||
155 | }; | ||
156 | |||
157 | void __cpuinit setup_tile_timer(void) | ||
158 | { | ||
159 | struct clock_event_device *evt = &__get_cpu_var(tile_timer); | ||
160 | |||
161 | /* Fill in fields that are speed-specific. */ | ||
162 | clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC); | ||
163 | evt->max_delta_ns = clockevent_delta2ns(MAX_TICK, evt); | ||
164 | |||
165 | /* Mark as being for this cpu only. */ | ||
166 | evt->cpumask = cpumask_of(smp_processor_id()); | ||
167 | |||
168 | /* Start out with timer not firing. */ | ||
169 | raw_local_irq_mask_now(INT_TILE_TIMER); | ||
170 | |||
171 | /* Register tile timer. */ | ||
172 | clockevents_register_device(evt); | ||
173 | } | ||
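/*
 * Sizing note (illustrative): MAX_TICK is 0x7fffffff cycles, so at an
 * assumed 1 GHz clock clockevent_delta2ns() above yields a max_delta_ns
 * of roughly 2.1 seconds; a faster clock shrinks the longest
 * programmable timeout proportionally.
 */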
174 | |||
175 | /* Called from the interrupt vector. */ | ||
176 | void do_timer_interrupt(struct pt_regs *regs, int fault_num) | ||
177 | { | ||
178 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
179 | struct clock_event_device *evt = &__get_cpu_var(tile_timer); | ||
180 | |||
181 | /* | ||
182 | * Mask the timer interrupt here, since we are a oneshot timer | ||
183 | * and there are now by definition no events pending. | ||
184 | */ | ||
185 | raw_local_irq_mask(INT_TILE_TIMER); | ||
186 | |||
187 | /* Track time spent here in an interrupt context. */ | ||
188 | irq_enter(); | ||
189 | |||
190 | /* Track interrupt count. */ | ||
191 | __get_cpu_var(irq_stat).irq_timer_count++; | ||
192 | |||
193 | /* Call the generic timer handler */ | ||
194 | evt->event_handler(evt); | ||
195 | |||
196 | /* | ||
197 | * Track time spent against the current process again and | ||
198 | * process any softirqs if they are waiting. | ||
199 | */ | ||
200 | irq_exit(); | ||
201 | |||
202 | set_irq_regs(old_regs); | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * Scheduler clock - returns current time in nanosec units. | ||
207 | * Note that with LOCKDEP, this is called during lockdep_init(), and | ||
208 | * we will claim that sched_clock() is zero for a little while, until | ||
209 | * we run setup_clock(), above. | ||
210 | */ | ||
211 | unsigned long long sched_clock(void) | ||
212 | { | ||
213 | return clocksource_cyc2ns(get_cycles(), | ||
214 | cycle_counter_cs.mult, | ||
215 | cycle_counter_cs.shift); | ||
216 | } | ||
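/*
 * clocksource_cyc2ns() computes (cyc * mult) >> shift.  Illustrative
 * values: for an assumed 500 MHz clock (2 ns per cycle), a mult/shift
 * pair such as 0x80000000/30 satisfies mult / 2^shift == 2, so
 * sched_clock() advances by 2 ns per counted cycle.
 */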
217 | |||
218 | int setup_profiling_timer(unsigned int multiplier) | ||
219 | { | ||
220 | return -EINVAL; | ||
221 | } | ||
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c new file mode 100644 index 000000000000..2dffc1044d83 --- /dev/null +++ b/arch/tile/kernel/tlb.c | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/cpumask.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <asm/tlbflush.h> | ||
19 | #include <asm/homecache.h> | ||
20 | #include <hv/hypervisor.h> | ||
21 | |||
22 | /* From tlbflush.h */ | ||
23 | DEFINE_PER_CPU(int, current_asid); | ||
24 | int min_asid, max_asid; | ||
25 | |||
26 | /* | ||
27 | * Note that we flush the L1I (for VM_EXEC pages) as well as the TLB | ||
28 | * so that when we are unmapping an executable page, we also flush it. | ||
29 | * Combined with flushing the L1I at context switch time, this means | ||
30 | * we don't have to do any other icache flushes. | ||
31 | */ | ||
32 | |||
33 | void flush_tlb_mm(struct mm_struct *mm) | ||
34 | { | ||
35 | HV_Remote_ASID asids[NR_CPUS]; | ||
36 | int i = 0, cpu; | ||
37 | for_each_cpu(cpu, &mm->cpu_vm_mask) { | ||
38 | HV_Remote_ASID *asid = &asids[i++]; | ||
39 | asid->y = cpu / smp_topology.width; | ||
40 | asid->x = cpu % smp_topology.width; | ||
41 | asid->asid = per_cpu(current_asid, cpu); | ||
42 | } | ||
43 | flush_remote(0, HV_FLUSH_EVICT_L1I, &mm->cpu_vm_mask, | ||
44 | 0, 0, 0, NULL, asids, i); | ||
45 | } | ||
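/*
 * The y/x computation above maps a linear cpu number onto the tile
 * grid in row-major order.  Illustrative example: with an assumed
 * smp_topology.width of 8, cpu 19 is flushed as grid coordinate
 * (y, x) = (19 / 8, 19 % 8) = (2, 3).
 */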
46 | |||
47 | void flush_tlb_current_task(void) | ||
48 | { | ||
49 | flush_tlb_mm(current->mm); | ||
50 | } | ||
51 | |||
52 | void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm, | ||
53 | unsigned long va) | ||
54 | { | ||
55 | unsigned long size = hv_page_size(vma); | ||
56 | int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; | ||
57 | flush_remote(0, cache, &mm->cpu_vm_mask, | ||
58 | va, size, size, &mm->cpu_vm_mask, NULL, 0); | ||
59 | } | ||
60 | |||
61 | void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va) | ||
62 | { | ||
63 | flush_tlb_page_mm(vma, vma->vm_mm, va); | ||
64 | } | ||
65 | EXPORT_SYMBOL(flush_tlb_page); | ||
66 | |||
67 | void flush_tlb_range(const struct vm_area_struct *vma, | ||
68 | unsigned long start, unsigned long end) | ||
69 | { | ||
70 | unsigned long size = hv_page_size(vma); | ||
71 | struct mm_struct *mm = vma->vm_mm; | ||
72 | int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; | ||
73 | flush_remote(0, cache, &mm->cpu_vm_mask, start, end - start, size, | ||
74 | &mm->cpu_vm_mask, NULL, 0); | ||
75 | } | ||
76 | |||
77 | void flush_tlb_all(void) | ||
78 | { | ||
79 | int i; | ||
80 | for (i = 0; ; ++i) { | ||
81 | HV_VirtAddrRange r = hv_inquire_virtual(i); | ||
82 | if (r.size == 0) | ||
83 | break; | ||
84 | flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask, | ||
85 | r.start, r.size, PAGE_SIZE, cpu_online_mask, | ||
86 | NULL, 0); | ||
87 | flush_remote(0, 0, NULL, | ||
88 | r.start, r.size, HPAGE_SIZE, cpu_online_mask, | ||
89 | NULL, 0); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | ||
94 | { | ||
95 | flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask, | ||
96 | start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0); | ||
97 | } | ||
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c new file mode 100644 index 000000000000..3870abbeeaa2 --- /dev/null +++ b/arch/tile/kernel/traps.c | |||
@@ -0,0 +1,317 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/kprobes.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/reboot.h> | ||
20 | #include <linux/uaccess.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <asm/opcode-tile.h> | ||
23 | #include <asm/opcode_constants.h> | ||
24 | #include <asm/stack.h> | ||
25 | #include <asm/traps.h> | ||
26 | |||
27 | #include <arch/interrupts.h> | ||
28 | #include <arch/spr_def.h> | ||
29 | |||
30 | void __init trap_init(void) | ||
31 | { | ||
32 | /* Nothing needed here since we link code at .intrpt1 */ | ||
33 | } | ||
34 | |||
35 | int unaligned_fixup = 1; | ||
36 | |||
37 | static int __init setup_unaligned_fixup(char *str) | ||
38 | { | ||
39 | /* | ||
40 | * Say "=-1" to completely disable it. If you just do "=0", we | ||
41 | * will still parse the instruction, then fire a SIGBUS with | ||
42 | * the correct address from inside the single_step code. | ||
43 | */ | ||
44 | long val; | ||
45 | if (strict_strtol(str, 0, &val) != 0) | ||
46 | return 0; | ||
47 | unaligned_fixup = val; | ||
48 | pr_info("Fixups for unaligned data accesses are %s\n", | ||
49 | unaligned_fixup >= 0 ? | ||
50 | (unaligned_fixup ? "enabled" : "disabled") : | ||
51 | "completely disabled"); | ||
52 | return 1; | ||
53 | } | ||
54 | __setup("unaligned_fixup=", setup_unaligned_fixup); | ||
55 | |||
56 | #if CHIP_HAS_TILE_DMA() | ||
57 | |||
58 | static int dma_disabled; | ||
59 | |||
60 | static int __init nodma(char *str) | ||
61 | { | ||
62 | pr_info("User-space DMA is disabled\n"); | ||
63 | dma_disabled = 1; | ||
64 | return 1; | ||
65 | } | ||
66 | __setup("nodma", nodma); | ||
67 | |||
68 | /* How to decode SPR_GPV_REASON */ | ||
69 | #define IRET_ERROR (1U << 31) | ||
70 | #define MT_ERROR (1U << 30) | ||
71 | #define MF_ERROR (1U << 29) | ||
72 | #define SPR_INDEX ((1U << 15) - 1) | ||
73 | #define SPR_MPL_SHIFT 9 /* starting bit position for MPL encoded in SPR */ | ||
74 | |||
75 | /* | ||
76 | * See if this GPV is just to notify the kernel of SPR use and we can | ||
77 | * retry the user instruction after adjusting some MPLs suitably. | ||
78 | */ | ||
79 | static int retry_gpv(unsigned int gpv_reason) | ||
80 | { | ||
81 | int mpl; | ||
82 | |||
83 | if (gpv_reason & IRET_ERROR) | ||
84 | return 0; | ||
85 | |||
86 | BUG_ON((gpv_reason & (MT_ERROR|MF_ERROR)) == 0); | ||
87 | mpl = (gpv_reason & SPR_INDEX) >> SPR_MPL_SHIFT; | ||
88 | if (mpl == INT_DMA_NOTIFY && !dma_disabled) { | ||
89 | /* User is turning on DMA. Allow it and retry. */ | ||
90 | printk(KERN_DEBUG "Process %d/%s is now enabled for DMA\n", | ||
91 | current->pid, current->comm); | ||
92 | BUG_ON(current->thread.tile_dma_state.enabled); | ||
93 | current->thread.tile_dma_state.enabled = 1; | ||
94 | grant_dma_mpls(); | ||
95 | return 1; | ||
96 | } | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | #endif /* CHIP_HAS_TILE_DMA() */ | ||
102 | |||
103 | #ifdef __tilegx__ | ||
104 | #define bundle_bits tilegx_bundle_bits | ||
105 | #else | ||
106 | #define bundle_bits tile_bundle_bits | ||
107 | #endif | ||
108 | |||
109 | extern bundle_bits bpt_code; | ||
110 | |||
111 | asm(".pushsection .rodata.bpt_code,\"a\";" | ||
112 | ".align 8;" | ||
113 | "bpt_code: bpt;" | ||
114 | ".size bpt_code,.-bpt_code;" | ||
115 | ".popsection"); | ||
116 | |||
117 | static int special_ill(bundle_bits bundle, int *sigp, int *codep) | ||
118 | { | ||
119 | int sig, code, maxcode; | ||
120 | |||
121 | if (bundle == bpt_code) { | ||
122 | *sigp = SIGTRAP; | ||
123 | *codep = TRAP_BRKPT; | ||
124 | return 1; | ||
125 | } | ||
126 | |||
127 | /* If it's a "raise" bundle, then "ill" must be in pipe X1. */ | ||
128 | #ifdef __tilegx__ | ||
129 | if ((bundle & TILEGX_BUNDLE_MODE_MASK) != 0) | ||
130 | return 0; | ||
131 | if (get_Opcode_X1(bundle) != UNARY_OPCODE_X1) | ||
132 | return 0; | ||
133 | if (get_UnaryOpcodeExtension_X1(bundle) != ILL_UNARY_OPCODE_X1) | ||
134 | return 0; | ||
135 | #else | ||
136 | if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) | ||
137 | return 0; | ||
138 | if (get_Opcode_X1(bundle) != SHUN_0_OPCODE_X1) | ||
139 | return 0; | ||
140 | if (get_UnShOpcodeExtension_X1(bundle) != UN_0_SHUN_0_OPCODE_X1) | ||
141 | return 0; | ||
142 | if (get_UnOpcodeExtension_X1(bundle) != ILL_UN_0_SHUN_0_OPCODE_X1) | ||
143 | return 0; | ||
144 | #endif | ||
145 | |||
146 | /* Check that the magic distinguishers are set to mean "raise". */ | ||
147 | if (get_Dest_X1(bundle) != 29 || get_SrcA_X1(bundle) != 37) | ||
148 | return 0; | ||
149 | |||
150 | /* There must be an "addli zero, zero, VAL" in X0. */ | ||
151 | if (get_Opcode_X0(bundle) != ADDLI_OPCODE_X0) | ||
152 | return 0; | ||
153 | if (get_Dest_X0(bundle) != TREG_ZERO) | ||
154 | return 0; | ||
155 | if (get_SrcA_X0(bundle) != TREG_ZERO) | ||
156 | return 0; | ||
157 | |||
158 | /* | ||
159 | * Validate the proposed signal number and si_code value. | ||
160 | * Note that we embed these in the static instruction itself | ||
161 | * so that we perturb the register state as little as possible | ||
162 | * at the time of the actual fault; it's unlikely you'd ever | ||
163 | * need to dynamically choose which kind of fault to raise | ||
164 | * from user space. | ||
165 | */ | ||
166 | sig = get_Imm16_X0(bundle) & 0x3f; | ||
167 | switch (sig) { | ||
168 | case SIGILL: | ||
169 | maxcode = NSIGILL; | ||
170 | break; | ||
171 | case SIGFPE: | ||
172 | maxcode = NSIGFPE; | ||
173 | break; | ||
174 | case SIGSEGV: | ||
175 | maxcode = NSIGSEGV; | ||
176 | break; | ||
177 | case SIGBUS: | ||
178 | maxcode = NSIGBUS; | ||
179 | break; | ||
180 | case SIGTRAP: | ||
181 | maxcode = NSIGTRAP; | ||
182 | break; | ||
183 | default: | ||
184 | return 0; | ||
185 | } | ||
186 | code = (get_Imm16_X0(bundle) >> 6) & 0xf; | ||
187 | if (code <= 0 || code > maxcode) | ||
188 | return 0; | ||
189 | |||
190 | /* Make it the requested signal. */ | ||
191 | *sigp = sig; | ||
192 | *codep = code | __SI_FAULT; | ||
193 | return 1; | ||
194 | } | ||
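/*
 * Packing sketch for the immediate decoded above (the "raise"
 * convention as read back by this function, not an architectural
 * field): bits [5:0] carry the signal number and bits [9:6] the
 * si_code.  E.g. an Imm16_X0 of (1 << 6) | 11 requests SIGSEGV
 * (11 in the generic ABI) with si_code 1, i.e. SEGV_MAPERR.
 */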
195 | |||
196 | void __kprobes do_trap(struct pt_regs *regs, int fault_num, | ||
197 | unsigned long reason) | ||
198 | { | ||
199 | siginfo_t info = { 0 }; | ||
200 | int signo, code; | ||
201 | unsigned long address; | ||
202 | bundle_bits instr; | ||
203 | |||
204 | /* Re-enable interrupts. */ | ||
205 | local_irq_enable(); | ||
206 | |||
207 | /* | ||
208 | * If it hits in kernel mode and we can't fix it up, just exit the | ||
209 | * current process and hope for the best. | ||
210 | */ | ||
211 | if (!user_mode(regs)) { | ||
212 | if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */ | ||
213 | return; | ||
214 | pr_alert("Kernel took bad trap %d at PC %#lx\n", | ||
215 | fault_num, regs->pc); | ||
216 | if (fault_num == INT_GPV) | ||
217 | pr_alert("GPV_REASON is %#lx\n", reason); | ||
218 | show_regs(regs); | ||
219 | do_exit(SIGKILL); /* FIXME: implement i386 die() */ | ||
220 | return; | ||
221 | } | ||
222 | |||
223 | switch (fault_num) { | ||
224 | case INT_ILL: | ||
225 | if (copy_from_user(&instr, (void __user *)regs->pc, | ||
226 | sizeof(instr))) { | ||
227 | pr_err("Unreadable instruction for INT_ILL:" | ||
228 | " %#lx\n", regs->pc); | ||
229 | do_exit(SIGKILL); | ||
230 | return; | ||
231 | } | ||
232 | if (!special_ill(instr, &signo, &code)) { | ||
233 | signo = SIGILL; | ||
234 | code = ILL_ILLOPC; | ||
235 | } | ||
236 | address = regs->pc; | ||
237 | break; | ||
238 | case INT_GPV: | ||
239 | #if CHIP_HAS_TILE_DMA() | ||
240 | if (retry_gpv(reason)) | ||
241 | return; | ||
242 | #endif | ||
243 | /*FALLTHROUGH*/ | ||
244 | case INT_UDN_ACCESS: | ||
245 | case INT_IDN_ACCESS: | ||
246 | #if CHIP_HAS_SN() | ||
247 | case INT_SN_ACCESS: | ||
248 | #endif | ||
249 | signo = SIGILL; | ||
250 | code = ILL_PRVREG; | ||
251 | address = regs->pc; | ||
252 | break; | ||
253 | case INT_SWINT_3: | ||
254 | case INT_SWINT_2: | ||
255 | case INT_SWINT_0: | ||
256 | signo = SIGILL; | ||
257 | code = ILL_ILLTRP; | ||
258 | address = regs->pc; | ||
259 | break; | ||
260 | case INT_UNALIGN_DATA: | ||
261 | #ifndef __tilegx__ /* FIXME: GX: no single-step yet */ | ||
262 | if (unaligned_fixup >= 0) { | ||
263 | struct single_step_state *state = | ||
264 | current_thread_info()->step_state; | ||
265 | if (!state || | ||
266 | (void __user *)(regs->pc) != state->buffer) { | ||
267 | single_step_once(regs); | ||
268 | return; | ||
269 | } | ||
270 | } | ||
271 | #endif | ||
272 | signo = SIGBUS; | ||
273 | code = BUS_ADRALN; | ||
274 | address = 0; | ||
275 | break; | ||
276 | case INT_DOUBLE_FAULT: | ||
277 | /* | ||
278 | * For double fault, "reason" is actually passed as | ||
279 | * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so | ||
280 | * we can provide the original fault number rather than the | ||
281 | * uninteresting "INT_DOUBLE_FAULT", letting the user learn | ||
282 | * what actually struck while PL0 ICS was set. | ||
283 | */ | ||
284 | fault_num = reason; | ||
285 | signo = SIGILL; | ||
286 | code = ILL_DBLFLT; | ||
287 | address = regs->pc; | ||
288 | break; | ||
289 | #ifdef __tilegx__ | ||
290 | case INT_ILL_TRANS: | ||
291 | signo = SIGSEGV; | ||
292 | code = SEGV_MAPERR; | ||
293 | if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK) | ||
294 | address = regs->pc; | ||
295 | else | ||
296 | address = 0; /* FIXME: GX: single-step for address */ | ||
297 | break; | ||
298 | #endif | ||
299 | default: | ||
300 | panic("Unexpected do_trap interrupt number %d", fault_num); | ||
301 | return; | ||
302 | } | ||
303 | |||
304 | info.si_signo = signo; | ||
305 | info.si_code = code; | ||
306 | info.si_addr = (void __user *)address; | ||
307 | if (signo == SIGILL) | ||
308 | info.si_trapno = fault_num; | ||
309 | force_sig_info(signo, &info, current); | ||
310 | } | ||
311 | |||
312 | void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52) | ||
313 | { | ||
314 | _dump_stack(dummy, pc, lr, sp, r52); | ||
315 | pr_emerg("Double fault: exiting\n"); | ||
316 | machine_halt(); | ||
317 | } | ||
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..25fdc0c1839a --- /dev/null +++ b/arch/tile/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,98 @@ | |||
1 | #include <asm-generic/vmlinux.lds.h> | ||
2 | #include <asm/page.h> | ||
3 | #include <asm/cache.h> | ||
4 | #include <asm/thread_info.h> | ||
5 | #include <hv/hypervisor.h> | ||
6 | |||
7 | /* Text loads starting from the supervisor interrupt vector address. */ | ||
8 | #define TEXT_OFFSET MEM_SV_INTRPT | ||
9 | |||
10 | OUTPUT_ARCH(tile) | ||
11 | ENTRY(_start) | ||
12 | jiffies = jiffies_64; | ||
13 | |||
14 | PHDRS | ||
15 | { | ||
16 | intrpt1 PT_LOAD ; | ||
17 | text PT_LOAD ; | ||
18 | data PT_LOAD ; | ||
19 | } | ||
20 | SECTIONS | ||
21 | { | ||
22 | /* Text is loaded with a different VA than data; start with text. */ | ||
23 | #undef LOAD_OFFSET | ||
24 | #define LOAD_OFFSET TEXT_OFFSET | ||
25 | |||
26 | /* Interrupt vectors */ | ||
27 | .intrpt1 (LOAD_OFFSET) : AT ( 0 ) /* put at the start of physical memory */ | ||
28 | { | ||
29 | _text = .; | ||
30 | _stext = .; | ||
31 | *(.intrpt1) | ||
32 | } :intrpt1 =0 | ||
33 | |||
34 | /* Hypervisor call vectors */ | ||
35 | #include "hvglue.lds" | ||
36 | |||
37 | /* Now the real code */ | ||
38 | . = ALIGN(0x20000); | ||
39 | .text : AT (ADDR(.text) - LOAD_OFFSET) { | ||
40 | HEAD_TEXT | ||
41 | SCHED_TEXT | ||
42 | LOCK_TEXT | ||
43 | __fix_text_end = .; /* tile-cpack won't rearrange before this */ | ||
44 | TEXT_TEXT | ||
45 | *(.text.*) | ||
46 | *(.coldtext*) | ||
47 | *(.fixup) | ||
48 | *(.gnu.warning) | ||
49 | } :text =0 | ||
50 | _etext = .; | ||
51 | |||
52 | /* "Init" is divided into two areas with very different virtual addresses. */ | ||
53 | INIT_TEXT_SECTION(PAGE_SIZE) | ||
54 | |||
55 | /* Now we skip back to PAGE_OFFSET for the data. */ | ||
56 | . = (. - TEXT_OFFSET + PAGE_OFFSET); | ||
57 | #undef LOAD_OFFSET | ||
58 | #define LOAD_OFFSET PAGE_OFFSET | ||
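/* Illustrative address math (constants assumed, taken as typical 32-bit
 * tile values rather than from this tree): with MEM_SV_INTRPT =
 * 0xfd000000 and PAGE_OFFSET = 0xc0000000, a location counter of
 * 0xfd140000 after the init text rebases to 0xc0140000 here, while the
 * AT(... - LOAD_OFFSET) terms keep the load addresses contiguous from
 * physical 0. */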
59 | |||
60 | . = ALIGN(PAGE_SIZE); | ||
61 | VMLINUX_SYMBOL(_sinitdata) = .; | ||
62 | .init.page : AT (ADDR(.init.page) - LOAD_OFFSET) { | ||
63 | *(.init.page) | ||
64 | } :data =0 | ||
65 | INIT_DATA_SECTION(16) | ||
66 | PERCPU(PAGE_SIZE) | ||
67 | . = ALIGN(PAGE_SIZE); | ||
68 | VMLINUX_SYMBOL(_einitdata) = .; | ||
69 | |||
70 | _sdata = .; /* Start of data section */ | ||
71 | |||
72 | RO_DATA_SECTION(PAGE_SIZE) | ||
73 | |||
74 | /* initially writeable, then read-only */ | ||
75 | . = ALIGN(PAGE_SIZE); | ||
76 | __w1data_begin = .; | ||
77 | .w1data : AT(ADDR(.w1data) - LOAD_OFFSET) { | ||
78 | VMLINUX_SYMBOL(__w1data_begin) = .; | ||
79 | *(.w1data) | ||
80 | VMLINUX_SYMBOL(__w1data_end) = .; | ||
81 | } | ||
82 | |||
83 | RW_DATA_SECTION(L2_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) | ||
84 | |||
85 | _edata = .; | ||
86 | |||
87 | EXCEPTION_TABLE(L2_CACHE_BYTES) | ||
88 | NOTES | ||
89 | |||
90 | |||
91 | BSS_SECTION(8, PAGE_SIZE, 1) | ||
92 | _end = . ; | ||
93 | |||
94 | STABS_DEBUG | ||
95 | DWARF_DEBUG | ||
96 | |||
97 | DISCARDS | ||
98 | } | ||