author		Linus Torvalds <torvalds@linux-foundation.org>	2010-10-26 20:25:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-26 20:25:38 -0400
commit		e404f91ed2180dfecbab15dd4d39c543353385fb (patch)
tree		c256e29b1c738d5e5b5478f19b369b1fd90bd1e2 /arch/tile
parent		18a043f9413277523cf5011e594caa1747db4948 (diff)
parent		e18105c128734b1671739ad4d85e216ebec28c61 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  arch/tile: convert a BUG_ON to BUILD_BUG_ON
  arch/tile: make ptrace() work properly for TILE-Gx COMPAT mode
  arch/tile: support new info op generated by compiler
  arch/tile: minor whitespace/naming changes for string support files
  arch/tile: enable single-step support for TILE-Gx
  arch/tile: parameterize system PLs to support KVM port
  arch/tile: add Tilera's <arch/sim.h> header as an open-source header
  arch/tile: Bomb C99 comments to C89 comments in tile's <arch/sim_def.h>
  arch/tile: prevent corrupt top frame from causing backtracer runaway
  arch/tile: various top-level Makefile cleanups
  arch/tile: change lower bound on syscall error return to -4095
  arch/tile: properly export __mb_incoherent for modules
  arch/tile: provide a definition of MAP_STACK
  kmemleak: add TILE to the list of supported architectures.
  char: hvc: check for error case
  arch/tile: Add a warning if we try to allocate too much vmalloc memory.
  arch/tile: update some comments to clarify register usage.
  arch/tile: use better "punctuation" for VMSPLIT_3_5G and friends
  arch/tile: Use <asm-generic/syscalls.h>
  tile: replace some BUG_ON checks with BUILD_BUG_ON checks
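
Two of the patches above replace BUG_ON() checks on compile-time-constant conditions with BUILD_BUG_ON(). As a reminder of the distinction, a generic sketch using the standard kernel macros (the check_sizes() wrapper is hypothetical, not code from this merge):

    #include <linux/kernel.h>	/* BUILD_BUG_ON() */
    #include <linux/bug.h>	/* BUG_ON() */

    static void check_sizes(void)
    {
    	/* Runtime check: panics only if this code path actually executes,
    	 * and costs a test-and-branch on every call. */
    	BUG_ON(sizeof(long) != sizeof(void *));

    	/* Compile-time check: if the constant condition is true the build
    	 * fails, so the error can never reach a running system. */
    	BUILD_BUG_ON(sizeof(long) != sizeof(void *));
    }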
Diffstat (limited to 'arch/tile')
-rw-r--r--	arch/tile/Kconfig	20
-rw-r--r--	arch/tile/Makefile	19
-rw-r--r--	arch/tile/include/arch/sim.h	619
-rw-r--r--	arch/tile/include/arch/sim_def.h	548
-rw-r--r--	arch/tile/include/arch/spr_def.h	85
-rw-r--r--	arch/tile/include/arch/spr_def_32.h	39
-rw-r--r--	arch/tile/include/asm/backtrace.h	5
-rw-r--r--	arch/tile/include/asm/compat.h	15
-rw-r--r--	arch/tile/include/asm/irqflags.h	64
-rw-r--r--	arch/tile/include/asm/mman.h	1
-rw-r--r--	arch/tile/include/asm/page.h	27
-rw-r--r--	arch/tile/include/asm/processor.h	11
-rw-r--r--	arch/tile/include/asm/ptrace.h	4
-rw-r--r--	arch/tile/include/asm/syscalls.h	73
-rw-r--r--	arch/tile/include/asm/system.h	14
-rw-r--r--	arch/tile/include/asm/traps.h	4
-rw-r--r--	arch/tile/include/hv/hypervisor.h	30
-rw-r--r--	arch/tile/kernel/backtrace.c	4
-rw-r--r--	arch/tile/kernel/compat.c	10
-rw-r--r--	arch/tile/kernel/compat_signal.c	10
-rw-r--r--	arch/tile/kernel/entry.S	34
-rw-r--r--	arch/tile/kernel/head_32.S	5
-rw-r--r--	arch/tile/kernel/intvec_32.S	101
-rw-r--r--	arch/tile/kernel/irq.c	16
-rw-r--r--	arch/tile/kernel/messaging.c	2
-rw-r--r--	arch/tile/kernel/process.c	50
-rw-r--r--	arch/tile/kernel/ptrace.c	78
-rw-r--r--	arch/tile/kernel/regs_32.S	2
-rw-r--r--	arch/tile/kernel/setup.c	34
-rw-r--r--	arch/tile/kernel/signal.c	6
-rw-r--r--	arch/tile/kernel/single_step.c	73
-rw-r--r--	arch/tile/kernel/smp.c	2
-rw-r--r--	arch/tile/kernel/stack.c	35
-rw-r--r--	arch/tile/kernel/sys.c	9
-rw-r--r--	arch/tile/kernel/traps.c	4
-rw-r--r--	arch/tile/kvm/Kconfig	38
-rw-r--r--	arch/tile/lib/Makefile	4
-rw-r--r--	arch/tile/lib/atomic_32.c	8
-rw-r--r--	arch/tile/lib/exports.c	3
-rw-r--r--	arch/tile/lib/memcpy_32.S	206
-rw-r--r--	arch/tile/lib/memmove.c (renamed from arch/tile/lib/memmove_32.c)	0
-rw-r--r--	arch/tile/lib/memset_32.c	1
-rw-r--r--	arch/tile/lib/strlen_32.c	2
-rw-r--r--	arch/tile/mm/fault.c	12
-rw-r--r--	arch/tile/mm/homecache.c	11
-rw-r--r--	arch/tile/mm/init.c	2
46 files changed, 1608 insertions, 732 deletions
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 1eb308cb711a..89cfee07efa9 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -96,6 +96,7 @@ config HVC_TILE
 
 config TILE
 	def_bool y
+	select HAVE_KVM if !TILEGX
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_FIND_NEXT_BIT
 	select USE_GENERIC_SMP_HELPERS
@@ -236,9 +237,9 @@ choice
 	  If you are not absolutely sure what you are doing, leave this
 	  option alone!
 
-	config VMSPLIT_375G
+	config VMSPLIT_3_75G
 		bool "3.75G/0.25G user/kernel split (no kernel networking)"
-	config VMSPLIT_35G
+	config VMSPLIT_3_5G
 		bool "3.5G/0.5G user/kernel split"
 	config VMSPLIT_3G
 		bool "3G/1G user/kernel split"
@@ -252,8 +253,8 @@ endchoice
 
 config PAGE_OFFSET
 	hex
-	default 0xF0000000 if VMSPLIT_375G
-	default 0xE0000000 if VMSPLIT_35G
+	default 0xF0000000 if VMSPLIT_3_75G
+	default 0xE0000000 if VMSPLIT_3_5G
 	default 0xB0000000 if VMSPLIT_3G_OPT
 	default 0x80000000 if VMSPLIT_2G
 	default 0x40000000 if VMSPLIT_1G
@@ -314,6 +315,15 @@ config HARDWALL
 	bool "Hardwall support to allow access to user dynamic network"
 	default y
 
+config KERNEL_PL
+	int "Processor protection level for kernel"
+	range 1 2
+	default "1"
+	---help---
+	  This setting determines the processor protection level the
+	  kernel will be built to run at.  Generally you should use
+	  the default value here.
+
 endmenu # Tilera-specific configuration
 
 menu "Bus options"
@@ -354,3 +364,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/tile/kvm/Kconfig"
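
The VMSPLIT choice above only selects PAGE_OFFSET, the virtual address where the kernel's mapping begins; everything below it is user space. A sketch of what the split means in practice (addr_is_kernel() is a hypothetical helper, not part of the patch):

    /* With VMSPLIT_3_5G selected, PAGE_OFFSET is 0xE0000000: user space gets
     * the 3.5GB range [0, 0xE0000000) and the kernel maps the top 0.5GB. */
    static inline int addr_is_kernel(unsigned long va)
    {
    	return va >= 0xE0000000UL;	/* i.e. va >= PAGE_OFFSET */
    }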
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index fd8f6bb5face..17acce70569b 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -26,8 +26,9 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
 endif
 endif
 
-
+ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
 KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
+endif
 
 LIBGCC_PATH := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
@@ -49,6 +50,20 @@ head-y := arch/tile/kernel/head_$(BITS).o
 libs-y += arch/tile/lib/
 libs-y += $(LIBGCC_PATH)
 
-
 # See arch/tile/Kbuild for content of core part of the kernel
 core-y += arch/tile/
+
+core-$(CONFIG_KVM) += arch/tile/kvm/
+
+ifdef TILERA_ROOT
+INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
+endif
+
+install:
+	install -D -m 755 vmlinux $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE)
+	install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
+	install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
+
+define archhelp
+	echo '  install         - install kernel into $(INSTALL_PATH)'
+endef
diff --git a/arch/tile/include/arch/sim.h b/arch/tile/include/arch/sim.h
new file mode 100644
index 000000000000..74b7c1624d34
--- /dev/null
+++ b/arch/tile/include/arch/sim.h
@@ -0,0 +1,619 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file
+ *
+ * Provides an API for controlling the simulator at runtime.
+ */
+
+/**
+ * @addtogroup arch_sim
+ * @{
+ *
+ * An API for controlling the simulator at runtime.
+ *
+ * The simulator's behavior can be modified while it is running.
+ * For example, human-readable trace output can be enabled and disabled
+ * around code of interest.
+ *
+ * There are two ways to modify simulator behavior:
+ * programmatically, by calling various sim_* functions, and
+ * interactively, by entering commands like "sim set functional true"
+ * at the tile-monitor prompt. Typing "sim help" at that prompt provides
+ * a list of interactive commands.
+ *
+ * All interactive commands can also be executed programmatically by
+ * passing a string to the sim_command function.
+ */
+
+#ifndef __ARCH_SIM_H__
+#define __ARCH_SIM_H__
+
+#include <arch/sim_def.h>
+#include <arch/abi.h>
+
+#ifndef __ASSEMBLER__
+
+#include <arch/spr_def.h>
+
+
+/**
+ * Return true if the current program is running under a simulator,
+ * rather than on real hardware. If running on hardware, other "sim_xxx()"
+ * calls have no useful effect.
+ */
+static inline int
+sim_is_simulator(void)
+{
+  return __insn_mfspr(SPR_SIM_CONTROL) != 0;
+}
+
+
+/**
+ * Checkpoint the simulator state to a checkpoint file.
+ *
+ * The checkpoint file name is either the default or the name specified
+ * on the command line with "--checkpoint-file".
+ */
+static __inline void
+sim_checkpoint(void)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_CHECKPOINT);
+}
+
+
+/**
+ * Report whether or not various kinds of simulator tracing are enabled.
+ *
+ * @return The bitwise OR of these values:
+ *
+ *   SIM_TRACE_CYCLES (--trace-cycles),
+ *   SIM_TRACE_ROUTER (--trace-router),
+ *   SIM_TRACE_REGISTER_WRITES (--trace-register-writes),
+ *   SIM_TRACE_DISASM (--trace-disasm),
+ *   SIM_TRACE_STALL_INFO (--trace-stall-info)
+ *   SIM_TRACE_MEMORY_CONTROLLER (--trace-memory-controller)
+ *   SIM_TRACE_L2_CACHE (--trace-l2)
+ *   SIM_TRACE_LINES (--trace-lines)
+ */
+static __inline unsigned int
+sim_get_tracing(void)
+{
+  return __insn_mfspr(SPR_SIM_CONTROL) & SIM_TRACE_FLAG_MASK;
+}
+
+
+/**
+ * Turn on or off different kinds of simulator tracing.
+ *
+ * @param mask Either one of these special values:
+ *
+ *   SIM_TRACE_NONE (turns off tracing),
+ *   SIM_TRACE_ALL (turns on all possible tracing).
+ *
+ * or the bitwise OR of these values:
+ *
+ *   SIM_TRACE_CYCLES (--trace-cycles),
+ *   SIM_TRACE_ROUTER (--trace-router),
+ *   SIM_TRACE_REGISTER_WRITES (--trace-register-writes),
+ *   SIM_TRACE_DISASM (--trace-disasm),
+ *   SIM_TRACE_STALL_INFO (--trace-stall-info)
+ *   SIM_TRACE_MEMORY_CONTROLLER (--trace-memory-controller)
+ *   SIM_TRACE_L2_CACHE (--trace-l2)
+ *   SIM_TRACE_LINES (--trace-lines)
+ */
+static __inline void
+sim_set_tracing(unsigned int mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_TRACE_SPR_ARG(mask));
+}
+
+
+/**
+ * Request dumping of different kinds of simulator state.
+ *
+ * @param mask Either this special value:
+ *
+ *   SIM_DUMP_ALL (dump all known state)
+ *
+ * or the bitwise OR of these values:
+ *
+ *   SIM_DUMP_REGS (the register file),
+ *   SIM_DUMP_SPRS (the SPRs),
+ *   SIM_DUMP_ITLB (the iTLB),
+ *   SIM_DUMP_DTLB (the dTLB),
+ *   SIM_DUMP_L1I (the L1 I-cache),
+ *   SIM_DUMP_L1D (the L1 D-cache),
+ *   SIM_DUMP_L2 (the L2 cache),
+ *   SIM_DUMP_SNREGS (the switch register file),
+ *   SIM_DUMP_SNITLB (the switch iTLB),
+ *   SIM_DUMP_SNL1I (the switch L1 I-cache),
+ *   SIM_DUMP_BACKTRACE (the current backtrace)
+ */
+static __inline void
+sim_dump(unsigned int mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_DUMP_SPR_ARG(mask));
+}
+
+
+/**
+ * Print a string to the simulator stdout.
+ *
+ * @param str The string to be written; a newline is automatically added.
+ */
+static __inline void
+sim_print_string(const char* str)
+{
+  int i;
+  for (i = 0; str[i] != 0; i++)
+  {
+    __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
+                 (str[i] << _SIM_CONTROL_OPERATOR_BITS));
+  }
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
+               (SIM_PUTC_FLUSH_STRING << _SIM_CONTROL_OPERATOR_BITS));
+}
+
+
+/**
+ * Execute a simulator command string.
+ *
+ * Type 'sim help' at the tile-monitor prompt to learn what commands
+ * are available. Note the use of the tile-monitor "sim" command to
+ * pass commands to the simulator.
+ *
+ * The argument to sim_command() does not include the leading "sim"
+ * prefix used at the tile-monitor prompt; for example, you might call
+ * sim_command("trace disasm").
+ */
+static __inline void
+sim_command(const char* str)
+{
+  int c;
+  do
+  {
+    c = *str++;
+    __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_COMMAND |
+                 (c << _SIM_CONTROL_OPERATOR_BITS));
+  }
+  while (c);
+}
+
+
+
+#ifndef __DOXYGEN__
+
+/**
+ * The underlying implementation of "_sim_syscall()".
+ *
+ * We use extra "and" instructions to ensure that all the values
+ * we are passing to the simulator are actually valid in the registers
+ * (i.e. returned from memory) prior to the SIM_CONTROL spr.
+ */
+static __inline int _sim_syscall0(int val)
+{
+  long result;
+  __asm__ __volatile__ ("mtspr SIM_CONTROL, r0"
+                        : "=R00" (result) : "R00" (val));
+  return result;
+}
+
+static __inline int _sim_syscall1(int val, long arg1)
+{
+  long result;
+  __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
+                        : "=R00" (result) : "R00" (val), "R01" (arg1));
+  return result;
+}
+
+static __inline int _sim_syscall2(int val, long arg1, long arg2)
+{
+  long result;
+  __asm__ __volatile__ ("{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+                        : "=R00" (result)
+                        : "R00" (val), "R01" (arg1), "R02" (arg2));
+  return result;
+}
+
+/* Note that _sim_syscall3() and higher are technically at risk of
+   receiving an interrupt right before the mtspr bundle, in which case
+   the register values for arguments 3 and up may still be in flight
+   to the core from a stack frame reload. */
+
+static __inline int _sim_syscall3(int val, long arg1, long arg2, long arg3)
+{
+  long result;
+  __asm__ __volatile__ ("{ and zero, r3, r3 };"
+                        "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+                        : "=R00" (result)
+                        : "R00" (val), "R01" (arg1), "R02" (arg2),
+                          "R03" (arg3));
+  return result;
+}
+
+static __inline int _sim_syscall4(int val, long arg1, long arg2, long arg3,
+                                  long arg4)
+{
+  long result;
+  __asm__ __volatile__ ("{ and zero, r3, r4 };"
+                        "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+                        : "=R00" (result)
+                        : "R00" (val), "R01" (arg1), "R02" (arg2),
+                          "R03" (arg3), "R04" (arg4));
+  return result;
+}
+
+static __inline int _sim_syscall5(int val, long arg1, long arg2, long arg3,
+                                  long arg4, long arg5)
+{
+  long result;
+  __asm__ __volatile__ ("{ and zero, r3, r4; and zero, r5, r5 };"
+                        "{ and zero, r1, r2; mtspr SIM_CONTROL, r0 }"
+                        : "=R00" (result)
+                        : "R00" (val), "R01" (arg1), "R02" (arg2),
+                          "R03" (arg3), "R04" (arg4), "R05" (arg5));
+  return result;
+}
+
+
+/**
+ * Make a special syscall to the simulator itself, if running under
+ * simulation. This is used as the implementation of other functions
+ * and should not be used outside this file.
+ *
+ * @param syscall_num The simulator syscall number.
+ * @param nr The number of additional arguments provided.
+ *
+ * @return Varies by syscall.
+ */
+#define _sim_syscall(syscall_num, nr, args...) \
+  _sim_syscall##nr( \
+    ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS) | SIM_CONTROL_SYSCALL, args)
+
+
+/* Values for the "access_mask" parameters below. */
+#define SIM_WATCHPOINT_READ    1
+#define SIM_WATCHPOINT_WRITE   2
+#define SIM_WATCHPOINT_EXECUTE 4
+
+
+static __inline int
+sim_add_watchpoint(unsigned int process_id,
+                   unsigned long address,
+                   unsigned long size,
+                   unsigned int access_mask,
+                   unsigned long user_data)
+{
+  return _sim_syscall(SIM_SYSCALL_ADD_WATCHPOINT, 5, process_id,
+                      address, size, access_mask, user_data);
+}
+
+
+static __inline int
+sim_remove_watchpoint(unsigned int process_id,
+                      unsigned long address,
+                      unsigned long size,
+                      unsigned int access_mask,
+                      unsigned long user_data)
+{
+  return _sim_syscall(SIM_SYSCALL_REMOVE_WATCHPOINT, 5, process_id,
+                      address, size, access_mask, user_data);
+}
+
+
+/**
+ * Return value from sim_query_watchpoint.
+ */
+struct SimQueryWatchpointStatus
+{
+  /**
+   * 0 if a watchpoint fired, 1 if no watchpoint fired, or -1 for
+   * error (meaning a bad process_id).
+   */
+  int syscall_status;
+
+  /**
+   * The address of the watchpoint that fired (this is the address
+   * passed to sim_add_watchpoint, not an address within that range
+   * that actually triggered the watchpoint).
+   */
+  unsigned long address;
+
+  /** The arbitrary user_data installed by sim_add_watchpoint. */
+  unsigned long user_data;
+};
+
+
+static __inline struct SimQueryWatchpointStatus
+sim_query_watchpoint(unsigned int process_id)
+{
+  struct SimQueryWatchpointStatus status;
+  long val = SIM_CONTROL_SYSCALL |
+    (SIM_SYSCALL_QUERY_WATCHPOINT << _SIM_CONTROL_OPERATOR_BITS);
+  __asm__ __volatile__ ("{ and zero, r1, r1; mtspr SIM_CONTROL, r0 }"
+                        : "=R00" (status.syscall_status),
+                          "=R01" (status.address),
+                          "=R02" (status.user_data)
+                        : "R00" (val), "R01" (process_id));
+  return status;
+}
+
+
+/* On the simulator, confirm lines have been evicted everywhere. */
+static __inline void
+sim_validate_lines_evicted(unsigned long long pa, unsigned long length)
+{
+#ifdef __LP64__
+  _sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, 2, pa, length);
+#else
+  _sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, 4,
+               0 /* dummy */, (long)(pa), (long)(pa >> 32), length);
+#endif
+}
+
+
+#endif /* !__DOXYGEN__ */
+
+
+
+
+/**
+ * Modify the shaping parameters of a shim.
+ *
+ * @param shim The shim to modify. One of:
+ *   SIM_CONTROL_SHAPING_GBE_0
+ *   SIM_CONTROL_SHAPING_GBE_1
+ *   SIM_CONTROL_SHAPING_GBE_2
+ *   SIM_CONTROL_SHAPING_GBE_3
+ *   SIM_CONTROL_SHAPING_XGBE_0
+ *   SIM_CONTROL_SHAPING_XGBE_1
+ *
+ * @param type The type of shaping. This should be the same type of
+ *   shaping that is already in place on the shim. One of:
+ *   SIM_CONTROL_SHAPING_MULTIPLIER
+ *   SIM_CONTROL_SHAPING_PPS
+ *   SIM_CONTROL_SHAPING_BPS
+ *
+ * @param units The magnitude of the rate. One of:
+ *   SIM_CONTROL_SHAPING_UNITS_SINGLE
+ *   SIM_CONTROL_SHAPING_UNITS_KILO
+ *   SIM_CONTROL_SHAPING_UNITS_MEGA
+ *   SIM_CONTROL_SHAPING_UNITS_GIGA
+ *
+ * @param rate The rate to which to change it. This must fit in
+ *   SIM_CONTROL_SHAPING_RATE_BITS bits or a warning is issued and
+ *   the shaping is not changed.
+ *
+ * @return 0 if no problems were detected in the arguments to sim_set_shaping
+ *   or 1 if problems were detected (for example, rate does not fit in 17 bits).
+ */
+static __inline int
+sim_set_shaping(unsigned shim,
+                unsigned type,
+                unsigned units,
+                unsigned rate)
+{
+  if ((rate & ~((1 << SIM_CONTROL_SHAPING_RATE_BITS) - 1)) != 0)
+    return 1;
+
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_SHAPING_SPR_ARG(shim, type, units, rate));
+  return 0;
+}
+
+#ifdef __tilegx__
+
+/** Enable a set of mPIPE links. Pass a -1 link_mask to enable all links. */
+static __inline void
+sim_enable_mpipe_links(unsigned mpipe, unsigned long link_mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL,
+               (SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE |
+                (mpipe << 8) | (1 << 16) | ((uint_reg_t)link_mask << 32)));
+}
+
+/** Disable a set of mPIPE links. Pass a -1 link_mask to disable all links. */
+static __inline void
+sim_disable_mpipe_links(unsigned mpipe, unsigned long link_mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL,
+               (SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE |
+                (mpipe << 8) | (0 << 16) | ((uint_reg_t)link_mask << 32)));
+}
+
+#endif /* __tilegx__ */
+
+
+/*
+ * An API for changing "functional" mode.
+ */
+
+#ifndef __DOXYGEN__
+
+#define sim_enable_functional() \
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_ENABLE_FUNCTIONAL)
+
+#define sim_disable_functional() \
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_DISABLE_FUNCTIONAL)
+
+#endif /* __DOXYGEN__ */
+
+
+/*
+ * Profiler support.
+ */
+
+/**
+ * Turn profiling on for the current task.
+ *
+ * Note that this has no effect if run in an environment without
+ * profiling support (thus, the proper flags to the simulator must
+ * be supplied).
+ */
+static __inline void
+sim_profiler_enable(void)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_ENABLE);
+}
+
+
+/** Turn profiling off for the current task. */
+static __inline void
+sim_profiler_disable(void)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_DISABLE);
+}
+
+
+/**
+ * Turn profiling on or off for the current task.
+ *
+ * @param enabled If true, turns on profiling. If false, turns it off.
+ *
+ * Note that this has no effect if run in an environment without
+ * profiling support (thus, the proper flags to the simulator must
+ * be supplied).
+ */
+static __inline void
+sim_profiler_set_enabled(int enabled)
+{
+  int val =
+    enabled ? SIM_CONTROL_PROFILER_ENABLE : SIM_CONTROL_PROFILER_DISABLE;
+  __insn_mtspr(SPR_SIM_CONTROL, val);
+}
+
+
+/**
+ * Return true if and only if profiling is currently enabled
+ * for the current task.
+ *
+ * This returns false even if sim_profiler_enable() was called
+ * if the current execution environment does not support profiling.
+ */
+static __inline int
+sim_profiler_is_enabled(void)
+{
+  return ((__insn_mfspr(SPR_SIM_CONTROL) & SIM_PROFILER_ENABLED_MASK) != 0);
+}
+
+
+/**
+ * Reset profiling counters to zero for the current task.
+ *
+ * Resetting can be done while profiling is enabled. It does not affect
+ * the chip-wide profiling counters.
+ */
+static __inline void
+sim_profiler_clear(void)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PROFILER_CLEAR);
+}
+
+
+/**
+ * Enable specified chip-level profiling counters.
+ *
+ * Does not affect the per-task profiling counters.
+ *
+ * @param mask Either this special value:
+ *
+ *   SIM_CHIP_ALL (enables all chip-level components).
+ *
+ * or the bitwise OR of these values:
+ *
+ *   SIM_CHIP_MEMCTL (enable all memory controllers)
+ *   SIM_CHIP_XAUI (enable all XAUI controllers)
+ *   SIM_CHIP_MPIPE (enable all MPIPE controllers)
+ */
+static __inline void
+sim_profiler_chip_enable(unsigned int mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask));
+}
+
+
+/**
+ * Disable specified chip-level profiling counters.
+ *
+ * Does not affect the per-task profiling counters.
+ *
+ * @param mask Either this special value:
+ *
+ *   SIM_CHIP_ALL (disables all chip-level components).
+ *
+ * or the bitwise OR of these values:
+ *
+ *   SIM_CHIP_MEMCTL (disable all memory controllers)
+ *   SIM_CHIP_XAUI (disable all XAUI controllers)
+ *   SIM_CHIP_MPIPE (disable all MPIPE controllers)
+ */
+static __inline void
+sim_profiler_chip_disable(unsigned int mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask));
+}
+
+
+/**
+ * Reset specified chip-level profiling counters to zero.
+ *
+ * Does not affect the per-task profiling counters.
+ *
+ * @param mask Either this special value:
+ *
+ *   SIM_CHIP_ALL (clears all chip-level components).
+ *
+ * or the bitwise OR of these values:
+ *
+ *   SIM_CHIP_MEMCTL (clear all memory controllers)
+ *   SIM_CHIP_XAUI (clear all XAUI controllers)
+ *   SIM_CHIP_MPIPE (clear all MPIPE controllers)
+ */
+static __inline void
+sim_profiler_chip_clear(unsigned int mask)
+{
+  __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask));
+}
+
+
+/*
+ * Event support.
+ */
+
+#ifndef __DOXYGEN__
+
+static __inline void
+sim_event_begin(unsigned int x)
+{
+#if defined(__tile__) && !defined(__NO_EVENT_SPR__)
+  __insn_mtspr(SPR_EVENT_BEGIN, x);
+#endif
+}
+
+static __inline void
+sim_event_end(unsigned int x)
+{
+#if defined(__tile__) && !defined(__NO_EVENT_SPR__)
+  __insn_mtspr(SPR_EVENT_END, x);
+#endif
+}
+
+#endif /* !__DOXYGEN__ */
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* !__ARCH_SIM_H__ */
+
+/** @} */
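
The header added above is designed to be usable from both kernel and application code; a minimal sketch of how the API composes, using only functions and masks defined in the header (the wrapper function itself is hypothetical, not part of the commit):

    #include <arch/sim.h>

    void trace_region_of_interest(void)
    {
    	if (!sim_is_simulator())
    		return;	/* on real hardware these SPR writes are harmless no-ops */

    	sim_set_tracing(SIM_TRACE_CYCLES | SIM_TRACE_DISASM);
    	/* ... code of interest runs here, with tracing enabled ... */
    	sim_set_tracing(SIM_TRACE_NONE);

    	sim_print_string("region done");	/* flush appends the newline */
    }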
diff --git a/arch/tile/include/arch/sim_def.h b/arch/tile/include/arch/sim_def.h
index 6418fbde063e..7a17082c3773 100644
--- a/arch/tile/include/arch/sim_def.h
+++ b/arch/tile/include/arch/sim_def.h
@@ -1,477 +1,461 @@
1// Copyright 2010 Tilera Corporation. All Rights Reserved. 1/*
2// 2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3// This program is free software; you can redistribute it and/or 3 *
4// modify it under the terms of the GNU General Public License 4 * This program is free software; you can redistribute it and/or
5// as published by the Free Software Foundation, version 2. 5 * modify it under the terms of the GNU General Public License
6// 6 * as published by the Free Software Foundation, version 2.
7// This program is distributed in the hope that it will be useful, but 7 *
8// WITHOUT ANY WARRANTY; without even the implied warranty of 8 * This program is distributed in the hope that it will be useful, but
9// MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10// NON INFRINGEMENT. See the GNU General Public License for 10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11// more details. 11 * NON INFRINGEMENT. See the GNU General Public License for
12 12 * more details.
13//! @file 13 */
14//! 14
15//! Some low-level simulator definitions. 15/**
16//! 16 * @file
17 *
18 * Some low-level simulator definitions.
19 */
17 20
18#ifndef __ARCH_SIM_DEF_H__ 21#ifndef __ARCH_SIM_DEF_H__
19#define __ARCH_SIM_DEF_H__ 22#define __ARCH_SIM_DEF_H__
20 23
21 24
22//! Internal: the low bits of the SIM_CONTROL_* SPR values specify 25/**
23//! the operation to perform, and the remaining bits are 26 * Internal: the low bits of the SIM_CONTROL_* SPR values specify
24//! an operation-specific parameter (often unused). 27 * the operation to perform, and the remaining bits are
25//! 28 * an operation-specific parameter (often unused).
29 */
26#define _SIM_CONTROL_OPERATOR_BITS 8 30#define _SIM_CONTROL_OPERATOR_BITS 8
27 31
28 32
29//== Values which can be written to SPR_SIM_CONTROL. 33/*
34 * Values which can be written to SPR_SIM_CONTROL.
35 */
30 36
31//! If written to SPR_SIM_CONTROL, stops profiling. 37/** If written to SPR_SIM_CONTROL, stops profiling. */
32//!
33#define SIM_CONTROL_PROFILER_DISABLE 0 38#define SIM_CONTROL_PROFILER_DISABLE 0
34 39
35//! If written to SPR_SIM_CONTROL, starts profiling. 40/** If written to SPR_SIM_CONTROL, starts profiling. */
36//!
37#define SIM_CONTROL_PROFILER_ENABLE 1 41#define SIM_CONTROL_PROFILER_ENABLE 1
38 42
39//! If written to SPR_SIM_CONTROL, clears profiling counters. 43/** If written to SPR_SIM_CONTROL, clears profiling counters. */
40//!
41#define SIM_CONTROL_PROFILER_CLEAR 2 44#define SIM_CONTROL_PROFILER_CLEAR 2
42 45
43//! If written to SPR_SIM_CONTROL, checkpoints the simulator. 46/** If written to SPR_SIM_CONTROL, checkpoints the simulator. */
44//!
45#define SIM_CONTROL_CHECKPOINT 3 47#define SIM_CONTROL_CHECKPOINT 3
46 48
47//! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8), 49/**
48//! sets the tracing mask to the given mask. See "sim_set_tracing()". 50 * If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
49//! 51 * sets the tracing mask to the given mask. See "sim_set_tracing()".
52 */
50#define SIM_CONTROL_SET_TRACING 4 53#define SIM_CONTROL_SET_TRACING 4
51 54
52//! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8), 55/**
53//! dumps the requested items of machine state to the log. 56 * If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8),
54//! 57 * dumps the requested items of machine state to the log.
58 */
55#define SIM_CONTROL_DUMP 5 59#define SIM_CONTROL_DUMP 5
56 60
57//! If written to SPR_SIM_CONTROL, clears chip-level profiling counters. 61/** If written to SPR_SIM_CONTROL, clears chip-level profiling counters. */
58//!
59#define SIM_CONTROL_PROFILER_CHIP_CLEAR 6 62#define SIM_CONTROL_PROFILER_CHIP_CLEAR 6
60 63
61//! If written to SPR_SIM_CONTROL, disables chip-level profiling. 64/** If written to SPR_SIM_CONTROL, disables chip-level profiling. */
62//!
63#define SIM_CONTROL_PROFILER_CHIP_DISABLE 7 65#define SIM_CONTROL_PROFILER_CHIP_DISABLE 7
64 66
65//! If written to SPR_SIM_CONTROL, enables chip-level profiling. 67/** If written to SPR_SIM_CONTROL, enables chip-level profiling. */
66//!
67#define SIM_CONTROL_PROFILER_CHIP_ENABLE 8 68#define SIM_CONTROL_PROFILER_CHIP_ENABLE 8
68 69
69//! If written to SPR_SIM_CONTROL, enables chip-level functional mode 70/** If written to SPR_SIM_CONTROL, enables chip-level functional mode */
70//!
71#define SIM_CONTROL_ENABLE_FUNCTIONAL 9 71#define SIM_CONTROL_ENABLE_FUNCTIONAL 9
72 72
73//! If written to SPR_SIM_CONTROL, disables chip-level functional mode. 73/** If written to SPR_SIM_CONTROL, disables chip-level functional mode. */
74//!
75#define SIM_CONTROL_DISABLE_FUNCTIONAL 10 74#define SIM_CONTROL_DISABLE_FUNCTIONAL 10
76 75
77//! If written to SPR_SIM_CONTROL, enables chip-level functional mode. 76/**
78//! All tiles must perform this write for functional mode to be enabled. 77 * If written to SPR_SIM_CONTROL, enables chip-level functional mode.
79//! Ignored in naked boot mode unless --functional is specified. 78 * All tiles must perform this write for functional mode to be enabled.
80//! WARNING: Only the hypervisor startup code should use this! 79 * Ignored in naked boot mode unless --functional is specified.
81//! 80 * WARNING: Only the hypervisor startup code should use this!
81 */
82#define SIM_CONTROL_ENABLE_FUNCTIONAL_BARRIER 11 82#define SIM_CONTROL_ENABLE_FUNCTIONAL_BARRIER 11
83 83
84//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), 84/**
85//! writes a string directly to the simulator output. Written to once for 85 * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
86//! each character in the string, plus a final NUL. Instead of NUL, 86 * writes a string directly to the simulator output. Written to once for
87//! you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY". 87 * each character in the string, plus a final NUL. Instead of NUL,
88//! 88 * you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY".
89// ISSUE: Document the meaning of "newline", and the handling of NUL. 89 */
90// 90/* ISSUE: Document the meaning of "newline", and the handling of NUL. */
91#define SIM_CONTROL_PUTC 12 91#define SIM_CONTROL_PUTC 12
92 92
93//! If written to SPR_SIM_CONTROL, clears the --grind-coherence state for 93/**
94//! this core. This is intended to be used before a loop that will 94 * If written to SPR_SIM_CONTROL, clears the --grind-coherence state for
95//! invalidate the cache by loading new data and evicting all current data. 95 * this core. This is intended to be used before a loop that will
96//! Generally speaking, this API should only be used by system code. 96 * invalidate the cache by loading new data and evicting all current data.
97//! 97 * Generally speaking, this API should only be used by system code.
98 */
98#define SIM_CONTROL_GRINDER_CLEAR 13 99#define SIM_CONTROL_GRINDER_CLEAR 13
99 100
100//! If written to SPR_SIM_CONTROL, shuts down the simulator. 101/** If written to SPR_SIM_CONTROL, shuts down the simulator. */
101//!
102#define SIM_CONTROL_SHUTDOWN 14 102#define SIM_CONTROL_SHUTDOWN 14
103 103
104//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), 104/**
105//! indicates that a fork syscall just created the given process. 105 * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
106//! 106 * indicates that a fork syscall just created the given process.
107 */
107#define SIM_CONTROL_OS_FORK 15 108#define SIM_CONTROL_OS_FORK 15
108 109
109//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), 110/**
110//! indicates that an exit syscall was just executed by the given process. 111 * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
111//! 112 * indicates that an exit syscall was just executed by the given process.
113 */
112#define SIM_CONTROL_OS_EXIT 16 114#define SIM_CONTROL_OS_EXIT 16
113 115
114//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), 116/**
115//! indicates that the OS just switched to the given process. 117 * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
116//! 118 * indicates that the OS just switched to the given process.
119 */
117#define SIM_CONTROL_OS_SWITCH 17 120#define SIM_CONTROL_OS_SWITCH 17
118 121
119//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), 122/**
120//! indicates that an exec syscall was just executed. Written to once for 123 * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
121//! each character in the executable name, plus a final NUL. 124 * indicates that an exec syscall was just executed. Written to once for
122//! 125 * each character in the executable name, plus a final NUL.
126 */
123#define SIM_CONTROL_OS_EXEC 18 127#define SIM_CONTROL_OS_EXEC 18
124 128
125//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), 129/**
126//! indicates that an interpreter (PT_INTERP) was loaded. Written to once 130 * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
127//! for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a 131 * indicates that an interpreter (PT_INTERP) was loaded. Written to once
128//! hex load address starting with "0x", and "PATH" is the executable name. 132 * for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a
129//! 133 * hex load address starting with "0x", and "PATH" is the executable name.
134 */
130#define SIM_CONTROL_OS_INTERP 19 135#define SIM_CONTROL_OS_INTERP 19
131 136
132//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), 137/**
133//! indicates that a dll was loaded. Written to once for each character 138 * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
134//! in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load 139 * indicates that a dll was loaded. Written to once for each character
135//! address starting with "0x", and "PATH" is the executable name. 140 * in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load
136//! 141 * address starting with "0x", and "PATH" is the executable name.
142 */
137#define SIM_CONTROL_DLOPEN 20 143#define SIM_CONTROL_DLOPEN 20
138 144
139//! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), 145/**
140//! indicates that a dll was unloaded. Written to once for each character 146 * If written to SPR_SIM_CONTROL, combined with a character (shifted by 8),
141//! in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load 147 * indicates that a dll was unloaded. Written to once for each character
142//! address starting with "0x". 148 * in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load
143//! 149 * address starting with "0x".
150 */
144#define SIM_CONTROL_DLCLOSE 21 151#define SIM_CONTROL_DLCLOSE 21
145 152
146//! If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8), 153/**
147//! indicates whether to allow data reads to remotely-cached 154 * If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8),
148//! dirty cache lines to be cached locally without grinder warnings or 155 * indicates whether to allow data reads to remotely-cached
149//! assertions (used by Linux kernel fast memcpy). 156 * dirty cache lines to be cached locally without grinder warnings or
150//! 157 * assertions (used by Linux kernel fast memcpy).
158 */
151#define SIM_CONTROL_ALLOW_MULTIPLE_CACHING 22 159#define SIM_CONTROL_ALLOW_MULTIPLE_CACHING 22
152 160
153//! If written to SPR_SIM_CONTROL, enables memory tracing. 161/** If written to SPR_SIM_CONTROL, enables memory tracing. */
154//!
155#define SIM_CONTROL_ENABLE_MEM_LOGGING 23 162#define SIM_CONTROL_ENABLE_MEM_LOGGING 23
156 163
157//! If written to SPR_SIM_CONTROL, disables memory tracing. 164/** If written to SPR_SIM_CONTROL, disables memory tracing. */
158//!
159#define SIM_CONTROL_DISABLE_MEM_LOGGING 24 165#define SIM_CONTROL_DISABLE_MEM_LOGGING 24
160 166
161//! If written to SPR_SIM_CONTROL, changes the shaping parameters of one of 167/**
162//! the gbe or xgbe shims. Must specify the shim id, the type, the units, and 168 * If written to SPR_SIM_CONTROL, changes the shaping parameters of one of
163//! the rate, as defined in SIM_SHAPING_SPR_ARG. 169 * the gbe or xgbe shims. Must specify the shim id, the type, the units, and
164//! 170 * the rate, as defined in SIM_SHAPING_SPR_ARG.
171 */
165#define SIM_CONTROL_SHAPING 25 172#define SIM_CONTROL_SHAPING 25
166 173
167//! If written to SPR_SIM_CONTROL, combined with character (shifted by 8), 174/**
168//! requests that a simulator command be executed. Written to once for each 175 * If written to SPR_SIM_CONTROL, combined with character (shifted by 8),
169//! character in the command, plus a final NUL. 176 * requests that a simulator command be executed. Written to once for each
170//! 177 * character in the command, plus a final NUL.
178 */
171#define SIM_CONTROL_COMMAND 26 179#define SIM_CONTROL_COMMAND 26
172 180
173//! If written to SPR_SIM_CONTROL, indicates that the simulated system 181/**
174//! is panicking, to allow debugging via --debug-on-panic. 182 * If written to SPR_SIM_CONTROL, indicates that the simulated system
175//! 183 * is panicking, to allow debugging via --debug-on-panic.
184 */
176#define SIM_CONTROL_PANIC 27 185#define SIM_CONTROL_PANIC 27
177 186
178//! If written to SPR_SIM_CONTROL, triggers a simulator syscall. 187/**
179//! See "sim_syscall()" for more info. 188 * If written to SPR_SIM_CONTROL, triggers a simulator syscall.
180//! 189 * See "sim_syscall()" for more info.
190 */
181#define SIM_CONTROL_SYSCALL 32 191#define SIM_CONTROL_SYSCALL 32
182 192
183//! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), 193/**
184//! provides the pid that subsequent SIM_CONTROL_OS_FORK writes should 194 * If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8),
185//! use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH. 195 * provides the pid that subsequent SIM_CONTROL_OS_FORK writes should
186//! 196 * use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH.
197 */
187#define SIM_CONTROL_OS_FORK_PARENT 33 198#define SIM_CONTROL_OS_FORK_PARENT 33
188 199
189//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number 200/**
190//! (shifted by 8), clears the pending magic data section. The cleared 201 * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
191//! pending magic data section and any subsequently appended magic bytes 202 * (shifted by 8), clears the pending magic data section. The cleared
192//! will only take effect when the classifier blast programmer is run. 203 * pending magic data section and any subsequently appended magic bytes
204 * will only take effect when the classifier blast programmer is run.
205 */
193#define SIM_CONTROL_CLEAR_MPIPE_MAGIC_BYTES 34 206#define SIM_CONTROL_CLEAR_MPIPE_MAGIC_BYTES 34
194 207
195//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number 208/**
196//! (shifted by 8) and a byte of data (shifted by 16), appends that byte 209 * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
197//! to the shim's pending magic data section. The pending magic data 210 * (shifted by 8) and a byte of data (shifted by 16), appends that byte
198//! section takes effect when the classifier blast programmer is run. 211 * to the shim's pending magic data section. The pending magic data
212 * section takes effect when the classifier blast programmer is run.
213 */
199#define SIM_CONTROL_APPEND_MPIPE_MAGIC_BYTE 35 214#define SIM_CONTROL_APPEND_MPIPE_MAGIC_BYTE 35
200 215
201//! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number 216/**
202//! (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a 217 * If written to SPR_SIM_CONTROL, combined with a mPIPE shim number
203//! mask of links (shifted by 32), enable or disable the corresponding 218 * (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a
204//! mPIPE links. 219 * mask of links (shifted by 32), enable or disable the corresponding
220 * mPIPE links.
221 */
205#define SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE 36 222#define SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE 36
206 223
207//== Syscall numbers for use with "sim_syscall()".
208 224
209//! Syscall number for sim_add_watchpoint(). 225/*
210//! 226 * Syscall numbers for use with "sim_syscall()".
227 */
228
229/** Syscall number for sim_add_watchpoint(). */
211#define SIM_SYSCALL_ADD_WATCHPOINT 2 230#define SIM_SYSCALL_ADD_WATCHPOINT 2
212 231
213//! Syscall number for sim_remove_watchpoint(). 232/** Syscall number for sim_remove_watchpoint(). */
214//!
215#define SIM_SYSCALL_REMOVE_WATCHPOINT 3 233#define SIM_SYSCALL_REMOVE_WATCHPOINT 3
216 234
217//! Syscall number for sim_query_watchpoint(). 235/** Syscall number for sim_query_watchpoint(). */
218//!
219#define SIM_SYSCALL_QUERY_WATCHPOINT 4 236#define SIM_SYSCALL_QUERY_WATCHPOINT 4
220 237
221//! Syscall number that asserts that the cache lines whose 64-bit PA 238/**
222//! is passed as the second argument to sim_syscall(), and over a 239 * Syscall number that asserts that the cache lines whose 64-bit PA
223//! range passed as the third argument, are no longer in cache. 240 * is passed as the second argument to sim_syscall(), and over a
224//! The simulator raises an error if this is not the case. 241 * range passed as the third argument, are no longer in cache.
225//! 242 * The simulator raises an error if this is not the case.
243 */
226#define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5 244#define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5
227 245
228 246
229//== Bit masks which can be shifted by 8, combined with 247/*
230//== SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL. 248 * Bit masks which can be shifted by 8, combined with
249 * SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL.
250 */
231 251
232//! @addtogroup arch_sim 252/**
233//! @{ 253 * @addtogroup arch_sim
254 * @{
255 */
234 256
235//! Enable --trace-cycle when passed to simulator_set_tracing(). 257/** Enable --trace-cycle when passed to simulator_set_tracing(). */
236//!
237#define SIM_TRACE_CYCLES 0x01 258#define SIM_TRACE_CYCLES 0x01
238 259
239//! Enable --trace-router when passed to simulator_set_tracing(). 260/** Enable --trace-router when passed to simulator_set_tracing(). */
240//!
241#define SIM_TRACE_ROUTER 0x02 261#define SIM_TRACE_ROUTER 0x02
242 262
243//! Enable --trace-register-writes when passed to simulator_set_tracing(). 263/** Enable --trace-register-writes when passed to simulator_set_tracing(). */
244//!
245#define SIM_TRACE_REGISTER_WRITES 0x04 264#define SIM_TRACE_REGISTER_WRITES 0x04
246 265
247//! Enable --trace-disasm when passed to simulator_set_tracing(). 266/** Enable --trace-disasm when passed to simulator_set_tracing(). */
248//!
249#define SIM_TRACE_DISASM 0x08 267#define SIM_TRACE_DISASM 0x08
250 268
251//! Enable --trace-stall-info when passed to simulator_set_tracing(). 269/** Enable --trace-stall-info when passed to simulator_set_tracing(). */
252//!
253#define SIM_TRACE_STALL_INFO 0x10 270#define SIM_TRACE_STALL_INFO 0x10
254 271
255//! Enable --trace-memory-controller when passed to simulator_set_tracing(). 272/** Enable --trace-memory-controller when passed to simulator_set_tracing(). */
256//!
257#define SIM_TRACE_MEMORY_CONTROLLER 0x20 273#define SIM_TRACE_MEMORY_CONTROLLER 0x20
258 274
259//! Enable --trace-l2 when passed to simulator_set_tracing(). 275/** Enable --trace-l2 when passed to simulator_set_tracing(). */
260//!
261#define SIM_TRACE_L2_CACHE 0x40 276#define SIM_TRACE_L2_CACHE 0x40
262 277
263//! Enable --trace-lines when passed to simulator_set_tracing(). 278/** Enable --trace-lines when passed to simulator_set_tracing(). */
264//!
265#define SIM_TRACE_LINES 0x80 279#define SIM_TRACE_LINES 0x80
266 280
267//! Turn off all tracing when passed to simulator_set_tracing(). 281/** Turn off all tracing when passed to simulator_set_tracing(). */
268//!
269#define SIM_TRACE_NONE 0 282#define SIM_TRACE_NONE 0
270 283
271//! Turn on all tracing when passed to simulator_set_tracing(). 284/** Turn on all tracing when passed to simulator_set_tracing(). */
272//!
273#define SIM_TRACE_ALL (-1) 285#define SIM_TRACE_ALL (-1)
274 286
275//! @} 287/** @} */
276 288
277//! Computes the value to write to SPR_SIM_CONTROL to set tracing flags. 289/** Computes the value to write to SPR_SIM_CONTROL to set tracing flags. */
278//!
279#define SIM_TRACE_SPR_ARG(mask) \ 290#define SIM_TRACE_SPR_ARG(mask) \
280 (SIM_CONTROL_SET_TRACING | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) 291 (SIM_CONTROL_SET_TRACING | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
281 292
282 293
283//== Bit masks which can be shifted by 8, combined with 294/*
284//== SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL. 295 * Bit masks which can be shifted by 8, combined with
296 * SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL.
297 */
285 298
286//! @addtogroup arch_sim 299/**
287//! @{ 300 * @addtogroup arch_sim
301 * @{
302 */
288 303
289//! Dump the general-purpose registers. 304/** Dump the general-purpose registers. */
290//!
291#define SIM_DUMP_REGS 0x001 305#define SIM_DUMP_REGS 0x001
292 306
293//! Dump the SPRs. 307/** Dump the SPRs. */
294//!
295#define SIM_DUMP_SPRS 0x002 308#define SIM_DUMP_SPRS 0x002
296 309
297//! Dump the ITLB. 310/** Dump the ITLB. */
298//!
299#define SIM_DUMP_ITLB 0x004 311#define SIM_DUMP_ITLB 0x004
300 312
301//! Dump the DTLB. 313/** Dump the DTLB. */
302//!
303#define SIM_DUMP_DTLB 0x008 314#define SIM_DUMP_DTLB 0x008
304 315
305//! Dump the L1 I-cache. 316/** Dump the L1 I-cache. */
306//!
307#define SIM_DUMP_L1I 0x010 317#define SIM_DUMP_L1I 0x010
308 318
309//! Dump the L1 D-cache. 319/** Dump the L1 D-cache. */
310//!
311#define SIM_DUMP_L1D 0x020 320#define SIM_DUMP_L1D 0x020
312 321
313//! Dump the L2 cache. 322/** Dump the L2 cache. */
314//!
315#define SIM_DUMP_L2 0x040 323#define SIM_DUMP_L2 0x040
316 324
317//! Dump the switch registers. 325/** Dump the switch registers. */
318//!
319#define SIM_DUMP_SNREGS 0x080 326#define SIM_DUMP_SNREGS 0x080
320 327
321//! Dump the switch ITLB. 328/** Dump the switch ITLB. */
322//!
323#define SIM_DUMP_SNITLB 0x100 329#define SIM_DUMP_SNITLB 0x100
324 330
325//! Dump the switch L1 I-cache. 331/** Dump the switch L1 I-cache. */
326//!
327#define SIM_DUMP_SNL1I 0x200 332#define SIM_DUMP_SNL1I 0x200
328 333
329//! Dump the current backtrace. 334/** Dump the current backtrace. */
330//!
331#define SIM_DUMP_BACKTRACE 0x400 335#define SIM_DUMP_BACKTRACE 0x400
332 336
333//! Only dump valid lines in caches. 337/** Only dump valid lines in caches. */
334//!
335#define SIM_DUMP_VALID_LINES 0x800 338#define SIM_DUMP_VALID_LINES 0x800
336 339
337//! Dump everything that is dumpable. 340/** Dump everything that is dumpable. */
338//!
339#define SIM_DUMP_ALL (-1 & ~SIM_DUMP_VALID_LINES) 341#define SIM_DUMP_ALL (-1 & ~SIM_DUMP_VALID_LINES)
340 342
341// @} 343/** @} */
342 344
343//! Computes the value to write to SPR_SIM_CONTROL to dump machine state. 345/** Computes the value to write to SPR_SIM_CONTROL to dump machine state. */
344//!
345#define SIM_DUMP_SPR_ARG(mask) \ 346#define SIM_DUMP_SPR_ARG(mask) \
346 (SIM_CONTROL_DUMP | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) 347 (SIM_CONTROL_DUMP | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
347 348
348 349
349//== Bit masks which can be shifted by 8, combined with 350/*
350//== SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL. 351 * Bit masks which can be shifted by 8, combined with
352 * SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL.
353 */
351 354
352//! @addtogroup arch_sim 355/**
353//! @{ 356 * @addtogroup arch_sim
357 * @{
358 */
354 359
355//! Use with with SIM_PROFILER_CHIP_xxx to control the memory controllers. 360/** Use with with SIM_PROFILER_CHIP_xxx to control the memory controllers. */
356//!
357#define SIM_CHIP_MEMCTL 0x001 361#define SIM_CHIP_MEMCTL 0x001
358 362
359//! Use with with SIM_PROFILER_CHIP_xxx to control the XAUI interface. 363/** Use with with SIM_PROFILER_CHIP_xxx to control the XAUI interface. */
360//!
361#define SIM_CHIP_XAUI 0x002 364#define SIM_CHIP_XAUI 0x002
362 365
363//! Use with with SIM_PROFILER_CHIP_xxx to control the PCIe interface. 366/** Use with with SIM_PROFILER_CHIP_xxx to control the PCIe interface. */
364//!
365#define SIM_CHIP_PCIE 0x004 367#define SIM_CHIP_PCIE 0x004
366 368
367//! Use with with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. 369/** Use with with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. */
368//!
369#define SIM_CHIP_MPIPE 0x008 370#define SIM_CHIP_MPIPE 0x008
370 371
371//! Reference all chip devices. 372/** Use with with SIM_PROFILER_CHIP_xxx to control the TRIO interface. */
372//! 373#define SIM_CHIP_TRIO 0x010
374
375/** Reference all chip devices. */
373#define SIM_CHIP_ALL (-1) 376#define SIM_CHIP_ALL (-1)
374 377
375//! @} 378/** @} */
376 379
377//! Computes the value to write to SPR_SIM_CONTROL to clear chip statistics. 380/** Computes the value to write to SPR_SIM_CONTROL to clear chip statistics. */
378//!
379#define SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask) \ 381#define SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask) \
380 (SIM_CONTROL_PROFILER_CHIP_CLEAR | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) 382 (SIM_CONTROL_PROFILER_CHIP_CLEAR | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
381 383
382//! Computes the value to write to SPR_SIM_CONTROL to disable chip statistics. 384/** Computes the value to write to SPR_SIM_CONTROL to disable chip statistics.*/
383//!
384#define SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask) \ 385#define SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask) \
385 (SIM_CONTROL_PROFILER_CHIP_DISABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) 386 (SIM_CONTROL_PROFILER_CHIP_DISABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
386 387
387//! Computes the value to write to SPR_SIM_CONTROL to enable chip statistics. 388/** Computes the value to write to SPR_SIM_CONTROL to enable chip statistics. */
388//!
389#define SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask) \ 389#define SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask) \
390 (SIM_CONTROL_PROFILER_CHIP_ENABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) 390 (SIM_CONTROL_PROFILER_CHIP_ENABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS))
391 391
392 392
393 393
394// Shim bitrate controls. 394/* Shim bitrate controls. */
395 395
396//! The number of bits used to store the shim id. 396/** The number of bits used to store the shim id. */
397//!
398#define SIM_CONTROL_SHAPING_SHIM_ID_BITS 3 397#define SIM_CONTROL_SHAPING_SHIM_ID_BITS 3
399 398
400//! @addtogroup arch_sim 399/**
401//! @{ 400 * @addtogroup arch_sim
401 * @{
402 */
402 403
403//! Change the gbe 0 bitrate. 404/** Change the gbe 0 bitrate. */
404//!
405#define SIM_CONTROL_SHAPING_GBE_0 0x0 405#define SIM_CONTROL_SHAPING_GBE_0 0x0
406 406
407//! Change the gbe 1 bitrate. 407/** Change the gbe 1 bitrate. */
408//!
409#define SIM_CONTROL_SHAPING_GBE_1 0x1 408#define SIM_CONTROL_SHAPING_GBE_1 0x1
410 409
411//! Change the gbe 2 bitrate. 410/** Change the gbe 2 bitrate. */
412//!
413#define SIM_CONTROL_SHAPING_GBE_2 0x2 411#define SIM_CONTROL_SHAPING_GBE_2 0x2
414 412
415//! Change the gbe 3 bitrate. 413/** Change the gbe 3 bitrate. */
416//!
417#define SIM_CONTROL_SHAPING_GBE_3 0x3 414#define SIM_CONTROL_SHAPING_GBE_3 0x3
418 415
419//! Change the xgbe 0 bitrate. 416/** Change the xgbe 0 bitrate. */
420//!
421#define SIM_CONTROL_SHAPING_XGBE_0 0x4 417#define SIM_CONTROL_SHAPING_XGBE_0 0x4
422 418
423//! Change the xgbe 1 bitrate. 419/** Change the xgbe 1 bitrate. */
424//!
425#define SIM_CONTROL_SHAPING_XGBE_1 0x5 420#define SIM_CONTROL_SHAPING_XGBE_1 0x5
426 421
427//! The type of shaping to do. 422/** The type of shaping to do. */
428//!
429#define SIM_CONTROL_SHAPING_TYPE_BITS 2 423#define SIM_CONTROL_SHAPING_TYPE_BITS 2
430 424
431//! Control the multiplier. 425/** Control the multiplier. */
432//!
 #define SIM_CONTROL_SHAPING_MULTIPLIER 0
 
-//! Control the PPS.
-//!
+/** Control the PPS. */
 #define SIM_CONTROL_SHAPING_PPS 1
 
-//! Control the BPS.
-//!
+/** Control the BPS. */
 #define SIM_CONTROL_SHAPING_BPS 2
 
-//! The number of bits for the units for the shaping parameter.
-//!
+/** The number of bits for the units for the shaping parameter. */
 #define SIM_CONTROL_SHAPING_UNITS_BITS 2
 
-//! Provide a number in single units.
-//!
+/** Provide a number in single units. */
 #define SIM_CONTROL_SHAPING_UNITS_SINGLE 0
 
-//! Provide a number in kilo units.
-//!
+/** Provide a number in kilo units. */
 #define SIM_CONTROL_SHAPING_UNITS_KILO 1
 
-//! Provide a number in mega units.
-//!
+/** Provide a number in mega units. */
 #define SIM_CONTROL_SHAPING_UNITS_MEGA 2
 
-//! Provide a number in giga units.
-//!
+/** Provide a number in giga units. */
 #define SIM_CONTROL_SHAPING_UNITS_GIGA 3
 
-// @}
+/** @} */
 
-//! How many bits are available for the rate.
-//!
+/** How many bits are available for the rate. */
 #define SIM_CONTROL_SHAPING_RATE_BITS \
   (32 - (_SIM_CONTROL_OPERATOR_BITS + \
          SIM_CONTROL_SHAPING_SHIM_ID_BITS + \
          SIM_CONTROL_SHAPING_TYPE_BITS + \
          SIM_CONTROL_SHAPING_UNITS_BITS))
 
-//! Computes the value to write to SPR_SIM_CONTROL to change a bitrate.
-//!
+/** Computes the value to write to SPR_SIM_CONTROL to change a bitrate. */
 #define SIM_SHAPING_SPR_ARG(shim, type, units, rate) \
   (SIM_CONTROL_SHAPING | \
    ((shim) | \
@@ -483,30 +467,36 @@
          SIM_CONTROL_SHAPING_UNITS_BITS))) << _SIM_CONTROL_OPERATOR_BITS)
 
 
-//== Values returned when reading SPR_SIM_CONTROL.
-// ISSUE: These names should share a longer common prefix.
+/*
+ * Values returned when reading SPR_SIM_CONTROL.
+ * ISSUE: These names should share a longer common prefix.
+ */
 
-//! When reading SPR_SIM_CONTROL, the mask of simulator tracing bits
-//! (SIM_TRACE_xxx values).
-//!
+/**
+ * When reading SPR_SIM_CONTROL, the mask of simulator tracing bits
+ * (SIM_TRACE_xxx values).
+ */
 #define SIM_TRACE_FLAG_MASK 0xFFFF
 
-//! When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled.
-//!
+/** When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled. */
 #define SIM_PROFILER_ENABLED_MASK 0x10000
 
 
-//== Special arguments for "SIM_CONTROL_PUTC".
+/*
+ * Special arguments for "SIM_CONTROL_PUTC".
+ */
 
-//! Flag value for forcing a PUTC string-flush, including
-//! coordinate/cycle prefix and newline.
-//!
+/**
+ * Flag value for forcing a PUTC string-flush, including
+ * coordinate/cycle prefix and newline.
+ */
#define SIM_PUTC_FLUSH_STRING 0x100
 
-//! Flag value for forcing a PUTC binary-data-flush, which skips the
-//! prefix and does not append a newline.
-//!
+/**
+ * Flag value for forcing a PUTC binary-data-flush, which skips the
+ * prefix and does not append a newline.
+ */
 #define SIM_PUTC_FLUSH_BINARY 0x101
 
 
-#endif //__ARCH_SIM_DEF_H__
+#endif /* __ARCH_SIM_DEF_H__ */
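As a usage sketch (not part of this patch), code running under the Tilera simulator could compose a shaping value with SIM_SHAPING_SPR_ARG() and write it to SPR_SIM_CONTROL. The shim id (0) and rate (100) below are illustrative values, and the inclusion of <arch/spr_def.h> for SPR_SIM_CONTROL is an assumption:

/*
 * Sketch: throttle a hypothetical shim to 100 mega-bits/sec.  Only
 * meaningful under the simulator; values are made up for illustration.
 */
#include <arch/sim_def.h>
#include <arch/spr_def.h>	/* assumed source of SPR_SIM_CONTROL */

static inline void sim_shape_example(void)
{
	__insn_mtspr(SPR_SIM_CONTROL,
		     SIM_SHAPING_SPR_ARG(0,	/* shim id (illustrative) */
					 SIM_CONTROL_SHAPING_BPS,
					 SIM_CONTROL_SHAPING_UNITS_MEGA,
					 100));	/* rate (illustrative) */
}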
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h
index c8fdbd9a45e6..442fcba0d122 100644
--- a/arch/tile/include/arch/spr_def.h
+++ b/arch/tile/include/arch/spr_def.h
@@ -12,8 +12,93 @@
  * more details.
  */
 
+/*
+ * In addition to including the proper base SPR definition file, depending
+ * on machine architecture, this file defines several macros which allow
+ * kernel code to use protection-level dependent SPRs without worrying
+ * about which PL it's running at.  In these macros, the PL that the SPR
+ * or interrupt number applies to is replaced by K.
+ */
+
+#if CONFIG_KERNEL_PL != 1 && CONFIG_KERNEL_PL != 2
+#error CONFIG_KERNEL_PL must be 1 or 2
+#endif
+
+/* Concatenate 4 strings. */
+#define __concat4(a, b, c, d) a ## b ## c ## d
+#define _concat4(a, b, c, d) __concat4(a, b, c, d)
+
 #ifdef __tilegx__
 #include <arch/spr_def_64.h>
+
+/* TILE-Gx dependent, protection-level dependent SPRs. */
+
+#define SPR_INTERRUPT_MASK_K \
+	_concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_MASK_SET_K \
+	_concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_MASK_RESET_K \
+	_concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_INTERRUPT_VECTOR_BASE_K \
+	_concat4(SPR_INTERRUPT_VECTOR_BASE_, CONFIG_KERNEL_PL,,)
+
+#define SPR_IPI_MASK_K \
+	_concat4(SPR_IPI_MASK_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_RESET_K \
+	_concat4(SPR_IPI_MASK_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_SET_K \
+	_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_K \
+	_concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_RESET_K \
+	_concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_MASK_SET_K \
+	_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define INT_IPI_K \
+	_concat4(INT_IPI_, CONFIG_KERNEL_PL,,)
+
+#define SPR_SINGLE_STEP_CONTROL_K \
+	_concat4(SPR_SINGLE_STEP_CONTROL_, CONFIG_KERNEL_PL,,)
+#define SPR_SINGLE_STEP_EN_K_K \
+	_concat4(SPR_SINGLE_STEP_EN_, CONFIG_KERNEL_PL, _, CONFIG_KERNEL_PL)
+#define INT_SINGLE_STEP_K \
+	_concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,)
+
 #else
 #include <arch/spr_def_32.h>
+
+/* TILEPro dependent, protection-level dependent SPRs. */
+
+#define SPR_INTERRUPT_MASK_K_0 \
+	_concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_K_1 \
+	_concat4(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTERRUPT_MASK_SET_K_0 \
+	_concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_SET_K_1 \
+	_concat4(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTERRUPT_MASK_RESET_K_0 \
+	_concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _0,)
+#define SPR_INTERRUPT_MASK_RESET_K_1 \
+	_concat4(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL, _1,)
+
 #endif
+
+/* Generic protection-level dependent SPRs. */
+
+#define SPR_SYSTEM_SAVE_K_0 \
+	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0,)
+#define SPR_SYSTEM_SAVE_K_1 \
+	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _1,)
+#define SPR_SYSTEM_SAVE_K_2 \
+	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _2,)
+#define SPR_SYSTEM_SAVE_K_3 \
+	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _3,)
+#define SPR_EX_CONTEXT_K_0 \
+	_concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _0,)
+#define SPR_EX_CONTEXT_K_1 \
+	_concat4(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _1,)
+#define SPR_INTCTRL_K_STATUS \
+	_concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,)
+#define INT_INTCTRL_K \
+	_concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,)
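The two-level _concat4()/__concat4() pair is needed because ## pastes its arguments before macro expansion; the extra level forces CONFIG_KERNEL_PL to expand to 1 or 2 first. A standalone illustration in hosted C (not kernel code; the SPR value is taken from the <arch/spr_def_32.h> hunk below, and the empty fourth argument is a C99 feature accepted by GCC):

#include <stdio.h>

#define __concat4(a, b, c, d) a ## b ## c ## d
#define _concat4(a, b, c, d) __concat4(a, b, c, d)

#define CONFIG_KERNEL_PL 1
#define SPR_SYSTEM_SAVE_1_1 0x4901	/* value from <arch/spr_def_32.h> */

/* Expands to SPR_SYSTEM_SAVE_1_1; with CONFIG_KERNEL_PL == 2 it would
 * name SPR_SYSTEM_SAVE_2_1 instead. */
#define SPR_SYSTEM_SAVE_K_1 \
	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _1,)

int main(void)
{
	printf("SPR_SYSTEM_SAVE_K_1 = %#x\n", SPR_SYSTEM_SAVE_K_1); /* 0x4901 */
	return 0;
}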
diff --git a/arch/tile/include/arch/spr_def_32.h b/arch/tile/include/arch/spr_def_32.h
index b4fc06864df6..bbc1f4c924ee 100644
--- a/arch/tile/include/arch/spr_def_32.h
+++ b/arch/tile/include/arch/spr_def_32.h
@@ -56,58 +56,93 @@
 #define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
 #define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
 #define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
+#define SPR_EX_CONTEXT_2_0 0x4605
+#define SPR_EX_CONTEXT_2_1 0x4606
+#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
+#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
+#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3
+#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
+#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
+#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
 #define SPR_FAIL 0x4e09
 #define SPR_INTCTRL_0_STATUS 0x4a07
 #define SPR_INTCTRL_1_STATUS 0x4807
+#define SPR_INTCTRL_2_STATUS 0x4607
 #define SPR_INTERRUPT_CRITICAL_SECTION 0x4e0a
 #define SPR_INTERRUPT_MASK_0_0 0x4a08
 #define SPR_INTERRUPT_MASK_0_1 0x4a09
 #define SPR_INTERRUPT_MASK_1_0 0x4809
 #define SPR_INTERRUPT_MASK_1_1 0x480a
+#define SPR_INTERRUPT_MASK_2_0 0x4608
+#define SPR_INTERRUPT_MASK_2_1 0x4609
 #define SPR_INTERRUPT_MASK_RESET_0_0 0x4a0a
 #define SPR_INTERRUPT_MASK_RESET_0_1 0x4a0b
 #define SPR_INTERRUPT_MASK_RESET_1_0 0x480b
 #define SPR_INTERRUPT_MASK_RESET_1_1 0x480c
+#define SPR_INTERRUPT_MASK_RESET_2_0 0x460a
+#define SPR_INTERRUPT_MASK_RESET_2_1 0x460b
 #define SPR_INTERRUPT_MASK_SET_0_0 0x4a0c
 #define SPR_INTERRUPT_MASK_SET_0_1 0x4a0d
 #define SPR_INTERRUPT_MASK_SET_1_0 0x480d
 #define SPR_INTERRUPT_MASK_SET_1_1 0x480e
+#define SPR_INTERRUPT_MASK_SET_2_0 0x460c
+#define SPR_INTERRUPT_MASK_SET_2_1 0x460d
 #define SPR_MPL_DMA_CPL_SET_0 0x5800
 #define SPR_MPL_DMA_CPL_SET_1 0x5801
+#define SPR_MPL_DMA_CPL_SET_2 0x5802
 #define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
 #define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
+#define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
 #define SPR_MPL_INTCTRL_0_SET_0 0x4a00
 #define SPR_MPL_INTCTRL_0_SET_1 0x4a01
+#define SPR_MPL_INTCTRL_0_SET_2 0x4a02
 #define SPR_MPL_INTCTRL_1_SET_0 0x4800
 #define SPR_MPL_INTCTRL_1_SET_1 0x4801
+#define SPR_MPL_INTCTRL_1_SET_2 0x4802
+#define SPR_MPL_INTCTRL_2_SET_0 0x4600
+#define SPR_MPL_INTCTRL_2_SET_1 0x4601
+#define SPR_MPL_INTCTRL_2_SET_2 0x4602
 #define SPR_MPL_SN_ACCESS_SET_0 0x0800
 #define SPR_MPL_SN_ACCESS_SET_1 0x0801
+#define SPR_MPL_SN_ACCESS_SET_2 0x0802
 #define SPR_MPL_SN_CPL_SET_0 0x5a00
 #define SPR_MPL_SN_CPL_SET_1 0x5a01
+#define SPR_MPL_SN_CPL_SET_2 0x5a02
 #define SPR_MPL_SN_FIREWALL_SET_0 0x2c00
 #define SPR_MPL_SN_FIREWALL_SET_1 0x2c01
+#define SPR_MPL_SN_FIREWALL_SET_2 0x2c02
 #define SPR_MPL_SN_NOTIFY_SET_0 0x2a00
 #define SPR_MPL_SN_NOTIFY_SET_1 0x2a01
+#define SPR_MPL_SN_NOTIFY_SET_2 0x2a02
 #define SPR_MPL_UDN_ACCESS_SET_0 0x0c00
 #define SPR_MPL_UDN_ACCESS_SET_1 0x0c01
+#define SPR_MPL_UDN_ACCESS_SET_2 0x0c02
 #define SPR_MPL_UDN_AVAIL_SET_0 0x4000
 #define SPR_MPL_UDN_AVAIL_SET_1 0x4001
+#define SPR_MPL_UDN_AVAIL_SET_2 0x4002
 #define SPR_MPL_UDN_CA_SET_0 0x3c00
 #define SPR_MPL_UDN_CA_SET_1 0x3c01
+#define SPR_MPL_UDN_CA_SET_2 0x3c02
 #define SPR_MPL_UDN_COMPLETE_SET_0 0x1400
 #define SPR_MPL_UDN_COMPLETE_SET_1 0x1401
+#define SPR_MPL_UDN_COMPLETE_SET_2 0x1402
 #define SPR_MPL_UDN_FIREWALL_SET_0 0x3000
 #define SPR_MPL_UDN_FIREWALL_SET_1 0x3001
+#define SPR_MPL_UDN_FIREWALL_SET_2 0x3002
 #define SPR_MPL_UDN_REFILL_SET_0 0x1000
 #define SPR_MPL_UDN_REFILL_SET_1 0x1001
+#define SPR_MPL_UDN_REFILL_SET_2 0x1002
 #define SPR_MPL_UDN_TIMER_SET_0 0x3600
 #define SPR_MPL_UDN_TIMER_SET_1 0x3601
+#define SPR_MPL_UDN_TIMER_SET_2 0x3602
 #define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00
 #define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01
+#define SPR_MPL_WORLD_ACCESS_SET_2 0x4e02
 #define SPR_PASS 0x4e0b
 #define SPR_PERF_COUNT_0 0x4205
 #define SPR_PERF_COUNT_1 0x4206
 #define SPR_PERF_COUNT_CTL 0x4207
+#define SPR_PERF_COUNT_DN_CTL 0x4210
 #define SPR_PERF_COUNT_STS 0x4208
 #define SPR_PROC_STATUS 0x4f00
 #define SPR_SIM_CONTROL 0x4e0c
@@ -124,6 +159,10 @@
 #define SPR_SYSTEM_SAVE_1_1 0x4901
 #define SPR_SYSTEM_SAVE_1_2 0x4902
 #define SPR_SYSTEM_SAVE_1_3 0x4903
+#define SPR_SYSTEM_SAVE_2_0 0x4700
+#define SPR_SYSTEM_SAVE_2_1 0x4701
+#define SPR_SYSTEM_SAVE_2_2 0x4702
+#define SPR_SYSTEM_SAVE_2_3 0x4703
 #define SPR_TILE_COORD 0x4c17
 #define SPR_TILE_RTF_HWM 0x4e10
 #define SPR_TILE_TIMER_CONTROL 0x3205
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h
index 758ca4619d50..f18887d82399 100644
--- a/arch/tile/include/asm/backtrace.h
+++ b/arch/tile/include/asm/backtrace.h
@@ -146,7 +146,10 @@ enum {
 
 	CALLER_SP_IN_R52_BASE = 4,
 
-	CALLER_SP_OFFSET_BASE = 8
+	CALLER_SP_OFFSET_BASE = 8,
+
+	/* Marks the entry point of certain functions. */
+	ENTRY_POINT_INFO_OP = 16
 };
 
 
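Note that ENTRY_POINT_INFO_OP is a flag bit rather than the next enumerator in sequence, so consumers test it with a bitwise AND; the backtrace.c hunk later in this patch does exactly that. A minimal sketch of the test:

/* Sketch only: classify an info operand the way the backtracer does. */
static int is_entry_point_info(int info_operand)
{
	return (info_operand & ENTRY_POINT_INFO_OP) != 0;
}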
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 8b60ec8b2d19..c3ae570c0a5d 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -216,15 +216,16 @@ struct compat_siginfo;
 struct compat_sigaltstack;
 long compat_sys_execve(const char __user *path,
 		       const compat_uptr_t __user *argv,
-		       const compat_uptr_t __user *envp);
+		       const compat_uptr_t __user *envp, struct pt_regs *);
 long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
 			     struct compat_sigaction __user *oact,
 			     size_t sigsetsize);
 long compat_sys_rt_sigqueueinfo(int pid, int sig,
 				struct compat_siginfo __user *uinfo);
-long compat_sys_rt_sigreturn(void);
+long compat_sys_rt_sigreturn(struct pt_regs *);
 long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
-			    struct compat_sigaltstack __user *uoss_ptr);
+			    struct compat_sigaltstack __user *uoss_ptr,
+			    struct pt_regs *);
 long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high);
 long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high);
 long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
@@ -255,4 +256,12 @@ long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 /* Tilera Linux syscalls that don't have "compat" versions. */
 #define compat_sys_flush_cache sys_flush_cache
 
+/* These are the intvec_64.S trampolines. */
+long _compat_sys_execve(const char __user *path,
+			const compat_uptr_t __user *argv,
+			const compat_uptr_t __user *envp);
+long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+			     struct compat_sigaltstack __user *uoss_ptr);
+long _compat_sys_rt_sigreturn(void);
+
 #endif /* _ASM_TILE_COMPAT_H */
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index a11d4837ee4d..641e4ff3d805 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -47,53 +47,53 @@
 	int __n = (n); \
 	int __mask = 1 << (__n & 0x1f); \
 	if (__n < 32) \
-		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
 	else \
-		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
 } while (0)
 #define interrupt_mask_reset(n) do { \
 	int __n = (n); \
 	int __mask = 1 << (__n & 0x1f); \
 	if (__n < 32) \
-		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
 	else \
-		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
 } while (0)
 #define interrupt_mask_check(n) ({ \
 	int __n = (n); \
 	(((__n < 32) ? \
-	 __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \
-	 __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \
+	 __insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
+	 __insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
 	  >> (__n & 0x1f)) & 1; \
 })
 #define interrupt_mask_set_mask(mask) do { \
 	unsigned long long __m = (mask); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
 #define interrupt_mask_reset_mask(mask) do { \
 	unsigned long long __m = (mask); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
 #else
 #define interrupt_mask_set(n) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n)))
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
 #define interrupt_mask_reset(n) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n)))
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
 #define interrupt_mask_check(n) \
-	((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1)
+	((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
 #define interrupt_mask_set_mask(mask) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask))
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
 #define interrupt_mask_reset_mask(mask) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask))
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
 #endif
 
 /*
  * The set of interrupts we want active if irqs are enabled.
  * Note that in particular, the tile timer interrupt comes and goes
  * from this set, since we have no other way to turn off the timer.
- * Likewise, INTCTRL_1 is removed and re-added during device
+ * Likewise, INTCTRL_K is removed and re-added during device
  * interrupts, as is the the hardwall UDN_FIREWALL interrupt.
  * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
  * is always claimed as an "active interrupt" so we can query that bit
@@ -170,14 +170,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */
 #define IRQS_DISABLED(tmp) \
-	mfspr   tmp, INTERRUPT_MASK_1; \
+	mfspr   tmp, SPR_INTERRUPT_MASK_K; \
 	andi    tmp, tmp, 1
 
 /* Load up a pointer to &interrupts_enabled_mask. */
 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
 	moveli reg, hw2_last(interrupts_enabled_mask); \
 	shl16insli reg, reg, hw1(interrupts_enabled_mask); \
 	shl16insli reg, reg, hw0(interrupts_enabled_mask); \
 	add     reg, reg, tp
 
 /* Disable interrupts. */
@@ -185,18 +185,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	moveli  tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \
 	shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \
 	shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \
-	mtspr   INTERRUPT_MASK_SET_1, tmp0
+	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp0
 
 /* Disable ALL synchronous interrupts (used by NMI entry). */
 #define IRQ_DISABLE_ALL(tmp) \
 	movei   tmp, -1; \
-	mtspr   INTERRUPT_MASK_SET_1, tmp
+	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp
 
 /* Enable interrupts. */
 #define IRQ_ENABLE(tmp0, tmp1) \
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
 	ld      tmp0, tmp0; \
-	mtspr   INTERRUPT_MASK_RESET_1, tmp0
+	mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0
 
 #else /* !__tilegx__ */
 
@@ -210,14 +210,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
  * (making the original code's write of the "high" mask word idempotent).
  */
 #define IRQS_DISABLED(tmp) \
-	mfspr   tmp, INTERRUPT_MASK_1_0; \
+	mfspr   tmp, SPR_INTERRUPT_MASK_K_0; \
 	shri    tmp, tmp, INT_MEM_ERROR; \
 	andi    tmp, tmp, 1
 
 /* Load up a pointer to &interrupts_enabled_mask. */
 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
 	moveli  reg, lo16(interrupts_enabled_mask); \
-	auli    reg, reg, ha16(interrupts_enabled_mask);\
+	auli    reg, reg, ha16(interrupts_enabled_mask); \
 	add     reg, reg, tp
 
 /* Disable interrupts. */
@@ -227,16 +227,16 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \
 	}; \
 	{ \
-	 mtspr  INTERRUPT_MASK_SET_1_0, tmp0; \
+	 mtspr  SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
 	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \
 	}; \
-	mtspr   INTERRUPT_MASK_SET_1_1, tmp1
+	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp1
 
 /* Disable ALL synchronous interrupts (used by NMI entry). */
 #define IRQ_DISABLE_ALL(tmp) \
 	movei   tmp, -1; \
-	mtspr   INTERRUPT_MASK_SET_1_0, tmp; \
-	mtspr   INTERRUPT_MASK_SET_1_1, tmp
+	mtspr   SPR_INTERRUPT_MASK_SET_K_0, tmp; \
+	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp
 
 /* Enable interrupts. */
 #define IRQ_ENABLE(tmp0, tmp1) \
@@ -246,8 +246,8 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	 addi   tmp1, tmp0, 4 \
 	}; \
 	lw      tmp1, tmp1; \
-	mtspr   INTERRUPT_MASK_RESET_1_0, tmp0; \
-	mtspr   INTERRUPT_MASK_RESET_1_1, tmp1
+	mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
+	mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
 #endif
 
 /*
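On TILEPro the 64-bit interrupt mask is split across a pair of 32-bit SPRs, so interrupt n lands in word n/32 at bit n%32. A standalone model of the split used by interrupt_mask_set() above (plain C with ordinary variables standing in for the SPR pair; illustration only, the real code writes the SPRs via mtspr):

#include <stdint.h>

static uint32_t mask_word_0, mask_word_1;	/* stand-ins for *_K_0 / *_K_1 */

static void model_interrupt_mask_set(int n)
{
	uint32_t bit = 1u << (n & 0x1f);
	if (n < 32)
		mask_word_0 |= bit;	/* real code: SPR_INTERRUPT_MASK_SET_K_0 */
	else
		mask_word_1 |= bit;	/* real code: SPR_INTERRUPT_MASK_SET_K_1 */
}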
diff --git a/arch/tile/include/asm/mman.h b/arch/tile/include/asm/mman.h
index 4c6811e3e8dc..81b8fc348d63 100644
--- a/arch/tile/include/asm/mman.h
+++ b/arch/tile/include/asm/mman.h
@@ -23,6 +23,7 @@
 #define MAP_POPULATE	0x0040		/* populate (prefault) pagetables */
 #define MAP_NONBLOCK	0x0080		/* do not block on IO */
 #define MAP_GROWSDOWN	0x0100		/* stack-like segment */
+#define MAP_STACK	MAP_GROWSDOWN	/* provide convenience alias */
 #define MAP_LOCKED	0x0200		/* pages are locked */
 #define MAP_NORESERVE	0x0400		/* don't check for reservations */
 #define MAP_DENYWRITE	0x0800		/* ETXTBSY */
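With the alias in place, portable code that passes MAP_STACK now compiles on tile and simply gets MAP_GROWSDOWN semantics. A typical userspace call site (sketch; the size is caller-chosen):

#include <sys/mman.h>
#include <stddef.h>

static void *alloc_thread_stack(size_t size)
{
	/* On tile, MAP_STACK == MAP_GROWSDOWN per the alias above. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
}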
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 7d90641cf18d..7979a45430d3 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -199,17 +199,17 @@ static inline __attribute_const__ int get_order(unsigned long size)
199 * If you want more physical memory than this then see the CONFIG_HIGHMEM 199 * If you want more physical memory than this then see the CONFIG_HIGHMEM
200 * option in the kernel configuration. 200 * option in the kernel configuration.
201 * 201 *
202 * The top two 16MB chunks in the table below (VIRT and HV) are 202 * The top 16MB chunk in the table below is unavailable to Linux. Since
203 * unavailable to Linux. Since the kernel interrupt vectors must live 203 * the kernel interrupt vectors must live at ether 0xfe000000 or 0xfd000000
204 * at 0xfd000000, we map all of the bottom of RAM at this address with 204 * (depending on whether the kernel is at PL2 or Pl1), we map all of the
205 * a huge page table entry to minimize its ITLB footprint (as well as 205 * bottom of RAM at this address with a huge page table entry to minimize
206 * at PAGE_OFFSET). The last architected requirement is that user 206 * its ITLB footprint (as well as at PAGE_OFFSET). The last architected
207 * interrupt vectors live at 0xfc000000, so we make that range of 207 * requirement is that user interrupt vectors live at 0xfc000000, so we
208 * memory available to user processes. The remaining regions are sized 208 * make that range of memory available to user processes. The remaining
209 * as shown; after the first four addresses, we show "typical" values, 209 * regions are sized as shown; the first four addresses use the PL 1
210 * since the actual addresses depend on kernel #defines. 210 * values, and after that, we show "typical" values, since the actual
211 * addresses depend on kernel #defines.
211 * 212 *
212 * MEM_VIRT_INTRPT 0xff000000
213 * MEM_HV_INTRPT 0xfe000000 213 * MEM_HV_INTRPT 0xfe000000
214 * MEM_SV_INTRPT (kernel code) 0xfd000000 214 * MEM_SV_INTRPT (kernel code) 0xfd000000
215 * MEM_USER_INTRPT (user vector) 0xfc000000 215 * MEM_USER_INTRPT (user vector) 0xfc000000
@@ -221,9 +221,14 @@ static inline __attribute_const__ int get_order(unsigned long size)
221 */ 221 */
222 222
223#define MEM_USER_INTRPT _AC(0xfc000000, UL) 223#define MEM_USER_INTRPT _AC(0xfc000000, UL)
224#if CONFIG_KERNEL_PL == 1
224#define MEM_SV_INTRPT _AC(0xfd000000, UL) 225#define MEM_SV_INTRPT _AC(0xfd000000, UL)
225#define MEM_HV_INTRPT _AC(0xfe000000, UL) 226#define MEM_HV_INTRPT _AC(0xfe000000, UL)
226#define MEM_VIRT_INTRPT _AC(0xff000000, UL) 227#else
228#define MEM_GUEST_INTRPT _AC(0xfd000000, UL)
229#define MEM_SV_INTRPT _AC(0xfe000000, UL)
230#define MEM_HV_INTRPT _AC(0xff000000, UL)
231#endif
227 232
228#define INTRPT_SIZE 0x4000 233#define INTRPT_SIZE 0x4000
229 234
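In both layouts the ordering is fixed: user vectors, then supervisor, then hypervisor, each 16MB apart. A hypothetical compile-time check (not in the patch) that would hold for either CONFIG_KERNEL_PL value:

#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <asm/page.h>

static inline void check_intrpt_layout(void)
{
	BUILD_BUG_ON(MEM_USER_INTRPT != 0xfc000000UL);
	/* Supervisor vectors sit exactly 16MB below the hypervisor's. */
	BUILD_BUG_ON(MEM_SV_INTRPT + 0x01000000 != MEM_HV_INTRPT);
}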
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index ccd5f8425688..1747ff3946b2 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -328,18 +328,21 @@ extern int kdata_huge;
  * Note that assembly code assumes that USER_PL is zero.
  */
 #define USER_PL 0
-#define KERNEL_PL 1
+#if CONFIG_KERNEL_PL == 2
+#define GUEST_PL 1
+#endif
+#define KERNEL_PL CONFIG_KERNEL_PL
 
-/* SYSTEM_SAVE_1_0 holds the current cpu number ORed with ksp0. */
+/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
 #define CPU_LOG_MASK_VALUE 12
 #define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
 #if CONFIG_NR_CPUS > CPU_MASK_VALUE
 # error Too many cpus!
 #endif
 #define raw_smp_processor_id() \
-	((int)__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & CPU_MASK_VALUE)
+	((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE)
 #define get_current_ksp0() \
-	(__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & ~CPU_MASK_VALUE)
+	(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE)
 #define next_current_ksp0(task) ({ \
 	unsigned long __ksp0 = task_ksp0(task); \
 	int __cpu = raw_smp_processor_id(); \
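SYSTEM_SAVE_K_0 packs two values: because ksp0 is at least 4KB-aligned, its low CPU_LOG_MASK_VALUE (12) bits are free to carry the cpu number. A standalone model with illustrative values:

#include <stdio.h>

#define CPU_LOG_MASK_VALUE 12
#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)

int main(void)
{
	unsigned long ksp0 = 0xfd0e8000UL;	/* sample, 4KB-aligned */
	int cpu = 35;				/* sample cpu number */
	unsigned long spr = ksp0 | cpu;		/* what next_current_ksp0() stores */

	printf("cpu  = %d\n", (int)(spr & CPU_MASK_VALUE));		/* 35 */
	printf("ksp0 = %#lx\n", spr & ~(unsigned long)CPU_MASK_VALUE);	/* 0xfd0e8000 */
	return 0;
}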
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index 4a02bb073979..ac6d343129d3 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -62,8 +62,8 @@ struct pt_regs {
 	pt_reg_t lr;		/* aliases regs[TREG_LR] */
 
 	/* Saved special registers. */
-	pt_reg_t pc;		/* stored in EX_CONTEXT_1_0 */
-	pt_reg_t ex1;		/* stored in EX_CONTEXT_1_1 (PL and ICS bit) */
+	pt_reg_t pc;		/* stored in EX_CONTEXT_K_0 */
+	pt_reg_t ex1;		/* stored in EX_CONTEXT_K_1 (PL and ICS bit) */
 	pt_reg_t faultnum;	/* fault number (INT_SWINT_1 for syscall) */
 	pt_reg_t orig_r0;	/* r0 at syscall entry, else zero */
 	pt_reg_t flags;		/* flags (see below) */
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index ce99ffefeacf..3b5507c31eae 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -32,8 +32,9 @@ extern void *compat_sys_call_table[];
 
 /*
  * Note that by convention, any syscall which requires the current
- * register set takes an additional "struct pt_regs *" pointer; the
- * sys_xxx() function just adds the pointer and tail-calls to _sys_xxx().
+ * register set takes an additional "struct pt_regs *" pointer; a
+ * _sys_xxx() trampoline in intvec*.S just sets up the pointer and
+ * jumps to sys_xxx().
  */
 
 /* kernel/sys.c */
@@ -43,66 +44,17 @@ long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
 int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
 		       u32 len_lo, u32 len_hi, int advice);
 long sys_flush_cache(void);
-long sys_mmap2(unsigned long addr, unsigned long len,
-	       unsigned long prot, unsigned long flags,
-	       unsigned long fd, unsigned long pgoff);
-#ifdef __tilegx__
-long sys_mmap(unsigned long addr, unsigned long len,
-	      unsigned long prot, unsigned long flags,
-	      unsigned long fd, off_t pgoff);
+#ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */
+#define sys_mmap sys_mmap
 #endif
 
-/* kernel/process.c */
-long sys_clone(unsigned long clone_flags, unsigned long newsp,
-	       void __user *parent_tid, void __user *child_tid);
-long _sys_clone(unsigned long clone_flags, unsigned long newsp,
-		void __user *parent_tid, void __user *child_tid,
-		struct pt_regs *regs);
-long sys_fork(void);
-long _sys_fork(struct pt_regs *regs);
-long sys_vfork(void);
-long _sys_vfork(struct pt_regs *regs);
-long sys_execve(const char __user *filename,
-		const char __user *const __user *argv,
-		const char __user *const __user *envp);
-long _sys_execve(const char __user *filename,
-		 const char __user *const __user *argv,
-		 const char __user *const __user *envp, struct pt_regs *regs);
-
-/* kernel/signal.c */
-long sys_sigaltstack(const stack_t __user *, stack_t __user *);
-long _sys_sigaltstack(const stack_t __user *, stack_t __user *,
-		      struct pt_regs *);
-long sys_rt_sigreturn(void);
-long _sys_rt_sigreturn(struct pt_regs *regs);
-
-/* platform-independent functions */
-long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
-long sys_rt_sigaction(int sig, const struct sigaction __user *act,
-		      struct sigaction __user *oact, size_t sigsetsize);
-
 #ifndef __tilegx__
 /* mm/fault.c */
-int sys_cmpxchg_badaddr(unsigned long address);
-int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
+long sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
+long _sys_cmpxchg_badaddr(unsigned long address);
 #endif
 
 #ifdef CONFIG_COMPAT
-long compat_sys_execve(const char __user *path,
-		       const compat_uptr_t __user *argv,
-		       const compat_uptr_t __user *envp);
-long _compat_sys_execve(const char __user *path,
-			const compat_uptr_t __user *argv,
-			const compat_uptr_t __user *envp,
-			struct pt_regs *regs);
-long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
-			    struct compat_sigaltstack __user *uoss_ptr);
-long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
-			     struct compat_sigaltstack __user *uoss_ptr,
-			     struct pt_regs *regs);
-long compat_sys_rt_sigreturn(void);
-long _compat_sys_rt_sigreturn(struct pt_regs *regs);
-
 /* These four are not defined for 64-bit, but serve as "compat" syscalls. */
 long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg);
 long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
@@ -110,4 +62,15 @@ long sys_truncate64(const char __user *path, loff_t length);
 long sys_ftruncate64(unsigned int fd, loff_t length);
 #endif
 
+/* These are the intvec*.S trampolines. */
+long _sys_sigaltstack(const stack_t __user *, stack_t __user *);
+long _sys_rt_sigreturn(void);
+long _sys_clone(unsigned long clone_flags, unsigned long newsp,
+		void __user *parent_tid, void __user *child_tid);
+long _sys_execve(const char __user *filename,
+		 const char __user *const __user *argv,
+		 const char __user *const __user *envp);
+
+#include <asm-generic/syscalls.h>
+
 #endif /* _ASM_TILE_SYSCALLS_H */
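The convention described in the comment at the top of this header, modeled in C. The real _sys_clone is an assembly stub in intvec_32.S/intvec_64.S, and current_pt_regs_model() is a hypothetical stand-in for the stub computing the pt_regs pointer from the kernel stack:

struct pt_regs;
extern struct pt_regs *current_pt_regs_model(void);	/* hypothetical */

long sys_clone(unsigned long clone_flags, unsigned long newsp,
	       void __user *parent_tid, void __user *child_tid,
	       struct pt_regs *regs);

static long _sys_clone_model(unsigned long clone_flags, unsigned long newsp,
			     void __user *parent_tid, void __user *child_tid)
{
	/* The trampoline's whole job: append the register pointer. */
	return sys_clone(clone_flags, newsp, parent_tid, child_tid,
			 current_pt_regs_model());
}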
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h
index f749be327ce0..5388850deeb2 100644
--- a/arch/tile/include/asm/system.h
+++ b/arch/tile/include/asm/system.h
@@ -89,6 +89,10 @@
 #define get_cycles_low() __insn_mfspr(SPR_CYCLE)   /* just get all 64 bits */
 #endif
 
+#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
+int __mb_incoherent(void);  /* Helper routine for mb_incoherent(). */
+#endif
+
 /* Fence to guarantee visibility of stores to incoherent memory. */
 static inline void
 mb_incoherent(void)
@@ -97,7 +101,6 @@ mb_incoherent(void)
 
 #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
 	{
-		int __mb_incoherent(void);
 #if CHIP_HAS_TILE_WRITE_PENDING()
 		const unsigned long WRITE_TIMEOUT_CYCLES = 400;
 		unsigned long start = get_cycles_low();
@@ -161,7 +164,7 @@ extern struct task_struct *_switch_to(struct task_struct *prev,
 /* Helper function for _switch_to(). */
 extern struct task_struct *__switch_to(struct task_struct *prev,
 				       struct task_struct *next,
-				       unsigned long new_system_save_1_0);
+				       unsigned long new_system_save_k_0);
 
 /* Address that switched-away from tasks are at. */
 extern unsigned long get_switch_to_pc(void);
@@ -214,13 +217,6 @@ int hardwall_deactivate(struct task_struct *task);
 } while (0)
 #endif
 
-/* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
-extern int _sim_syscall(int syscall_num, ...);
-#define sim_syscall(syscall_num, ...) \
-	_sim_syscall(SIM_CONTROL_SYSCALL + \
-		     ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \
-		     ## __VA_ARGS__)
-
 /*
  * Kernel threads can check to see if they need to migrate their
  * stack whenever they return from a context switch; for user
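Hoisting the __mb_incoherent() prototype from block scope to file scope is what lets the symbol be exported to modules (cf. the "properly export __mb_incoherent" patch in this series). A sketch of the corresponding export, which per the diffstat lands in arch/tile/lib/exports.c:

#include <linux/module.h>
#include <asm/system.h>		/* int __mb_incoherent(void); */

EXPORT_SYMBOL(__mb_incoherent);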
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index 432a9c15c8a2..d06e35f57201 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -59,4 +59,8 @@ void do_hardwall_trap(struct pt_regs *, int fault_num);
 void do_breakpoint(struct pt_regs *, int fault_num);
 
 
+#ifdef __tilegx__
+void gx_singlestep_handle(struct pt_regs *, int fault_num);
+#endif
+
 #endif /* _ASM_TILE_SYSCALLS_H */
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 9bd303a141b2..f672544cd4f9 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -1003,37 +1003,37 @@ int hv_console_write(HV_VirtAddr bytes, int len);
  * when these occur in a client's interrupt critical section, they must
  * be delivered through the downcall mechanism.
  *
- * A downcall is initially delivered to the client as an INTCTRL_1
- * interrupt.  Upon entry to the INTCTRL_1 vector, the client must
- * immediately invoke the hv_downcall_dispatch service.  This service
- * will not return; instead it will cause one of the client's actual
- * downcall-handling interrupt vectors to be entered.  The EX_CONTEXT
- * registers in the client will be set so that when the client irets,
- * it will return to the code which was interrupted by the INTCTRL_1
- * interrupt.
+ * A downcall is initially delivered to the client as an INTCTRL_CL
+ * interrupt, where CL is the client's PL.  Upon entry to the INTCTRL_CL
+ * vector, the client must immediately invoke the hv_downcall_dispatch
+ * service.  This service will not return; instead it will cause one of
+ * the client's actual downcall-handling interrupt vectors to be entered.
+ * The EX_CONTEXT registers in the client will be set so that when the
+ * client irets, it will return to the code which was interrupted by the
+ * INTCTRL_CL interrupt.
  *
- * Under some circumstances, the firing of INTCTRL_1 can race with
+ * Under some circumstances, the firing of INTCTRL_CL can race with
  * the lowering of a device interrupt.  In such a case, the
  * hv_downcall_dispatch service may issue an iret instruction instead
  * of entering one of the client's actual downcall-handling interrupt
  * vectors.  This will return execution to the location that was
- * interrupted by INTCTRL_1.
+ * interrupted by INTCTRL_CL.
  *
  * Any saving of registers should be done by the actual handling
- * vectors; no registers should be changed by the INTCTRL_1 handler.
+ * vectors; no registers should be changed by the INTCTRL_CL handler.
  * In particular, the client should not use a jal instruction to invoke
  * the hv_downcall_dispatch service, as that would overwrite the client's
  * lr register.  Note that the hv_downcall_dispatch service may overwrite
  * one or more of the client's system save registers.
  *
- * The client must not modify the INTCTRL_1_STATUS SPR.  The hypervisor
+ * The client must not modify the INTCTRL_CL_STATUS SPR.  The hypervisor
  * will set this register to cause a downcall to happen, and will clear
  * it when no further downcalls are pending.
  *
- * When a downcall vector is entered, the INTCTRL_1 interrupt will be
+ * When a downcall vector is entered, the INTCTRL_CL interrupt will be
  * masked.  When the client is done processing a downcall, and is ready
  * to accept another, it must unmask this interrupt; if more downcalls
- * are pending, this will cause the INTCTRL_1 vector to be reentered.
+ * are pending, this will cause the INTCTRL_CL vector to be reentered.
  * Currently the following interrupt vectors can be entered through a
  * downcall:
  *
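The "unmask when done" step of this protocol, sketched with the irqflags.h helpers from this same patch (illustrative only; the real sequence is the handle_interrupt_downcall path in intvec_32.S shown further below):

#include <asm/irqflags.h>
#include <arch/spr_def.h>	/* INT_INTCTRL_K */

static void downcall_complete_model(void)
{
	/* Ready for another downcall: unmask INTCTRL at the kernel's PL.
	 * If more downcalls are pending, the vector re-enters immediately. */
	interrupt_mask_reset(INT_INTCTRL_K);
}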
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
index d3c41c1ff6bd..55a6a74974b4 100644
--- a/arch/tile/kernel/backtrace.c
+++ b/arch/tile/kernel/backtrace.c
@@ -369,6 +369,10 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
 			/* Weird; reserved value, ignore it. */
 			continue;
 		}
+		if (info_operand & ENTRY_POINT_INFO_OP) {
+			/* This info op is ignored by the backtracer. */
+			continue;
+		}
 
 		/* Skip info ops which are not in the
 		 * "one_ago" mode we want right now.
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index b1e06d041555..77739cdd9462 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -154,8 +154,14 @@ long tile_compat_sys_msgrcv(int msqid,
 #define compat_sys_fstat64 sys_newfstat
 #define compat_sys_fstatat64 sys_newfstatat
 
-/* Pass full 64-bit values through ptrace. */
-#define compat_sys_ptrace tile_compat_sys_ptrace
+/* The native sys_ptrace dynamically handles compat binaries. */
+#define compat_sys_ptrace sys_ptrace
+
+/* Call the trampolines to manage pt_regs where necessary. */
+#define compat_sys_execve _compat_sys_execve
+#define compat_sys_sigaltstack _compat_sys_sigaltstack
+#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
+#define sys_clone _sys_clone
 
 /*
  * Note that we can't include <linux/unistd.h> here since the header
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 9c710db43f13..fb64b99959d4 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -256,9 +256,9 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 	return err;
 }
 
-long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
-			     struct compat_sigaltstack __user *uoss_ptr,
-			     struct pt_regs *regs)
+long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+			    struct compat_sigaltstack __user *uoss_ptr,
+			    struct pt_regs *regs)
 {
 	stack_t uss, uoss;
 	int ret;
@@ -291,7 +291,7 @@ long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
 	return ret;
 }
 
-long _compat_sys_rt_sigreturn(struct pt_regs *regs)
+long compat_sys_rt_sigreturn(struct pt_regs *regs)
 {
 	struct compat_rt_sigframe __user *frame =
 		(struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
@@ -312,7 +312,7 @@ long _compat_sys_rt_sigreturn(struct pt_regs *regs)
 	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
 		goto badframe;
 
-	if (_compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
+	if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
 		goto badframe;
 
 	return r0;
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 3d01383b1b0e..fd8dc42abdcb 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -15,7 +15,9 @@
 #include <linux/linkage.h>
 #include <linux/unistd.h>
 #include <asm/irqflags.h>
+#include <asm/processor.h>
 #include <arch/abi.h>
+#include <arch/spr_def.h>
 
 #ifdef __tilegx__
 #define bnzt bnezt
@@ -25,28 +27,6 @@ STD_ENTRY(current_text_addr)
 	{ move r0, lr; jrp lr }
 	STD_ENDPROC(current_text_addr)
 
-STD_ENTRY(_sim_syscall)
-	/*
-	 * Wait for r0-r9 to be ready (and lr on the off chance we
-	 * want the syscall to locate its caller), then make a magic
-	 * simulator syscall.
-	 *
-	 * We carefully stall until the registers are readable in case they
-	 * are the target of a slow load, etc. so that tile-sim will
-	 * definitely be able to read all of them inside the magic syscall.
-	 *
-	 * Technically this is wrong for r3-r9 and lr, since an interrupt
-	 * could come in and restore the registers with a slow load right
-	 * before executing the mtspr.  We may need to modify tile-sim to
-	 * explicitly stall for this case, but we do not yet have
-	 * a way to implement such a stall.
-	 */
-	{ and zero, lr, r9 ; and zero, r8, r7 }
-	{ and zero, r6, r5 ; and zero, r4, r3 }
-	{ and zero, r2, r1 ; mtspr SIM_CONTROL, r0 }
-	{ jrp lr }
-	STD_ENDPROC(_sim_syscall)
-
 /*
  * Implement execve().  The i386 code has a note that forking from kernel
  * space results in no copy on write until the execve, so we should be
@@ -102,7 +82,7 @@ STD_ENTRY(KBacktraceIterator_init_current)
 STD_ENTRY(cpu_idle_on_new_stack)
 	{
 	 move sp, r1
-	 mtspr SYSTEM_SAVE_1_0, r2
+	 mtspr SPR_SYSTEM_SAVE_K_0, r2
 	}
 	jal free_thread_info
 	j cpu_idle
@@ -124,15 +104,15 @@ STD_ENTRY(smp_nap)
 STD_ENTRY(_cpu_idle)
 	{
 	 lnk r0
-	 movei r1, 1
+	 movei r1, KERNEL_PL
 	}
 	{
 	 addli r0, r0, _cpu_idle_nap - .
 	 mtspr INTERRUPT_CRITICAL_SECTION, r1
 	}
 	IRQ_ENABLE(r2, r3)              /* unmask, but still with ICS set */
-	mtspr EX_CONTEXT_1_1, r1        /* PL1, ICS clear */
-	mtspr EX_CONTEXT_1_0, r0
+	mtspr SPR_EX_CONTEXT_K_1, r1    /* Kernel PL, ICS clear */
+	mtspr SPR_EX_CONTEXT_K_0, r0
 	iret
 	.global _cpu_idle_nap
 _cpu_idle_nap:
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 2b4f6c091701..90e7c4435693 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -23,6 +23,7 @@
 #include <asm/asm-offsets.h>
 #include <hv/hypervisor.h>
 #include <arch/chip.h>
+#include <arch/spr_def.h>
 
 /*
  * This module contains the entry code for kernel images.  It performs the
@@ -76,7 +77,7 @@ ENTRY(_start)
 	}
 1:
 
-	/* Get our processor number and save it away in SAVE_1_0. */
+	/* Get our processor number and save it away in SAVE_K_0. */
 	jal hv_inquire_topology
 	mulll_uu r4, r1, r2        /* r1 == y, r2 == width */
 	add r4, r4, r0             /* r0 == x, so r4 == cpu == y*width + x */
@@ -124,7 +125,7 @@ ENTRY(_start)
 	lw r0, r0
 	lw sp, r1
 	or r4, sp, r4
-	mtspr SYSTEM_SAVE_1_0, r4  /* save ksp0 + cpu */
+	mtspr SPR_SYSTEM_SAVE_K_0, r4  /* save ksp0 + cpu */
 	addi sp, sp, -STACK_TOP_DELTA
 	{
 	 move lr, zero   /* stop backtraces in the called function */
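The boot path linearizes the (x, y) grid coordinate into a cpu number; the same arithmetic in C with illustrative values:

#include <stdio.h>

int main(void)
{
	int x = 3, y = 2, width = 8;	/* as if from hv_inquire_topology() */
	int cpu = y * width + x;	/* mirrors "mulll_uu r4, r1, r2; add r4, r4, r0" */

	printf("cpu = %d\n", cpu);	/* 19 */
	return 0;
}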
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 8f58bdff20d7..f5821626247f 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -32,8 +32,8 @@
32# error "No support for kernel preemption currently" 32# error "No support for kernel preemption currently"
33#endif 33#endif
34 34
35#if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48 35#if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48
36# error INT_INTCTRL_1 coded to set high interrupt mask 36# error INT_INTCTRL_K coded to set high interrupt mask
37#endif 37#endif
38 38
39#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg) 39#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
@@ -132,8 +132,8 @@ intvec_\vecname:
132 132
133 /* Temporarily save a register so we have somewhere to work. */ 133 /* Temporarily save a register so we have somewhere to work. */
134 134
135 mtspr SYSTEM_SAVE_1_1, r0 135 mtspr SPR_SYSTEM_SAVE_K_1, r0
136 mfspr r0, EX_CONTEXT_1_1 136 mfspr r0, SPR_EX_CONTEXT_K_1
137 137
138 /* The cmpxchg code clears sp to force us to reset it here on fault. */ 138 /* The cmpxchg code clears sp to force us to reset it here on fault. */
139 { 139 {
@@ -167,18 +167,18 @@ intvec_\vecname:
167 * The page_fault handler may be downcalled directly by the 167 * The page_fault handler may be downcalled directly by the
168 * hypervisor even when Linux is running and has ICS set. 168 * hypervisor even when Linux is running and has ICS set.
169 * 169 *
170 * In this case the contents of EX_CONTEXT_1_1 reflect the 170 * In this case the contents of EX_CONTEXT_K_1 reflect the
171 * previous fault and can't be relied on to choose whether or 171 * previous fault and can't be relied on to choose whether or
172 * not to reinitialize the stack pointer. So we add a test 172 * not to reinitialize the stack pointer. So we add a test
173 * to see whether SYSTEM_SAVE_1_2 has the high bit set, 173 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
174 * and if so we don't reinitialize sp, since we must be coming 174 * and if so we don't reinitialize sp, since we must be coming
175 * from Linux. (In fact the precise case is !(val & ~1), 175 * from Linux. (In fact the precise case is !(val & ~1),
176 * but any Linux PC has to have the high bit set.) 176 * but any Linux PC has to have the high bit set.)
177 * 177 *
178 * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for 178 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
179 * any path that turns into a downcall to one of our TLB handlers. 179 * any path that turns into a downcall to one of our TLB handlers.
180 */ 180 */
181 mfspr r0, SYSTEM_SAVE_1_2 181 mfspr r0, SPR_SYSTEM_SAVE_K_2
182 { 182 {
183 blz r0, 0f /* high bit in S_S_1_2 is for a PC to use */ 183 blz r0, 0f /* high bit in S_S_1_2 is for a PC to use */
184 move r0, sp 184 move r0, sp
@@ -187,12 +187,12 @@ intvec_\vecname:
187 187
1882: 1882:
189 /* 189 /*
190 * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and 190 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
191 * the current stack top in the higher bits. So we recover 191 * the current stack top in the higher bits. So we recover
192 * our stack top by just masking off the low bits, then 192 * our stack top by just masking off the low bits, then
193 * point sp at the top aligned address on the actual stack page. 193 * point sp at the top aligned address on the actual stack page.
194 */ 194 */
195 mfspr r0, SYSTEM_SAVE_1_0 195 mfspr r0, SPR_SYSTEM_SAVE_K_0
196 mm r0, r0, zero, LOG2_THREAD_SIZE, 31 196 mm r0, r0, zero, LOG2_THREAD_SIZE, 31
197 197
1980: 1980:
@@ -254,7 +254,7 @@ intvec_\vecname:
254 sw sp, r3 254 sw sp, r3
255 addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3) 255 addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
256 } 256 }
257 mfspr r0, EX_CONTEXT_1_0 257 mfspr r0, SPR_EX_CONTEXT_K_0
258 .ifc \processing,handle_syscall 258 .ifc \processing,handle_syscall
259 /* 259 /*
260 * Bump the saved PC by one bundle so that when we return, we won't 260 * Bump the saved PC by one bundle so that when we return, we won't
@@ -267,7 +267,7 @@ intvec_\vecname:
267 sw sp, r0 267 sw sp, r0
268 addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC 268 addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
269 } 269 }
270 mfspr r0, EX_CONTEXT_1_1 270 mfspr r0, SPR_EX_CONTEXT_K_1
271 { 271 {
272 sw sp, r0 272 sw sp, r0
273 addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1 273 addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
@@ -289,7 +289,7 @@ intvec_\vecname:
289 .endif 289 .endif
290 addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM 290 addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
291 } 291 }
292 mfspr r0, SYSTEM_SAVE_1_1 /* Original r0 */ 292 mfspr r0, SPR_SYSTEM_SAVE_K_1 /* Original r0 */
293 { 293 {
294 sw sp, r0 294 sw sp, r0
295 addi sp, sp, -PTREGS_OFFSET_REG(0) - 4 295 addi sp, sp, -PTREGS_OFFSET_REG(0) - 4
@@ -309,12 +309,12 @@ intvec_\vecname:
309 * See discussion below at "finish_interrupt_save". 309 * See discussion below at "finish_interrupt_save".
310 */ 310 */
311 .ifc \c_routine, do_page_fault 311 .ifc \c_routine, do_page_fault
312 mfspr r2, SYSTEM_SAVE_1_3 /* address of page fault */ 312 mfspr r2, SPR_SYSTEM_SAVE_K_3 /* address of page fault */
313 mfspr r3, SYSTEM_SAVE_1_2 /* info about page fault */ 313 mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
314 .else 314 .else
315 .ifc \vecnum, INT_DOUBLE_FAULT 315 .ifc \vecnum, INT_DOUBLE_FAULT
316 { 316 {
317 mfspr r2, SYSTEM_SAVE_1_2 /* double fault info from HV */ 317 mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */
318 movei r3, 0 318 movei r3, 0
319 } 319 }
320 .else 320 .else
@@ -467,7 +467,7 @@ intvec_\vecname:
467 /* Load tp with our per-cpu offset. */ 467 /* Load tp with our per-cpu offset. */
468#ifdef CONFIG_SMP 468#ifdef CONFIG_SMP
469 { 469 {
470 mfspr r20, SYSTEM_SAVE_1_0 470 mfspr r20, SPR_SYSTEM_SAVE_K_0
471 moveli r21, lo16(__per_cpu_offset) 471 moveli r21, lo16(__per_cpu_offset)
472 } 472 }
473 { 473 {
@@ -487,7 +487,7 @@ intvec_\vecname:
487 * We load flags in r32 here so we can jump to .Lrestore_regs 487 * We load flags in r32 here so we can jump to .Lrestore_regs
488 * directly after do_page_fault_ics() if necessary. 488 * directly after do_page_fault_ics() if necessary.
489 */ 489 */
490 mfspr r32, EX_CONTEXT_1_1 490 mfspr r32, SPR_EX_CONTEXT_K_1
491 { 491 {
492 andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ 492 andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
493 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS) 493 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
@@ -957,11 +957,11 @@ STD_ENTRY(interrupt_return)
957 pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC 957 pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
958 pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1 958 pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
959 { 959 {
960 mtspr EX_CONTEXT_1_0, r21 960 mtspr SPR_EX_CONTEXT_K_0, r21
961 move r5, zero 961 move r5, zero
962 } 962 }
963 { 963 {
964 mtspr EX_CONTEXT_1_1, lr 964 mtspr SPR_EX_CONTEXT_K_1, lr
965 andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ 965 andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
966 } 966 }
967 967
@@ -1020,7 +1020,7 @@ STD_ENTRY(interrupt_return)
1020 1020
1021 /* Set r1 to errno if we are returning an error, otherwise zero. */ 1021 /* Set r1 to errno if we are returning an error, otherwise zero. */
1022 { 1022 {
1023 moveli r29, 1024 1023 moveli r29, 4096
1024 sub r1, zero, r0 1024 sub r1, zero, r0
1025 } 1025 }
1026 slt_u r29, r1, r29 1026 slt_u r29, r1, r29
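
This hunk is the changelog's "change lower bound on syscall error return to -4095": a syscall result in [-4095, -1] now encodes a negative errno, matching the generic Linux convention, where the old code only recognized a narrower window. The assembly negates r0 and compares unsigned against the new bound; the equivalent C predicate is the familiar IS_ERR_VALUE() test:

    /* Sketch of the test the negate-and-compare above implements:
     * values in [-4095, -1] are -errno, anything else is real data. */
    static inline int syscall_returned_error(long r0)
    {
            return (unsigned long)r0 >= (unsigned long)-4095L;
    }
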
@@ -1199,7 +1199,7 @@ STD_ENTRY(interrupt_return)
1199 STD_ENDPROC(interrupt_return) 1199 STD_ENDPROC(interrupt_return)
1200 1200
1201 /* 1201 /*
1202 * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit 1202 * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
1203 * before returning, so we can properly get more downcalls. 1203 * before returning, so we can properly get more downcalls.
1204 */ 1204 */
1205 .pushsection .text.handle_interrupt_downcall,"ax" 1205 .pushsection .text.handle_interrupt_downcall,"ax"
@@ -1208,11 +1208,11 @@ handle_interrupt_downcall:
1208 check_single_stepping normal, .Ldispatch_downcall 1208 check_single_stepping normal, .Ldispatch_downcall
1209.Ldispatch_downcall: 1209.Ldispatch_downcall:
1210 1210
1211 /* Clear INTCTRL_1 from the set of interrupts we ever enable. */ 1211 /* Clear INTCTRL_K from the set of interrupts we ever enable. */
1212 GET_INTERRUPTS_ENABLED_MASK_PTR(r30) 1212 GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
1213 { 1213 {
1214 addi r30, r30, 4 1214 addi r30, r30, 4
1215 movei r31, INT_MASK(INT_INTCTRL_1) 1215 movei r31, INT_MASK(INT_INTCTRL_K)
1216 } 1216 }
1217 { 1217 {
1218 lw r20, r30 1218 lw r20, r30
@@ -1227,7 +1227,7 @@ handle_interrupt_downcall:
1227 } 1227 }
1228 FEEDBACK_REENTER(handle_interrupt_downcall) 1228 FEEDBACK_REENTER(handle_interrupt_downcall)
1229 1229
1230 /* Allow INTCTRL_1 to be enabled next time we enable interrupts. */ 1230 /* Allow INTCTRL_K to be enabled next time we enable interrupts. */
1231 lw r20, r30 1231 lw r20, r30
1232 or r20, r20, r31 1232 or r20, r20, r31
1233 sw r30, r20 1233 sw r30, r20
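
The two halves of this handler bracket the C call with edits to the same per-cpu word: INT_INTCTRL_K is removed from the "interrupts we ever enable" mask before dispatch, so a nested downcall cannot be delivered while one is being processed, and restored afterward. A C-level sketch of the bracket; the accessor is hypothetical, standing in for the word GET_INTERRUPTS_ENABLED_MASK_PTR addresses, and 1UL << INT_INTCTRL_K stands in for INT_MASK():

    static void downcall_bracket(struct pt_regs *regs, int intnum)
    {
            unsigned long *mask = this_cpu_irq_enable_mask(); /* hypothetical */

            *mask &= ~(1UL << INT_INTCTRL_K);  /* block nested downcalls */
            hv_message_intr(regs, intnum);     /* the C downcall handler */
            *mask |= 1UL << INT_INTCTRL_K;     /* re-allow downcalls */
    }
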
@@ -1472,7 +1472,12 @@ handle_ill:
1472 lw r26, r24 1472 lw r26, r24
1473 sw r28, r26 1473 sw r28, r26
1474 1474
1475 /* Clear TIF_SINGLESTEP */ 1475 /*
1476 * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
1477 * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
1478 * need to clear it here and can't really impose on all other arches.
1479 * So what's another write between friends?
1480 */
1476 GET_THREAD_INFO(r0) 1481 GET_THREAD_INFO(r0)
1477 1482
1478 addi r1, r0, THREAD_INFO_FLAGS_OFFSET 1483 addi r1, r0, THREAD_INFO_FLAGS_OFFSET
@@ -1509,7 +1514,7 @@ handle_ill:
1509/* Various stub interrupt handlers and syscall handlers */ 1514/* Various stub interrupt handlers and syscall handlers */
1510 1515
1511STD_ENTRY_LOCAL(_kernel_double_fault) 1516STD_ENTRY_LOCAL(_kernel_double_fault)
1512 mfspr r1, EX_CONTEXT_1_0 1517 mfspr r1, SPR_EX_CONTEXT_K_0
1513 move r2, lr 1518 move r2, lr
1514 move r3, sp 1519 move r3, sp
1515 move r4, r52 1520 move r4, r52
@@ -1518,34 +1523,29 @@ STD_ENTRY_LOCAL(_kernel_double_fault)
1518 STD_ENDPROC(_kernel_double_fault) 1523 STD_ENDPROC(_kernel_double_fault)
1519 1524
1520STD_ENTRY_LOCAL(bad_intr) 1525STD_ENTRY_LOCAL(bad_intr)
1521 mfspr r2, EX_CONTEXT_1_0 1526 mfspr r2, SPR_EX_CONTEXT_K_0
1522 panic "Unhandled interrupt %#x: PC %#lx" 1527 panic "Unhandled interrupt %#x: PC %#lx"
1523 STD_ENDPROC(bad_intr) 1528 STD_ENDPROC(bad_intr)
1524 1529
1525/* Put address of pt_regs in reg and jump. */ 1530/* Put address of pt_regs in reg and jump. */
1526#define PTREGS_SYSCALL(x, reg) \ 1531#define PTREGS_SYSCALL(x, reg) \
1527 STD_ENTRY(x); \ 1532 STD_ENTRY(_##x); \
1528 { \ 1533 { \
1529 PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \ 1534 PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
1530 j _##x \ 1535 j x \
1531 }; \ 1536 }; \
1532 STD_ENDPROC(x) 1537 STD_ENDPROC(_##x)
1533 1538
1534PTREGS_SYSCALL(sys_execve, r3) 1539PTREGS_SYSCALL(sys_execve, r3)
1535PTREGS_SYSCALL(sys_sigaltstack, r2) 1540PTREGS_SYSCALL(sys_sigaltstack, r2)
1536PTREGS_SYSCALL(sys_rt_sigreturn, r0) 1541PTREGS_SYSCALL(sys_rt_sigreturn, r0)
1542PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
1537 1543
1538/* Save additional callee-saves to pt_regs, put address in reg and jump. */ 1544/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
1539#define PTREGS_SYSCALL_ALL_REGS(x, reg) \ 1545STD_ENTRY(_sys_clone)
1540 STD_ENTRY(x); \ 1546 push_extra_callee_saves r4
1541 push_extra_callee_saves reg; \ 1547 j sys_clone
1542 j _##x; \ 1548 STD_ENDPROC(_sys_clone)
1543 STD_ENDPROC(x)
1544
1545PTREGS_SYSCALL_ALL_REGS(sys_fork, r0)
1546PTREGS_SYSCALL_ALL_REGS(sys_vfork, r0)
1547PTREGS_SYSCALL_ALL_REGS(sys_clone, r4)
1548PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
1549 1549
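
The macro rework flips the naming: the C bodies keep their canonical sys_* names, becoming SYSCALL_DEFINEn functions with struct pt_regs * as a trailing argument (see the process.c and signal.c hunks below), while the assembly stubs take the _sys_* names and simply materialize the pt_regs pointer in the agreed argument register before jumping. In C terms the stub amounts to roughly this; the pt_regs accessor is illustrative, not a real tile API:

    /* What PTREGS_SYSCALL(sys_execve, r3) arranges: r3 is the fourth
     * argument register, so the C body sees the saved registers as
     * its final parameter. */
    long _sys_execve(const char __user *path,
                     const char __user *const __user *argv,
                     const char __user *const __user *envp)
    {
            struct pt_regs *regs = stack_pt_regs();  /* hypothetical */
            return sys_execve(path, argv, envp, regs);
    }
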
1550/* 1550/*
1551 * This entrypoint is taken for the cmpxchg and atomic_update fast 1551 * This entrypoint is taken for the cmpxchg and atomic_update fast
@@ -1558,12 +1558,14 @@ PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
1558 * to be available to it on entry. It does not modify any callee-save 1558 * to be available to it on entry. It does not modify any callee-save
1559 * registers (including "lr"). It does not check what PL it is being 1559 * registers (including "lr"). It does not check what PL it is being
1560 * called at, so you'd better not call it other than at PL0. 1560 * called at, so you'd better not call it other than at PL0.
1561 * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
1562 * it ever is necessary to use more registers, be aware.
1561 * 1563 *
1562 * It does not use the stack, but since it might be re-interrupted by 1564 * It does not use the stack, but since it might be re-interrupted by
1563 * a page fault which would assume the stack was valid, it does 1565 * a page fault which would assume the stack was valid, it does
1564 * save/restore the stack pointer and zero it out to make sure it gets reset. 1566 * save/restore the stack pointer and zero it out to make sure it gets reset.
1565 * Since we always keep interrupts disabled, the hypervisor won't 1567 * Since we always keep interrupts disabled, the hypervisor won't
1566 * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them 1568 * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
1567 * (other than to advance the PC on return). 1569 * (other than to advance the PC on return).
1568 * 1570 *
1569 * We have to manually validate the user vs kernel address range 1571 * We have to manually validate the user vs kernel address range
@@ -1769,7 +1771,7 @@ ENTRY(sys_cmpxchg)
1769 /* Do slow mtspr here so the following "mf" waits less. */ 1771 /* Do slow mtspr here so the following "mf" waits less. */
1770 { 1772 {
1771 move sp, r27 1773 move sp, r27
1772 mtspr EX_CONTEXT_1_0, r28 1774 mtspr SPR_EX_CONTEXT_K_0, r28
1773 } 1775 }
1774 mf 1776 mf
1775 1777
@@ -1788,7 +1790,7 @@ ENTRY(sys_cmpxchg)
1788 } 1790 }
1789 { 1791 {
1790 move sp, r27 1792 move sp, r27
1791 mtspr EX_CONTEXT_1_0, r28 1793 mtspr SPR_EX_CONTEXT_K_0, r28
1792 } 1794 }
1793 iret 1795 iret
1794 1796
@@ -1816,7 +1818,7 @@ ENTRY(sys_cmpxchg)
1816#endif 1818#endif
1817 1819
1818 /* Issue the slow SPR here while the tns result is in flight. */ 1820 /* Issue the slow SPR here while the tns result is in flight. */
1819 mfspr r28, EX_CONTEXT_1_0 1821 mfspr r28, SPR_EX_CONTEXT_K_0
1820 1822
1821 { 1823 {
1822 addi r28, r28, 8 /* return to the instruction after the swint1 */ 1824 addi r28, r28, 8 /* return to the instruction after the swint1 */
@@ -1904,7 +1906,7 @@ ENTRY(sys_cmpxchg)
1904.Lcmpxchg64_mismatch: 1906.Lcmpxchg64_mismatch:
1905 { 1907 {
1906 move sp, r27 1908 move sp, r27
1907 mtspr EX_CONTEXT_1_0, r28 1909 mtspr SPR_EX_CONTEXT_K_0, r28
1908 } 1910 }
1909 mf 1911 mf
1910 { 1912 {
@@ -1985,8 +1987,13 @@ int_unalign:
1985 int_hand INT_PERF_COUNT, PERF_COUNT, \ 1987 int_hand INT_PERF_COUNT, PERF_COUNT, \
1986 op_handle_perf_interrupt, handle_nmi 1988 op_handle_perf_interrupt, handle_nmi
1987 int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr 1989 int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
1990#if CONFIG_KERNEL_PL == 2
1991 dc_dispatch INT_INTCTRL_2, INTCTRL_2
1992 int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr
1993#else
1988 int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr 1994 int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
1989 dc_dispatch INT_INTCTRL_1, INTCTRL_1 1995 dc_dispatch INT_INTCTRL_1, INTCTRL_1
1996#endif
1990 int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr 1997 int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
1991 int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \ 1998 int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
1992 hv_message_intr, handle_interrupt_downcall 1999 hv_message_intr, handle_interrupt_downcall
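
This conditional is the core of the "parameterize system PLs" change: a kernel built with CONFIG_KERNEL_PL == 2 runs at PL2, keeping PL1 free for a future KVM layer, so the hypervisor downcall dispatcher moves to INTCTRL_2 and INTCTRL_1 becomes an unexpected interrupt; a traditional PL1 kernel keeps the old arrangement. All the _K SPR and interrupt names in this patch resolve the same way. A sketch of the effect (the real <arch/spr_def.h> in this series builds the names by pasting CONFIG_KERNEL_PL into them):

    #if CONFIG_KERNEL_PL == 2
    #define SPR_SYSTEM_SAVE_K_0  SPR_SYSTEM_SAVE_2_0
    #define SPR_EX_CONTEXT_K_0   SPR_EX_CONTEXT_2_0
    #define INT_INTCTRL_K        INT_INTCTRL_2
    #else  /* CONFIG_KERNEL_PL == 1 */
    #define SPR_SYSTEM_SAVE_K_0  SPR_SYSTEM_SAVE_1_0
    #define SPR_EX_CONTEXT_K_0   SPR_EX_CONTEXT_1_0
    #define INT_INTCTRL_K        INT_INTCTRL_1
    #endif
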
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 9a27d563fc30..e63917687e99 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -61,9 +61,9 @@ static DEFINE_SPINLOCK(available_irqs_lock);
61 61
62#if CHIP_HAS_IPI() 62#if CHIP_HAS_IPI()
63/* Use SPRs to manipulate device interrupts. */ 63/* Use SPRs to manipulate device interrupts. */
64#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask) 64#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
65#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask) 65#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
66#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask) 66#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
67#else 67#else
68/* Use HV to manipulate device interrupts. */ 68/* Use HV to manipulate device interrupts. */
69#define mask_irqs(irq_mask) hv_disable_intr(irq_mask) 69#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
@@ -89,16 +89,16 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
89 * masked by a previous interrupt. Then, mask out the ones 89 * masked by a previous interrupt. Then, mask out the ones
90 * we're going to handle. 90 * we're going to handle.
91 */ 91 */
92 unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1); 92 unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
93 original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked; 93 original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
94 __insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs); 94 __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
95#else 95#else
96 /* 96 /*
97 * Hypervisor performs the equivalent of the Gx code above and 97 * Hypervisor performs the equivalent of the Gx code above and
98 * then puts the pending interrupt mask into a system save reg 98 * then puts the pending interrupt mask into a system save reg
99 * for us to find. 99 * for us to find.
100 */ 100 */
101 original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3); 101 original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
102#endif 102#endif
103 remaining_irqs = original_irqs; 103 remaining_irqs = original_irqs;
104 104
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
225 /* Enable interrupt delivery. */ 225 /* Enable interrupt delivery. */
226 unmask_irqs(~0UL); 226 unmask_irqs(~0UL);
227#if CHIP_HAS_IPI() 227#if CHIP_HAS_IPI()
228 raw_local_irq_unmask(INT_IPI_1); 228 raw_local_irq_unmask(INT_IPI_K);
229#endif 229#endif
230} 230}
231 231
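
On chips with CHIP_HAS_IPI() the kernel now drives device interrupts through kernel-PL SPRs directly: EVENT latches pending sources, MASK_SET masks, MASK_RESET unmasks. The dispatch in tile_dev_intr() handles only sources that are pending and not already masked, and masks them first so they cannot be re-delivered mid-handler. A condensed sketch of that loop; the per-bit handoff to generic_handle_irq() is illustrative:

    unsigned long masked  = __insn_mfspr(SPR_IPI_MASK_K);
    unsigned long pending = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;

    __insn_mtspr(SPR_IPI_MASK_SET_K, pending);   /* block re-delivery */
    while (pending) {
            int irq = __builtin_ctzl(pending);   /* lowest pending bit */
            pending &= pending - 1;              /* clear that bit */
            generic_handle_irq(irq);             /* illustrative handoff */
    }
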
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 6d23ed271d10..997e3933f726 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
34 panic("hv_register_message_state: error %d", rc); 34 panic("hv_register_message_state: error %d", rc);
35 35
36 /* Make sure downcall interrupts will be enabled. */ 36 /* Make sure downcall interrupts will be enabled. */
37 raw_local_irq_unmask(INT_INTCTRL_1); 37 raw_local_irq_unmask(INT_INTCTRL_K);
38} 38}
39 39
40void hv_message_intr(struct pt_regs *regs, int intnum) 40void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 84c29111756c..8430f45daea6 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -214,9 +214,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
214 /* 214 /*
215 * Copy the callee-saved registers from the passed pt_regs struct 215 * Copy the callee-saved registers from the passed pt_regs struct
216 * into the context-switch callee-saved registers area. 216 * into the context-switch callee-saved registers area.
217 * We have to restore the callee-saved registers since we may 217 * This way when we start the interrupt-return sequence, the
218 * be cloning a userspace task with userspace register state, 218 * callee-save registers will be correctly in registers, which
219 * and we won't be unwinding the same kernel frames to restore them. 219 * is how we assume the compiler leaves them as we start doing
220 * the normal return-from-interrupt path after calling C code.
220 * Zero out the C ABI save area to mark the top of the stack. 221 * Zero out the C ABI save area to mark the top of the stack.
221 */ 222 */
222 ksp = (unsigned long) childregs; 223 ksp = (unsigned long) childregs;
@@ -304,15 +305,25 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
304/* Allow user processes to access the DMA SPRs */ 305/* Allow user processes to access the DMA SPRs */
305void grant_dma_mpls(void) 306void grant_dma_mpls(void)
306{ 307{
308#if CONFIG_KERNEL_PL == 2
309 __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
310 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
311#else
307 __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1); 312 __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
308 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1); 313 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
314#endif
309} 315}
310 316
311/* Forbid user processes from accessing the DMA SPRs */ 317/* Forbid user processes from accessing the DMA SPRs */
312void restrict_dma_mpls(void) 318void restrict_dma_mpls(void)
313{ 319{
320#if CONFIG_KERNEL_PL == 2
321 __insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
322 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
323#else
314 __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1); 324 __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
315 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1); 325 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
326#endif
316} 327}
317 328
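
Both functions work by reprogramming the minimum privilege level of the DMA interrupts: writing 1 to SPR_MPL_<INT>_SET_<PL> routes interrupt <INT>, and access to its SPRs, to privilege level <PL>. "Grant" therefore hands the DMA SPRs to user code at PL0, and "restrict" pulls them back to whichever PL the kernel occupies. The pair could equally be written around one hypothetical helper (SPR numbers must be immediates, hence the switch):

    static void set_dma_mpl(int pl)  /* hypothetical helper */
    {
            switch (pl) {
            case 0:
                    __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
                    __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
                    break;
            case 1:
                    __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
                    __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
                    break;
            case 2:
                    __insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
                    __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
                    break;
            }
    }

grant_dma_mpls() is then set_dma_mpl(0), and restrict_dma_mpls() is set_dma_mpl(CONFIG_KERNEL_PL).
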
318/* Pause the DMA engine, then save off its state registers. */ 329/* Pause the DMA engine, then save off its state registers. */
@@ -523,19 +534,14 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
523 * Switch kernel SP, PC, and callee-saved registers. 534 * Switch kernel SP, PC, and callee-saved registers.
524 * In the context of the new task, return the old task pointer 535 * In the context of the new task, return the old task pointer
525 * (i.e. the task that actually called __switch_to). 536 * (i.e. the task that actually called __switch_to).
526 * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp. 537 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
527 */ 538 */
528 return __switch_to(prev, next, next_current_ksp0(next)); 539 return __switch_to(prev, next, next_current_ksp0(next));
529} 540}
530 541
531long _sys_fork(struct pt_regs *regs) 542SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
532{ 543 void __user *, parent_tidptr, void __user *, child_tidptr,
533 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); 544 struct pt_regs *, regs)
534}
535
536long _sys_clone(unsigned long clone_flags, unsigned long newsp,
537 void __user *parent_tidptr, void __user *child_tidptr,
538 struct pt_regs *regs)
539{ 545{
540 if (!newsp) 546 if (!newsp)
541 newsp = regs->sp; 547 newsp = regs->sp;
@@ -543,18 +549,13 @@ long _sys_clone(unsigned long clone_flags, unsigned long newsp,
543 parent_tidptr, child_tidptr); 549 parent_tidptr, child_tidptr);
544} 550}
545 551
546long _sys_vfork(struct pt_regs *regs)
547{
548 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
549 regs, 0, NULL, NULL);
550}
551
552/* 552/*
553 * sys_execve() executes a new program. 553 * sys_execve() executes a new program.
554 */ 554 */
555long _sys_execve(const char __user *path, 555SYSCALL_DEFINE4(execve, const char __user *, path,
556 const char __user *const __user *argv, 556 const char __user *const __user *, argv,
557 const char __user *const __user *envp, struct pt_regs *regs) 557 const char __user *const __user *, envp,
558 struct pt_regs *, regs)
558{ 559{
559 long error; 560 long error;
560 char *filename; 561 char *filename;
@@ -570,9 +571,10 @@ out:
570} 571}
571 572
572#ifdef CONFIG_COMPAT 573#ifdef CONFIG_COMPAT
573long _compat_sys_execve(const char __user *path, 574long compat_sys_execve(const char __user *path,
574 const compat_uptr_t __user *argv, 575 const compat_uptr_t __user *argv,
575 const compat_uptr_t __user *envp, struct pt_regs *regs) 576 const compat_uptr_t __user *envp,
577 struct pt_regs *regs)
576{ 578{
577 long error; 579 long error;
578 char *filename; 580 char *filename;
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 7161bd03d2fd..5b20c2874d51 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -32,25 +32,6 @@ void user_disable_single_step(struct task_struct *child)
32} 32}
33 33
34/* 34/*
35 * This routine will put a word on the process's privileged stack.
36 */
37static void putreg(struct task_struct *task,
38 unsigned long addr, unsigned long value)
39{
40 unsigned int regno = addr / sizeof(unsigned long);
41 struct pt_regs *childregs = task_pt_regs(task);
42 childregs->regs[regno] = value;
43 childregs->flags |= PT_FLAGS_RESTORE_REGS;
44}
45
46static unsigned long getreg(struct task_struct *task, unsigned long addr)
47{
48 unsigned int regno = addr / sizeof(unsigned long);
49 struct pt_regs *childregs = task_pt_regs(task);
50 return childregs->regs[regno];
51}
52
53/*
54 * Called by kernel/ptrace.c when detaching.. 35 * Called by kernel/ptrace.c when detaching..
55 */ 36 */
56void ptrace_disable(struct task_struct *child) 37void ptrace_disable(struct task_struct *child)
@@ -66,59 +47,72 @@ void ptrace_disable(struct task_struct *child)
66 47
67long arch_ptrace(struct task_struct *child, long request, long addr, long data) 48long arch_ptrace(struct task_struct *child, long request, long addr, long data)
68{ 49{
69 unsigned long __user *datap; 50 unsigned long __user *datap = (long __user __force *)data;
70 unsigned long tmp; 51 unsigned long tmp;
71 int i; 52 int i;
72 long ret = -EIO; 53 long ret = -EIO;
73 54 unsigned long *childregs;
74#ifdef CONFIG_COMPAT 55 char *childreg;
75 if (task_thread_info(current)->status & TS_COMPAT)
76 data = (u32)data;
77 if (task_thread_info(child)->status & TS_COMPAT)
78 addr = (u32)addr;
79#endif
80 datap = (unsigned long __user __force *)data;
81 56
82 switch (request) { 57 switch (request) {
83 58
84 case PTRACE_PEEKUSR: /* Read register from pt_regs. */ 59 case PTRACE_PEEKUSR: /* Read register from pt_regs. */
85 if (addr & (sizeof(data)-1))
86 break;
87 if (addr < 0 || addr >= PTREGS_SIZE) 60 if (addr < 0 || addr >= PTREGS_SIZE)
88 break; 61 break;
89 tmp = getreg(child, addr); /* Read register */ 62 childreg = (char *)task_pt_regs(child) + addr;
90 ret = put_user(tmp, datap); 63#ifdef CONFIG_COMPAT
64 if (is_compat_task()) {
65 if (addr & (sizeof(compat_long_t)-1))
66 break;
67 ret = put_user(*(compat_long_t *)childreg,
68 (compat_long_t __user *)datap);
69 } else
70#endif
71 {
72 if (addr & (sizeof(long)-1))
73 break;
74 ret = put_user(*(long *)childreg, datap);
75 }
91 break; 76 break;
92 77
93 case PTRACE_POKEUSR: /* Write register in pt_regs. */ 78 case PTRACE_POKEUSR: /* Write register in pt_regs. */
94 if (addr & (sizeof(data)-1))
95 break;
96 if (addr < 0 || addr >= PTREGS_SIZE) 79 if (addr < 0 || addr >= PTREGS_SIZE)
97 break; 80 break;
98 putreg(child, addr, data); /* Write register */ 81 childreg = (char *)task_pt_regs(child) + addr;
82#ifdef CONFIG_COMPAT
83 if (is_compat_task()) {
84 if (addr & (sizeof(compat_long_t)-1))
85 break;
86 *(compat_long_t *)childreg = data;
87 } else
88#endif
89 {
90 if (addr & (sizeof(long)-1))
91 break;
92 *(long *)childreg = data;
93 }
99 ret = 0; 94 ret = 0;
100 break; 95 break;
101 96
102 case PTRACE_GETREGS: /* Get all registers from the child. */ 97 case PTRACE_GETREGS: /* Get all registers from the child. */
103 if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE)) 98 if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
104 break; 99 break;
105 for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) { 100 childregs = (long *)task_pt_regs(child);
106 ret = __put_user(getreg(child, i), datap); 101 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
102 ret = __put_user(childregs[i], &datap[i]);
107 if (ret != 0) 103 if (ret != 0)
108 break; 104 break;
109 datap++;
110 } 105 }
111 break; 106 break;
112 107
113 case PTRACE_SETREGS: /* Set all registers in the child. */ 108 case PTRACE_SETREGS: /* Set all registers in the child. */
114 if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE)) 109 if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
115 break; 110 break;
116 for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) { 111 childregs = (long *)task_pt_regs(child);
117 ret = __get_user(tmp, datap); 112 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
113 ret = __get_user(childregs[i], &datap[i]);
118 if (ret != 0) 114 if (ret != 0)
119 break; 115 break;
120 putreg(child, i, tmp);
121 datap++;
122 } 116 }
123 break; 117 break;
124 118
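
The rewritten PEEKUSR/POKEUSR treat addr as a byte offset into pt_regs and size the transfer by the tracee's personality: a COMPAT (32-bit) task moves a compat_long_t at 4-byte alignment, a native task a full long at 8-byte alignment, which is what makes ptrace() behave for TILE-Gx COMPAT mode. From the tracer the request looks the same either way; a minimal user-space sketch, with register numbering and error handling illustrative:

    #include <errno.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    /* Read register `regno` of a stopped tracee via the byte-offset
     * convention the kernel code above expects. */
    long peek_reg(pid_t pid, int regno)
    {
            errno = 0;
            long val = ptrace(PTRACE_PEEKUSER, pid,
                              regno * sizeof(long), 0);
            return (val == -1 && errno != 0) ? -1 : val;
    }
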
diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S
index e88d6e122783..caa13101c264 100644
--- a/arch/tile/kernel/regs_32.S
+++ b/arch/tile/kernel/regs_32.S
@@ -85,7 +85,7 @@ STD_ENTRY_SECTION(__switch_to, .sched.text)
85 { 85 {
86 /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */ 86 /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
87 move sp, r13 87 move sp, r13
88 mtspr SYSTEM_SAVE_1_0, r2 88 mtspr SPR_SYSTEM_SAVE_K_0, r2
89 } 89 }
90 FOR_EACH_CALLEE_SAVED_REG(LOAD_REG) 90 FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
91.L__switch_to_pc: 91.L__switch_to_pc:
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index e7d54c73d5c1..f3a50e74f9a4 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -187,11 +187,11 @@ early_param("vmalloc", parse_vmalloc);
187 187
188#ifdef CONFIG_HIGHMEM 188#ifdef CONFIG_HIGHMEM
189/* 189/*
190 * Determine for each controller where its lowmem is mapped and how 190 * Determine for each controller where its lowmem is mapped and how much of
191 * much of it is mapped there. On controller zero, the first few 191 * it is mapped there. On controller zero, the first few megabytes are
192 * megabytes are mapped at 0xfd000000 as code, so in principle we 192 * already mapped in as code at MEM_SV_INTRPT, so in principle we could
193 * could start our data mappings higher up, but for now we don't 193 * start our data mappings higher up, but for now we don't bother, to avoid
194 * bother, to avoid additional confusion. 194 * additional confusion.
195 * 195 *
196 * One question is whether, on systems with more than 768 Mb and 196 * One question is whether, on systems with more than 768 Mb and
197 * controllers of different sizes, to map in a proportionate amount of 197 * controllers of different sizes, to map in a proportionate amount of
@@ -311,7 +311,7 @@ static void __init setup_memory(void)
311#endif 311#endif
312 312
313 /* We are using a char to hold the cpu_2_node[] mapping */ 313 /* We are using a char to hold the cpu_2_node[] mapping */
314 BUG_ON(MAX_NUMNODES > 127); 314 BUILD_BUG_ON(MAX_NUMNODES > 127);
315 315
316 /* Discover the ranges of memory available to us */ 316 /* Discover the ranges of memory available to us */
317 for (i = 0; ; ++i) { 317 for (i = 0; ; ++i) {
@@ -876,6 +876,9 @@ void __cpuinit setup_cpu(int boot)
876#if CHIP_HAS_SN_PROC() 876#if CHIP_HAS_SN_PROC()
877 raw_local_irq_unmask(INT_SNITLB_MISS); 877 raw_local_irq_unmask(INT_SNITLB_MISS);
878#endif 878#endif
879#ifdef __tilegx__
880 raw_local_irq_unmask(INT_SINGLE_STEP_K);
881#endif
879 882
880 /* 883 /*
881 * Allow user access to many generic SPRs, like the cycle 884 * Allow user access to many generic SPRs, like the cycle
@@ -893,11 +896,12 @@ void __cpuinit setup_cpu(int boot)
893#endif 896#endif
894 897
895 /* 898 /*
896 * Set the MPL for interrupt control 0 to user level. 899 * Set the MPL for interrupt control 0 & 1 to the corresponding
897 * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs, 900 * values. This includes access to the SYSTEM_SAVE and EX_CONTEXT
898 * as well as the PL 0 interrupt mask. 901 * SPRs, as well as the interrupt mask.
899 */ 902 */
900 __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1); 903 __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
904 __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
901 905
902 /* Initialize IRQ support for this cpu. */ 906 /* Initialize IRQ support for this cpu. */
903 setup_irq_regs(); 907 setup_irq_regs();
@@ -1033,7 +1037,7 @@ static void __init validate_va(void)
1033 * In addition, make sure we CAN'T use the end of memory, since 1037 * In addition, make sure we CAN'T use the end of memory, since
1034 * we use the last chunk of each pgd for the pgd_list. 1038 * we use the last chunk of each pgd for the pgd_list.
1035 */ 1039 */
1036 int i, fc_fd_ok = 0; 1040 int i, user_kernel_ok = 0;
1037 unsigned long max_va = 0; 1041 unsigned long max_va = 0;
1038 unsigned long list_va = 1042 unsigned long list_va =
1039 ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT); 1043 ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
@@ -1044,13 +1048,13 @@ static void __init validate_va(void)
1044 break; 1048 break;
1045 if (range.start <= MEM_USER_INTRPT && 1049 if (range.start <= MEM_USER_INTRPT &&
1046 range.start + range.size >= MEM_HV_INTRPT) 1050 range.start + range.size >= MEM_HV_INTRPT)
1047 fc_fd_ok = 1; 1051 user_kernel_ok = 1;
1048 if (range.start == 0) 1052 if (range.start == 0)
1049 max_va = range.size; 1053 max_va = range.size;
1050 BUG_ON(range.start + range.size > list_va); 1054 BUG_ON(range.start + range.size > list_va);
1051 } 1055 }
1052 if (!fc_fd_ok) 1056 if (!user_kernel_ok)
1053 early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n"); 1057 early_panic("Hypervisor not configured for user/kernel VAs\n");
1054 if (max_va == 0) 1058 if (max_va == 0)
1055 early_panic("Hypervisor not configured for low VAs\n"); 1059 early_panic("Hypervisor not configured for low VAs\n");
1056 if (max_va < KERNEL_HIGH_VADDR) 1060 if (max_va < KERNEL_HIGH_VADDR)
@@ -1334,6 +1338,10 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
1334 pte_t *pte; 1338 pte_t *pte;
1335 1339
1336 BUG_ON(pgd_addr_invalid(addr)); 1340 BUG_ON(pgd_addr_invalid(addr));
1341 if (addr < VMALLOC_START || addr >= VMALLOC_END)
1342 panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
1343 " try increasing CONFIG_VMALLOC_RESERVE\n",
1344 addr, VMALLOC_START, VMALLOC_END);
1337 1345
1338 pgd = swapper_pg_dir + pgd_index(addr); 1346 pgd = swapper_pg_dir + pgd_index(addr);
1339 pud = pud_offset(pgd, addr); 1347 pud = pud_offset(pgd, addr);
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index ce183aa1492c..fb28e85ae3ae 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -41,8 +41,8 @@
41#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 41#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
42 42
43 43
44long _sys_sigaltstack(const stack_t __user *uss, 44SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
45 stack_t __user *uoss, struct pt_regs *regs) 45 stack_t __user *, uoss, struct pt_regs *, regs)
46{ 46{
47 return do_sigaltstack(uss, uoss, regs->sp); 47 return do_sigaltstack(uss, uoss, regs->sp);
48} 48}
@@ -78,7 +78,7 @@ int restore_sigcontext(struct pt_regs *regs,
78} 78}
79 79
80/* sigreturn() returns long since it restores r0 in the interrupted code. */ 80/* sigreturn() returns long since it restores r0 in the interrupted code. */
81long _sys_rt_sigreturn(struct pt_regs *regs) 81SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
82{ 82{
83 struct rt_sigframe __user *frame = 83 struct rt_sigframe __user *frame =
84 (struct rt_sigframe __user *)(regs->sp); 84 (struct rt_sigframe __user *)(regs->sp);
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 5ec4b9c651f2..1eb3b39e36c7 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -15,7 +15,7 @@
15 * Derived from iLib's single-stepping code. 15 * Derived from iLib's single-stepping code.
16 */ 16 */
17 17
18#ifndef __tilegx__ /* No support for single-step yet. */ 18#ifndef __tilegx__ /* Hardware support for single step unavailable. */
19 19
20/* These functions are only used on the TILE platform */ 20/* These functions are only used on the TILE platform */
21#include <linux/slab.h> 21#include <linux/slab.h>
@@ -660,4 +660,75 @@ void single_step_once(struct pt_regs *regs)
660 regs->pc += 8; 660 regs->pc += 8;
661} 661}
662 662
663#else
664#include <linux/smp.h>
665#include <linux/ptrace.h>
666#include <arch/spr_def.h>
667
668static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
669
670
671/*
672 * Called directly on the occasion of an interrupt.
673 *
674 * If the process doesn't have single step set, then we use this as an
675 * opportunity to turn single step off.
676 *
677 * It has been mentioned that we could conditionally turn off single stepping
678 * on each entry into the kernel and rely on single_step_once to turn it
679 * on for the processes that matter (as we already do), but this
680 * implementation is somewhat more efficient in that we muck with registers
681 * once on a bum interrupt rather than on every entry into the kernel.
682 *
683 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
684 * so we have to run through this process again before we can say that an
685 * instruction has executed.
686 *
687 * swint will set CANCELED, but it's a legitimate instruction. Fortunately
688 * it changes the PC. If it hasn't changed, then we know that the interrupt
689 * wasn't generated by swint and we'll need to run this process again before
690 * we can say an instruction has executed.
691 *
692 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
693 * on with our lives.
694 */
695
696void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
697{
698 unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
699 struct thread_info *info = (void *)current_thread_info();
700 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
701 unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
702
703 if (is_single_step == 0) {
704 __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
705
706 } else if ((*ss_pc != regs->pc) ||
707 (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
708
709 ptrace_notify(SIGTRAP);
710 control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
711 control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
712 __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
713 }
714}
715
716
717/*
718 * Called from need_singlestep. Set up the control registers and the enable
719 * register, then return back.
720 */
721
722void single_step_once(struct pt_regs *regs)
723{
724 unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
725 unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
726
727 *ss_pc = regs->pc;
728 control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
729 control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
730 __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
731 __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
732}
733
663#endif /* !__tilegx__ */ 734#endif /* !__tilegx__ */
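
The Gx path is a small state machine: single_step_once() stores the PC being stepped from in a per-cpu slot, arms SINGLE_STEP_CONTROL_K with CANCELED|INHIBIT, and enables stepping for USER_PL only; gx_singlestep_handle() then raises SIGTRAP only once the PC has moved or CANCELED has cleared, filtering the spurious re-entries the comment above walks through. A tracer drives all of this with the ordinary ptrace request; a minimal sketch:

    #include <signal.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    /* Step the tracee one instruction; returns 1 on the expected
     * SIGTRAP stop, 0 otherwise. */
    int step_once(pid_t pid)
    {
            int status;

            if (ptrace(PTRACE_SINGLESTEP, pid, 0, 0) != 0)
                    return 0;
            if (waitpid(pid, &status, 0) != pid)
                    return 0;
            return WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP;
    }
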
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 1cb5ec79de04..75255d90aff3 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -212,7 +212,7 @@ void __init ipi_init(void)
212 212
213 tile.x = cpu_x(cpu); 213 tile.x = cpu_x(cpu);
214 tile.y = cpu_y(cpu); 214 tile.y = cpu_y(cpu);
215 if (hv_get_ipi_pte(tile, 1, &pte) != 0) 215 if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
216 panic("Failed to initialize IPI for cpu %d\n", cpu); 216 panic("Failed to initialize IPI for cpu %d\n", cpu);
217 217
218 offset = hv_pte_get_pfn(pte) << PAGE_SHIFT; 218 offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index ea2e0ce28380..0d54106be3d6 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -30,6 +30,10 @@
30#include <arch/abi.h> 30#include <arch/abi.h>
31#include <arch/interrupts.h> 31#include <arch/interrupts.h>
32 32
33#define KBT_ONGOING 0 /* Backtrace still ongoing */
34#define KBT_DONE 1 /* Backtrace cleanly completed */
35#define KBT_RUNNING 2 /* Can't run backtrace on a running task */
36#define KBT_LOOP 3 /* Backtrace entered a loop */
33 37
34/* Is address on the specified kernel stack? */ 38/* Is address on the specified kernel stack? */
35static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp) 39static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
@@ -207,11 +211,11 @@ static int KBacktraceIterator_next_item_inclusive(
207 for (;;) { 211 for (;;) {
208 do { 212 do {
209 if (!KBacktraceIterator_is_sigreturn(kbt)) 213 if (!KBacktraceIterator_is_sigreturn(kbt))
210 return 1; 214 return KBT_ONGOING;
211 } while (backtrace_next(&kbt->it)); 215 } while (backtrace_next(&kbt->it));
212 216
213 if (!KBacktraceIterator_restart(kbt)) 217 if (!KBacktraceIterator_restart(kbt))
214 return 0; 218 return KBT_DONE;
215 } 219 }
216} 220}
217 221
@@ -264,7 +268,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
264 kbt->pgtable = NULL; 268 kbt->pgtable = NULL;
265 kbt->verbose = 0; /* override in caller if desired */ 269 kbt->verbose = 0; /* override in caller if desired */
266 kbt->profile = 0; /* override in caller if desired */ 270 kbt->profile = 0; /* override in caller if desired */
267 kbt->end = 0; 271 kbt->end = KBT_ONGOING;
268 kbt->new_context = 0; 272 kbt->new_context = 0;
269 if (is_current) { 273 if (is_current) {
270 HV_PhysAddr pgdir_pa = hv_inquire_context().page_table; 274 HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
@@ -290,7 +294,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
290 if (regs == NULL) { 294 if (regs == NULL) {
291 if (is_current || t->state == TASK_RUNNING) { 295 if (is_current || t->state == TASK_RUNNING) {
292 /* Can't do this; we need registers */ 296 /* Can't do this; we need registers */
293 kbt->end = 1; 297 kbt->end = KBT_RUNNING;
294 return; 298 return;
295 } 299 }
296 pc = get_switch_to_pc(); 300 pc = get_switch_to_pc();
@@ -305,26 +309,29 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
305 } 309 }
306 310
307 backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52); 311 backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
308 kbt->end = !KBacktraceIterator_next_item_inclusive(kbt); 312 kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
309} 313}
310EXPORT_SYMBOL(KBacktraceIterator_init); 314EXPORT_SYMBOL(KBacktraceIterator_init);
311 315
312int KBacktraceIterator_end(struct KBacktraceIterator *kbt) 316int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
313{ 317{
314 return kbt->end; 318 return kbt->end != KBT_ONGOING;
315} 319}
316EXPORT_SYMBOL(KBacktraceIterator_end); 320EXPORT_SYMBOL(KBacktraceIterator_end);
317 321
318void KBacktraceIterator_next(struct KBacktraceIterator *kbt) 322void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
319{ 323{
324 VirtualAddress old_pc = kbt->it.pc, old_sp = kbt->it.sp;
320 kbt->new_context = 0; 325 kbt->new_context = 0;
321 if (!backtrace_next(&kbt->it) && 326 if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
322 !KBacktraceIterator_restart(kbt)) { 327 kbt->end = KBT_DONE;
323 kbt->end = 1; 328 return;
324 return; 329 }
325 } 330 kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
326 331 if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
327 kbt->end = !KBacktraceIterator_next_item_inclusive(kbt); 332 /* Trapped in a loop; give up. */
333 kbt->end = KBT_LOOP;
334 }
328} 335}
329EXPORT_SYMBOL(KBacktraceIterator_next); 336EXPORT_SYMBOL(KBacktraceIterator_next);
330 337
@@ -387,6 +394,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
387 break; 394 break;
388 } 395 }
389 } 396 }
397 if (kbt->end == KBT_LOOP)
398 pr_err("Stack dump stopped; next frame identical to this one\n");
390 if (headers) 399 if (headers)
391 pr_err("Stack dump complete\n"); 400 pr_err("Stack dump complete\n");
392} 401}
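
With kbt->end promoted from a boolean to the KBT_* states, the iterator reports why it stopped, and KBacktraceIterator_next() catches the runaway case from the changelog ("prevent corrupt top frame from causing backtracer runaway") by snapshotting (pc, sp) before advancing: if neither moved, the walk ends in KBT_LOOP rather than spinning forever. Caller-side use, mirroring tile_show_stack():

    struct KBacktraceIterator kbt;

    KBacktraceIterator_init(&kbt, task, regs);
    for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
            pr_err("  frame: pc %#lx sp %#lx\n", kbt.it.pc, kbt.it.sp);
    if (kbt.end == KBT_LOOP)
            pr_err("  (stopped: next frame identical to this one)\n");
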
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index f0f87eab8c39..7e764669a022 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -110,6 +110,15 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
110#define sys_sync_file_range sys_sync_file_range2 110#define sys_sync_file_range sys_sync_file_range2
111#endif 111#endif
112 112
113/* Call the trampolines to manage pt_regs where necessary. */
114#define sys_execve _sys_execve
115#define sys_sigaltstack _sys_sigaltstack
116#define sys_rt_sigreturn _sys_rt_sigreturn
117#define sys_clone _sys_clone
118#ifndef __tilegx__
119#define sys_cmpxchg_badaddr _sys_cmpxchg_badaddr
120#endif
121
113/* 122/*
114 * Note that we can't include <linux/unistd.h> here since the header 123 * Note that we can't include <linux/unistd.h> here since the header
115 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well. 124 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
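
This #define block is the other half of the trampoline scheme in the intvec_32.S hunks above: the syscall table later in this file is generated by re-including the unistd list with __SYSCALL expanded to an array initializer, so after the renames the table slots resolve to the pt_regs-loading _sys_* stubs while the C bodies keep their conventional names. The standard kernel idiom for that generation, shown for orientation:

    #define __SYSCALL(nr, call) [nr] = (call),

    void *sys_call_table[__NR_syscalls] = {
            [0 ... __NR_syscalls - 1] = sys_ni_syscall,
    #include <asm/unistd.h>
    };
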
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 0f362dc2c57f..5474fc2e77e8 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -260,7 +260,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
260 address = regs->pc; 260 address = regs->pc;
261 break; 261 break;
262 case INT_UNALIGN_DATA: 262 case INT_UNALIGN_DATA:
263#ifndef __tilegx__ /* FIXME: GX: no single-step yet */ 263#ifndef __tilegx__ /* Emulated support for single step debugging */
264 if (unaligned_fixup >= 0) { 264 if (unaligned_fixup >= 0) {
265 struct single_step_state *state = 265 struct single_step_state *state =
266 current_thread_info()->step_state; 266 current_thread_info()->step_state;
@@ -278,7 +278,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
278 case INT_DOUBLE_FAULT: 278 case INT_DOUBLE_FAULT:
279 /* 279 /*
280 * For double fault, "reason" is actually passed as 280 * For double fault, "reason" is actually passed as
281 * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so 281 * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so
282 * we can provide the original fault number rather than 282 * we can provide the original fault number rather than
283 * the uninteresting "INT_DOUBLE_FAULT" so the user can 283 * the uninteresting "INT_DOUBLE_FAULT" so the user can
284 * learn what actually struck while PL0 ICS was set. 284 * learn what actually struck while PL0 ICS was set.
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
new file mode 100644
index 000000000000..b88f9c047781
--- /dev/null
+++ b/arch/tile/kvm/Kconfig
@@ -0,0 +1,38 @@
1#
2# KVM configuration
3#
4
5source "virt/kvm/Kconfig"
6
7menuconfig VIRTUALIZATION
8 bool "Virtualization"
9 ---help---
10 Say Y here to get to see options for using your Linux host to run
11 other operating systems inside virtual machines (guests).
12 This option alone does not add any kernel code.
13
14 If you say N, all options in this submenu will be skipped and
15 disabled.
16
17if VIRTUALIZATION
18
19config KVM
20 tristate "Kernel-based Virtual Machine (KVM) support"
21 depends on HAVE_KVM && MODULES && EXPERIMENTAL
22 select PREEMPT_NOTIFIERS
23 select ANON_INODES
24 ---help---
25 Support hosting paravirtualized guest machines.
26
27 This module provides access to the hardware capabilities through
28 a character device node named /dev/kvm.
29
30 To compile this as a module, choose M here: the module
31 will be called kvm.
32
33 If unsure, say N.
34
35source drivers/vhost/Kconfig
36source drivers/virtio/Kconfig
37
38endif # VIRTUALIZATION
diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile
index 746dc81ed3c4..93122d5b1558 100644
--- a/arch/tile/lib/Makefile
+++ b/arch/tile/lib/Makefile
@@ -3,8 +3,8 @@
3# 3#
4 4
5lib-y = cacheflush.o checksum.o cpumask.o delay.o \ 5lib-y = cacheflush.o checksum.o cpumask.o delay.o \
6 mb_incoherent.o uaccess.o \ 6 mb_incoherent.o uaccess.o memmove.o \
7 memcpy_$(BITS).o memchr_$(BITS).o memmove_$(BITS).o memset_$(BITS).o \ 7 memcpy_$(BITS).o memchr_$(BITS).o memset_$(BITS).o \
8 strchr_$(BITS).o strlen_$(BITS).o 8 strchr_$(BITS).o strlen_$(BITS).o
9 9
10ifeq ($(CONFIG_TILEGX),y) 10ifeq ($(CONFIG_TILEGX),y)
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 8040b42a8eea..7a5cc706ab62 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -300,7 +300,7 @@ void __init __init_atomic_per_cpu(void)
300#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ 300#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
301 301
302 /* Validate power-of-two and "bigger than cpus" assumption */ 302 /* Validate power-of-two and "bigger than cpus" assumption */
303 BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1)); 303 BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
304 BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids); 304 BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);
305 305
306 /* 306 /*
@@ -314,17 +314,17 @@ void __init __init_atomic_per_cpu(void)
314 BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0); 314 BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);
315 315
316 /* The locks must all fit on one page. */ 316 /* The locks must all fit on one page. */
317 BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE); 317 BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
318 318
319 /* 319 /*
320 * We use the page offset of the atomic value's address as 320 * We use the page offset of the atomic value's address as
321 * an index into atomic_locks, excluding the low 3 bits. 321 * an index into atomic_locks, excluding the low 3 bits.
322 * That should not produce more indices than ATOMIC_HASH_SIZE. 322 * That should not produce more indices than ATOMIC_HASH_SIZE.
323 */ 323 */
324 BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE); 324 BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
325 325
326#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ 326#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
327 327
328 /* The futex code makes this assumption, so we validate it here. */ 328 /* The futex code makes this assumption, so we validate it here. */
329 BUG_ON(sizeof(atomic_t) != sizeof(int)); 329 BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
330} 330}
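
These conversions implement the changelog's "replace some BUG_ON checks with BUILD_BUG_ON checks": every condition made only of compile-time constants (power-of-two hash size, locks fitting in one page, sizeof(atomic_t) == sizeof(int)) now fails the build rather than the boot, while the nr_cpu_ids comparison stays a runtime BUG_ON because it is not a constant expression. For reference, the macro's definition in kernels of this vintage turns a true condition into a negative array size:

    /* A true `condition` gives char[-1], a compile error; a false one
     * folds the whole expression away. */
    #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
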
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index ce5dbf56578f..1509c5597653 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -45,6 +45,9 @@ EXPORT_SYMBOL(__copy_from_user_zeroing);
45EXPORT_SYMBOL(__copy_in_user_inatomic); 45EXPORT_SYMBOL(__copy_in_user_inatomic);
46#endif 46#endif
47 47
48/* arch/tile/lib/mb_incoherent.S */
49EXPORT_SYMBOL(__mb_incoherent);
50
48/* hypervisor glue */ 51/* hypervisor glue */
49#include <hv/hypervisor.h> 52#include <hv/hypervisor.h>
50EXPORT_SYMBOL(hv_dev_open); 53EXPORT_SYMBOL(hv_dev_open);
diff --git a/arch/tile/lib/memcpy_32.S b/arch/tile/lib/memcpy_32.S
index 30c3b7ebb55d..2a419a6122db 100644
--- a/arch/tile/lib/memcpy_32.S
+++ b/arch/tile/lib/memcpy_32.S
@@ -10,14 +10,16 @@
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for 11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details. 12 * more details.
13 *
14 * This file shares the implementation of the userspace memcpy and
15 * the kernel's memcpy, copy_to_user and copy_from_user.
16 */ 13 */
17 14
18#include <arch/chip.h> 15#include <arch/chip.h>
19 16
20 17
18/*
19 * This file shares the implementation of the userspace memcpy and
20 * the kernel's memcpy, copy_to_user and copy_from_user.
21 */
22
21#include <linux/linkage.h> 23#include <linux/linkage.h>
22 24
23/* On TILE64, we wrap these functions via arch/tile/lib/memcpy_tile64.c */ 25/* On TILE64, we wrap these functions via arch/tile/lib/memcpy_tile64.c */
@@ -53,9 +55,9 @@
53 */ 55 */
54ENTRY(__copy_from_user_inatomic) 56ENTRY(__copy_from_user_inatomic)
55.type __copy_from_user_inatomic, @function 57.type __copy_from_user_inatomic, @function
56 FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \ 58 FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
57 .text.memcpy_common, \ 59 .text.memcpy_common, \
58 .Lend_memcpy_common - __copy_from_user_inatomic) 60 .Lend_memcpy_common - __copy_from_user_inatomic)
59 { movei r29, IS_COPY_FROM_USER; j memcpy_common } 61 { movei r29, IS_COPY_FROM_USER; j memcpy_common }
60 .size __copy_from_user_inatomic, . - __copy_from_user_inatomic 62 .size __copy_from_user_inatomic, . - __copy_from_user_inatomic
61 63
@@ -64,7 +66,7 @@ ENTRY(__copy_from_user_inatomic)
64 */ 66 */
65ENTRY(__copy_from_user_zeroing) 67ENTRY(__copy_from_user_zeroing)
66.type __copy_from_user_zeroing, @function 68.type __copy_from_user_zeroing, @function
67 FEEDBACK_REENTER(__copy_from_user_inatomic) 69 FEEDBACK_REENTER(__copy_from_user_inatomic)
68 { movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common } 70 { movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
69 .size __copy_from_user_zeroing, . - __copy_from_user_zeroing 71 .size __copy_from_user_zeroing, . - __copy_from_user_zeroing
70 72
@@ -74,13 +76,13 @@ ENTRY(__copy_from_user_zeroing)
74 */ 76 */
75ENTRY(__copy_to_user_inatomic) 77ENTRY(__copy_to_user_inatomic)
76.type __copy_to_user_inatomic, @function 78.type __copy_to_user_inatomic, @function
77 FEEDBACK_REENTER(__copy_from_user_inatomic) 79 FEEDBACK_REENTER(__copy_from_user_inatomic)
78 { movei r29, IS_COPY_TO_USER; j memcpy_common } 80 { movei r29, IS_COPY_TO_USER; j memcpy_common }
79 .size __copy_to_user_inatomic, . - __copy_to_user_inatomic 81 .size __copy_to_user_inatomic, . - __copy_to_user_inatomic
80 82
81ENTRY(memcpy) 83ENTRY(memcpy)
82.type memcpy, @function 84.type memcpy, @function
83 FEEDBACK_REENTER(__copy_from_user_inatomic) 85 FEEDBACK_REENTER(__copy_from_user_inatomic)
84 { movei r29, IS_MEMCPY } 86 { movei r29, IS_MEMCPY }
85 .size memcpy, . - memcpy 87 .size memcpy, . - memcpy
86 /* Fall through */ 88 /* Fall through */
@@ -157,35 +159,35 @@ EX: { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
157 { addi r3, r1, 60; andi r9, r9, -64 } 159 { addi r3, r1, 60; andi r9, r9, -64 }
158 160
159#if CHIP_HAS_WH64() 161#if CHIP_HAS_WH64()
160 /* No need to prefetch dst, we'll just do the wh64 162 /* No need to prefetch dst, we'll just do the wh64
161 * right before we copy a line. 163 * right before we copy a line.
162 */ 164 */
163#endif 165#endif
164 166
165EX: { lw r5, r3; addi r3, r3, 64; movei r4, 1 } 167EX: { lw r5, r3; addi r3, r3, 64; movei r4, 1 }
166 /* Intentionally stall for a few cycles to leave L2 cache alone. */ 168 /* Intentionally stall for a few cycles to leave L2 cache alone. */
167 { bnzt zero, .; move r27, lr } 169 { bnzt zero, .; move r27, lr }
168EX: { lw r6, r3; addi r3, r3, 64 } 170EX: { lw r6, r3; addi r3, r3, 64 }
169 /* Intentionally stall for a few cycles to leave L2 cache alone. */ 171 /* Intentionally stall for a few cycles to leave L2 cache alone. */
170 { bnzt zero, . } 172 { bnzt zero, . }
171EX: { lw r7, r3; addi r3, r3, 64 } 173EX: { lw r7, r3; addi r3, r3, 64 }
172#if !CHIP_HAS_WH64() 174#if !CHIP_HAS_WH64()
173 /* Prefetch the dest */ 175 /* Prefetch the dest */
174 /* Intentionally stall for a few cycles to leave L2 cache alone. */ 176 /* Intentionally stall for a few cycles to leave L2 cache alone. */
175 { bnzt zero, . } 177 { bnzt zero, . }
176 /* Use a real load to cause a TLB miss if necessary. We aren't using 178 /* Use a real load to cause a TLB miss if necessary. We aren't using
177 * r28, so this should be fine. 179 * r28, so this should be fine.
178 */ 180 */
179EX: { lw r28, r9; addi r9, r9, 64 } 181EX: { lw r28, r9; addi r9, r9, 64 }
180 /* Intentionally stall for a few cycles to leave L2 cache alone. */ 182 /* Intentionally stall for a few cycles to leave L2 cache alone. */
181 { bnzt zero, . } 183 { bnzt zero, . }
182 { prefetch r9; addi r9, r9, 64 } 184 { prefetch r9; addi r9, r9, 64 }
183 /* Intentionally stall for a few cycles to leave L2 cache alone. */ 185 /* Intentionally stall for a few cycles to leave L2 cache alone. */
184 { bnzt zero, . } 186 { bnzt zero, . }
185 { prefetch r9; addi r9, r9, 64 } 187 { prefetch r9; addi r9, r9, 64 }
186#endif 188#endif
187 /* Intentionally stall for a few cycles to leave L2 cache alone. */ 189 /* Intentionally stall for a few cycles to leave L2 cache alone. */
188 { bz zero, .Lbig_loop2 } 190 { bz zero, .Lbig_loop2 }
189 191
190 /* On entry to this loop: 192 /* On entry to this loop:
191 * - r0 points to the start of dst line 0 193 * - r0 points to the start of dst line 0
@@ -197,7 +199,7 @@ EX: { lw r28, r9; addi r9, r9, 64 }
197 * to some "safe" recently loaded address. 199 * to some "safe" recently loaded address.
198 * - r5 contains *(r1 + 60) [i.e. last word of source line 0] 200 * - r5 contains *(r1 + 60) [i.e. last word of source line 0]
199 * - r6 contains *(r1 + 64 + 60) [i.e. last word of source line 1] 201 * - r6 contains *(r1 + 64 + 60) [i.e. last word of source line 1]
200 * - r9 contains ((r0 + 63) & -64) 202 * - r9 contains ((r0 + 63) & -64)
201 * [start of next dst cache line.] 203 * [start of next dst cache line.]
202 */ 204 */
203 205
@@ -208,137 +210,137 @@ EX: { lw r28, r9; addi r9, r9, 64 }
208 /* Copy line 0, first stalling until r5 is ready. */ 210 /* Copy line 0, first stalling until r5 is ready. */
209EX: { move r12, r5; lw r16, r1 } 211EX: { move r12, r5; lw r16, r1 }
210 { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 } 212 { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
211 /* Prefetch several lines ahead. */ 213 /* Prefetch several lines ahead. */
212EX: { lw r5, r3; addi r3, r3, 64 } 214EX: { lw r5, r3; addi r3, r3, 64 }
213 { jal .Lcopy_line } 215 { jal .Lcopy_line }
214 216
215 /* Copy line 1, first stalling until r6 is ready. */ 217 /* Copy line 1, first stalling until r6 is ready. */
216EX: { move r12, r6; lw r16, r1 } 218EX: { move r12, r6; lw r16, r1 }
217 { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 } 219 { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
218 /* Prefetch several lines ahead. */ 220 /* Prefetch several lines ahead. */
219EX: { lw r6, r3; addi r3, r3, 64 } 221EX: { lw r6, r3; addi r3, r3, 64 }
220 { jal .Lcopy_line } 222 { jal .Lcopy_line }
221 223
222 /* Copy line 2, first stalling until r7 is ready. */ 224 /* Copy line 2, first stalling until r7 is ready. */
223EX: { move r12, r7; lw r16, r1 } 225EX: { move r12, r7; lw r16, r1 }
224 { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 } 226 { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
225 /* Prefetch several lines ahead. */ 227 /* Prefetch several lines ahead. */
226EX: { lw r7, r3; addi r3, r3, 64 } 228EX: { lw r7, r3; addi r3, r3, 64 }
227 /* Use up a caches-busy cycle by jumping back to the top of the 229 /* Use up a caches-busy cycle by jumping back to the top of the
228 * loop. Might as well get it out of the way now. 230 * loop. Might as well get it out of the way now.
229 */ 231 */
230 { j .Lbig_loop } 232 { j .Lbig_loop }
231 233
232 234
233 /* On entry: 235 /* On entry:
234 * - r0 points to the destination line. 236 * - r0 points to the destination line.
235 * - r1 points to the source line. 237 * - r1 points to the source line.
236 * - r3 is the next prefetch address. 238 * - r3 is the next prefetch address.
237 * - r9 holds the last address used for wh64. 239 * - r9 holds the last address used for wh64.
238 * - r12 = WORD_15 240 * - r12 = WORD_15
239 * - r16 = WORD_0. 241 * - r16 = WORD_0.
240 * - r17 == r1 + 16. 242 * - r17 == r1 + 16.
241 * - r27 holds saved lr to restore. 243 * - r27 holds saved lr to restore.
242 * 244 *
243 * On exit: 245 * On exit:
244 * - r0 is incremented by 64. 246 * - r0 is incremented by 64.
245 * - r1 is incremented by 64, unless that would point to a word 247 * - r1 is incremented by 64, unless that would point to a word
  *   beyond the end of the source array, in which case it is redirected
  *   to point to an arbitrary word already in the cache.
  * - r2 is decremented by 64.
  * - r3 is unchanged, unless it points to a word beyond the
  *   end of the source array, in which case it is redirected
  *   to point to an arbitrary word already in the cache.
  *   Redirecting is OK since if we are that close to the end
  *   of the array we will not come back to this subroutine
  *   and use the contents of the prefetched address.
  * - r4 is nonzero iff r2 >= 64.
  * - r9 is incremented by 64, unless it points beyond the
  *   end of the last full destination cache line, in which
  *   case it is redirected to a "safe address" that can be
  *   clobbered (sp - 64)
  * - lr contains the value in r27.
  */
 
 /* r26 unused */
 
 .Lcopy_line:
 	/* TODO: when r3 goes past the end, we would like to redirect it
 	 * to prefetch the last partial cache line (if any) just once, for the
 	 * benefit of the final cleanup loop.  But we don't want to
 	 * prefetch that line more than once, or subsequent prefetches
 	 * will go into the RTF.  But then .Lbig_loop should unconditionally
 	 * branch to top of loop to execute final prefetch, and its
 	 * nop should become a conditional branch.
 	 */
 
 	/* We need two non-memory cycles here to cover the resources
 	 * used by the loads initiated by the caller.
 	 */
 	{ add r15, r1, r2 }
 .Lcopy_line2:
 	{ slt_u r13, r3, r15; addi r17, r1, 16 }
 
 	/* NOTE: this will stall for one cycle as L1 is busy. */
 
 	/* Fill second L1D line. */
 EX:	{ lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */
 
 #if CHIP_HAS_WH64()
 	/* Prepare destination line for writing. */
 EX:	{ wh64 r9; addi r9, r9, 64 }
 #else
 	/* Prefetch dest line */
 	{ prefetch r9; addi r9, r9, 64 }
 #endif
 	/* Load seven words that are L1D hits to cover wh64 L2 usage. */
 
 	/* Load the three remaining words from the last L1D line, which
 	 * we know has already filled the L1D.
 	 */
 EX:	{ lw r4, r1;  addi r1, r1, 4;   addi r20, r1, 16 } /* r4 = WORD_12 */
 EX:	{ lw r8, r1;  addi r1, r1, 4;   slt_u r13, r20, r15 }/* r8 = WORD_13 */
 EX:	{ lw r11, r1; addi r1, r1, -52; mvz r20, r13, r1 } /* r11 = WORD_14 */
 
 	/* Load the three remaining words from the first L1D line, first
 	 * stalling until it has filled by "looking at" r16.
 	 */
 EX:	{ lw r13, r1; addi r1, r1, 4; move zero, r16 } /* r13 = WORD_1 */
 EX:	{ lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_2 */
 EX:	{ lw r15, r1; addi r1, r1, 8; addi r10, r0, 60 } /* r15 = WORD_3 */
 
 	/* Load second word from the second L1D line, first
 	 * stalling until it has filled by "looking at" r17.
 	 */
 EX:	{ lw r19, r1; addi r1, r1, 4; move zero, r17 } /* r19 = WORD_5 */
 
 	/* Store last word to the destination line, potentially dirtying it
 	 * for the first time, which keeps the L2 busy for two cycles.
 	 */
 EX:	{ sw r10, r12 } /* store(WORD_15) */
 
 	/* Use two L1D hits to cover the sw L2 access above. */
 EX:	{ lw r10, r1; addi r1, r1, 4 } /* r10 = WORD_6 */
 EX:	{ lw r12, r1; addi r1, r1, 4 } /* r12 = WORD_7 */
 
 	/* Fill third L1D line. */
 EX:	{ lw r18, r1; addi r1, r1, 4 } /* r18 = WORD_8 */
 
 	/* Store first L1D line. */
 EX:	{ sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */
 EX:	{ sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
 EX:	{ sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
 #if CHIP_HAS_WH64()
 EX:	{ sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */
 #else
 	/* Back up the r9 to a cache line we are already storing to
 	 * if it gets past the end of the dest vector.  Strictly speaking,
 	 * we don't need to back up to the start of a cache line, but it's free
 	 * and tidy, so why not?
 	 */
 EX:	{ sw r0, r15; addi r0, r0, 4; andi r13, r0, -64 } /* store(WORD_3) */
 #endif
 	/* Store second L1D line. */
 EX:	{ sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }/* store(WORD_4) */
 EX:	{ sw r0, r19; addi r0, r0, 4 } /* store(WORD_5) */
 EX:	{ sw r0, r10; addi r0, r0, 4 } /* store(WORD_6) */
@@ -348,30 +350,30 @@ EX: { lw r13, r1; addi r1, r1, 4; move zero, r18 } /* r13 = WORD_9 */
 EX:	{ lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_10 */
 EX:	{ lw r15, r1; move r1, r20 } /* r15 = WORD_11 */
 
 	/* Store third L1D line. */
 EX:	{ sw r0, r18; addi r0, r0, 4 } /* store(WORD_8) */
 EX:	{ sw r0, r13; addi r0, r0, 4 } /* store(WORD_9) */
 EX:	{ sw r0, r14; addi r0, r0, 4 } /* store(WORD_10) */
 EX:	{ sw r0, r15; addi r0, r0, 4 } /* store(WORD_11) */
 
 	/* Store rest of fourth L1D line. */
 EX:	{ sw r0, r4;  addi r0, r0, 4 } /* store(WORD_12) */
 	{
 EX:	sw r0, r8 /* store(WORD_13) */
 	addi r0, r0, 4
 	/* Will r2 be > 64 after we subtract 64 below? */
 	shri r4, r2, 7
 	}
 	{
 EX:	sw r0, r11 /* store(WORD_14) */
 	addi r0, r0, 8
 	/* Record 64 bytes successfully copied. */
 	addi r2, r2, -64
 	}
 
 	{ jrp lr; move lr, r27 }
 
 	/* Convey to the backtrace library that the stack frame is size
 	 * zero, and the real return address is on the stack rather than
 	 * in 'lr'.
 	 */
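The invariants documented above describe one pass over a single 64-byte (16-word) cache line. A rough C model of one .Lcopy_line iteration — an illustrative sketch with invented names, not anything in the kernel tree — showing the r2/r3/r4 bookkeeping, including why "shri r4, r2, 7" answers "will r2 still be >= 64 after we subtract 64?":

#include <stdint.h>

/* One iteration of the cache-line copy loop, modeled in C (sketch). */
static int copy_line_model(uint32_t *dst, const uint32_t *src,
			   unsigned long *bytes_left,
			   const uint32_t **prefetch,
			   const uint32_t *src_end,
			   const uint32_t *safe_word)
{
	/* "shri r4, r2, 7": nonzero iff r2 >= 128 now, i.e. iff a
	 * full 64-byte line will still remain after this iteration.
	 */
	int r4 = (*bytes_left >> 7) != 0;
	int i;

	/* Copy one full 64-byte (16-word) destination cache line. */
	for (i = 0; i < 16; i++)
		dst[i] = src[i];

	/* "addi r2, r2, -64": record 64 bytes successfully copied. */
	*bytes_left -= 64;

	/* "mvz r3, r13, r1": if the prefetch pointer has run past the
	 * end of the source, redirect it to a word already in the
	 * cache; its contents are never consumed in that case.
	 */
	*prefetch += 16;
	if (*prefetch >= src_end)
		*prefetch = safe_word;

	return r4;	/* matches the invariant: nonzero iff r2 >= 64 */
}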
diff --git a/arch/tile/lib/memmove_32.c b/arch/tile/lib/memmove.c
index fd615ae6ade7..fd615ae6ade7 100644
--- a/arch/tile/lib/memmove_32.c
+++ b/arch/tile/lib/memmove.c
diff --git a/arch/tile/lib/memset_32.c b/arch/tile/lib/memset_32.c
index d014c1fbcbc2..57dbb3a5bff8 100644
--- a/arch/tile/lib/memset_32.c
+++ b/arch/tile/lib/memset_32.c
@@ -18,6 +18,7 @@
 #include <linux/string.h>
 #include <linux/module.h>
 
+#undef memset
 
 void *memset(void *s, int c, size_t n)
 {
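The new #undef matters because the name memset can reach this file as a macro (for instance, mapped to a compiler builtin by an arch header), in which case the out-of-line definition would not parse. A stand-alone illustration of the pattern — a sketch, not the kernel's actual headers:

#include <stddef.h>

/* Pretend a header mapped the name to a compiler builtin... */
#define memset(s, c, n) __builtin_memset((s), (c), (n))

/* ...then the real out-of-line definition must drop the macro first. */
#undef memset

void *memset(void *s, int c, size_t n)
{
	unsigned char *p = s;

	while (n--)
		*p++ = (unsigned char)c;
	return s;
}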
diff --git a/arch/tile/lib/strlen_32.c b/arch/tile/lib/strlen_32.c
index f26f88e11e4a..4974292a5534 100644
--- a/arch/tile/lib/strlen_32.c
+++ b/arch/tile/lib/strlen_32.c
@@ -16,6 +16,8 @@
 #include <linux/string.h>
 #include <linux/module.h>
 
+#undef strlen
+
 size_t strlen(const char *s)
 {
 	/* Get an aligned pointer. */
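The same #undef pattern is applied to strlen. Its opening comment ("Get an aligned pointer") points at the usual word-at-a-time scan; here is a portable sketch of that technique, assuming a little-endian machine — illustrative only, since the real strlen_32.c leans on the tile byte-compare instructions:

#include <stddef.h>
#include <stdint.h>

size_t strlen_sketch(const char *s)
{
	/* Get an aligned pointer.  Reading the word it points at may
	 * touch bytes before 's', but stays within one aligned word,
	 * which is the standard trick here.
	 */
	const uintptr_t s_int = (uintptr_t)s;
	const uint32_t *p = (const uint32_t *)(s_int & -4);

	/* Force the bytes before 's' to be nonzero (little-endian),
	 * so a stray NUL there cannot stop the scan early.
	 */
	uint32_t v = *p | ((1u << ((s_int & 3) * 8)) - 1);

	/* "Has a zero byte" test: no false positives when every byte
	 * of v is nonzero.
	 */
	while (!((v - 0x01010101u) & ~v & 0x80808080u))
		v = *++p;

	/* The terminator is in *p; finish with a byte scan that never
	 * looks before 's'.
	 */
	const char *c = (const char *)p > s ? (const char *)p : s;
	while (*c)
		c++;
	return (size_t)(c - s);
}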
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 704f3e8a4385..f295b4ac941d 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -66,10 +66,10 @@ static noinline void force_sig_info_fault(int si_signo, int si_code,
 #ifndef __tilegx__
 /*
  * Synthesize the fault a PL0 process would get by doing a word-load of
- * an unaligned address or a high kernel address.  Called indirectly
- * from sys_cmpxchg() in kernel/intvec.S.
+ * an unaligned address or a high kernel address.
  */
-int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs)
+SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
+		struct pt_regs *, regs)
 {
 	if (address >= PAGE_OFFSET)
 		force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address,
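Moving from the hand-rolled _sys_cmpxchg_badaddr() to SYSCALL_DEFINE2() gives the routine the conventional sys_ name and linkage that the generic <asm-generic/syscalls.h> declarations expect. Setting aside the tracing metadata the real macro in <linux/syscalls.h> also emits, the expansion amounts to roughly this simplified sketch:

#include <linux/linkage.h>	/* for asmlinkage */

/* Roughly what SYSCALL_DEFINE2(cmpxchg_badaddr, ...) produces
 * (simplified; the real macro adds tracing hooks as well). */
asmlinkage long sys_cmpxchg_badaddr(unsigned long address,
				    struct pt_regs *regs)
{
	/* same body as in the hunk above */
	return 0;
}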
@@ -563,10 +563,10 @@ do_sigbus:
 /*
  * When we take an ITLB or DTLB fault or access violation in the
  * supervisor while the critical section bit is set, the hypervisor is
- * reluctant to write new values into the EX_CONTEXT_1_x registers,
+ * reluctant to write new values into the EX_CONTEXT_K_x registers,
  * since that might indicate we have not yet squirreled the SPR
  * contents away and can thus safely take a recursive interrupt.
- * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
+ * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
  *
  * Note that this routine is called before homecache_tlb_defer_enter(),
  * which means that we can properly unlock any atomics that might
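The EX_CONTEXT_1_x to EX_CONTEXT_K_x and SYSTEM_SAVE_1_2 to SYSTEM_SAVE_K_2 renames come from the "parameterize system PLs" change: the kernel's protection level becomes a build-time parameter rather than being hard-wired to 1. A plausible sketch of the header mechanics, with invented macro names (the real definitions live in <arch/spr_def.h>):

/* Token-paste the configured kernel PL into the SPR names (sketch;
 * the helper macro names here are hypothetical). */
#define __SPR_PASTE(a, pl, b)	a##pl##b
#define _SPR_PASTE(a, pl, b)	__SPR_PASTE(a, pl, b)

#define CONFIG_KERNEL_PL	1	/* assumption: kernel runs at PL1 */

#define SPR_EX_CONTEXT_K_0   _SPR_PASTE(SPR_EX_CONTEXT_, CONFIG_KERNEL_PL, _0)
#define SPR_SYSTEM_SAVE_K_2  _SPR_PASTE(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _2)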
@@ -610,7 +610,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
 	 * fault.  We didn't set up a kernel stack on initial entry to
 	 * sys_cmpxchg, but instead had one set up by the fault, which
 	 * (because sys_cmpxchg never releases ICS) came to us via the
-	 * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
+	 * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
 	 * still referencing the original user code.  We release the
 	 * atomic lock and rewrite pt_regs so that it appears that we
 	 * came from user-space directly, and after we finish the
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index fb3b4a55cec4..d78df3a6ee15 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -37,6 +37,8 @@
 #include <asm/pgalloc.h>
 #include <asm/homecache.h>
 
+#include <arch/sim.h>
+
 #include "migrate.h"
 
 
@@ -217,13 +219,6 @@ static unsigned long cache_flush_length(unsigned long length)
 	return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
 }
 
-/* On the simulator, confirm lines have been evicted everywhere. */
-static void validate_lines_evicted(unsigned long pfn, size_t length)
-{
-	sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED,
-		    (HV_PhysAddr)pfn << PAGE_SHIFT, length);
-}
-
 /* Flush a page out of whatever cache(s) it is in. */
 void homecache_flush_cache(struct page *page, int order)
 {
@@ -234,7 +229,7 @@ void homecache_flush_cache(struct page *page, int order)
 
 	homecache_mask(page, pages, &home_mask);
 	flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
-	validate_lines_evicted(pfn, pages * PAGE_SIZE);
+	sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
 }
 
 
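The open-coded validate_lines_evicted() wrapper goes away in favor of sim_validate_lines_evicted() from the newly added <arch/sim.h>, with PFN_PHYS(pfn) standing in for the hand-rolled (HV_PhysAddr)pfn << PAGE_SHIFT. Judging only from the wrapper removed above, the header helper presumably reduces to something like this sketch (not the header's actual code):

/* Ask the simulator to confirm the lines were evicted everywhere;
 * effectively a no-op on real hardware.  Sketch inferred from the
 * removed wrapper, not copied from <arch/sim.h>. */
static inline void sim_validate_lines_evicted(unsigned long long pa,
					      unsigned long length)
{
	sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, pa, length);
}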
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index d89c9eacd162..78e1982cb6c9 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -1060,7 +1060,7 @@ void free_initmem(void)
 
 	/*
 	 * Free the pages mapped from 0xc0000000 that correspond to code
-	 * pages from 0xfd000000 that we won't use again after init.
+	 * pages from MEM_SV_INTRPT that we won't use again after init.
 	 */
 	free_init_pages("unused kernel text",
 			(unsigned long)_sinittext - text_delta,