author		Chris Metcalf <cmetcalf@tilera.com>	2010-06-25 17:04:17 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2010-07-06 13:41:51 -0400
commit		0707ad30d10110aebc01a5a64fb63f4b32d20b73 (patch)
tree		64d8ba73e605ac26e56808d1d77701b3f83cf8b2 /arch/tile/kernel
parent		c78095bd8c77fca2619769ff8efb639fd100e373 (diff)
arch/tile: Miscellaneous cleanup changes.
This commit is primarily changes caused by reviewing "sparse"
and "checkpatch" output on our sources, so is somewhat noisy, since
things like "printk() -> pr_err()" (or whatever) throughout the
codebase tend to get tedious to read.  Rather than trying to tease
apart precisely which things changed due to which type of code
review, this commit includes various cleanups in the code:

- sparse: Add declarations in headers for globals.
- sparse: Fix __user annotations.
- sparse: Use gfp_t consistently instead of int.
- sparse: Remove functions not actually used.
- checkpatch: Clean up printk() warnings by using pr_info(), etc.;
  also avoid partial-line printks except in bootup code.
- checkpatch: Use exposed structs rather than typedefs.
- checkpatch: Change some C99 comments to C89 comments.

In addition, a couple of minor other changes are rolled in
to this commit:

- Add support for a "raise" instruction to cause SIGFPE, etc., to be raised.
- Remove some compat code that is unnecessary when we fully eliminate
  some of the deprecated syscalls from the generic syscall ABI.
- Update the tile_defconfig to reflect current config contents.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
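For readers skimming the noise below, the bulk of the diff reduces to two
mechanical checkpatch patterns plus the sparse address-space fix. A condensed
before/after sketch (the identifiers here are illustrative only, not from the
tree):

    /* checkpatch: prefer an exposed struct tag over a typedef */
    typedef struct { int field; } ExampleState;     /* before */
    struct example_state { int field; };            /* after */

    /* checkpatch: pr_err() and friends replace printk(KERN_ERR ...) */
    printk(KERN_ERR "bad state %d\n", f);           /* before */
    pr_err("bad state %d\n", f);                    /* after */

    /* sparse: a kernel pointer handed to a __user interface under
     * set_fs(KERNEL_DS) needs an explicit __force __user cast */
    ret = sys_read(fd, (char __force __user *)kbuf, len);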
Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--  arch/tile/kernel/backtrace.c      |  81
-rw-r--r--  arch/tile/kernel/compat.c         |  28
-rw-r--r--  arch/tile/kernel/compat_signal.c  |  10
-rw-r--r--  arch/tile/kernel/early_printk.c   |   2
-rw-r--r--  arch/tile/kernel/entry.S          |   4
-rw-r--r--  arch/tile/kernel/machine_kexec.c  |  38
-rw-r--r--  arch/tile/kernel/messaging.c      |   5
-rw-r--r--  arch/tile/kernel/module.c         |  16
-rw-r--r--  arch/tile/kernel/process.c        | 110
-rw-r--r--  arch/tile/kernel/ptrace.c         |   3
-rw-r--r--  arch/tile/kernel/reboot.c         |   7
-rw-r--r--  arch/tile/kernel/setup.c          | 132
-rw-r--r--  arch/tile/kernel/signal.c         |  19
-rw-r--r--  arch/tile/kernel/single_step.c    |  75
-rw-r--r--  arch/tile/kernel/smpboot.c        |  37
-rw-r--r--  arch/tile/kernel/stack.c          |  43
-rw-r--r--  arch/tile/kernel/sys.c            |  18
-rw-r--r--  arch/tile/kernel/time.c           |   7
-rw-r--r--  arch/tile/kernel/traps.c          | 130
-rw-r--r--  arch/tile/kernel/vmlinux.lds.S    |   4
20 files changed, 420 insertions(+), 349 deletions(-)
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
index 1b0a410ef5e7..77265f3b58d6 100644
--- a/arch/tile/kernel/backtrace.c
+++ b/arch/tile/kernel/backtrace.c
@@ -30,18 +30,18 @@
 
 
 /** A decoded bundle used for backtracer analysis. */
-typedef struct {
+struct BacktraceBundle {
 	tile_bundle_bits bits;
 	int num_insns;
 	struct tile_decoded_instruction
 		insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
-} BacktraceBundle;
+};
 
 
 /* This implementation only makes sense for native tools. */
 /** Default function to read memory. */
-static bool
-bt_read_memory(void *result, VirtualAddress addr, size_t size, void *extra)
+static bool bt_read_memory(void *result, VirtualAddress addr,
+			   size_t size, void *extra)
 {
 	/* FIXME: this should do some horrible signal stuff to catch
 	 * SEGV cleanly and fail.
@@ -58,11 +58,11 @@ bt_read_memory(void *result, VirtualAddress addr, size_t size, void *extra)
  * has the specified mnemonic, and whose first 'num_operands_to_match'
  * operands exactly match those in 'operand_values'.
  */
-static const struct tile_decoded_instruction*
-find_matching_insn(const BacktraceBundle *bundle,
-		   tile_mnemonic mnemonic,
-		   const int *operand_values,
-		   int num_operands_to_match)
+static const struct tile_decoded_instruction *find_matching_insn(
+	const struct BacktraceBundle *bundle,
+	tile_mnemonic mnemonic,
+	const int *operand_values,
+	int num_operands_to_match)
 {
 	int i, j;
 	bool match;
@@ -90,8 +90,7 @@ find_matching_insn(const BacktraceBundle *bundle,
 }
 
 /** Does this bundle contain an 'iret' instruction? */
-static inline bool
-bt_has_iret(const BacktraceBundle *bundle)
+static inline bool bt_has_iret(const struct BacktraceBundle *bundle)
 {
 	return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
 }
@@ -99,8 +98,7 @@ bt_has_iret(const BacktraceBundle *bundle)
 /** Does this bundle contain an 'addi sp, sp, OFFSET' or
  * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
  */
-static bool
-bt_has_addi_sp(const BacktraceBundle *bundle, int *adjust)
+static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
 {
 	static const int vals[2] = { TREG_SP, TREG_SP };
 
@@ -120,8 +118,7 @@ bt_has_addi_sp(const BacktraceBundle *bundle, int *adjust)
  * as an unsigned value by this code since that's what the caller wants.
  * Returns the number of info ops found.
  */
-static int
-bt_get_info_ops(const BacktraceBundle *bundle,
+static int bt_get_info_ops(const struct BacktraceBundle *bundle,
 		int operands[MAX_INFO_OPS_PER_BUNDLE])
 {
 	int num_ops = 0;
@@ -143,8 +140,7 @@ bt_get_info_ops(const BacktraceBundle *bundle,
 /** Does this bundle contain a jrp instruction, and if so, to which
  * register is it jumping?
  */
-static bool
-bt_has_jrp(const BacktraceBundle *bundle, int *target_reg)
+static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg)
 {
 	const struct tile_decoded_instruction *insn =
 		find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0);
@@ -156,8 +152,7 @@ bt_has_jrp(const BacktraceBundle *bundle, int *target_reg)
 }
 
 /** Does this bundle modify the specified register in any way? */
-static bool
-bt_modifies_reg(const BacktraceBundle *bundle, int reg)
+static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg)
 {
 	int i, j;
 	for (i = 0; i < bundle->num_insns; i++) {
@@ -177,30 +172,26 @@ bt_modifies_reg(const BacktraceBundle *bundle, int reg)
 }
 
 /** Does this bundle modify sp? */
-static inline bool
-bt_modifies_sp(const BacktraceBundle *bundle)
+static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle)
 {
 	return bt_modifies_reg(bundle, TREG_SP);
 }
 
 /** Does this bundle modify lr? */
-static inline bool
-bt_modifies_lr(const BacktraceBundle *bundle)
+static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle)
 {
 	return bt_modifies_reg(bundle, TREG_LR);
 }
 
 /** Does this bundle contain the instruction 'move fp, sp'? */
-static inline bool
-bt_has_move_r52_sp(const BacktraceBundle *bundle)
+static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle)
 {
 	static const int vals[2] = { 52, TREG_SP };
 	return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
 }
 
 /** Does this bundle contain the instruction 'sw sp, lr'? */
-static inline bool
-bt_has_sw_sp_lr(const BacktraceBundle *bundle)
+static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
 {
 	static const int vals[2] = { TREG_SP, TREG_LR };
 	return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL;
@@ -209,11 +200,10 @@ bt_has_sw_sp_lr(const BacktraceBundle *bundle)
 /** Locates the caller's PC and SP for a program starting at the
  * given address.
  */
-static void
-find_caller_pc_and_caller_sp(CallerLocation *location,
-			     const VirtualAddress start_pc,
-			     BacktraceMemoryReader read_memory_func,
-			     void *read_memory_func_extra)
+static void find_caller_pc_and_caller_sp(CallerLocation *location,
+					 const VirtualAddress start_pc,
+					 BacktraceMemoryReader read_memory_func,
+					 void *read_memory_func_extra)
 {
 	/* Have we explicitly decided what the sp is,
 	 * rather than just the default?
@@ -253,7 +243,7 @@ find_caller_pc_and_caller_sp(CallerLocation *location,
 
 	for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) {
 
-		BacktraceBundle bundle;
+		struct BacktraceBundle bundle;
 		int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE];
 		int one_ago, jrp_reg;
 		bool has_jrp;
@@ -475,12 +465,11 @@ find_caller_pc_and_caller_sp(CallerLocation *location,
 	}
 }
 
-void
-backtrace_init(BacktraceIterator *state,
-	       BacktraceMemoryReader read_memory_func,
-	       void *read_memory_func_extra,
-	       VirtualAddress pc, VirtualAddress lr,
-	       VirtualAddress sp, VirtualAddress r52)
+void backtrace_init(BacktraceIterator *state,
+		    BacktraceMemoryReader read_memory_func,
+		    void *read_memory_func_extra,
+		    VirtualAddress pc, VirtualAddress lr,
+		    VirtualAddress sp, VirtualAddress r52)
 {
 	CallerLocation location;
 	VirtualAddress fp, initial_frame_caller_pc;
@@ -558,8 +547,7 @@ backtrace_init(BacktraceIterator *state,
 	state->read_memory_func_extra = read_memory_func_extra;
 }
 
-bool
-backtrace_next(BacktraceIterator *state)
+bool backtrace_next(BacktraceIterator *state)
 {
 	VirtualAddress next_fp, next_pc, next_frame[2];
 
@@ -614,12 +602,11 @@ backtrace_next(BacktraceIterator *state)
 
 #else /* TILE_CHIP < 10 */
 
-void
-backtrace_init(BacktraceIterator *state,
-	       BacktraceMemoryReader read_memory_func,
-	       void *read_memory_func_extra,
-	       VirtualAddress pc, VirtualAddress lr,
-	       VirtualAddress sp, VirtualAddress r52)
+void backtrace_init(BacktraceIterator *state,
+		    BacktraceMemoryReader read_memory_func,
+		    void *read_memory_func_extra,
+		    VirtualAddress pc, VirtualAddress lr,
+		    VirtualAddress sp, VirtualAddress r52)
 {
 	state->pc = pc;
 	state->sp = sp;
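For context on the prototypes being reflowed above, the iterator is typically
driven in a loop like this (a minimal sketch only; report_frame() is a
hypothetical consumer, and pc/lr/sp/r52 come from the trapped registers):

    BacktraceIterator state;

    backtrace_init(&state, bt_read_memory, NULL /* extra */,
                   pc, lr, sp, r52);
    do {
        /* state.pc and state.sp describe the current frame */
        report_frame(state.pc, state.sp);   /* hypothetical helper */
    } while (backtrace_next(&state));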
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index a374c99deeb6..b1e06d041555 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -88,34 +88,14 @@ long compat_sys_sched_rr_get_interval(compat_pid_t pid,
 	mm_segment_t old_fs = get_fs();
 
 	set_fs(KERNEL_DS);
-	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
+	ret = sys_sched_rr_get_interval(pid,
+					(struct timespec __force __user *)&t);
 	set_fs(old_fs);
 	if (put_compat_timespec(&t, interval))
 		return -EFAULT;
 	return ret;
 }
 
-ssize_t compat_sys_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
-			    size_t count)
-{
-	mm_segment_t old_fs = get_fs();
-	int ret;
-	off_t of;
-
-	if (offset && get_user(of, offset))
-		return -EFAULT;
-
-	set_fs(KERNEL_DS);
-	ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
-			   count);
-	set_fs(old_fs);
-
-	if (offset && put_user(of, offset))
-		return -EFAULT;
-	return ret;
-}
-
-
 /*
  * The usual compat_sys_msgsnd() and _msgrcv() seem to be assuming
  * some different calling convention than our normal 32-bit tile code.
@@ -177,6 +157,10 @@ long tile_compat_sys_msgrcv(int msqid,
 /* Pass full 64-bit values through ptrace. */
 #define compat_sys_ptrace tile_compat_sys_ptrace
 
+/*
+ * Note that we can't include <linux/unistd.h> here since the header
+ * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
+ */
 void *compat_sys_call_table[__NR_syscalls] = {
 	[0 ... __NR_syscalls-1] = sys_ni_syscall,
 #include <asm/unistd.h>
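The __force __user casts in this file all follow the same compat shape: stage
the result in a kernel buffer, temporarily widen the address limit so the
native syscall accepts a kernel pointer, then copy the result out in the
32-bit layout. Schematically (condensed from compat_sys_sched_rr_get_interval()
above, not new code):

    struct timespec t;
    mm_segment_t old_fs = get_fs();

    set_fs(KERNEL_DS);      /* __user checks now admit kernel pointers */
    ret = sys_sched_rr_get_interval(pid,
                                    (struct timespec __force __user *)&t);
    set_fs(old_fs);         /* always restore the previous limit */
    if (put_compat_timespec(&t, interval))  /* copy out in compat layout */
        return -EFAULT;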
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 9fa4ba8ed5f4..d5efb215dd5f 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -32,13 +32,14 @@
 #include <asm/processor.h>
 #include <asm/ucontext.h>
 #include <asm/sigframe.h>
+#include <asm/syscalls.h>
 #include <arch/interrupts.h>
 
 struct compat_sigaction {
 	compat_uptr_t sa_handler;
 	compat_ulong_t sa_flags;
 	compat_uptr_t sa_restorer;
-	sigset_t sa_mask;	/* mask last for extensibility */
+	sigset_t sa_mask __packed;
 };
 
 struct compat_sigaltstack {
@@ -170,7 +171,7 @@ long compat_sys_rt_sigqueueinfo(int pid, int sig,
 	if (copy_siginfo_from_user32(&info, uinfo))
 		return -EFAULT;
 	set_fs(KERNEL_DS);
-	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
+	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *)&info);
 	set_fs(old_fs);
 	return ret;
 }
@@ -274,7 +275,8 @@ long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
 	}
 	seg = get_fs();
 	set_fs(KERNEL_DS);
-	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
+	ret = do_sigaltstack(uss_ptr ? (stack_t __user __force *)&uss : NULL,
+			     (stack_t __user __force *)&uoss,
 			     (unsigned long)compat_ptr(regs->sp));
 	set_fs(seg);
 	if (ret >= 0 && uoss_ptr) {
@@ -336,7 +338,7 @@ static inline void __user *compat_get_sigframe(struct k_sigaction *ka,
 	 * will die with SIGSEGV.
 	 */
 	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
-		return (void __user *) -1L;
+		return (void __user __force *)-1UL;
 
 	/* This is the X/Open sanctioned signal stack switching. */
 	if (ka->sa.sa_flags & SA_ONSTACK) {
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index e44d441e3f3f..2c54fd43a8a0 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -32,7 +32,7 @@ static struct console early_hv_console = {
 };
 
 /* Direct interface for emergencies */
-struct console *early_console = &early_hv_console;
+static struct console *early_console = &early_hv_console;
 static int early_console_initialized;
 static int early_console_complete;
 
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 136261f7d7f9..3d01383b1b0e 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -13,9 +13,9 @@
  */
 
 #include <linux/linkage.h>
-#include <arch/abi.h>
-#include <asm/unistd.h>
+#include <linux/unistd.h>
 #include <asm/irqflags.h>
+#include <arch/abi.h>
 
 #ifdef __tilegx__
 #define bnzt bnezt
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index ed3e1cb8dcc4..ba7a265d6179 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -75,13 +75,13 @@ void machine_crash_shutdown(struct pt_regs *regs)
 int machine_kexec_prepare(struct kimage *image)
 {
 	if (num_online_cpus() > 1) {
-		printk(KERN_WARNING "%s: detected attempt to kexec "
+		pr_warning("%s: detected attempt to kexec "
 		       "with num_online_cpus() > 1\n",
 		       __func__);
 		return -ENOSYS;
 	}
 	if (image->type != KEXEC_TYPE_DEFAULT) {
-		printk(KERN_WARNING "%s: detected attempt to kexec "
+		pr_warning("%s: detected attempt to kexec "
 		       "with unsupported type: %d\n",
 		       __func__,
 		       image->type);
@@ -124,22 +124,13 @@ static unsigned char *kexec_bn2cl(void *pg)
 		return 0;
 
 	/*
-	 * If we get a checksum mismatch, it's possible that this is
-	 * just a false positive, but relatively unlikely.  We dump
-	 * out the contents of the section so we can diagnose better.
+	 * If we get a checksum mismatch, warn with the checksum
+	 * so we can diagnose better.
 	 */
 	csum = ip_compute_csum(pg, bhdrp->b_size);
 	if (csum != 0) {
-		int i;
-		unsigned char *p = pg;
-		int nbytes = min((Elf32_Word)1000, bhdrp->b_size);
-		printk(KERN_INFO "%s: bad checksum %#x\n", __func__, csum);
-		printk(KERN_INFO "bytes (%d):", bhdrp->b_size);
-		for (i = 0; i < nbytes; ++i)
-			printk(" %02x", p[i]);
-		if (bhdrp->b_size != nbytes)
-			printk(" ...");
-		printk("\n");
+		pr_warning("%s: bad checksum %#x (size %d)\n",
+			   __func__, csum, bhdrp->b_size);
 		return 0;
 	}
 
@@ -156,7 +147,7 @@ static unsigned char *kexec_bn2cl(void *pg)
 		if ((unsigned char *) (nhdrp + 1) >
 		    ((unsigned char *) pg) + bhdrp->b_size) {
 
-			printk(KERN_INFO "%s: out of bounds\n", __func__);
+			pr_info("%s: out of bounds\n", __func__);
 			return 0;
 		}
 	}
@@ -167,7 +158,7 @@ static unsigned char *kexec_bn2cl(void *pg)
 	while (*desc != '\0') {
 		desc++;
 		if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
-			printk(KERN_INFO "%s: ran off end of page\n",
+			pr_info("%s: ran off end of page\n",
 			       __func__);
 			return 0;
 		}
@@ -202,23 +193,20 @@ static void kexec_find_and_set_command_line(struct kimage *image)
 	}
 
 	if (command_line != 0) {
-		printk(KERN_INFO "setting new command line to \"%s\"\n",
+		pr_info("setting new command line to \"%s\"\n",
 		       command_line);
 
 		hverr = hv_set_command_line(
 			(HV_VirtAddr) command_line, strlen(command_line));
 		kunmap_atomic(command_line, KM_USER0);
 	} else {
-		printk(KERN_INFO "%s: no command line found; making empty\n",
+		pr_info("%s: no command line found; making empty\n",
 		       __func__);
 		hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
 	}
-	if (hverr) {
-		printk(KERN_WARNING
-		       "%s: call to hv_set_command_line returned error: %d\n",
-		       __func__, hverr);
-
-	}
+	if (hverr)
+		pr_warning("%s: hv_set_command_line returned error: %d\n",
+			   __func__, hverr);
 }
 
 /*
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index f991f5285d8a..6d23ed271d10 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -18,13 +18,14 @@
 #include <linux/ptrace.h>
 #include <asm/hv_driver.h>
 #include <asm/irq_regs.h>
+#include <asm/traps.h>
 #include <hv/hypervisor.h>
 #include <arch/interrupts.h>
 
 /* All messages are stored here */
 static DEFINE_PER_CPU(HV_MsgState, msg_state);
 
-void __cpuinit init_messaging()
+void __cpuinit init_messaging(void)
 {
 	/* Allocate storage for messages in kernel space */
 	HV_MsgState *state = &__get_cpu_var(msg_state);
@@ -58,7 +59,7 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
 	{
 		long sp = stack_pointer - (long) current_thread_info();
 		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-			printk(KERN_EMERG "hv_message_intr: "
+			pr_emerg("hv_message_intr: "
 			       "stack overflow: %ld\n",
 			       sp - sizeof(struct thread_info));
 			dump_stack();
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index ed3e91161f88..e2ab82b7c7e7 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -107,7 +107,7 @@ int apply_relocate(Elf_Shdr *sechdrs,
 		   unsigned int relsec,
 		   struct module *me)
 {
-	printk(KERN_ERR "module %s: .rel relocation unsupported\n", me->name);
+	pr_err("module %s: .rel relocation unsupported\n", me->name);
 	return -ENOEXEC;
 }
 
@@ -119,8 +119,8 @@ int apply_relocate(Elf_Shdr *sechdrs,
 static int validate_hw2_last(long value, struct module *me)
 {
 	if (((value << 16) >> 16) != value) {
-		printk("module %s: Out of range HW2_LAST value %#lx\n",
+		pr_warning("module %s: Out of range HW2_LAST value %#lx\n",
 		       me->name, value);
 		return 0;
 	}
 	return 1;
@@ -223,10 +223,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 			value -= (unsigned long) location;  /* pc-relative */
 			value = (long) value >> 3;     /* count by instrs */
 			if (!validate_jumpoff(value)) {
-				printk("module %s: Out of range jump to"
+				pr_warning("module %s: Out of range jump to"
 				       " %#llx at %#llx (%p)\n", me->name,
 				       sym->st_value + rel[i].r_addend,
 				       rel[i].r_offset, location);
 				return -ENOEXEC;
 			}
 			MUNGE(create_JumpOff_X1);
@@ -236,7 +236,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 #undef MUNGE
 
 		default:
-			printk(KERN_ERR "module %s: Unknown relocation: %d\n",
+			pr_err("module %s: Unknown relocation: %d\n",
 			       me->name, (int) ELF_R_TYPE(rel[i].r_info));
 			return -ENOEXEC;
 		}
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index c70ff14a48e4..ed590ad0acdc 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -24,9 +24,14 @@
 #include <linux/compat.h>
 #include <linux/hardirq.h>
 #include <linux/syscalls.h>
+#include <linux/kernel.h>
 #include <asm/system.h>
 #include <asm/stack.h>
 #include <asm/homecache.h>
+#include <asm/syscalls.h>
+#ifdef CONFIG_HARDWALL
+#include <asm/hardwall.h>
+#endif
 #include <arch/chip.h>
 #include <arch/abi.h>
 
@@ -43,7 +48,7 @@ static int __init idle_setup(char *str)
 		return -EINVAL;
 
 	if (!strcmp(str, "poll")) {
-		printk("using polling idle threads.\n");
+		pr_info("using polling idle threads.\n");
 		no_idle_nap = 1;
 	} else if (!strcmp(str, "halt"))
 		no_idle_nap = 0;
@@ -62,7 +67,6 @@ early_param("idle", idle_setup);
  */
 void cpu_idle(void)
 {
-	extern void _cpu_idle(void);
 	int cpu = smp_processor_id();
 
 
@@ -108,7 +112,7 @@ void cpu_idle(void)
 struct thread_info *alloc_thread_info(struct task_struct *task)
 {
 	struct page *page;
-	int flags = GFP_KERNEL;
+	gfp_t flags = GFP_KERNEL;
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	flags |= __GFP_ZERO;
@@ -116,7 +120,7 @@ struct thread_info *alloc_thread_info(struct task_struct *task)
 
 	page = alloc_pages(flags, THREAD_SIZE_ORDER);
 	if (!page)
-		return 0;
+		return NULL;
 
 	return (struct thread_info *)page_address(page);
 }
@@ -129,6 +133,18 @@ void free_thread_info(struct thread_info *info)
 {
 	struct single_step_state *step_state = info->step_state;
 
+#ifdef CONFIG_HARDWALL
+	/*
+	 * We free a thread_info from the context of the task that has
+	 * been scheduled next, so the original task is already dead.
+	 * Calling deactivate here just frees up the data structures.
+	 * If the task we're freeing held the last reference to a
+	 * hardwall fd, it would have been released prior to this point
+	 * anyway via exit_files(), and "hardwall" would be NULL by now.
+	 */
+	if (info->task->thread.hardwall)
+		hardwall_deactivate(info->task);
+#endif
 
 	if (step_state) {
 
@@ -154,8 +170,6 @@ void free_thread_info(struct thread_info *info)
 
 static void save_arch_state(struct thread_struct *t);
 
-extern void ret_from_fork(void);
-
 int copy_thread(unsigned long clone_flags, unsigned long sp,
 		unsigned long stack_size,
 		struct task_struct *p, struct pt_regs *regs)
@@ -235,6 +249,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	p->thread.proc_status = 0;
 #endif
 
+#ifdef CONFIG_HARDWALL
+	/* New thread does not own any networks. */
+	p->thread.hardwall = NULL;
+#endif
 
 
 	/*
@@ -257,7 +275,7 @@ struct task_struct *validate_current(void)
 	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
 		     (void *)tsk > high_memory ||
 		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
-		printk("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
+		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
 		tsk = &corrupt;
 	}
 	return tsk;
@@ -447,10 +465,6 @@ void _prepare_arch_switch(struct task_struct *next)
 }
 
 
-extern struct task_struct *__switch_to(struct task_struct *prev,
-				       struct task_struct *next,
-				       unsigned long new_system_save_1_0);
-
 struct task_struct *__sched _switch_to(struct task_struct *prev,
 				       struct task_struct *next)
 {
@@ -486,6 +500,15 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 	}
 #endif
 
+#ifdef CONFIG_HARDWALL
+	/* Enable or disable access to the network registers appropriately. */
+	if (prev->thread.hardwall != NULL) {
+		if (next->thread.hardwall == NULL)
+			restrict_network_mpls();
+	} else if (next->thread.hardwall != NULL) {
+		grant_network_mpls();
+	}
+#endif
 
 	/*
 	 * Switch kernel SP, PC, and callee-saved registers.
@@ -496,14 +519,14 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 	return __switch_to(prev, next, next_current_ksp0(next));
 }
 
-int _sys_fork(struct pt_regs *regs)
+long _sys_fork(struct pt_regs *regs)
 {
 	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
 }
 
-int _sys_clone(unsigned long clone_flags, unsigned long newsp,
-	       void __user *parent_tidptr, void __user *child_tidptr,
-	       struct pt_regs *regs)
+long _sys_clone(unsigned long clone_flags, unsigned long newsp,
+		void __user *parent_tidptr, void __user *child_tidptr,
+		struct pt_regs *regs)
 {
 	if (!newsp)
 		newsp = regs->sp;
@@ -511,7 +534,7 @@ int _sys_clone(unsigned long clone_flags, unsigned long newsp,
 		       parent_tidptr, child_tidptr);
 }
 
-int _sys_vfork(struct pt_regs *regs)
+long _sys_vfork(struct pt_regs *regs)
 {
 	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
 		       regs, 0, NULL, NULL);
@@ -520,10 +543,10 @@ int _sys_vfork(struct pt_regs *regs)
 /*
  * sys_execve() executes a new program.
  */
-int _sys_execve(char __user *path, char __user *__user *argv,
-		char __user *__user *envp, struct pt_regs *regs)
+long _sys_execve(char __user *path, char __user *__user *argv,
+		 char __user *__user *envp, struct pt_regs *regs)
 {
-	int error;
+	long error;
 	char *filename;
 
 	filename = getname(path);
@@ -537,10 +560,10 @@ out:
 }
 
 #ifdef CONFIG_COMPAT
-int _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
-		       compat_uptr_t __user *envp, struct pt_regs *regs)
+long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
+			compat_uptr_t __user *envp, struct pt_regs *regs)
 {
-	int error;
+	long error;
 	char *filename;
 
 	filename = getname(path);
@@ -616,31 +639,32 @@ void exit_thread(void)
 	/* Nothing */
 }
 
-#ifdef __tilegx__
-# define LINECOUNT 3
-# define EXTRA_NL "\n"
-#else
-# define LINECOUNT 4
-# define EXTRA_NL ""
-#endif
-
 void show_regs(struct pt_regs *regs)
 {
 	struct task_struct *tsk = validate_current();
-	int i, linebreak;
-	printk("\n");
-	printk(" Pid: %d, comm: %20s, CPU: %d\n",
+	int i;
+
+	pr_err("\n");
+	pr_err(" Pid: %d, comm: %20s, CPU: %d\n",
 	       tsk->pid, tsk->comm, smp_processor_id());
-	for (i = linebreak = 0; i < 53; ++i) {
-		printk(" r%-2d: "REGFMT, i, regs->regs[i]);
-		if (++linebreak == LINECOUNT) {
-			linebreak = 0;
-			printk("\n");
-		}
-	}
-	printk(" tp : "REGFMT EXTRA_NL " sp : "REGFMT" lr : "REGFMT"\n",
-	       regs->tp, regs->sp, regs->lr);
-	printk(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
+#ifdef __tilegx__
+	for (i = 0; i < 51; i += 3)
+		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
+		       i, regs->regs[i], i+1, regs->regs[i+1],
+		       i+2, regs->regs[i+2]);
+	pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n",
+	       regs->regs[51], regs->regs[52], regs->tp);
+	pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
+#else
+	for (i = 0; i < 52; i += 4)
+		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
+		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
+		       i, regs->regs[i], i+1, regs->regs[i+1],
+		       i+2, regs->regs[i+2], i+3, regs->regs[i+3]);
+	pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
+	       regs->regs[52], regs->tp, regs->sp, regs->lr);
+#endif
+	pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
 	       regs->pc, regs->ex1, regs->faultnum);
 
 	dump_stack_regs(regs);
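The int-to-gfp_t change in alloc_thread_info() above is the "gfp_t
consistently instead of int" item from the commit message: gfp_t is declared
with sparse's __bitwise attribute, so it only mixes cleanly with other gfp_t
values. A minimal sketch of what sparse flags (assuming a sparse-enabled
build, e.g. make C=1; the variable names are illustrative):

    gfp_t ok = GFP_KERNEL;      /* clean */
    int bad = GFP_KERNEL;       /* sparse: restricted gfp_t degrades to int */

    ok |= __GFP_ZERO;           /* clean: both operands are gfp_t */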
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 468054928e7d..e5701d1a52d7 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -19,6 +19,7 @@
 #include <linux/kprobes.h>
 #include <linux/compat.h>
 #include <linux/uaccess.h>
+#include <asm/traps.h>
 
 void user_enable_single_step(struct task_struct *child)
 {
@@ -76,7 +77,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	if (task_thread_info(child)->status & TS_COMPAT)
 		addr = (u32)addr;
 #endif
-	datap = (unsigned long __user *)data;
+	datap = (unsigned long __user __force *)data;
 
 	switch (request) {
 
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index a4523923605e..acd86d20beba 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -15,6 +15,7 @@
 #include <linux/stddef.h>
 #include <linux/reboot.h>
 #include <linux/smp.h>
+#include <linux/pm.h>
 #include <asm/page.h>
 #include <asm/setup.h>
 #include <hv/hypervisor.h>
@@ -46,7 +47,5 @@ void machine_restart(char *cmd)
 	hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
 }
 
-/*
- * Power off function, if any
- */
-void (*pm_power_off)(void) = machine_power_off;
+/* No interesting distinction to be made here. */
+void (*pm_power_off)(void) = NULL;
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 934136b61ceb..4dd21c1e6d5e 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -20,6 +20,7 @@
 #include <linux/node.h>
 #include <linux/cpu.h>
 #include <linux/ioport.h>
+#include <linux/irq.h>
 #include <linux/kexec.h>
 #include <linux/pci.h>
 #include <linux/initrd.h>
@@ -109,7 +110,7 @@ static int __init setup_maxmem(char *str)
 
 	maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
 		(HPAGE_SHIFT - PAGE_SHIFT);
-	printk("Forcing RAM used to no more than %dMB\n",
+	pr_info("Forcing RAM used to no more than %dMB\n",
 		maxmem_pfn >> (20 - PAGE_SHIFT));
 	return 0;
 }
@@ -127,7 +128,7 @@ static int __init setup_maxnodemem(char *str)
 
 	maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
 		(HPAGE_SHIFT - PAGE_SHIFT);
-	printk("Forcing RAM used on node %ld to no more than %dMB\n",
+	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
 		node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
 	return 0;
 }
@@ -140,7 +141,7 @@ static int __init setup_isolnodes(char *str)
 		return -EINVAL;
 
 	nodelist_scnprintf(buf, sizeof(buf), isolnodes);
-	printk("Set isolnodes value to '%s'\n", buf);
+	pr_info("Set isolnodes value to '%s'\n", buf);
 	return 0;
 }
 early_param("isolnodes", setup_isolnodes);
@@ -155,7 +156,7 @@ static int __init setup_pci_reserve(char* str)
 		return -EINVAL;
 
 	pci_reserve_mb = mb;
-	printk("Reserving %dMB for PCIE root complex mappings\n",
+	pr_info("Reserving %dMB for PCIE root complex mappings\n",
 		pci_reserve_mb);
 	return 0;
 }
@@ -269,7 +270,7 @@ static void *__init setup_pa_va_mapping(void)
  * This is up to 4 mappings for lowmem, one mapping per memory
  * controller, plus one for our text segment.
  */
-void __cpuinit store_permanent_mappings(void)
+static void __cpuinit store_permanent_mappings(void)
 {
 	int i;
 
@@ -320,14 +321,14 @@ static void __init setup_memory(void)
 			break;
 #ifdef CONFIG_FLATMEM
 		if (i > 0) {
-			printk("Can't use discontiguous PAs: %#llx..%#llx\n",
+			pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
 			       range.size, range.start + range.size);
 			continue;
 		}
 #endif
 #ifndef __tilegx__
 		if ((unsigned long)range.start) {
-			printk("Range not at 4GB multiple: %#llx..%#llx\n",
+			pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
 			       range.start, range.start + range.size);
 			continue;
 		}
@@ -335,51 +336,51 @@ static void __init setup_memory(void)
 		if ((range.start & (HPAGE_SIZE-1)) != 0 ||
 		    (range.size & (HPAGE_SIZE-1)) != 0) {
 			unsigned long long start_pa = range.start;
-			unsigned long long size = range.size;
+			unsigned long long orig_size = range.size;
 			range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
 			range.size -= (range.start - start_pa);
 			range.size &= HPAGE_MASK;
-			printk("Range not hugepage-aligned: %#llx..%#llx:"
+			pr_err("Range not hugepage-aligned: %#llx..%#llx:"
 			       " now %#llx-%#llx\n",
-			       start_pa, start_pa + size,
+			       start_pa, start_pa + orig_size,
 			       range.start, range.start + range.size);
 		}
 		highbits = __pa_to_highbits(range.start);
 		if (highbits >= NR_PA_HIGHBIT_VALUES) {
-			printk("PA high bits too high: %#llx..%#llx\n",
+			pr_err("PA high bits too high: %#llx..%#llx\n",
 			       range.start, range.start + range.size);
 			continue;
 		}
 		if (highbits_seen[highbits]) {
-			printk("Range overlaps in high bits: %#llx..%#llx\n",
+			pr_err("Range overlaps in high bits: %#llx..%#llx\n",
 			       range.start, range.start + range.size);
 			continue;
 		}
 		highbits_seen[highbits] = 1;
 		if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
-			int size = maxnodemem_pfn[i];
-			if (size > 0) {
-				printk("Maxnodemem reduced node %d to"
-				       " %d pages\n", i, size);
-				range.size = (HV_PhysAddr)size << PAGE_SHIFT;
+			int max_size = maxnodemem_pfn[i];
+			if (max_size > 0) {
+				pr_err("Maxnodemem reduced node %d to"
+				       " %d pages\n", i, max_size);
+				range.size = PFN_PHYS(max_size);
 			} else {
-				printk("Maxnodemem disabled node %d\n", i);
+				pr_err("Maxnodemem disabled node %d\n", i);
 				continue;
 			}
 		}
 		if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) {
-			int size = maxmem_pfn - num_physpages;
-			if (size > 0) {
-				printk("Maxmem reduced node %d to %d pages\n",
-				       i, size);
-				range.size = (HV_PhysAddr)size << PAGE_SHIFT;
+			int max_size = maxmem_pfn - num_physpages;
+			if (max_size > 0) {
+				pr_err("Maxmem reduced node %d to %d pages\n",
+				       i, max_size);
+				range.size = PFN_PHYS(max_size);
 			} else {
-				printk("Maxmem disabled node %d\n", i);
+				pr_err("Maxmem disabled node %d\n", i);
 				continue;
 			}
 		}
 		if (i >= MAX_NUMNODES) {
-			printk("Too many PA nodes (#%d): %#llx...%#llx\n",
+			pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
 			       i, range.size, range.size + range.start);
 			continue;
 		}
@@ -391,7 +392,7 @@ static void __init setup_memory(void)
 #ifndef __tilegx__
 		if (((HV_PhysAddr)end << PAGE_SHIFT) !=
 		    (range.start + range.size)) {
-			printk("PAs too high to represent: %#llx..%#llx\n",
+			pr_err("PAs too high to represent: %#llx..%#llx\n",
 			       range.start, range.start + range.size);
 			continue;
 		}
@@ -412,7 +413,7 @@ static void __init setup_memory(void)
 				NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
 			if (end < pci_reserve_end_pfn + percpu_pages) {
 				end = pci_reserve_start_pfn;
-				printk("PCI mapping region reduced node %d to"
+				pr_err("PCI mapping region reduced node %d to"
 				       " %ld pages\n", i, end - start);
 			}
 		}
@@ -456,11 +457,11 @@ static void __init setup_memory(void)
 		}
 	}
 	num_physpages -= dropped_pages;
-	printk(KERN_WARNING "Only using %ldMB memory;"
+	pr_warning("Only using %ldMB memory;"
 	       " ignoring %ldMB.\n",
 	       num_physpages >> (20 - PAGE_SHIFT),
 	       dropped_pages >> (20 - PAGE_SHIFT));
-	printk(KERN_WARNING "Consider using a larger page size.\n");
+	pr_warning("Consider using a larger page size.\n");
 	}
 #endif
 
@@ -478,9 +479,9 @@ static void __init setup_memory(void)
 		MAXMEM_PFN : mappable_physpages;
 	highmem_pages = (long) (num_physpages - lowmem_pages);
 
-	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
+	pr_notice("%ldMB HIGHMEM available.\n",
 	       pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
-	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
+	pr_notice("%ldMB LOWMEM available.\n",
 	       pages_to_mb(lowmem_pages));
 #else
 	/* Set max_low_pfn based on what node 0 can directly address. */
@@ -488,15 +489,15 @@ static void __init setup_memory(void)
 
 #ifndef __tilegx__
 	if (node_end_pfn[0] > MAXMEM_PFN) {
-		printk(KERN_WARNING "Only using %ldMB LOWMEM.\n",
+		pr_warning("Only using %ldMB LOWMEM.\n",
 		       MAXMEM>>20);
-		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+		pr_warning("Use a HIGHMEM enabled kernel.\n");
 		max_low_pfn = MAXMEM_PFN;
 		max_pfn = MAXMEM_PFN;
 		num_physpages = MAXMEM_PFN;
 		node_end_pfn[0] = MAXMEM_PFN;
 	} else {
-		printk(KERN_NOTICE "%ldMB memory available.\n",
+		pr_notice("%ldMB memory available.\n",
 		       pages_to_mb(node_end_pfn[0]));
 	}
 	for (i = 1; i < MAX_NUMNODES; ++i) {
@@ -512,7 +513,7 @@ static void __init setup_memory(void)
 		if (pages)
 			high_memory = pfn_to_kaddr(node_end_pfn[i]);
 	}
-	printk(KERN_NOTICE "%ldMB memory available.\n",
+	pr_notice("%ldMB memory available.\n",
 	       pages_to_mb(lowmem_pages));
 #endif
 #endif
@@ -744,7 +745,7 @@ static void __init setup_numa_mapping(void)
 	nodes_andnot(default_nodes, node_online_map, isolnodes);
 	if (nodes_empty(default_nodes)) {
 		BUG_ON(!node_isset(0, node_online_map));
-		printk("Forcing NUMA node zero available as a default node\n");
+		pr_err("Forcing NUMA node zero available as a default node\n");
 		node_set(0, default_nodes);
 	}
 
@@ -822,13 +823,13 @@ static void __init setup_numa_mapping(void)
 		printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
 		for (x = 0; x < smp_width; ++x, ++cpu) {
 			if (cpu_to_node(cpu) < 0) {
-				printk(" -");
+				pr_cont(" -");
 				cpu_2_node[cpu] = first_node(default_nodes);
 			} else {
-				printk(" %d", cpu_to_node(cpu));
+				pr_cont(" %d", cpu_to_node(cpu));
 			}
 		}
-		printk("\n");
+		pr_cont("\n");
 	}
 }
 
@@ -856,12 +857,17 @@ subsys_initcall(topology_init);
 #endif /* CONFIG_NUMA */
 
 /**
- * setup_mpls() - Allow the user-space code to access various SPRs.
+ * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
+ * @boot: Is this the boot cpu?
  *
- * Also called from online_secondary().
+ * Called from setup_arch() on the boot cpu, or online_secondary().
  */
-void __cpuinit setup_mpls(void)
+void __cpuinit setup_cpu(int boot)
 {
+	/* The boot cpu sets up its permanent mappings much earlier. */
+	if (!boot)
+		store_permanent_mappings();
+
 	/* Allow asynchronous TLB interrupts. */
 #if CHIP_HAS_TILE_DMA()
 	raw_local_irq_unmask(INT_DMATLB_MISS);
@@ -892,6 +898,14 @@ void __cpuinit setup_mpls(void)
 	 * as well as the PL 0 interrupt mask.
 	 */
 	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
+
+	/* Initialize IRQ support for this cpu. */
+	setup_irq_regs();
+
+#ifdef CONFIG_HARDWALL
+	/* Reset the network state on this cpu. */
+	reset_network_state();
+#endif
 }
 
 static int __initdata set_initramfs_file;
@@ -922,22 +936,22 @@ static void __init load_hv_initrd(void)
 	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
 	if (fd == HV_ENOENT) {
 		if (set_initramfs_file)
-			printk("No such hvfs initramfs file '%s'\n",
+			pr_warning("No such hvfs initramfs file '%s'\n",
 			       initramfs_file);
 		return;
 	}
 	BUG_ON(fd < 0);
 	stat = hv_fs_fstat(fd);
 	BUG_ON(stat.size < 0);
 	if (stat.flags & HV_FS_ISDIR) {
-		printk("Ignoring hvfs file '%s': it's a directory.\n",
+		pr_warning("Ignoring hvfs file '%s': it's a directory.\n",
 		       initramfs_file);
 		return;
 	}
 	initrd = alloc_bootmem_pages(stat.size);
 	rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
 	if (rc != stat.size) {
-		printk("Error reading %d bytes from hvfs file '%s': %d\n",
+		pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
 		       stat.size, initramfs_file, rc);
 		free_bootmem((unsigned long) initrd, stat.size);
 		return;
@@ -966,9 +980,9 @@ static void __init validate_hv(void)
 	HV_Topology topology = hv_inquire_topology();
 	BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
 	if (topology.width != 1 || topology.height != 1) {
-		printk("Warning: booting UP kernel on %dx%d grid;"
+		pr_warning("Warning: booting UP kernel on %dx%d grid;"
 		       " will ignore all but first tile.\n",
 		       topology.width, topology.height);
 	}
 #endif
 
@@ -1004,7 +1018,7 @@ static void __init validate_hv(void)
 
 	if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
 		       sizeof(chip_model)) < 0) {
-		printk("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
+		pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
 		strlcpy(chip_model, "unknown", sizeof(chip_model));
 	}
 }
@@ -1096,7 +1110,7 @@ static int __init disabled_cpus(char *str)
 	if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
 		return -EINVAL;
 	if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
-		printk("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
+		pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
 		cpumask_clear_cpu(boot_cpu, &disabled_map);
 	}
 	return 0;
@@ -1104,12 +1118,12 @@ static int __init disabled_cpus(char *str)
 
 early_param("disabled_cpus", disabled_cpus);
 
-void __init print_disabled_cpus()
+void __init print_disabled_cpus(void)
 {
 	if (!cpumask_empty(&disabled_map)) {
 		char buf[100];
 		cpulist_scnprintf(buf, sizeof(buf), &disabled_map);
-		printk(KERN_INFO "CPUs not available for Linux: %s\n", buf);
+		pr_info("CPUs not available for Linux: %s\n", buf);
 	}
 }
 
@@ -1162,7 +1176,7 @@ static void __init setup_cpu_maps(void)
 			      (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
 			      sizeof(cpu_lotar_map));
 	if (rc < 0) {
-		printk("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
+		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
 		cpu_lotar_map = cpu_possible_map;
 	}
 
@@ -1182,7 +1196,7 @@ static void __init setup_cpu_maps(void)
 
 static int __init dataplane(char *str)
 {
-	printk("WARNING: dataplane support disabled in this kernel\n");
+	pr_warning("WARNING: dataplane support disabled in this kernel\n");
 	return 0;
 }
 
@@ -1200,8 +1214,8 @@ void __init setup_arch(char **cmdline_p)
 	len = hv_get_command_line((HV_VirtAddr) boot_command_line,
 				  COMMAND_LINE_SIZE);
 	if (boot_command_line[0])
-		printk("WARNING: ignoring dynamic command line \"%s\"\n",
+		pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
 		       boot_command_line);
 	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 #else
 	char *hv_cmdline;
@@ -1269,7 +1283,7 @@ void __init setup_arch(char **cmdline_p)
 	setup_numa_mapping();
 	zone_sizes_init();
 	set_page_homes();
-	setup_mpls();
+	setup_cpu(1);
 	setup_clock();
 	load_hv_initrd();
 }
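Per the new kernel-doc above, setup_cpu() now has exactly two call sites;
roughly (a sketch; online_secondary() lives in smpboot.c, outside the hunks
shown here, so its exact shape is assumed):

    /* boot cpu, from setup_arch(): mappings were stored earlier */
    setup_cpu(1);

    /* each secondary cpu, from online_secondary():
     * !boot makes setup_cpu() call store_permanent_mappings() */
    setup_cpu(0);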
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 45835cfad407..45b66a3c991f 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -33,6 +33,7 @@
33#include <asm/processor.h> 33#include <asm/processor.h>
34#include <asm/ucontext.h> 34#include <asm/ucontext.h>
35#include <asm/sigframe.h> 35#include <asm/sigframe.h>
36#include <asm/syscalls.h>
36#include <arch/interrupts.h> 37#include <arch/interrupts.h>
37 38
38#define DEBUG_SIG 0 39#define DEBUG_SIG 0
@@ -40,11 +41,8 @@
40#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 41#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
41 42
42 43
43/* Caller before callee in this file; other callee is in assembler */
44void do_signal(struct pt_regs *regs);
45
46long _sys_sigaltstack(const stack_t __user *uss, 44long _sys_sigaltstack(const stack_t __user *uss,
47 stack_t __user *uoss, struct pt_regs *regs) 45 stack_t __user *uoss, struct pt_regs *regs)
48{ 46{
49 return do_sigaltstack(uss, uoss, regs->sp); 47 return do_sigaltstack(uss, uoss, regs->sp);
50} 48}
@@ -65,7 +63,7 @@ int restore_sigcontext(struct pt_regs *regs,
65 63
66 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 64 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
67 err |= __get_user(((long *)regs)[i], 65 err |= __get_user(((long *)regs)[i],
68 &((long *)(&sc->regs))[i]); 66 &((long __user *)(&sc->regs))[i]);
69 67
70 regs->faultnum = INT_SWINT_1_SIGRETURN; 68 regs->faultnum = INT_SWINT_1_SIGRETURN;
71 69
@@ -73,7 +71,8 @@ int restore_sigcontext(struct pt_regs *regs,
73 return err; 71 return err;
74} 72}
75 73
76int _sys_rt_sigreturn(struct pt_regs *regs) 74/* sigreturn() returns long since it restores r0 in the interrupted code. */
75long _sys_rt_sigreturn(struct pt_regs *regs)
77{ 76{
78 struct rt_sigframe __user *frame = 77 struct rt_sigframe __user *frame =
79 (struct rt_sigframe __user *)(regs->sp); 78 (struct rt_sigframe __user *)(regs->sp);
@@ -114,7 +113,7 @@ int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
114 113
115 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 114 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
116 err |= __put_user(((long *)regs)[i], 115 err |= __put_user(((long *)regs)[i],
117 &((long *)(&sc->regs))[i]); 116 &((long __user *)(&sc->regs))[i]);
118 117
119 return err; 118 return err;
120} 119}
@@ -137,7 +136,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka,
137 * will die with SIGSEGV. 136 * will die with SIGSEGV.
138 */ 137 */
139 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) 138 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
140 return (void __user *) -1L; 139 return (void __user __force *)-1UL;
141 140
142 /* This is the X/Open sanctioned signal stack switching. */ 141 /* This is the X/Open sanctioned signal stack switching. */
143 if (ka->sa.sa_flags & SA_ONSTACK) { 142 if (ka->sa.sa_flags & SA_ONSTACK) {
@@ -185,8 +184,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
185 /* Create the ucontext. */ 184 /* Create the ucontext. */
186 err |= __clear_user(&frame->save_area, sizeof(frame->save_area)); 185 err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
187 err |= __put_user(0, &frame->uc.uc_flags); 186 err |= __put_user(0, &frame->uc.uc_flags);
188 err |= __put_user(0, &frame->uc.uc_link); 187 err |= __put_user(NULL, &frame->uc.uc_link);
189 err |= __put_user((void *)(current->sas_ss_sp), 188 err |= __put_user((void __user *)(current->sas_ss_sp),
190 &frame->uc.uc_stack.ss_sp); 189 &frame->uc.uc_stack.ss_sp);
191 err |= __put_user(sas_ss_flags(regs->sp), 190 err |= __put_user(sas_ss_flags(regs->sp),
192 &frame->uc.uc_stack.ss_flags); 191 &frame->uc.uc_stack.ss_flags);
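
The sparse fixes in restore_sigcontext() and setup_sigcontext() both annotate the same word-by-word copy; a minimal sketch of the annotated form (helper name is illustrative, the loop body mirrors the hunk):

    /* Copy each long of the kernel pt_regs into its user-space mirror.
     * The __user cast on the destination is what tells sparse that the
     * two pointers live in different address spaces. */
    static int copy_regs_to_user(struct sigcontext __user *sc,
                                 struct pt_regs *regs)
    {
        int i, err = 0;
        for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
            err |= __put_user(((long *)regs)[i],
                              &((long __user *)(&sc->regs))[i]);
        return err;     /* nonzero if any __put_user faulted */
    }
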
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 266aae123632..5ec4b9c651f2 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -23,6 +23,7 @@
23#include <linux/uaccess.h> 23#include <linux/uaccess.h>
24#include <linux/mman.h> 24#include <linux/mman.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/err.h>
26#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
27#include <asm/opcode-tile.h> 28#include <asm/opcode-tile.h>
28#include <asm/opcode_constants.h> 29#include <asm/opcode_constants.h>
@@ -39,8 +40,8 @@ static int __init setup_unaligned_printk(char *str)
39 if (strict_strtol(str, 0, &val) != 0) 40 if (strict_strtol(str, 0, &val) != 0)
40 return 0; 41 return 0;
41 unaligned_printk = val; 42 unaligned_printk = val;
42 printk("Printk for each unaligned data accesses is %s\n", 43 pr_info("Printk for each unaligned data accesses is %s\n",
43 unaligned_printk ? "enabled" : "disabled"); 44 unaligned_printk ? "enabled" : "disabled");
44 return 1; 45 return 1;
45} 46}
46__setup("unaligned_printk=", setup_unaligned_printk); 47__setup("unaligned_printk=", setup_unaligned_printk);
@@ -113,7 +114,7 @@ static tile_bundle_bits rewrite_load_store_unaligned(
113 enum mem_op mem_op, 114 enum mem_op mem_op,
114 int size, int sign_ext) 115 int size, int sign_ext)
115{ 116{
116 unsigned char *addr; 117 unsigned char __user *addr;
117 int val_reg, addr_reg, err, val; 118 int val_reg, addr_reg, err, val;
118 119
119 /* Get address and value registers */ 120 /* Get address and value registers */
@@ -148,7 +149,7 @@ static tile_bundle_bits rewrite_load_store_unaligned(
148 return bundle; 149 return bundle;
149 150
150 /* If it's aligned, don't handle it specially */ 151 /* If it's aligned, don't handle it specially */
151 addr = (void *)regs->regs[addr_reg]; 152 addr = (void __user *)regs->regs[addr_reg];
152 if (((unsigned long)addr % size) == 0) 153 if (((unsigned long)addr % size) == 0)
153 return bundle; 154 return bundle;
154 155
@@ -183,7 +184,7 @@ static tile_bundle_bits rewrite_load_store_unaligned(
183 siginfo_t info = { 184 siginfo_t info = {
184 .si_signo = SIGSEGV, 185 .si_signo = SIGSEGV,
185 .si_code = SEGV_MAPERR, 186 .si_code = SEGV_MAPERR,
186 .si_addr = (void __user *)addr 187 .si_addr = addr
187 }; 188 };
188 force_sig_info(info.si_signo, &info, current); 189 force_sig_info(info.si_signo, &info, current);
189 return (tile_bundle_bits) 0; 190 return (tile_bundle_bits) 0;
@@ -193,30 +194,33 @@ static tile_bundle_bits rewrite_load_store_unaligned(
193 siginfo_t info = { 194 siginfo_t info = {
194 .si_signo = SIGBUS, 195 .si_signo = SIGBUS,
195 .si_code = BUS_ADRALN, 196 .si_code = BUS_ADRALN,
196 .si_addr = (void __user *)addr 197 .si_addr = addr
197 }; 198 };
198 force_sig_info(info.si_signo, &info, current); 199 force_sig_info(info.si_signo, &info, current);
199 return (tile_bundle_bits) 0; 200 return (tile_bundle_bits) 0;
200 } 201 }
201 202
202 if (unaligned_printk || unaligned_fixup_count == 0) { 203 if (unaligned_printk || unaligned_fixup_count == 0) {
203 printk("Process %d/%s: PC %#lx: Fixup of" 204 pr_info("Process %d/%s: PC %#lx: Fixup of"
204 " unaligned %s at %#lx.\n", 205 " unaligned %s at %#lx.\n",
205 current->pid, current->comm, regs->pc, 206 current->pid, current->comm, regs->pc,
206 (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) ? 207 (mem_op == MEMOP_LOAD ||
207 "load" : "store", 208 mem_op == MEMOP_LOAD_POSTINCR) ?
208 (unsigned long)addr); 209 "load" : "store",
210 (unsigned long)addr);
209 if (!unaligned_printk) { 211 if (!unaligned_printk) {
210 printk("\n" 212#define P pr_info
211"Unaligned fixups in the kernel will slow your application considerably.\n" 213P("\n");
212"You can find them by writing \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n" 214P("Unaligned fixups in the kernel will slow your application considerably.\n");
213"which requests the kernel show all unaligned fixups, or writing a \"0\"\n" 215P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
214"to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n" 216P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
215"access will become a SIGBUS you can debug. No further warnings will be\n" 217P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
216"shown so as to avoid additional slowdown, but you can track the number\n" 218P("access will become a SIGBUS you can debug. No further warnings will be\n");
217"of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n" 219P("shown so as to avoid additional slowdown, but you can track the number\n");
218"Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n" 220P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
219 "\n"); 221P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
222P("\n");
223#undef P
220 } 224 }
221 } 225 }
222 ++unaligned_fixup_count; 226 ++unaligned_fixup_count;
@@ -276,7 +280,7 @@ void single_step_once(struct pt_regs *regs)
276 struct thread_info *info = (void *)current_thread_info(); 280 struct thread_info *info = (void *)current_thread_info();
277 struct single_step_state *state = info->step_state; 281 struct single_step_state *state = info->step_state;
278 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP); 282 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
279 tile_bundle_bits *buffer, *pc; 283 tile_bundle_bits __user *buffer, *pc;
280 tile_bundle_bits bundle; 284 tile_bundle_bits bundle;
281 int temp_reg; 285 int temp_reg;
282 int target_reg = TREG_LR; 286 int target_reg = TREG_LR;
@@ -306,21 +310,21 @@ void single_step_once(struct pt_regs *regs)
306 /* allocate a page of writable, executable memory */ 310 /* allocate a page of writable, executable memory */
307 state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL); 311 state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
308 if (state == NULL) { 312 if (state == NULL) {
309 printk("Out of kernel memory trying to single-step\n"); 313 pr_err("Out of kernel memory trying to single-step\n");
310 return; 314 return;
311 } 315 }
312 316
313 /* allocate a cache line of writable, executable memory */ 317 /* allocate a cache line of writable, executable memory */
314 down_write(&current->mm->mmap_sem); 318 down_write(&current->mm->mmap_sem);
315 buffer = (void *) do_mmap(0, 0, 64, 319 buffer = (void __user *) do_mmap(NULL, 0, 64,
316 PROT_EXEC | PROT_READ | PROT_WRITE, 320 PROT_EXEC | PROT_READ | PROT_WRITE,
317 MAP_PRIVATE | MAP_ANONYMOUS, 321 MAP_PRIVATE | MAP_ANONYMOUS,
318 0); 322 0);
319 up_write(&current->mm->mmap_sem); 323 up_write(&current->mm->mmap_sem);
320 324
321 if ((int)buffer < 0 && (int)buffer > -PAGE_SIZE) { 325 if (IS_ERR((void __force *)buffer)) {
322 kfree(state); 326 kfree(state);
323 printk("Out of kernel pages trying to single-step\n"); 327 pr_err("Out of kernel pages trying to single-step\n");
324 return; 328 return;
325 } 329 }
326 330
@@ -349,11 +353,14 @@ void single_step_once(struct pt_regs *regs)
349 if (regs->faultnum == INT_SWINT_1) 353 if (regs->faultnum == INT_SWINT_1)
350 regs->pc -= 8; 354 regs->pc -= 8;
351 355
352 pc = (tile_bundle_bits *)(regs->pc); 356 pc = (tile_bundle_bits __user *)(regs->pc);
353 bundle = pc[0]; 357 if (get_user(bundle, pc) != 0) {
358 pr_err("Couldn't read instruction at %p trying to step\n", pc);
359 return;
360 }
354 361
355 /* We'll follow the instruction with 2 ill op bundles */ 362 /* We'll follow the instruction with 2 ill op bundles */
356 state->orig_pc = (unsigned long) pc; 363 state->orig_pc = (unsigned long)pc;
357 state->next_pc = (unsigned long)(pc + 1); 364 state->next_pc = (unsigned long)(pc + 1);
358 state->branch_next_pc = 0; 365 state->branch_next_pc = 0;
359 state->update = 0; 366 state->update = 0;
@@ -633,7 +640,7 @@ void single_step_once(struct pt_regs *regs)
633 } 640 }
634 641
635 if (err) { 642 if (err) {
636 printk("Fault when writing to single-step buffer\n"); 643 pr_err("Fault when writing to single-step buffer\n");
637 return; 644 return;
638 } 645 }
639 646
@@ -641,12 +648,12 @@ void single_step_once(struct pt_regs *regs)
641 * Flush the buffer. 648 * Flush the buffer.
642 * We do a local flush only, since this is a thread-specific buffer. 649 * We do a local flush only, since this is a thread-specific buffer.
643 */ 650 */
644 __flush_icache_range((unsigned long) state->buffer, 651 __flush_icache_range((unsigned long)state->buffer,
645 (unsigned long) buffer); 652 (unsigned long)buffer);
646 653
647 /* Indicate enabled */ 654 /* Indicate enabled */
648 state->is_enabled = is_single_step; 655 state->is_enabled = is_single_step;
649 regs->pc = (unsigned long) state->buffer; 656 regs->pc = (unsigned long)state->buffer;
650 657
651 /* Fault immediately if we are coming back from a syscall. */ 658 /* Fault immediately if we are coming back from a syscall. */
652 if (regs->faultnum == INT_SWINT_1) 659 if (regs->faultnum == INT_SWINT_1)
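
The pc[0] dereference replaced above could fault in the kernel on an unreadable user pc; a minimal sketch of the get_user() form the patch adopts (helper name illustrative, assuming <linux/uaccess.h>):

    /* Fetch one instruction bundle from the user-space pc, failing
     * cleanly instead of taking an unhandled kernel fault. */
    static int fetch_bundle(struct pt_regs *regs, tile_bundle_bits *out)
    {
        tile_bundle_bits __user *pc =
            (tile_bundle_bits __user *)regs->pc;
        if (get_user(*out, pc) != 0)
            return -EFAULT;         /* pc not readable */
        return 0;
    }
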
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index aa3aafdb4b93..74d62d098edf 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -25,19 +25,13 @@
25#include <linux/percpu.h> 25#include <linux/percpu.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/irq.h>
28#include <asm/mmu_context.h> 29#include <asm/mmu_context.h>
29#include <asm/tlbflush.h> 30#include <asm/tlbflush.h>
30#include <asm/sections.h> 31#include <asm/sections.h>
31 32
32/*
33 * This assembly function is provided in entry.S.
34 * When called, it loops on a nap instruction forever.
35 * FIXME: should be in a header somewhere.
36 */
37extern void smp_nap(void);
38
39/* State of each CPU. */ 33/* State of each CPU. */
40DEFINE_PER_CPU(int, cpu_state) = { 0 }; 34static DEFINE_PER_CPU(int, cpu_state) = { 0 };
41 35
42/* The messaging code jumps to this pointer during boot-up */ 36/* The messaging code jumps to this pointer during boot-up */
43unsigned long start_cpu_function_addr; 37unsigned long start_cpu_function_addr;
@@ -74,7 +68,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
74 */ 68 */
75 rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu)); 69 rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
76 if (rc != 0) 70 if (rc != 0)
77 printk("Couldn't set init affinity to boot cpu (%ld)\n", rc); 71 pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);
78 72
79 /* Print information about disabled and dataplane cpus. */ 73 /* Print information about disabled and dataplane cpus. */
80 print_disabled_cpus(); 74 print_disabled_cpus();
@@ -134,13 +128,13 @@ static __init int reset_init_affinity(void)
134{ 128{
135 long rc = sched_setaffinity(current->pid, &init_affinity); 129 long rc = sched_setaffinity(current->pid, &init_affinity);
136 if (rc != 0) 130 if (rc != 0)
137 printk(KERN_WARNING "couldn't reset init affinity (%ld)\n", 131 pr_warning("couldn't reset init affinity (%ld)\n",
138 rc); 132 rc);
139 return 0; 133 return 0;
140} 134}
141late_initcall(reset_init_affinity); 135late_initcall(reset_init_affinity);
142 136
143struct cpumask cpu_started __cpuinitdata; 137static struct cpumask cpu_started __cpuinitdata;
144 138
145/* 139/*
146 * Activate a secondary processor. Very minimal; don't add anything 140 * Activate a secondary processor. Very minimal; don't add anything
@@ -172,9 +166,6 @@ static void __cpuinit start_secondary(void)
172 BUG(); 166 BUG();
173 enter_lazy_tlb(&init_mm, current); 167 enter_lazy_tlb(&init_mm, current);
174 168
175 /* Enable IRQs. */
176 init_per_tile_IRQs();
177
178 /* Allow hypervisor messages to be received */ 169 /* Allow hypervisor messages to be received */
179 init_messaging(); 170 init_messaging();
180 local_irq_enable(); 171 local_irq_enable();
@@ -182,7 +173,7 @@ static void __cpuinit start_secondary(void)
182 /* Indicate that we're ready to come up. */ 173 /* Indicate that we're ready to come up. */
183 /* Must not do this before we're ready to receive messages */ 174 /* Must not do this before we're ready to receive messages */
184 if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) { 175 if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
185 printk(KERN_WARNING "CPU#%d already started!\n", cpuid); 176 pr_warning("CPU#%d already started!\n", cpuid);
186 for (;;) 177 for (;;)
187 local_irq_enable(); 178 local_irq_enable();
188 } 179 }
@@ -190,13 +181,10 @@ static void __cpuinit start_secondary(void)
190 smp_nap(); 181 smp_nap();
191} 182}
192 183
193void setup_mpls(void); /* from kernel/setup.c */
194void store_permanent_mappings(void);
195
196/* 184/*
197 * Bring a secondary processor online. 185 * Bring a secondary processor online.
198 */ 186 */
199void __cpuinit online_secondary() 187void __cpuinit online_secondary(void)
200{ 188{
201 /* 189 /*
202 * low-memory mappings have been cleared, flush them from 190 * low-memory mappings have been cleared, flush them from
@@ -222,17 +210,14 @@ void __cpuinit online_secondary()
222 ipi_call_unlock(); 210 ipi_call_unlock();
223 __get_cpu_var(cpu_state) = CPU_ONLINE; 211 __get_cpu_var(cpu_state) = CPU_ONLINE;
224 212
225 /* Set up MPLs for this processor */ 213 /* Set up tile-specific state for this cpu. */
226 setup_mpls(); 214 setup_cpu(0);
227
228 215
229 /* Set up tile-timer clock-event device on this cpu */ 216 /* Set up tile-timer clock-event device on this cpu */
230 setup_tile_timer(); 217 setup_tile_timer();
231 218
232 preempt_enable(); 219 preempt_enable();
233 220
234 store_permanent_mappings();
235
236 cpu_idle(); 221 cpu_idle();
237} 222}
238 223
@@ -242,7 +227,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
242 static int timeout; 227 static int timeout;
243 for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) { 228 for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
244 if (timeout >= 50000) { 229 if (timeout >= 50000) {
245 printk(KERN_INFO "skipping unresponsive cpu%d\n", cpu); 230 pr_info("skipping unresponsive cpu%d\n", cpu);
246 local_irq_enable(); 231 local_irq_enable();
247 return -EIO; 232 return -EIO;
248 } 233 }
@@ -289,5 +274,5 @@ void __init smp_cpus_done(unsigned int max_cpus)
289 ; 274 ;
290 rc = sched_setaffinity(current->pid, cpumask_of(cpu)); 275 rc = sched_setaffinity(current->pid, cpumask_of(cpu));
291 if (rc != 0) 276 if (rc != 0)
292 printk("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc); 277 pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc);
293} 278}
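
The cpu_started check in start_secondary() relies on test-and-set being a single atomic operation; a sketch of that claim-your-slot idiom with the reasoning spelled out in comments:

    /* Atomically claim this cpu's bit: if it was already set, some
     * other path started us twice, so park here rather than corrupt
     * the shared bring-up state. */
    if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
        pr_warning("CPU#%d already started!\n", cpuid);
        for (;;)
            local_irq_enable();     /* spin, still taking interrupts */
    }
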
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 382170b4b40a..b6268d3ae869 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -56,13 +56,16 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
56 HV_PTE pte; 56 HV_PTE pte;
57 struct page *page; 57 struct page *page;
58 58
59 if (l1_pgtable == NULL)
60 return 0; /* can't read user space in other tasks */
61
59 pte = l1_pgtable[HV_L1_INDEX(address)]; 62 pte = l1_pgtable[HV_L1_INDEX(address)];
60 if (!hv_pte_get_present(pte)) 63 if (!hv_pte_get_present(pte))
61 return 0; 64 return 0;
62 pfn = hv_pte_get_pfn(pte); 65 pfn = hv_pte_get_pfn(pte);
63 if (pte_huge(pte)) { 66 if (pte_huge(pte)) {
64 if (!pfn_valid(pfn)) { 67 if (!pfn_valid(pfn)) {
65 printk(KERN_ERR "huge page has bad pfn %#lx\n", pfn); 68 pr_err("huge page has bad pfn %#lx\n", pfn);
66 return 0; 69 return 0;
67 } 70 }
68 return hv_pte_get_present(pte) && hv_pte_get_readable(pte); 71 return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
@@ -70,7 +73,7 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
70 73
71 page = pfn_to_page(pfn); 74 page = pfn_to_page(pfn);
72 if (PageHighMem(page)) { 75 if (PageHighMem(page)) {
73 printk(KERN_ERR "L2 page table not in LOWMEM (%#llx)\n", 76 pr_err("L2 page table not in LOWMEM (%#llx)\n",
74 HV_PFN_TO_CPA(pfn)); 77 HV_PFN_TO_CPA(pfn));
75 return 0; 78 return 0;
76 } 79 }
@@ -91,13 +94,12 @@ static bool read_memory_func(void *result, VirtualAddress address,
91 /* We only tolerate kernel-space reads of this task's stack */ 94 /* We only tolerate kernel-space reads of this task's stack */
92 if (!in_kernel_stack(kbt, address)) 95 if (!in_kernel_stack(kbt, address))
93 return 0; 96 return 0;
94 } else if (kbt->pgtable == NULL) {
95 return 0; /* can't read user space in other tasks */
96 } else if (!valid_address(kbt, address)) { 97 } else if (!valid_address(kbt, address)) {
97 return 0; /* invalid user-space address */ 98 return 0; /* invalid user-space address */
98 } 99 }
99 pagefault_disable(); 100 pagefault_disable();
100 retval = __copy_from_user_inatomic(result, (const void *)address, 101 retval = __copy_from_user_inatomic(result,
102 (void __user __force *)address,
101 size); 103 size);
102 pagefault_enable(); 104 pagefault_enable();
103 return (retval == 0); 105 return (retval == 0);
@@ -131,14 +133,14 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
131 in_kernel_stack(kbt, p->sp) && 133 in_kernel_stack(kbt, p->sp) &&
132 p->sp >= sp) { 134 p->sp >= sp) {
133 if (kbt->verbose) 135 if (kbt->verbose)
134 printk(KERN_ERR " <%s while in kernel mode>\n", fault); 136 pr_err(" <%s while in kernel mode>\n", fault);
135 } else if (EX1_PL(p->ex1) == USER_PL && 137 } else if (EX1_PL(p->ex1) == USER_PL &&
136 p->pc < PAGE_OFFSET && 138 p->pc < PAGE_OFFSET &&
137 p->sp < PAGE_OFFSET) { 139 p->sp < PAGE_OFFSET) {
138 if (kbt->verbose) 140 if (kbt->verbose)
139 printk(KERN_ERR " <%s while in user mode>\n", fault); 141 pr_err(" <%s while in user mode>\n", fault);
140 } else if (kbt->verbose) { 142 } else if (kbt->verbose) {
141 printk(KERN_ERR " (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n", 143 pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
142 p->pc, p->sp, p->ex1); 144 p->pc, p->sp, p->ex1);
143 p = NULL; 145 p = NULL;
144 } 146 }
@@ -166,13 +168,13 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
166 if (!valid_address(kbt, b->sp) || 168 if (!valid_address(kbt, b->sp) ||
167 !valid_address(kbt, sigframe_top)) { 169 !valid_address(kbt, sigframe_top)) {
168 if (kbt->verbose) 170 if (kbt->verbose)
169 printk(" (odd signal: sp %#lx?)\n", 171 pr_err(" (odd signal: sp %#lx?)\n",
170 (unsigned long)(b->sp)); 172 (unsigned long)(b->sp));
171 return NULL; 173 return NULL;
172 } 174 }
173 frame = (struct rt_sigframe *)b->sp; 175 frame = (struct rt_sigframe *)b->sp;
174 if (kbt->verbose) { 176 if (kbt->verbose) {
175 printk(KERN_ERR " <received signal %d>\n", 177 pr_err(" <received signal %d>\n",
176 frame->info.si_signo); 178 frame->info.si_signo);
177 } 179 }
178 return &frame->uc.uc_mcontext.regs; 180 return &frame->uc.uc_mcontext.regs;
@@ -180,7 +182,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
180 return NULL; 182 return NULL;
181} 183}
182 184
183int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt) 185static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
184{ 186{
185 return is_sigreturn(kbt->it.pc); 187 return is_sigreturn(kbt->it.pc);
186} 188}
@@ -231,13 +233,13 @@ static void validate_stack(struct pt_regs *regs)
231 unsigned long sp = stack_pointer; 233 unsigned long sp = stack_pointer;
232 234
233 if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) { 235 if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
234 printk("WARNING: cpu %d: kernel stack page %#lx underrun!\n" 236 pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
235 " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", 237 " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
236 cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); 238 cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
237 } 239 }
238 240
239 else if (sp < ksp0_base + sizeof(struct thread_info)) { 241 else if (sp < ksp0_base + sizeof(struct thread_info)) {
240 printk("WARNING: cpu %d: kernel stack page %#lx overrun!\n" 242 pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
241 " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", 243 " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
242 cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); 244 cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
243 } 245 }
@@ -280,7 +282,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
280 if (!PageHighMem(page)) 282 if (!PageHighMem(page))
281 kbt->pgtable = __va(pgdir_pa); 283 kbt->pgtable = __va(pgdir_pa);
282 else 284 else
283 printk(KERN_ERR "page table not in LOWMEM" 285 pr_err("page table not in LOWMEM"
284 " (%#llx)\n", pgdir_pa); 286 " (%#llx)\n", pgdir_pa);
285 } 287 }
286 local_flush_tlb_all(); 288 local_flush_tlb_all();
@@ -288,13 +290,12 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
288 } 290 }
289 291
290 if (regs == NULL) { 292 if (regs == NULL) {
291 extern const void *get_switch_to_pc(void);
292 if (is_current || t->state == TASK_RUNNING) { 293 if (is_current || t->state == TASK_RUNNING) {
293 /* Can't do this; we need registers */ 294 /* Can't do this; we need registers */
294 kbt->end = 1; 295 kbt->end = 1;
295 return; 296 return;
296 } 297 }
297 pc = (ulong) get_switch_to_pc(); 298 pc = get_switch_to_pc();
298 lr = t->thread.pc; 299 lr = t->thread.pc;
299 sp = t->thread.ksp; 300 sp = t->thread.ksp;
300 r52 = 0; 301 r52 = 0;
@@ -344,8 +345,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
344 * then bust_spinlocks() spit out a space in front of us 345 * then bust_spinlocks() spit out a space in front of us
345 * and it will mess up our KERN_ERR. 346 * and it will mess up our KERN_ERR.
346 */ 347 */
347 printk("\n"); 348 pr_err("\n");
348 printk(KERN_ERR "Starting stack dump of tid %d, pid %d (%s)" 349 pr_err("Starting stack dump of tid %d, pid %d (%s)"
349 " on cpu %d at cycle %lld\n", 350 " on cpu %d at cycle %lld\n",
350 kbt->task->pid, kbt->task->tgid, kbt->task->comm, 351 kbt->task->pid, kbt->task->tgid, kbt->task->comm,
351 smp_processor_id(), get_cycles()); 352 smp_processor_id(), get_cycles());
@@ -385,17 +386,17 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
385 namebuf[sizeof(namebuf)-1] = '\0'; 386 namebuf[sizeof(namebuf)-1] = '\0';
386 } 387 }
387 388
388 printk(KERN_ERR " frame %d: 0x%lx %s(sp 0x%lx)\n", 389 pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n",
389 i++, address, namebuf, (unsigned long)(kbt->it.sp)); 390 i++, address, namebuf, (unsigned long)(kbt->it.sp));
390 391
391 if (i >= 100) { 392 if (i >= 100) {
392 printk(KERN_ERR "Stack dump truncated" 393 pr_err("Stack dump truncated"
393 " (%d frames)\n", i); 394 " (%d frames)\n", i);
394 break; 395 break;
395 } 396 }
396 } 397 }
397 if (headers) 398 if (headers)
398 printk(KERN_ERR "Stack dump complete\n"); 399 pr_err("Stack dump complete\n");
399} 400}
400EXPORT_SYMBOL(tile_show_stack); 401EXPORT_SYMBOL(tile_show_stack);
401 402
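
read_memory_func() above pairs pagefault_disable() with __copy_from_user_inatomic() so a backtrace taken from atomic context can probe addresses without sleeping; a minimal sketch of that non-faulting read (helper name illustrative):

    /* Probe user (or this task's kernel-stack) memory without taking
     * a sleeping page fault; returns true only if all bytes copied. */
    static bool safe_read(void *dst, unsigned long addr, size_t size)
    {
        unsigned long left;
        pagefault_disable();
        left = __copy_from_user_inatomic(dst,
                        (void __user __force *)addr, size);
        pagefault_enable();
        return left == 0;       /* nonzero == bytes not copied */
    }
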
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index 0427978cea0a..f0f87eab8c39 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -27,11 +27,10 @@
27#include <linux/mempolicy.h> 27#include <linux/mempolicy.h>
28#include <linux/binfmts.h> 28#include <linux/binfmts.h>
29#include <linux/fs.h> 29#include <linux/fs.h>
30#include <linux/syscalls.h> 30#include <linux/compat.h>
31#include <linux/uaccess.h> 31#include <linux/uaccess.h>
32#include <linux/signal.h> 32#include <linux/signal.h>
33#include <asm/syscalls.h> 33#include <asm/syscalls.h>
34
35#include <asm/pgtable.h> 34#include <asm/pgtable.h>
36#include <asm/homecache.h> 35#include <asm/homecache.h>
37#include <arch/chip.h> 36#include <arch/chip.h>
@@ -74,10 +73,7 @@ int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
74 73
75#endif /* 32-bit syscall wrappers */ 74#endif /* 32-bit syscall wrappers */
76 75
77/* 76/* Note: used by the compat code even in 64-bit Linux. */
78 * This API uses a 4KB-page-count offset into the file descriptor.
79 * It is likely not the right API to use on a 64-bit platform.
80 */
81SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, 77SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
82 unsigned long, prot, unsigned long, flags, 78 unsigned long, prot, unsigned long, flags,
83 unsigned long, fd, unsigned long, off_4k) 79 unsigned long, fd, unsigned long, off_4k)
@@ -89,10 +85,7 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
89 off_4k >> PAGE_ADJUST); 85 off_4k >> PAGE_ADJUST);
90} 86}
91 87
92/* 88#ifdef __tilegx__
93 * This API uses a byte offset into the file descriptor.
94 * It is likely not the right API to use on a 32-bit platform.
95 */
96SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, 89SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
97 unsigned long, prot, unsigned long, flags, 90 unsigned long, prot, unsigned long, flags,
98 unsigned long, fd, off_t, offset) 91 unsigned long, fd, off_t, offset)
@@ -102,6 +95,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
102 return sys_mmap_pgoff(addr, len, prot, flags, fd, 95 return sys_mmap_pgoff(addr, len, prot, flags, fd,
103 offset >> PAGE_SHIFT); 96 offset >> PAGE_SHIFT);
104} 97}
98#endif
105 99
106 100
107/* Provide the actual syscall number to call mapping. */ 101/* Provide the actual syscall number to call mapping. */
@@ -116,6 +110,10 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
116#define sys_sync_file_range sys_sync_file_range2 110#define sys_sync_file_range sys_sync_file_range2
117#endif 111#endif
118 112
113/*
114 * Note that we can't include <linux/unistd.h> here since the header
115 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
116 */
119void *sys_call_table[__NR_syscalls] = { 117void *sys_call_table[__NR_syscalls] = {
120 [0 ... __NR_syscalls-1] = sys_ni_syscall, 118 [0 ... __NR_syscalls-1] = sys_ni_syscall,
121#include <asm/unistd.h> 119#include <asm/unistd.h>
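
The new comment above alludes to the generic-syscall table trick: every slot defaults to sys_ni_syscall via a GNU range initializer, then <asm/unistd.h> is included with __SYSCALL redefined so each implemented call overrides its slot. A sketch of the conventional expansion (the __SYSCALL definition shown is the usual idiom, not quoted from this patch):

    /* Each __SYSCALL(nr, call) line in <asm/unistd.h> becomes a
     * designated initializer, overriding the sys_ni_syscall default. */
    #define __SYSCALL(nr, call) [nr] = (call),

    void *sys_call_table[__NR_syscalls] = {
        [0 ... __NR_syscalls - 1] = sys_ni_syscall,
    #include <asm/unistd.h>
    };
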
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 47500a324e32..b9ab25a889b5 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -23,6 +23,7 @@
23#include <linux/smp.h> 23#include <linux/smp.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <asm/irq_regs.h> 25#include <asm/irq_regs.h>
26#include <asm/traps.h>
26#include <hv/hypervisor.h> 27#include <hv/hypervisor.h>
27#include <arch/interrupts.h> 28#include <arch/interrupts.h>
28#include <arch/spr_def.h> 29#include <arch/spr_def.h>
@@ -45,13 +46,13 @@ static cycles_t cycles_per_sec __write_once;
45 */ 46 */
46#define TILE_MINSEC 5 47#define TILE_MINSEC 5
47 48
48cycles_t get_clock_rate() 49cycles_t get_clock_rate(void)
49{ 50{
50 return cycles_per_sec; 51 return cycles_per_sec;
51} 52}
52 53
53#if CHIP_HAS_SPLIT_CYCLE() 54#if CHIP_HAS_SPLIT_CYCLE()
54cycles_t get_cycles() 55cycles_t get_cycles(void)
55{ 56{
56 unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH); 57 unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH);
57 unsigned int low = __insn_mfspr(SPR_CYCLE_LOW); 58 unsigned int low = __insn_mfspr(SPR_CYCLE_LOW);
@@ -67,7 +68,7 @@ cycles_t get_cycles()
67} 68}
68#endif 69#endif
69 70
70cycles_t clocksource_get_cycles(struct clocksource *cs) 71static cycles_t clocksource_get_cycles(struct clocksource *cs)
71{ 72{
72 return get_cycles(); 73 return get_cycles();
73} 74}
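
On chips with a split cycle counter, get_cycles() above samples SPR_CYCLE_HIGH and SPR_CYCLE_LOW as two 32-bit halves; the hunk elides the recombination, but the standard technique re-reads the high half to detect a wrap between the two samples. A sketch under that assumption (loop structure assumed, not shown in this patch):

    /* Race-free 64-bit read of a counter exposed as two 32-bit SPRs:
     * if HIGH changed while we sampled LOW, LOW wrapped, so retry. */
    static cycles_t read_split_cycles(void)
    {
        unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH);
        unsigned int low, high2;

        for (;;) {
            low = __insn_mfspr(SPR_CYCLE_LOW);
            high2 = __insn_mfspr(SPR_CYCLE_HIGH);
            if (high2 == high)
                break;
            high = high2;
        }
        return (((cycles_t)high) << 32) | low;
    }
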
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 12cb10f38527..3870abbeeaa2 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -20,6 +20,9 @@
20#include <linux/uaccess.h> 20#include <linux/uaccess.h>
21#include <linux/ptrace.h> 21#include <linux/ptrace.h>
22#include <asm/opcode-tile.h> 22#include <asm/opcode-tile.h>
23#include <asm/opcode_constants.h>
24#include <asm/stack.h>
25#include <asm/traps.h>
23 26
24#include <arch/interrupts.h> 27#include <arch/interrupts.h>
25#include <arch/spr_def.h> 28#include <arch/spr_def.h>
@@ -42,7 +45,7 @@ static int __init setup_unaligned_fixup(char *str)
42 if (strict_strtol(str, 0, &val) != 0) 45 if (strict_strtol(str, 0, &val) != 0)
43 return 0; 46 return 0;
44 unaligned_fixup = val; 47 unaligned_fixup = val;
45 printk("Fixups for unaligned data accesses are %s\n", 48 pr_info("Fixups for unaligned data accesses are %s\n",
46 unaligned_fixup >= 0 ? 49 unaligned_fixup >= 0 ?
47 (unaligned_fixup ? "enabled" : "disabled") : 50 (unaligned_fixup ? "enabled" : "disabled") :
48 "completely disabled"); 51 "completely disabled");
@@ -56,7 +59,7 @@ static int dma_disabled;
56 59
57static int __init nodma(char *str) 60static int __init nodma(char *str)
58{ 61{
59 printk("User-space DMA is disabled\n"); 62 pr_info("User-space DMA is disabled\n");
60 dma_disabled = 1; 63 dma_disabled = 1;
61 return 1; 64 return 1;
62} 65}
@@ -97,20 +100,106 @@ static int retry_gpv(unsigned int gpv_reason)
97 100
98#endif /* CHIP_HAS_TILE_DMA() */ 101#endif /* CHIP_HAS_TILE_DMA() */
99 102
100/* Defined inside do_trap(), below. */
101#ifdef __tilegx__ 103#ifdef __tilegx__
102extern tilegx_bundle_bits bpt_code; 104#define bundle_bits tilegx_bundle_bits
103#else 105#else
104extern tile_bundle_bits bpt_code; 106#define bundle_bits tile_bundle_bits
105#endif 107#endif
106 108
109extern bundle_bits bpt_code;
110
111asm(".pushsection .rodata.bpt_code,\"a\";"
112 ".align 8;"
113 "bpt_code: bpt;"
114 ".size bpt_code,.-bpt_code;"
115 ".popsection");
116
117static int special_ill(bundle_bits bundle, int *sigp, int *codep)
118{
119 int sig, code, maxcode;
120
121 if (bundle == bpt_code) {
122 *sigp = SIGTRAP;
123 *codep = TRAP_BRKPT;
124 return 1;
125 }
126
127 /* If it's a "raise" bundle, then "ill" must be in pipe X1. */
128#ifdef __tilegx__
129 if ((bundle & TILEGX_BUNDLE_MODE_MASK) != 0)
130 return 0;
131 if (get_Opcode_X1(bundle) != UNARY_OPCODE_X1)
132 return 0;
133 if (get_UnaryOpcodeExtension_X1(bundle) != ILL_UNARY_OPCODE_X1)
134 return 0;
135#else
136 if (bundle & TILE_BUNDLE_Y_ENCODING_MASK)
137 return 0;
138 if (get_Opcode_X1(bundle) != SHUN_0_OPCODE_X1)
139 return 0;
140 if (get_UnShOpcodeExtension_X1(bundle) != UN_0_SHUN_0_OPCODE_X1)
141 return 0;
142 if (get_UnOpcodeExtension_X1(bundle) != ILL_UN_0_SHUN_0_OPCODE_X1)
143 return 0;
144#endif
145
146 /* Check that the magic distinguishers are set to mean "raise". */
147 if (get_Dest_X1(bundle) != 29 || get_SrcA_X1(bundle) != 37)
148 return 0;
149
150 /* There must be an "addli zero, zero, VAL" in X0. */
151 if (get_Opcode_X0(bundle) != ADDLI_OPCODE_X0)
152 return 0;
153 if (get_Dest_X0(bundle) != TREG_ZERO)
154 return 0;
155 if (get_SrcA_X0(bundle) != TREG_ZERO)
156 return 0;
157
158 /*
159 * Validate the proposed signal number and si_code value.
160 * Note that we embed these in the static instruction itself
161 * so that we perturb the register state as little as possible
162 * at the time of the actual fault; it's unlikely you'd ever
163 * need to dynamically choose which kind of fault to raise
164 * from user space.
165 */
166 sig = get_Imm16_X0(bundle) & 0x3f;
167 switch (sig) {
168 case SIGILL:
169 maxcode = NSIGILL;
170 break;
171 case SIGFPE:
172 maxcode = NSIGFPE;
173 break;
174 case SIGSEGV:
175 maxcode = NSIGSEGV;
176 break;
177 case SIGBUS:
178 maxcode = NSIGBUS;
179 break;
180 case SIGTRAP:
181 maxcode = NSIGTRAP;
182 break;
183 default:
184 return 0;
185 }
186 code = (get_Imm16_X0(bundle) >> 6) & 0xf;
187 if (code <= 0 || code > maxcode)
188 return 0;
189
190 /* Make it the requested signal. */
191 *sigp = sig;
192 *codep = code | __SI_FAULT;
193 return 1;
194}
195
107void __kprobes do_trap(struct pt_regs *regs, int fault_num, 196void __kprobes do_trap(struct pt_regs *regs, int fault_num,
108 unsigned long reason) 197 unsigned long reason)
109{ 198{
110 siginfo_t info = { 0 }; 199 siginfo_t info = { 0 };
111 int signo, code; 200 int signo, code;
112 unsigned long address; 201 unsigned long address;
113 __typeof__(bpt_code) instr; 202 bundle_bits instr;
114 203
115 /* Re-enable interrupts. */ 204 /* Re-enable interrupts. */
116 local_irq_enable(); 205 local_irq_enable();
@@ -122,10 +211,10 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
122 if (!user_mode(regs)) { 211 if (!user_mode(regs)) {
123 if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */ 212 if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */
124 return; 213 return;
125 printk(KERN_ALERT "Kernel took bad trap %d at PC %#lx\n", 214 pr_alert("Kernel took bad trap %d at PC %#lx\n",
126 fault_num, regs->pc); 215 fault_num, regs->pc);
127 if (fault_num == INT_GPV) 216 if (fault_num == INT_GPV)
128 printk(KERN_ALERT "GPV_REASON is %#lx\n", reason); 217 pr_alert("GPV_REASON is %#lx\n", reason);
129 show_regs(regs); 218 show_regs(regs);
130 do_exit(SIGKILL); /* FIXME: implement i386 die() */ 219 do_exit(SIGKILL); /* FIXME: implement i386 die() */
131 return; 220 return;
@@ -133,22 +222,14 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
133 222
134 switch (fault_num) { 223 switch (fault_num) {
135 case INT_ILL: 224 case INT_ILL:
136 asm(".pushsection .rodata.bpt_code,\"a\";" 225 if (copy_from_user(&instr, (void __user *)regs->pc,
137 ".align 8;" 226 sizeof(instr))) {
138 "bpt_code: bpt;" 227 pr_err("Unreadable instruction for INT_ILL:"
139 ".size bpt_code,.-bpt_code;"
140 ".popsection");
141
142 if (copy_from_user(&instr, (void *)regs->pc, sizeof(instr))) {
143 printk(KERN_ERR "Unreadable instruction for INT_ILL:"
144 " %#lx\n", regs->pc); 228 " %#lx\n", regs->pc);
145 do_exit(SIGKILL); 229 do_exit(SIGKILL);
146 return; 230 return;
147 } 231 }
148 if (instr == bpt_code) { 232 if (!special_ill(instr, &signo, &code)) {
149 signo = SIGTRAP;
150 code = TRAP_BRKPT;
151 } else {
152 signo = SIGILL; 233 signo = SIGILL;
153 code = ILL_ILLOPC; 234 code = ILL_ILLOPC;
154 } 235 }
@@ -181,7 +262,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
181 if (unaligned_fixup >= 0) { 262 if (unaligned_fixup >= 0) {
182 struct single_step_state *state = 263 struct single_step_state *state =
183 current_thread_info()->step_state; 264 current_thread_info()->step_state;
184 if (!state || (void *)(regs->pc) != state->buffer) { 265 if (!state ||
266 (void __user *)(regs->pc) != state->buffer) {
185 single_step_once(regs); 267 single_step_once(regs);
186 return; 268 return;
187 } 269 }
@@ -221,17 +303,15 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
221 303
222 info.si_signo = signo; 304 info.si_signo = signo;
223 info.si_code = code; 305 info.si_code = code;
224 info.si_addr = (void *)address; 306 info.si_addr = (void __user *)address;
225 if (signo == SIGILL) 307 if (signo == SIGILL)
226 info.si_trapno = fault_num; 308 info.si_trapno = fault_num;
227 force_sig_info(signo, &info, current); 309 force_sig_info(signo, &info, current);
228} 310}
229 311
230extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
231
232void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52) 312void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
233{ 313{
234 _dump_stack(dummy, pc, lr, sp, r52); 314 _dump_stack(dummy, pc, lr, sp, r52);
235 printk("Double fault: exiting\n"); 315 pr_emerg("Double fault: exiting\n");
236 machine_halt(); 316 machine_halt();
237} 317}
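
special_ill() above decodes the requested signal from the addli immediate: bits 0..5 carry the signal number and bits 6..9 the si_code. A self-contained sketch of that 16-bit layout, mirroring the extraction in the hunk (helper names illustrative):

    /* Pack/unpack the "raise" immediate exactly as special_ill()
     * extracts it: sig in bits 0..5, si_code in bits 6..9. */
    static inline int raise_imm_encode(int sig, int code)
    {
        return (sig & 0x3f) | ((code & 0xf) << 6);
    }

    static inline void raise_imm_decode(int imm, int *sig, int *code)
    {
        *sig = imm & 0x3f;
        *code = (imm >> 6) & 0xf;
    }
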
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 77388c1415bd..25fdc0c1839a 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -36,8 +36,8 @@ SECTIONS
36 36
37 /* Now the real code */ 37 /* Now the real code */
38 . = ALIGN(0x20000); 38 . = ALIGN(0x20000);
39 HEAD_TEXT_SECTION :text =0
40 .text : AT (ADDR(.text) - LOAD_OFFSET) { 39 .text : AT (ADDR(.text) - LOAD_OFFSET) {
40 HEAD_TEXT
41 SCHED_TEXT 41 SCHED_TEXT
42 LOCK_TEXT 42 LOCK_TEXT
43 __fix_text_end = .; /* tile-cpack won't rearrange before this */ 43 __fix_text_end = .; /* tile-cpack won't rearrange before this */
@@ -46,7 +46,7 @@ SECTIONS
46 *(.coldtext*) 46 *(.coldtext*)
47 *(.fixup) 47 *(.fixup)
48 *(.gnu.warning) 48 *(.gnu.warning)
49 } 49 } :text =0
50 _etext = .; 50 _etext = .;
51 51
52 /* "Init" is divided into two areas with very different virtual addresses. */ 52 /* "Init" is divided into two areas with very different virtual addresses. */