author     Grant Likely <grant.likely@secretlab.ca>  2010-12-30 00:20:30 -0500
committer  Grant Likely <grant.likely@secretlab.ca>  2010-12-30 00:21:47 -0500
commit     d392da5207352f09030e95d9ea335a4225667ec0 (patch)
tree       7d6cd1932afcad0a5619a5c504a6d93ca318187c /arch/tile/kernel
parent     e39d5ef678045d61812c1401f04fe8edb14d6359 (diff)
parent     387c31c7e5c9805b0aef8833d1731a5fe7bdea14 (diff)

Merge v2.6.37-rc8 into powerpc/next
Diffstat (limited to 'arch/tile/kernel')
 arch/tile/kernel/Makefile        |   1
 arch/tile/kernel/backtrace.c     | 141
 arch/tile/kernel/compat.c        |  21
 arch/tile/kernel/compat_signal.c |  21
 arch/tile/kernel/early_printk.c  |   2
 arch/tile/kernel/entry.S         |  34
 arch/tile/kernel/hardwall.c      |   5
 arch/tile/kernel/head_32.S       |   5
 arch/tile/kernel/intvec_32.S     | 144
 arch/tile/kernel/irq.c           |  22
 arch/tile/kernel/machine_kexec.c |   6
 arch/tile/kernel/messaging.c     |   2
 arch/tile/kernel/pci.c           | 621
 arch/tile/kernel/proc.c          |   1
 arch/tile/kernel/process.c       |  81
 arch/tile/kernel/ptrace.c        | 108
 arch/tile/kernel/reboot.c        |   6
 arch/tile/kernel/regs_32.S       |   2
 arch/tile/kernel/setup.c         |  48
 arch/tile/kernel/signal.c        |  51
 arch/tile/kernel/single_step.c   |  73
 arch/tile/kernel/smp.c           |   4
 arch/tile/kernel/smpboot.c       |   1
 arch/tile/kernel/stack.c         |  45
 arch/tile/kernel/sys.c           |  10
 arch/tile/kernel/time.c          |  41
 arch/tile/kernel/traps.c         |   8
 27 files changed, 1163 insertions(+), 341 deletions(-)
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 112b1e248f05..b4c8e8ec45dc 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_PCI)		+= pci.o
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
index 77265f3b58d6..55a6a74974b4 100644
--- a/arch/tile/kernel/backtrace.c
+++ b/arch/tile/kernel/backtrace.c
@@ -19,9 +19,6 @@
 
 #include <arch/chip.h>
 
-#if TILE_CHIP < 10
-
-
 #include <asm/opcode-tile.h>
 
 
@@ -29,6 +26,27 @@
 #define TREG_LR 55
 
 
+#if TILE_CHIP >= 10
+#define tile_bundle_bits tilegx_bundle_bits
+#define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE
+#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
+#define tile_decoded_instruction tilegx_decoded_instruction
+#define tile_mnemonic tilegx_mnemonic
+#define parse_insn_tile parse_insn_tilegx
+#define TILE_OPC_IRET TILEGX_OPC_IRET
+#define TILE_OPC_ADDI TILEGX_OPC_ADDI
+#define TILE_OPC_ADDLI TILEGX_OPC_ADDLI
+#define TILE_OPC_INFO TILEGX_OPC_INFO
+#define TILE_OPC_INFOL TILEGX_OPC_INFOL
+#define TILE_OPC_JRP TILEGX_OPC_JRP
+#define TILE_OPC_MOVE TILEGX_OPC_MOVE
+#define OPCODE_STORE TILEGX_OPC_ST
+typedef long long bt_int_reg_t;
+#else
+#define OPCODE_STORE TILE_OPC_SW
+typedef int bt_int_reg_t;
+#endif
+
 /** A decoded bundle used for backtracer analysis. */
 struct BacktraceBundle {
 	tile_bundle_bits bits;
@@ -41,7 +59,7 @@ struct BacktraceBundle {
 /* This implementation only makes sense for native tools. */
 /** Default function to read memory. */
 static bool bt_read_memory(void *result, VirtualAddress addr,
-			   size_t size, void *extra)
+			   unsigned int size, void *extra)
 {
 	/* FIXME: this should do some horrible signal stuff to catch
 	 * SEGV cleanly and fail.
@@ -106,6 +124,12 @@ static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
 		find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
 	if (insn == NULL)
 		insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
+#if TILE_CHIP >= 10
+	if (insn == NULL)
+		insn = find_matching_insn(bundle, TILEGX_OPC_ADDXLI, vals, 2);
+	if (insn == NULL)
+		insn = find_matching_insn(bundle, TILEGX_OPC_ADDXI, vals, 2);
+#endif
 	if (insn == NULL)
 		return false;
 
@@ -190,13 +214,52 @@ static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle)
 	return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
 }
 
-/** Does this bundle contain the instruction 'sw sp, lr'? */
+/** Does this bundle contain a store of lr to sp? */
 static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
 {
 	static const int vals[2] = { TREG_SP, TREG_LR };
-	return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL;
+	return find_matching_insn(bundle, OPCODE_STORE, vals, 2) != NULL;
 }
 
+#if TILE_CHIP >= 10
+/** Track moveli values placed into registers. */
+static inline void bt_update_moveli(const struct BacktraceBundle *bundle,
+				    int moveli_args[])
+{
+	int i;
+	for (i = 0; i < bundle->num_insns; i++) {
+		const struct tile_decoded_instruction *insn =
+			&bundle->insns[i];
+
+		if (insn->opcode->mnemonic == TILEGX_OPC_MOVELI) {
+			int reg = insn->operand_values[0];
+			moveli_args[reg] = insn->operand_values[1];
+		}
+	}
+}
+
+/** Does this bundle contain an 'add sp, sp, reg' instruction
+ * from a register that we saw a moveli into, and if so, what
+ * is the value in the register?
+ */
+static bool bt_has_add_sp(const struct BacktraceBundle *bundle, int *adjust,
+			  int moveli_args[])
+{
+	static const int vals[2] = { TREG_SP, TREG_SP };
+
+	const struct tile_decoded_instruction *insn =
+		find_matching_insn(bundle, TILEGX_OPC_ADDX, vals, 2);
+	if (insn) {
+		int reg = insn->operand_values[2];
+		if (moveli_args[reg]) {
+			*adjust = moveli_args[reg];
+			return true;
+		}
+	}
+	return false;
+}
+#endif
+
 /** Locates the caller's PC and SP for a program starting at the
  * given address.
  */
@@ -227,6 +290,11 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
 	int next_bundle = 0;
 	VirtualAddress pc;
 
+#if TILE_CHIP >= 10
+	/* Naively try to track moveli values to support addx for -m32. */
+	int moveli_args[TILEGX_NUM_REGISTERS] = { 0 };
+#endif
+
 	/* Default to assuming that the caller's sp is the current sp.
 	 * This is necessary to handle the case where we start backtracing
 	 * right at the end of the epilog.
@@ -301,6 +369,10 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
 			/* Weird; reserved value, ignore it. */
 			continue;
 		}
+		if (info_operand & ENTRY_POINT_INFO_OP) {
+			/* This info op is ignored by the backtracer. */
+			continue;
+		}
 
 		/* Skip info ops which are not in the
 		 * "one_ago" mode we want right now.
@@ -380,7 +452,11 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
 
 		if (!sp_determined) {
 			int adjust;
-			if (bt_has_addi_sp(&bundle, &adjust)) {
+			if (bt_has_addi_sp(&bundle, &adjust)
+#if TILE_CHIP >= 10
+			    || bt_has_add_sp(&bundle, &adjust, moveli_args)
+#endif
+			    ) {
 				location->sp_location = SP_LOC_OFFSET;
 
 				if (adjust <= 0) {
@@ -427,6 +503,11 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
 				sp_determined = true;
 			}
 		}
+
+#if TILE_CHIP >= 10
+		/* Track moveli arguments for -m32 mode. */
+		bt_update_moveli(&bundle, moveli_args);
+#endif
 	}
 
 	if (bt_has_iret(&bundle)) {
@@ -502,11 +583,10 @@ void backtrace_init(BacktraceIterator *state,
 			break;
 		}
 
-	/* The frame pointer should theoretically be aligned mod 8. If
-	 * it's not even aligned mod 4 then something terrible happened
-	 * and we should mark it as invalid.
+	/* If the frame pointer is not aligned to the basic word size
+	 * something terrible happened and we should mark it as invalid.
 	 */
-	if (fp % 4 != 0)
+	if (fp % sizeof(bt_int_reg_t) != 0)
 		fp = -1;
 
 	/* -1 means "don't know initial_frame_caller_pc". */
@@ -547,9 +627,16 @@ void backtrace_init(BacktraceIterator *state,
 	state->read_memory_func_extra = read_memory_func_extra;
 }
 
+/* Handle the case where the register holds more bits than the VA. */
+static bool valid_addr_reg(bt_int_reg_t reg)
+{
+	return ((VirtualAddress)reg == reg);
+}
+
 bool backtrace_next(BacktraceIterator *state)
 {
-	VirtualAddress next_fp, next_pc, next_frame[2];
+	VirtualAddress next_fp, next_pc;
+	bt_int_reg_t next_frame[2];
 
 	if (state->fp == -1) {
 		/* No parent frame. */
@@ -563,11 +650,9 @@ bool backtrace_next(BacktraceIterator *state)
 	}
 
 	next_fp = next_frame[1];
-	if (next_fp % 4 != 0) {
-		/* Caller's frame pointer is suspect, so give up.
-		 * Technically it should be aligned mod 8, but we will
-		 * be forgiving here.
-		 */
+	if (!valid_addr_reg(next_frame[1]) ||
+	    next_fp % sizeof(bt_int_reg_t) != 0) {
+		/* Caller's frame pointer is suspect, so give up. */
 		return false;
 	}
 
@@ -585,7 +670,7 @@ bool backtrace_next(BacktraceIterator *state)
 	} else {
 		/* Get the caller PC from the frame linkage area. */
 		next_pc = next_frame[0];
-		if (next_pc == 0 ||
+		if (!valid_addr_reg(next_frame[0]) || next_pc == 0 ||
 		    next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
 			/* The PC is suspect, so give up. */
 			return false;
@@ -599,23 +684,3 @@ bool backtrace_next(BacktraceIterator *state)
 
 	return true;
 }
-
-#else /* TILE_CHIP < 10 */
-
-void backtrace_init(BacktraceIterator *state,
-		    BacktraceMemoryReader read_memory_func,
-		    void *read_memory_func_extra,
-		    VirtualAddress pc, VirtualAddress lr,
-		    VirtualAddress sp, VirtualAddress r52)
-{
-	state->pc = pc;
-	state->sp = sp;
-	state->fp = -1;
-	state->initial_frame_caller_pc = -1;
-	state->read_memory_func = read_memory_func;
-	state->read_memory_func_extra = read_memory_func_extra;
-}
-
-bool backtrace_next(BacktraceIterator *state) { return false; }
-
-#endif /* TILE_CHIP < 10 */
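The new valid_addr_reg() guard matters once frame words are read as bt_int_reg_t, which can be wider than the virtual-address type. A standalone C illustration of the round-trip cast (the typedefs here are stand-ins chosen for the example, not the kernel's):

#include <stdio.h>

typedef unsigned int VirtualAddress;   /* assumed 32-bit VA for the example */
typedef long long bt_int_reg_t;        /* 64-bit register word, as on TILE-Gx */

/* Same idea as the patch's valid_addr_reg(). */
static int valid_addr_reg(bt_int_reg_t reg)
{
	return ((VirtualAddress)reg == reg);   /* false if high bits are lost */
}

int main(void)
{
	/* A plausible 32-bit address survives the round trip. */
	printf("%d\n", valid_addr_reg(0x10000));                              /* 1 */
	/* A sign-extended register value cannot be a valid 32-bit VA. */
	printf("%d\n", valid_addr_reg((bt_int_reg_t)0xffffffff80001000ULL));  /* 0 */
	return 0;
}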
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index b1e06d041555..dbc213adf5e1 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -21,7 +21,6 @@
 #include <linux/kdev_t.h>
 #include <linux/fs.h>
 #include <linux/fcntl.h>
-#include <linux/smp_lock.h>
 #include <linux/uaccess.h>
 #include <linux/signal.h>
 #include <asm/syscalls.h>
@@ -148,14 +147,20 @@ long tile_compat_sys_msgrcv(int msqid,
 #define compat_sys_readahead sys32_readahead
 #define compat_sys_sync_file_range compat_sys_sync_file_range2
 
-/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
-#define compat_sys_stat64 sys_newstat
-#define compat_sys_lstat64 sys_newlstat
-#define compat_sys_fstat64 sys_newfstat
-#define compat_sys_fstatat64 sys_newfstatat
+/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
+#define compat_sys_stat64 sys_stat64
+#define compat_sys_lstat64 sys_lstat64
+#define compat_sys_fstat64 sys_fstat64
+#define compat_sys_fstatat64 sys_fstatat64
 
-/* Pass full 64-bit values through ptrace. */
-#define compat_sys_ptrace tile_compat_sys_ptrace
+/* The native sys_ptrace dynamically handles compat binaries. */
+#define compat_sys_ptrace sys_ptrace
+
+/* Call the trampolines to manage pt_regs where necessary. */
+#define compat_sys_execve _compat_sys_execve
+#define compat_sys_sigaltstack _compat_sys_sigaltstack
+#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
+#define sys_clone _sys_clone
 
 /*
  * Note that we can't include <linux/unistd.h> here since the header
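The #define block above takes effect when the compat syscall table is generated later in this file; a hedged sketch of the redirection pattern (the syscall number, table size, and signature below are made up for illustration — the real table is built from <asm/unistd.h>):

/* Declaration only; the real trampoline is implemented in assembly. */
long _compat_sys_execve(void);

#define compat_sys_execve _compat_sys_execve   /* alias from the patch */
#define __SYSCALL(nr, call) [nr] = (call),

typedef long (*syscall_fn)(void);
syscall_fn compat_sys_call_table[512] = {
	__SYSCALL(221, compat_sys_execve)  /* slot 221 is a made-up number */
};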
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index d5efb215dd5f..dbb0dfc7bece 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -15,7 +15,6 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
 #include <linux/errno.h>
@@ -56,13 +55,15 @@ struct compat_ucontext {
 	sigset_t uc_sigmask;	/* mask last for extensibility */
 };
 
+#define COMPAT_SI_PAD_SIZE	((SI_MAX_SIZE - 3 * sizeof(int)) / sizeof(int))
+
 struct compat_siginfo {
 	int si_signo;
 	int si_errno;
 	int si_code;
 
 	union {
-		int _pad[SI_PAD_SIZE];
+		int _pad[COMPAT_SI_PAD_SIZE];
 
 		/* kill() */
 		struct {
@@ -254,9 +255,9 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 	return err;
 }
 
-long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
-			     struct compat_sigaltstack __user *uoss_ptr,
-			     struct pt_regs *regs)
+long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+			    struct compat_sigaltstack __user *uoss_ptr,
+			    struct pt_regs *regs)
 {
 	stack_t uss, uoss;
 	int ret;
@@ -289,12 +290,12 @@ long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
 	return ret;
 }
 
-long _compat_sys_rt_sigreturn(struct pt_regs *regs)
+/* The assembly shim for this function arranges to ignore the return value. */
+long compat_sys_rt_sigreturn(struct pt_regs *regs)
 {
 	struct compat_rt_sigframe __user *frame =
 		(struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
 	sigset_t set;
-	long r0;
 
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
@@ -307,13 +308,13 @@ long _compat_sys_rt_sigreturn(struct pt_regs *regs)
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 
-	if (_compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
+	if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
 		goto badframe;
 
-	return r0;
+	return 0;
 
 badframe:
 	force_sig(SIGSEGV, current);
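COMPAT_SI_PAD_SIZE exists because the padding must be counted so the compat layout stays the same total size on a 64-bit kernel. Assuming the generic SI_MAX_SIZE of 128 bytes, the arithmetic works out as below (a standalone check, not kernel code):

#include <assert.h>

#define SI_MAX_SIZE 128   /* assumed: the generic kernel value */
#define COMPAT_SI_PAD_SIZE ((SI_MAX_SIZE - 3 * sizeof(int)) / sizeof(int))

int main(void)
{
	/* (128 - 12) / 4 = 29 pad words; 3 header ints + pad = 128 bytes. */
	assert(COMPAT_SI_PAD_SIZE == 29);
	assert(3 * sizeof(int) + COMPAT_SI_PAD_SIZE * sizeof(int) == SI_MAX_SIZE);
	return 0;
}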
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index 2c54fd43a8a0..493a0e66d916 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -54,7 +54,7 @@ void early_printk(const char *fmt, ...)
 void early_panic(const char *fmt, ...)
 {
 	va_list ap;
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	va_start(ap, fmt);
 	early_printk("Kernel panic - not syncing: ");
 	early_vprintk(fmt, ap);
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 3d01383b1b0e..fd8dc42abdcb 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -15,7 +15,9 @@
 #include <linux/linkage.h>
 #include <linux/unistd.h>
 #include <asm/irqflags.h>
+#include <asm/processor.h>
 #include <arch/abi.h>
+#include <arch/spr_def.h>
 
 #ifdef __tilegx__
 #define bnzt bnezt
@@ -25,28 +27,6 @@ STD_ENTRY(current_text_addr)
 	{ move r0, lr; jrp lr }
 	STD_ENDPROC(current_text_addr)
 
-STD_ENTRY(_sim_syscall)
-	/*
-	 * Wait for r0-r9 to be ready (and lr on the off chance we
-	 * want the syscall to locate its caller), then make a magic
-	 * simulator syscall.
-	 *
-	 * We carefully stall until the registers are readable in case they
-	 * are the target of a slow load, etc. so that tile-sim will
-	 * definitely be able to read all of them inside the magic syscall.
-	 *
-	 * Technically this is wrong for r3-r9 and lr, since an interrupt
-	 * could come in and restore the registers with a slow load right
-	 * before executing the mtspr.  We may need to modify tile-sim to
-	 * explicitly stall for this case, but we do not yet have
-	 * a way to implement such a stall.
-	 */
-	{ and zero, lr, r9 ; and zero, r8, r7 }
-	{ and zero, r6, r5 ; and zero, r4, r3 }
-	{ and zero, r2, r1 ; mtspr SIM_CONTROL, r0 }
-	{ jrp lr }
-	STD_ENDPROC(_sim_syscall)
-
 /*
  * Implement execve().  The i386 code has a note that forking from kernel
  * space results in no copy on write until the execve, so we should be
@@ -102,7 +82,7 @@ STD_ENTRY(KBacktraceIterator_init_current)
 STD_ENTRY(cpu_idle_on_new_stack)
 	{
 	 move sp, r1
-	 mtspr SYSTEM_SAVE_1_0, r2
+	 mtspr SPR_SYSTEM_SAVE_K_0, r2
 	}
 	jal free_thread_info
 	j cpu_idle
@@ -124,15 +104,15 @@ STD_ENTRY(smp_nap)
 STD_ENTRY(_cpu_idle)
 	{
 	 lnk r0
-	 movei r1, 1
+	 movei r1, KERNEL_PL
 	}
 	{
 	 addli r0, r0, _cpu_idle_nap - .
 	 mtspr INTERRUPT_CRITICAL_SECTION, r1
 	}
 	IRQ_ENABLE(r2, r3)             /* unmask, but still with ICS set */
-	mtspr EX_CONTEXT_1_1, r1       /* PL1, ICS clear */
-	mtspr EX_CONTEXT_1_0, r0
+	mtspr SPR_EX_CONTEXT_K_1, r1   /* Kernel PL, ICS clear */
+	mtspr SPR_EX_CONTEXT_K_0, r0
 	iret
 	.global _cpu_idle_nap
 _cpu_idle_nap:
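The movei of the literal 1 becomes KERNEL_PL so the same code can run at whichever protection level the kernel is configured for (note the new #include <asm/processor.h> above). Presumably KERNEL_PL reduces to the Kconfig choice along these lines (a sketch; the actual definition lives in <asm/processor.h>):

/* Sketch only: KERNEL_PL tracks the Kconfig choice instead of a literal 1. */
#ifndef CONFIG_KERNEL_PL
#define CONFIG_KERNEL_PL 1        /* assumed default: same PL as before */
#endif
#define KERNEL_PL CONFIG_KERNEL_PL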
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 584b965dc824..e910530436e6 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -151,12 +151,12 @@ enum direction_protect {
 
 static void enable_firewall_interrupts(void)
 {
-	raw_local_irq_unmask_now(INT_UDN_FIREWALL);
+	arch_local_irq_unmask_now(INT_UDN_FIREWALL);
 }
 
 static void disable_firewall_interrupts(void)
 {
-	raw_local_irq_mask_now(INT_UDN_FIREWALL);
+	arch_local_irq_mask_now(INT_UDN_FIREWALL);
 }
 
 /* Set up hardwall on this cpu based on the passed hardwall_info. */
@@ -768,6 +768,7 @@ static int hardwall_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dev_hardwall_fops = {
+	.open           = nonseekable_open,
 	.unlocked_ioctl = hardwall_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl   = hardwall_compat_ioctl,
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 2b4f6c091701..90e7c4435693 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -23,6 +23,7 @@
 #include <asm/asm-offsets.h>
 #include <hv/hypervisor.h>
 #include <arch/chip.h>
+#include <arch/spr_def.h>
 
 /*
  * This module contains the entry code for kernel images. It performs the
@@ -76,7 +77,7 @@ ENTRY(_start)
 	}
 1:
 
-	/* Get our processor number and save it away in SAVE_1_0. */
+	/* Get our processor number and save it away in SAVE_K_0. */
 	jal hv_inquire_topology
 	mulll_uu r4, r1, r2        /* r1 == y, r2 == width */
 	add r4, r4, r0             /* r0 == x, so r4 == cpu == y*width + x */
@@ -124,7 +125,7 @@ ENTRY(_start)
 	lw r0, r0
 	lw sp, r1
 	or r4, sp, r4
-	mtspr SYSTEM_SAVE_1_0, r4  /* save ksp0 + cpu */
+	mtspr SPR_SYSTEM_SAVE_K_0, r4  /* save ksp0 + cpu */
 	addi sp, sp, -STACK_TOP_DELTA
 	{
 	 move lr, zero   /* stop backtraces in the called function */
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 3404c75f8e64..5eed4a02bf62 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -32,8 +32,8 @@
 # error "No support for kernel preemption currently"
 #endif
 
-#if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48
-# error INT_INTCTRL_1 coded to set high interrupt mask
+#if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48
+# error INT_INTCTRL_K coded to set high interrupt mask
 #endif
 
 #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
@@ -132,8 +132,8 @@ intvec_\vecname:
 
 	/* Temporarily save a register so we have somewhere to work. */
 
-	mtspr   SYSTEM_SAVE_1_1, r0
-	mfspr   r0, EX_CONTEXT_1_1
+	mtspr   SPR_SYSTEM_SAVE_K_1, r0
+	mfspr   r0, SPR_EX_CONTEXT_K_1
 
 	/* The cmpxchg code clears sp to force us to reset it here on fault. */
 	{
@@ -167,18 +167,18 @@ intvec_\vecname:
 	 * The page_fault handler may be downcalled directly by the
 	 * hypervisor even when Linux is running and has ICS set.
 	 *
-	 * In this case the contents of EX_CONTEXT_1_1 reflect the
+	 * In this case the contents of EX_CONTEXT_K_1 reflect the
 	 * previous fault and can't be relied on to choose whether or
 	 * not to reinitialize the stack pointer.  So we add a test
-	 * to see whether SYSTEM_SAVE_1_2 has the high bit set,
+	 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
 	 * and if so we don't reinitialize sp, since we must be coming
 	 * from Linux.  (In fact the precise case is !(val & ~1),
 	 * but any Linux PC has to have the high bit set.)
 	 *
-	 * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for
+	 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
 	 * any path that turns into a downcall to one of our TLB handlers.
 	 */
-	mfspr   r0, SYSTEM_SAVE_1_2
+	mfspr   r0, SPR_SYSTEM_SAVE_K_2
 	{
 	 blz r0, 0f    /* high bit in S_S_1_2 is for a PC to use */
 	 move r0, sp
@@ -187,12 +187,12 @@ intvec_\vecname:
 
 2:
 	/*
-	 * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and
+	 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
 	 * the current stack top in the higher bits.  So we recover
 	 * our stack top by just masking off the low bits, then
 	 * point sp at the top aligned address on the actual stack page.
 	 */
-	mfspr   r0, SYSTEM_SAVE_1_0
+	mfspr   r0, SPR_SYSTEM_SAVE_K_0
 	mm      r0, r0, zero, LOG2_THREAD_SIZE, 31
 
 0:
@@ -254,7 +254,7 @@ intvec_\vecname:
 	 sw sp, r3
 	 addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
 	}
-	mfspr   r0, EX_CONTEXT_1_0
+	mfspr   r0, SPR_EX_CONTEXT_K_0
 	.ifc \processing,handle_syscall
 	/*
 	 * Bump the saved PC by one bundle so that when we return, we won't
@@ -267,7 +267,7 @@ intvec_\vecname:
 	 sw sp, r0
 	 addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
 	}
-	mfspr   r0, EX_CONTEXT_1_1
+	mfspr   r0, SPR_EX_CONTEXT_K_1
 	{
 	 sw sp, r0
 	 addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
@@ -289,7 +289,7 @@ intvec_\vecname:
 	.endif
 	 addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
 	}
-	mfspr   r0, SYSTEM_SAVE_1_1    /* Original r0 */
+	mfspr   r0, SPR_SYSTEM_SAVE_K_1    /* Original r0 */
 	{
 	 sw sp, r0
 	 addi sp, sp, -PTREGS_OFFSET_REG(0) - 4
@@ -309,12 +309,12 @@ intvec_\vecname:
 	 * See discussion below at "finish_interrupt_save".
 	 */
 	.ifc \c_routine, do_page_fault
-	mfspr   r2, SYSTEM_SAVE_1_3   /* address of page fault */
-	mfspr   r3, SYSTEM_SAVE_1_2   /* info about page fault */
+	mfspr   r2, SPR_SYSTEM_SAVE_K_3   /* address of page fault */
+	mfspr   r3, SPR_SYSTEM_SAVE_K_2   /* info about page fault */
 	.else
 	.ifc \vecnum, INT_DOUBLE_FAULT
 	{
-	 mfspr  r2, SYSTEM_SAVE_1_2   /* double fault info from HV */
+	 mfspr  r2, SPR_SYSTEM_SAVE_K_2   /* double fault info from HV */
 	 movei  r3, 0
 	}
 	.else
@@ -467,7 +467,7 @@ intvec_\vecname:
 	/* Load tp with our per-cpu offset. */
 #ifdef CONFIG_SMP
 	{
-	 mfspr  r20, SYSTEM_SAVE_1_0
+	 mfspr  r20, SPR_SYSTEM_SAVE_K_0
 	 moveli r21, lo16(__per_cpu_offset)
 	}
 	{
@@ -487,7 +487,7 @@ intvec_\vecname:
 	 * We load flags in r32 here so we can jump to .Lrestore_regs
 	 * directly after do_page_fault_ics() if necessary.
 	 */
-	mfspr   r32, EX_CONTEXT_1_1
+	mfspr   r32, SPR_EX_CONTEXT_K_1
 	{
 	 andi   r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 	 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
@@ -952,16 +952,16 @@ STD_ENTRY(interrupt_return)
 	 * able to safely read all the remaining words on those cache
 	 * lines without waiting for the memory subsystem.
 	 */
-	pop_reg_zero r0, r1, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
+	pop_reg_zero r0, r28, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
 	pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30)
 	pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
 	pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
 	{
-	 mtspr EX_CONTEXT_1_0, r21
+	 mtspr SPR_EX_CONTEXT_K_0, r21
 	 move r5, zero
 	}
 	{
-	 mtspr EX_CONTEXT_1_1, lr
+	 mtspr SPR_EX_CONTEXT_K_1, lr
 	 andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 	}
 
@@ -1017,7 +1017,17 @@ STD_ENTRY(interrupt_return)
 	{ move r22, zero; move r23, zero }
 	{ move r24, zero; move r25, zero }
 	{ move r26, zero; move r27, zero }
-	{ move r28, zero; move r29, zero }
+
+	/* Set r1 to errno if we are returning an error, otherwise zero. */
+	{
+	 moveli r29, 4096
+	 sub r1, zero, r0
+	}
+	slt_u r29, r1, r29
+	{
+	 mnz r1, r29, r1
+	 move r29, zero
+	}
 	iret
 
 	/*
@@ -1189,7 +1199,7 @@ STD_ENTRY(interrupt_return)
 	STD_ENDPROC(interrupt_return)
 
 	/*
-	 * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit
+	 * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
 	 * before returning, so we can properly get more downcalls.
 	 */
 	.pushsection .text.handle_interrupt_downcall,"ax"
@@ -1198,11 +1208,11 @@ handle_interrupt_downcall:
 	check_single_stepping normal, .Ldispatch_downcall
 .Ldispatch_downcall:
 
-	/* Clear INTCTRL_1 from the set of interrupts we ever enable. */
+	/* Clear INTCTRL_K from the set of interrupts we ever enable. */
 	GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
 	{
 	 addi   r30, r30, 4
-	 movei  r31, INT_MASK(INT_INTCTRL_1)
+	 movei  r31, INT_MASK(INT_INTCTRL_K)
 	}
 	{
 	 lw     r20, r30
@@ -1217,7 +1227,7 @@ handle_interrupt_downcall:
 	}
 	FEEDBACK_REENTER(handle_interrupt_downcall)
 
-	/* Allow INTCTRL_1 to be enabled next time we enable interrupts. */
+	/* Allow INTCTRL_K to be enabled next time we enable interrupts. */
 	lw      r20, r30
 	or      r20, r20, r31
 	sw      r30, r20
@@ -1332,8 +1342,8 @@ handle_syscall:
 	lw      r20, r20
 
 	/* Jump to syscall handler. */
-	jalr    r20; .Lhandle_syscall_link:
-	FEEDBACK_REENTER(handle_syscall)
+	jalr    r20
+.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
 
 	/*
 	 * Write our r0 onto the stack so it gets restored instead
@@ -1342,6 +1352,9 @@ handle_syscall:
 	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
 	sw      r29, r0
 
+.Lsyscall_sigreturn_skip:
+	FEEDBACK_REENTER(handle_syscall)
+
 	/* Do syscall trace again, if requested. */
 	lw      r30, r31
 	andi    r30, r30, _TIF_SYSCALL_TRACE
@@ -1462,7 +1475,12 @@ handle_ill:
 	lw      r26, r24
 	sw      r28, r26
 
-	/* Clear TIF_SINGLESTEP */
+	/*
+	 * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
+	 * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
+	 * need to clear it here and can't really impose on all other arches.
+	 * So what's another write between friends?
+	 */
 	GET_THREAD_INFO(r0)
 
 	addi    r1, r0, THREAD_INFO_FLAGS_OFFSET
@@ -1496,17 +1514,10 @@ handle_ill:
 	}
 	STD_ENDPROC(handle_ill)
 
-	.pushsection .rodata, "a"
-	.align  8
-bpt_code:
-	bpt
-	ENDPROC(bpt_code)
-	.popsection
-
 /* Various stub interrupt handlers and syscall handlers */
 
 STD_ENTRY_LOCAL(_kernel_double_fault)
-	mfspr   r1, EX_CONTEXT_1_0
+	mfspr   r1, SPR_EX_CONTEXT_K_0
 	move    r2, lr
 	move    r3, sp
 	move    r4, r52
@@ -1515,34 +1526,44 @@ STD_ENTRY_LOCAL(_kernel_double_fault)
 	STD_ENDPROC(_kernel_double_fault)
 
 STD_ENTRY_LOCAL(bad_intr)
-	mfspr   r2, EX_CONTEXT_1_0
+	mfspr   r2, SPR_EX_CONTEXT_K_0
 	panic   "Unhandled interrupt %#x: PC %#lx"
 	STD_ENDPROC(bad_intr)
 
 /* Put address of pt_regs in reg and jump. */
 #define PTREGS_SYSCALL(x, reg)                \
-	STD_ENTRY(x);                         \
+	STD_ENTRY(_##x);                      \
 	{                                     \
 	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
-	 j _##x                               \
+	 j x                                  \
 	};                                    \
-	STD_ENDPROC(x)
+	STD_ENDPROC(_##x)
+
+/*
+ * Special-case sigreturn to not write r0 to the stack on return.
+ * This is technically more efficient, but it also avoids difficulties
+ * in the 64-bit OS when handling 32-bit compat code, since we must not
+ * sign-extend r0 for the sigreturn return-value case.
+ */
+#define PTREGS_SYSCALL_SIGRETURN(x, reg)                \
+	STD_ENTRY(_##x);                                \
+	addli   lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
+	{                                               \
+	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE);           \
+	 j x                                            \
+	};                                              \
+	STD_ENDPROC(_##x)
 
 PTREGS_SYSCALL(sys_execve, r3)
 PTREGS_SYSCALL(sys_sigaltstack, r2)
-PTREGS_SYSCALL(sys_rt_sigreturn, r0)
-
-/* Save additional callee-saves to pt_regs, put address in reg and jump. */
-#define PTREGS_SYSCALL_ALL_REGS(x, reg)      \
-	STD_ENTRY(x);                        \
-	push_extra_callee_saves reg;         \
-	j _##x;                              \
-	STD_ENDPROC(x)
+PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
+PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
 
-PTREGS_SYSCALL_ALL_REGS(sys_fork, r0)
-PTREGS_SYSCALL_ALL_REGS(sys_vfork, r0)
-PTREGS_SYSCALL_ALL_REGS(sys_clone, r4)
-PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
+/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
+STD_ENTRY(_sys_clone)
+	push_extra_callee_saves r4
+	j sys_clone
+	STD_ENDPROC(_sys_clone)
 
 /*
  * This entrypoint is taken for the cmpxchg and atomic_update fast
@@ -1555,12 +1576,14 @@ PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
  * to be available to it on entry.  It does not modify any callee-save
  * registers (including "lr").  It does not check what PL it is being
  * called at, so you'd better not call it other than at PL0.
+ * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
+ * it ever is necessary to use more registers, be aware.
  *
  * It does not use the stack, but since it might be re-interrupted by
  * a page fault which would assume the stack was valid, it does
  * save/restore the stack pointer and zero it out to make sure it gets reset.
  * Since we always keep interrupts disabled, the hypervisor won't
- * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them
+ * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
  * (other than to advance the PC on return).
  *
  * We have to manually validate the user vs kernel address range
@@ -1766,7 +1789,7 @@ ENTRY(sys_cmpxchg)
 	/* Do slow mtspr here so the following "mf" waits less. */
 	{
 	 move   sp, r27
-	 mtspr  EX_CONTEXT_1_0, r28
+	 mtspr  SPR_EX_CONTEXT_K_0, r28
 	}
 	mf
 
@@ -1785,7 +1808,7 @@ ENTRY(sys_cmpxchg)
 	}
 	{
 	 move   sp, r27
-	 mtspr  EX_CONTEXT_1_0, r28
+	 mtspr  SPR_EX_CONTEXT_K_0, r28
 	}
 	iret
 
@@ -1813,7 +1836,7 @@ ENTRY(sys_cmpxchg)
 #endif
 
 	/* Issue the slow SPR here while the tns result is in flight. */
-	mfspr   r28, EX_CONTEXT_1_0
+	mfspr   r28, SPR_EX_CONTEXT_K_0
 
 	{
 	 addi   r28, r28, 8    /* return to the instruction after the swint1 */
@@ -1901,7 +1924,7 @@ ENTRY(sys_cmpxchg)
 .Lcmpxchg64_mismatch:
 	{
 	 move   sp, r27
-	 mtspr  EX_CONTEXT_1_0, r28
+	 mtspr  SPR_EX_CONTEXT_K_0, r28
 	}
 	mf
 	{
@@ -1982,8 +2005,13 @@ int_unalign:
 	int_hand     INT_PERF_COUNT, PERF_COUNT, \
 	             op_handle_perf_interrupt, handle_nmi
 	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
+#if CONFIG_KERNEL_PL == 2
+	dc_dispatch  INT_INTCTRL_2, INTCTRL_2
+	int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
+#else
 	int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
 	dc_dispatch  INT_INTCTRL_1, INTCTRL_1
+#endif
 	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
 	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
 	             hv_message_intr, handle_interrupt_downcall
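For reference, the new bundle sequence added to interrupt_return computes a positive errno in r1 for error returns. In C terms it behaves like this (a sketch; the function name is illustrative, not kernel code):

/* r0 is the syscall return value; r1 gets the positive errno, or 0. */
long compute_r1_errno(long r0)
{
	unsigned long r1 = 0UL - (unsigned long)r0;  /* sub r1, zero, r0 */
	unsigned long in_range = (r1 < 4096);        /* slt_u r29, r1, r29 */
	return in_range ? (long)r1 : 0;              /* mnz r1, r29, r1 */
}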
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 596c60086930..128805ef8f2c 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -26,7 +26,7 @@
 #define IS_HW_CLEARED 1
 
 /*
- * The set of interrupts we enable for raw_local_irq_enable().
+ * The set of interrupts we enable for arch_local_irq_enable().
  * This is initialized to have just a single interrupt that the kernel
  * doesn't actually use as a sentinel.  During kernel init,
  * interrupts are added as the kernel gets prepared to support them.
@@ -61,9 +61,9 @@ static DEFINE_SPINLOCK(available_irqs_lock);
 
 #if CHIP_HAS_IPI()
 /* Use SPRs to manipulate device interrupts. */
-#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask)
-#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask)
-#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask)
+#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
+#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
+#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
 #else
 /* Use HV to manipulate device interrupts. */
 #define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
@@ -89,16 +89,16 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 	 * masked by a previous interrupt.  Then, mask out the ones
 	 * we're going to handle.
 	 */
-	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1);
-	original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked;
-	__insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs);
+	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
+	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
+	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
 #else
 	/*
 	 * Hypervisor performs the equivalent of the Gx code above and
 	 * then puts the pending interrupt mask into a system save reg
 	 * for us to find.
 	 */
-	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);
+	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
 #endif
 	remaining_irqs = original_irqs;
 
@@ -208,7 +208,7 @@ static void tile_irq_chip_eoi(unsigned int irq)
 }
 
 static struct irq_chip tile_irq_chip = {
-	.typename = "tile_irq_chip",
+	.name = "tile_irq_chip",
 	.ack = tile_irq_chip_ack,
 	.eoi = tile_irq_chip_eoi,
 	.mask = tile_irq_chip_mask,
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
 	/* Enable interrupt delivery. */
 	unmask_irqs(~0UL);
 #if CHIP_HAS_IPI()
-	raw_local_irq_unmask(INT_IPI_1);
+	arch_local_irq_unmask(INT_IPI_K);
 #endif
 }
 
@@ -288,7 +288,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);
 
 		for (action = action->next; action; action = action->next)
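Throughout these hunks the _1 SPR and interrupt names become _K aliases that resolve to the configured kernel protection level (hence the new #include <arch/spr_def.h> in entry.S and head_32.S). One way such aliasing can be expressed (a sketch of the assumed token-pasting mechanism; the real definitions live in <arch/spr_def.h> and may differ):

/* Paste the configured PL into the middle of an SPR name. */
#define __concat4(a, b, c, d) a ## b ## c ## d
#define _concat4(a, b, c, d)  __concat4(a, b, c, d)

#define CONFIG_KERNEL_PL 1   /* or 2 when Linux runs at PL2 */
#define SPR_SYSTEM_SAVE_K_0 \
	_concat4(SPR_SYSTEM_SAVE_, CONFIG_KERNEL_PL, _0, )
/* With CONFIG_KERNEL_PL == 1 this expands to SPR_SYSTEM_SAVE_1_0. */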
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index ba7a265d6179..0d8b9e933487 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -182,13 +182,13 @@ static void kexec_find_and_set_command_line(struct kimage *image)
 
 		if ((entry & IND_SOURCE)) {
 			void *va =
-				kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0);
+				kmap_atomic_pfn(entry >> PAGE_SHIFT);
 			r = kexec_bn2cl(va);
 			if (r) {
 				command_line = r;
 				break;
 			}
-			kunmap_atomic(va, KM_USER0);
+			kunmap_atomic(va);
 		}
 	}
 
@@ -198,7 +198,7 @@ static void kexec_find_and_set_command_line(struct kimage *image)
 
 		hverr = hv_set_command_line(
 			(HV_VirtAddr) command_line, strlen(command_line));
-		kunmap_atomic(command_line, KM_USER0);
+		kunmap_atomic(command_line);
 	} else {
 		pr_info("%s: no command line found; making empty\n",
 			__func__);
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 6d23ed271d10..0858ee6b520f 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
 		panic("hv_register_message_state: error %d", rc);
 
 	/* Make sure downcall interrupts will be enabled. */
-	raw_local_irq_unmask(INT_INTCTRL_1);
+	arch_local_irq_unmask(INT_INTCTRL_K);
 }
 
 void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
new file mode 100644
index 000000000000..a1ee25be9ad9
--- /dev/null
+++ b/arch/tile/kernel/pci.c
@@ -0,0 +1,621 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/bootmem.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/byteorder.h>
+#include <asm/hv_driver.h>
+#include <hv/drv_pcie_rc_intf.h>
+
+
+/*
+ * Initialization flow and process
+ * -------------------------------
+ *
+ * This file contains the routines to search for PCI buses,
+ * enumerate the buses, and configure any attached devices.
+ *
+ * There are two entry points here:
+ * 1) tile_pci_init
+ *    This sets up the pci_controller structs, and opens the
+ *    FDs to the hypervisor.  This is called from setup_arch() early
+ *    in the boot process.
+ * 2) pcibios_init
+ *    This probes the PCI bus(es) for any attached hardware.  It's
+ *    called by subsys_initcall.  All of the real work is done by the
+ *    generic Linux PCI layer.
+ *
+ */
+
+/*
+ * This flag tells whether the platform is TILEmpower, which needs
+ * special configuration for the PLX switch chip.
+ */
+int __write_once tile_plx_gen1;
+
+static struct pci_controller controllers[TILE_NUM_PCIE];
+static int num_controllers;
+
+static struct pci_ops tile_cfg_ops;
+
+
+/*
+ * We don't need to worry about the alignment of resources.
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				       resource_size_t size,
+				       resource_size_t align)
+{
+	return res->start;
+}
+EXPORT_SYMBOL(pcibios_align_resource);
+
+/*
+ * Open a FD to the hypervisor PCI device.
+ *
+ * controller_id is the controller number, config type is 0 or 1 for
+ * config0 or config1 operations.
+ */
+static int __init tile_pcie_open(int controller_id, int config_type)
+{
+	char filename[32];
+	int fd;
+
+	sprintf(filename, "pcie/%d/config%d", controller_id, config_type);
+
+	fd = hv_dev_open((HV_VirtAddr)filename, 0);
+
+	return fd;
+}
+
+/*
+ * Get the IRQ numbers from the HV and set up the handlers for them.
+ */
+static int __init tile_init_irqs(int controller_id,
+				 struct pci_controller *controller)
+{
+	char filename[32];
+	int fd;
+	int ret;
+	int x;
+	struct pcie_rc_config rc_config;
+
+	sprintf(filename, "pcie/%d/ctl", controller_id);
+	fd = hv_dev_open((HV_VirtAddr)filename, 0);
+	if (fd < 0) {
+		pr_err("PCI: hv_dev_open(%s) failed\n", filename);
+		return -1;
+	}
+	ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config),
+			   sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF);
+	hv_dev_close(fd);
+	if (ret != sizeof(rc_config)) {
+		pr_err("PCI: wanted %zd bytes, got %d\n",
+		       sizeof(rc_config), ret);
+		return -1;
+	}
+	/* Record irq_base so that we can map INTx to IRQ # later. */
+	controller->irq_base = rc_config.intr;
+
+	for (x = 0; x < 4; x++)
+		tile_irq_activate(rc_config.intr + x,
+				  TILE_IRQ_HW_CLEAR);
+
+	if (rc_config.plx_gen1)
+		controller->plx_gen1 = 1;
+
+	return 0;
+}
+
+/*
+ * First initialization entry point, called from setup_arch().
+ *
+ * Find valid controllers and fill in pci_controller structs for each
+ * of them.
+ *
+ * Returns the number of controllers discovered.
+ */
+int __init tile_pci_init(void)
+{
+	int i;
+
+	pr_info("PCI: Searching for controllers...\n");
+
+	/* Do any configuration we need before using the PCIe */
+
+	for (i = 0; i < TILE_NUM_PCIE; i++) {
+		int hv_cfg_fd0 = -1;
+		int hv_cfg_fd1 = -1;
+		int hv_mem_fd = -1;
+		char name[32];
+		struct pci_controller *controller;
+
+		/*
+		 * Open the fd to the HV.  If it fails then this
+		 * device doesn't exist.
+		 */
+		hv_cfg_fd0 = tile_pcie_open(i, 0);
+		if (hv_cfg_fd0 < 0)
+			continue;
+		hv_cfg_fd1 = tile_pcie_open(i, 1);
+		if (hv_cfg_fd1 < 0) {
+			pr_err("PCI: Couldn't open config fd to HV "
+			       "for controller %d\n", i);
+			goto err_cont;
+		}
+
+		sprintf(name, "pcie/%d/mem", i);
+		hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
+		if (hv_mem_fd < 0) {
+			pr_err("PCI: Could not open mem fd to HV!\n");
+			goto err_cont;
+		}
+
+		pr_info("PCI: Found PCI controller #%d\n", i);
+
+		controller = &controllers[num_controllers];
+
+		if (tile_init_irqs(i, controller)) {
+			pr_err("PCI: Could not initialize "
+			       "IRQs, aborting.\n");
+			goto err_cont;
+		}
+
+		controller->index = num_controllers;
+		controller->hv_cfg_fd[0] = hv_cfg_fd0;
+		controller->hv_cfg_fd[1] = hv_cfg_fd1;
+		controller->hv_mem_fd = hv_mem_fd;
+		controller->first_busno = 0;
+		controller->last_busno = 0xff;
+		controller->ops = &tile_cfg_ops;
+
+		num_controllers++;
+		continue;
+
+err_cont:
+		if (hv_cfg_fd0 >= 0)
+			hv_dev_close(hv_cfg_fd0);
+		if (hv_cfg_fd1 >= 0)
+			hv_dev_close(hv_cfg_fd1);
+		if (hv_mem_fd >= 0)
+			hv_dev_close(hv_mem_fd);
+		continue;
+	}
+
+	/*
+	 * Before using the PCIe, see if we need to do any platform-specific
+	 * configuration, such as the PLX switch Gen 1 issue on TILEmpower.
+	 */
+	for (i = 0; i < num_controllers; i++) {
+		struct pci_controller *controller = &controllers[i];
+
+		if (controller->plx_gen1)
+			tile_plx_gen1 = 1;
+	}
+
+	return num_controllers;
+}
+
+/*
+ * (pin - 1) converts from the PCI standard's [1:4] convention to
+ * a normal [0:3] range.
+ */
+static int tile_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+	struct pci_controller *controller =
+		(struct pci_controller *)dev->sysdata;
+	return (pin - 1) + controller->irq_base;
+}
+
+
+static void __init fixup_read_and_payload_sizes(void)
+{
+	struct pci_dev *dev = NULL;
+	int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
+	int max_read_size = 0x2; /* Limit to 512 byte reads. */
+	u16 new_values;
+
+	/* Scan for the smallest maximum payload size. */
+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+		int pcie_caps_offset;
+		u32 devcap;
+		int max_payload;
+
+		pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
+		if (pcie_caps_offset == 0)
+			continue;
+
+		pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP,
+				      &devcap);
+		max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD;
+		if (max_payload < smallest_max_payload)
+			smallest_max_payload = max_payload;
+	}
+
+	/* Now, set the max_payload_size for all devices to that value. */
+	new_values = (max_read_size << 12) | (smallest_max_payload << 5);
+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+		int pcie_caps_offset;
+		u16 devctl;
+
+		pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
+		if (pcie_caps_offset == 0)
+			continue;
+
+		pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
+				     &devctl);
+		devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ);
+		devctl |= new_values;
+		pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
+				      devctl);
+	}
+}
+
+
280 * Second PCI initialization entry point, called by subsys_initcall.
281 *
282 * The controllers have been set up by the time we get here, by a call to
283 * tile_pci_init.
284 */
285static int __init pcibios_init(void)
286{
287 int i;
288
289 pr_info("PCI: Probing PCI hardware\n");
290
291 /*
292 * Delay a bit in case devices aren't ready. Some devices are
293 * known to require at least 20ms here, but we use a more
294 * conservative value.
295 */
296 mdelay(250);
297
298 /* Scan all of the recorded PCI controllers. */
299 for (i = 0; i < num_controllers; i++) {
300 struct pci_controller *controller = &controllers[i];
301 struct pci_bus *bus;
302
303 pr_info("PCI: initializing controller #%d\n", i);
304
305 /*
306 * This comes from the generic Linux PCI driver.
307 *
308 * It reads the PCI tree for this bus into the Linux
309 * data structures.
310 *
311 * This is inlined in linux/pci.h and calls into
312 * pci_scan_bus_parented() in probe.c.
313 */
314 bus = pci_scan_bus(0, controller->ops, controller);
315 controller->root_bus = bus;
316 controller->last_busno = bus->subordinate;
317
318 }
319
320 /* Do machine dependent PCI interrupt routing */
321 pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
322
323 /*
324 * This comes from the generic Linux PCI driver.
325 *
326 * It allocates all of the resources (I/O memory, etc)
327 * associated with the devices read in above.
328 */
329
330 pci_assign_unassigned_resources();
331
332 /* Configure the max_read_size and max_payload_size values. */
333 fixup_read_and_payload_sizes();
334
335 /* Record the I/O resources in the PCI controller structure. */
336 for (i = 0; i < num_controllers; i++) {
337 struct pci_bus *root_bus = controllers[i].root_bus;
338 struct pci_bus *next_bus;
339 struct pci_dev *dev;
340
341 list_for_each_entry(dev, &root_bus->devices, bus_list) {
342			/* Find the PCI host controller, i.e. the first bridge. */
343 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
344 (PCI_SLOT(dev->devfn) == 0)) {
345 next_bus = dev->subordinate;
346 controllers[i].mem_resources[0] =
347 *next_bus->resource[0];
348 controllers[i].mem_resources[1] =
349 *next_bus->resource[1];
350 controllers[i].mem_resources[2] =
351 *next_bus->resource[2];
352
353 break;
354 }
355 }
356
357 }
358
359 return 0;
360}
361subsys_initcall(pcibios_init);
362
363/*
364 * No bus fixups needed.
365 */
366void __devinit pcibios_fixup_bus(struct pci_bus *bus)
367{
368 /* Nothing needs to be done. */
369}
370
371/*
372 * This can be called from the generic PCI layer, but doesn't need to
373 * do anything.
374 */
375char __devinit *pcibios_setup(char *str)
376{
377 /* Nothing needs to be done. */
378 return str;
379}
380
381/*
382 * This is called from the generic Linux layer.
383 */
384void __init pcibios_update_irq(struct pci_dev *dev, int irq)
385{
386 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
387}
388
389/*
390 * Enable memory and/or I/O decoding, as appropriate, for the
391 * device described by the 'dev' struct.
392 *
393 * This is called from the generic PCI layer, and can be called
394 * for bridges or endpoints.
395 */
396int pcibios_enable_device(struct pci_dev *dev, int mask)
397{
398 u16 cmd, old_cmd;
399 u8 header_type;
400 int i;
401 struct resource *r;
402
403 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
404
405 pci_read_config_word(dev, PCI_COMMAND, &cmd);
406 old_cmd = cmd;
407 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
408 /*
409 * For bridges, we enable both memory and I/O decoding
410		 * in all cases.
411 */
412 cmd |= PCI_COMMAND_IO;
413 cmd |= PCI_COMMAND_MEMORY;
414 } else {
415 /*
416 * For endpoints, we enable memory and/or I/O decoding
417		 * only if they have a resource of that type.
418 */
419 for (i = 0; i < 6; i++) {
420 r = &dev->resource[i];
421 if (r->flags & IORESOURCE_UNSET) {
422 pr_err("PCI: Device %s not available "
423 "because of resource collisions\n",
424 pci_name(dev));
425 return -EINVAL;
426 }
427 if (r->flags & IORESOURCE_IO)
428 cmd |= PCI_COMMAND_IO;
429 if (r->flags & IORESOURCE_MEM)
430 cmd |= PCI_COMMAND_MEMORY;
431 }
432 }
433
434 /*
435 * We only write the command if it changed.
436 */
437 if (cmd != old_cmd)
438 pci_write_config_word(dev, PCI_COMMAND, cmd);
439 return 0;
440}
441
442void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
443{
444 unsigned long start = pci_resource_start(dev, bar);
445 unsigned long len = pci_resource_len(dev, bar);
446 unsigned long flags = pci_resource_flags(dev, bar);
447
448 if (!len)
449 return NULL;
450 if (max && len > max)
451 len = max;
452
453 if (!(flags & IORESOURCE_MEM)) {
454 pr_info("PCI: Trying to map invalid resource %#lx\n", flags);
455 start = 0;
456 }
457
458 return (void __iomem *)start;
459}
460EXPORT_SYMBOL(pci_iomap);
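
Driver-side usage is the ordinary pattern (sketch only; pdev is assumed). Since the returned cookie is just the bus address, the subsequent readl() is expected to be routed through the _tile_read* helpers defined later in this file, which forward the access to the hypervisor:

	/* Sketch: map BAR 0 and read its first 32-bit register. */
	void __iomem *base = pci_iomap(pdev, 0, 0);	/* 0 == whole BAR */
	u32 reg0 = base ? readl(base) : 0;		/* via hv_mem_fd */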
461
462
463/****************************************************************
464 *
465 * Tile PCI config space read/write routines
466 *
467 ****************************************************************/
468
469/*
470 * These are the normal read and write ops.
471 * They are expanded with macros from pci_bus_read_config_byte() etc.
472 *
473 * devfn is the combined PCI slot & function.
474 *
475 * offset is in bytes, from the start of config space for the
476 * specified bus & slot.
477 */
478
479static int __devinit tile_cfg_read(struct pci_bus *bus,
480 unsigned int devfn,
481 int offset,
482 int size,
483 u32 *val)
484{
485 struct pci_controller *controller = bus->sysdata;
486 int busnum = bus->number & 0xff;
487 int slot = (devfn >> 3) & 0x1f;
488 int function = devfn & 0x7;
489 u32 addr;
490 int config_mode = 1;
491
492 /*
493 * There is no bridge between the Tile and bus 0, so we
494 * use config0 to talk to bus 0.
495 *
496 * If we're talking to a bus other than zero then we
497 * must have found a bridge.
498 */
499 if (busnum == 0) {
500 /*
501 * We fake an empty slot for (busnum == 0) && (slot > 0),
502 * since there is only one slot on bus 0.
503 */
504 if (slot) {
505 *val = 0xFFFFFFFF;
506 return 0;
507 }
508 config_mode = 0;
509 }
510
511 addr = busnum << 20; /* Bus in 27:20 */
512 addr |= slot << 15; /* Slot (device) in 19:15 */
513	addr |= function << 12; /* Function in 14:12 */
514	addr |= (offset & 0xFFF); /* Byte address in 11:0 */
515
516 return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0,
517 (HV_VirtAddr)(val), size, addr);
518}
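
Both config accessors build the same hypervisor address; restated as a standalone helper (a sketch, not code from the driver):

	/* Sketch: pack bus/slot/function/offset as hv_dev_pread() and
	 * hv_dev_pwrite() expect them (bus 27:20, slot 19:15,
	 * function 14:12, byte offset 11:0). */
	static u32 example_cfg_addr(int busnum, int slot, int function,
				    int offset)
	{
		return (busnum << 20) | (slot << 15) | (function << 12) |
		       (offset & 0xFFF);
	}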
519
520
521/*
522 * See tile_cfg_read() for relevant comments.
523 * Note that "val" is the value to write, not a pointer to that value.
524 */
525static int __devinit tile_cfg_write(struct pci_bus *bus,
526 unsigned int devfn,
527 int offset,
528 int size,
529 u32 val)
530{
531 struct pci_controller *controller = bus->sysdata;
532 int busnum = bus->number & 0xff;
533 int slot = (devfn >> 3) & 0x1f;
534 int function = devfn & 0x7;
535 u32 addr;
536 int config_mode = 1;
537 HV_VirtAddr valp = (HV_VirtAddr)&val;
538
539 /*
540 * For bus 0 slot 0 we use config 0 accesses.
541 */
542 if (busnum == 0) {
543 /*
544 * We fake an empty slot for (busnum == 0) && (slot > 0),
545 * since there is only one slot on bus 0.
546 */
547 if (slot)
548 return 0;
549 config_mode = 0;
550 }
551
552 addr = busnum << 20; /* Bus in 27:20 */
553 addr |= slot << 15; /* Slot (device) in 19:15 */
554	addr |= function << 12; /* Function in 14:12 */
555	addr |= (offset & 0xFFF); /* Byte address in 11:0 */
556
557#ifdef __BIG_ENDIAN
558 /* Point to the correct part of the 32-bit "val". */
559 valp += 4 - size;
560#endif
561
562 return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0,
563 valp, size, addr);
564}
565
566
567static struct pci_ops tile_cfg_ops = {
568 .read = tile_cfg_read,
569 .write = tile_cfg_write,
570};
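
With tile_cfg_ops installed on the root bus, the generic accessors terminate here. For example (usage sketch; the bus pointer is assumed to come from the scan in pcibios_init() above), a Vendor ID read on bus 0, device 0 reaches tile_cfg_read() with size == 2 and offset == PCI_VENDOR_ID:

	u16 vendor;
	pci_bus_read_config_word(bus, PCI_DEVFN(0, 0), PCI_VENDOR_ID, &vendor);
	/* -> bus->ops->read(bus, devfn, PCI_VENDOR_ID, 2, &val), i.e.
	 * tile_cfg_read(), which issues hv_dev_pread() on hv_cfg_fd[0]. */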
571
572
573/*
574 * In the following, each PCI controller's mem_resources[1]
575 * represents its (non-prefetchable) PCI memory resource.
576 * mem_resources[0] and mem_resources[2] refer to its PCI I/O and
577 * prefetchable PCI memory resources, respectively.
578 * For more details, see pci_setup_bridge() in setup-bus.c.
579 * By comparing the target PCI memory address against the
580 * end address of controller 0, we can determine the controller
581 * that should accept the PCI memory access.
582 */
583#define TILE_READ(size, type) \
584type _tile_read##size(unsigned long addr) \
585{ \
586 type val; \
587 int idx = 0; \
588 if (addr > controllers[0].mem_resources[1].end && \
589 addr > controllers[0].mem_resources[2].end) \
590 idx = 1; \
591 if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \
592 (HV_VirtAddr)(&val), sizeof(type), addr)) \
593 pr_err("PCI: read %zd bytes at 0x%lX failed\n", \
594 sizeof(type), addr); \
595 return val; \
596} \
597EXPORT_SYMBOL(_tile_read##size)
598
599TILE_READ(b, u8);
600TILE_READ(w, u16);
601TILE_READ(l, u32);
602TILE_READ(q, u64);
603
604#define TILE_WRITE(size, type) \
605void _tile_write##size(type val, unsigned long addr) \
606{ \
607 int idx = 0; \
608 if (addr > controllers[0].mem_resources[1].end && \
609 addr > controllers[0].mem_resources[2].end) \
610 idx = 1; \
611 if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \
612 (HV_VirtAddr)(&val), sizeof(type), addr)) \
613 pr_err("PCI: write %zd bytes at 0x%lX failed\n", \
614 sizeof(type), addr); \
615} \
616EXPORT_SYMBOL(_tile_write##size)
617
618TILE_WRITE(b, u8);
619TILE_WRITE(w, u16);
620TILE_WRITE(l, u32);
621TILE_WRITE(q, u64);
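
For reference, TILE_READ(l, u32) expands to roughly the following (preprocessor output, reindented; behavior identical to the macro above):

	u32 _tile_readl(unsigned long addr)
	{
		u32 val;
		int idx = 0;
		/* Past controller 0's MMIO windows: route to controller 1. */
		if (addr > controllers[0].mem_resources[1].end &&
		    addr > controllers[0].mem_resources[2].end)
			idx = 1;
		if (hv_dev_pread(controllers[idx].hv_mem_fd, 0,
				 (HV_VirtAddr)(&val), sizeof(u32), addr))
			pr_err("PCI: read %zd bytes at 0x%lX failed\n",
			       sizeof(u32), addr);
		return val;
	}
	EXPORT_SYMBOL(_tile_readl);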
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 92ef925d2f8d..2e02c41ddf3b 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -23,7 +23,6 @@
23#include <linux/sysctl.h> 23#include <linux/sysctl.h>
24#include <linux/hardirq.h> 24#include <linux/hardirq.h>
25#include <linux/mman.h> 25#include <linux/mman.h>
26#include <linux/smp.h>
27#include <asm/pgtable.h> 26#include <asm/pgtable.h>
28#include <asm/processor.h> 27#include <asm/processor.h>
29#include <asm/sections.h> 28#include <asm/sections.h>
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index ed590ad0acdc..e90eb53173b0 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -212,11 +212,19 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
212 childregs->sp = sp; /* override with new user stack pointer */ 212 childregs->sp = sp; /* override with new user stack pointer */
213 213
214 /* 214 /*
215 * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
216 * which is passed in as arg #5 to sys_clone().
217 */
218 if (clone_flags & CLONE_SETTLS)
219 childregs->tp = regs->regs[4];
220
221 /*
215 * Copy the callee-saved registers from the passed pt_regs struct 222 * Copy the callee-saved registers from the passed pt_regs struct
216 * into the context-switch callee-saved registers area. 223 * into the context-switch callee-saved registers area.
217 * We have to restore the callee-saved registers since we may 224 * This way when we start the interrupt-return sequence, the
218 * be cloning a userspace task with userspace register state, 225 * callee-save registers will be correctly in registers, which
219 * and we won't be unwinding the same kernel frames to restore them. 226 * is how we assume the compiler leaves them as we start doing
227 * the normal return-from-interrupt path after calling C code.
220 * Zero out the C ABI save area to mark the top of the stack. 228 * Zero out the C ABI save area to mark the top of the stack.
221 */ 229 */
222 ksp = (unsigned long) childregs; 230 ksp = (unsigned long) childregs;
@@ -304,15 +312,25 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
304/* Allow user processes to access the DMA SPRs */ 312/* Allow user processes to access the DMA SPRs */
305void grant_dma_mpls(void) 313void grant_dma_mpls(void)
306{ 314{
315#if CONFIG_KERNEL_PL == 2
316 __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
317 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
318#else
307 __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1); 319 __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
308 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1); 320 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
321#endif
309} 322}
310 323
311/* Forbid user processes from accessing the DMA SPRs */ 324/* Forbid user processes from accessing the DMA SPRs */
312void restrict_dma_mpls(void) 325void restrict_dma_mpls(void)
313{ 326{
327#if CONFIG_KERNEL_PL == 2
328 __insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
329 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
330#else
314 __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1); 331 __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
315 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1); 332 __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
333#endif
316} 334}
317 335
318/* Pause the DMA engine, then save off its state registers. */ 336/* Pause the DMA engine, then save off its state registers. */
@@ -408,6 +426,15 @@ static void save_arch_state(struct thread_struct *t)
408#if CHIP_HAS_PROC_STATUS_SPR() 426#if CHIP_HAS_PROC_STATUS_SPR()
409 t->proc_status = __insn_mfspr(SPR_PROC_STATUS); 427 t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
410#endif 428#endif
429#if !CHIP_HAS_FIXED_INTVEC_BASE()
430 t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
431#endif
432#if CHIP_HAS_TILE_RTF_HWM()
433 t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
434#endif
435#if CHIP_HAS_DSTREAM_PF()
436 t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
437#endif
411} 438}
412 439
413static void restore_arch_state(const struct thread_struct *t) 440static void restore_arch_state(const struct thread_struct *t)
@@ -428,14 +455,14 @@ static void restore_arch_state(const struct thread_struct *t)
428#if CHIP_HAS_PROC_STATUS_SPR() 455#if CHIP_HAS_PROC_STATUS_SPR()
429 __insn_mtspr(SPR_PROC_STATUS, t->proc_status); 456 __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
430#endif 457#endif
458#if !CHIP_HAS_FIXED_INTVEC_BASE()
459 __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
460#endif
431#if CHIP_HAS_TILE_RTF_HWM() 461#if CHIP_HAS_TILE_RTF_HWM()
432 /* 462 __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
433 * Clear this whenever we switch back to a process in case 463#endif
434 * the previous process was monkeying with it. Even if enabled 464#if CHIP_HAS_DSTREAM_PF()
435 * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a 465 __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
436 * performance hint, so isn't worth a full save/restore.
437 */
438 __insn_mtspr(SPR_TILE_RTF_HWM, 0);
439#endif 466#endif
440} 467}
441 468
@@ -514,19 +541,15 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
514 * Switch kernel SP, PC, and callee-saved registers. 541 * Switch kernel SP, PC, and callee-saved registers.
515 * In the context of the new task, return the old task pointer 542 * In the context of the new task, return the old task pointer
516 * (i.e. the task that actually called __switch_to). 543 * (i.e. the task that actually called __switch_to).
517 * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp. 544 * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
518 */ 545 */
519 return __switch_to(prev, next, next_current_ksp0(next)); 546 return __switch_to(prev, next, next_current_ksp0(next));
520} 547}
521 548
522long _sys_fork(struct pt_regs *regs) 549/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
523{ 550SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
524 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); 551 void __user *, parent_tidptr, void __user *, child_tidptr,
525} 552 struct pt_regs *, regs)
526
527long _sys_clone(unsigned long clone_flags, unsigned long newsp,
528 void __user *parent_tidptr, void __user *child_tidptr,
529 struct pt_regs *regs)
530{ 553{
531 if (!newsp) 554 if (!newsp)
532 newsp = regs->sp; 555 newsp = regs->sp;
@@ -534,17 +557,13 @@ long _sys_clone(unsigned long clone_flags, unsigned long newsp,
534 parent_tidptr, child_tidptr); 557 parent_tidptr, child_tidptr);
535} 558}
536 559
537long _sys_vfork(struct pt_regs *regs)
538{
539 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
540 regs, 0, NULL, NULL);
541}
542
543/* 560/*
544 * sys_execve() executes a new program. 561 * sys_execve() executes a new program.
545 */ 562 */
546long _sys_execve(char __user *path, char __user *__user *argv, 563SYSCALL_DEFINE4(execve, const char __user *, path,
547 char __user *__user *envp, struct pt_regs *regs) 564 const char __user *const __user *, argv,
565 const char __user *const __user *, envp,
566 struct pt_regs *, regs)
548{ 567{
549 long error; 568 long error;
550 char *filename; 569 char *filename;
@@ -560,8 +579,10 @@ out:
560} 579}
561 580
562#ifdef CONFIG_COMPAT 581#ifdef CONFIG_COMPAT
563long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, 582long compat_sys_execve(const char __user *path,
564 compat_uptr_t __user *envp, struct pt_regs *regs) 583 const compat_uptr_t __user *argv,
584 const compat_uptr_t __user *envp,
585 struct pt_regs *regs)
565{ 586{
566 long error; 587 long error;
567 char *filename; 588 char *filename;
@@ -656,7 +677,7 @@ void show_regs(struct pt_regs *regs)
656 regs->regs[51], regs->regs[52], regs->tp); 677 regs->regs[51], regs->regs[52], regs->tp);
657 pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); 678 pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
658#else 679#else
659 for (i = 0; i < 52; i += 3) 680 for (i = 0; i < 52; i += 4)
660 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT 681 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
661 " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", 682 " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
662 i, regs->regs[i], i+1, regs->regs[i+1], 683 i, regs->regs[i], i+1, regs->regs[i+1],
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 7161bd03d2fd..e92e40527d6d 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -32,25 +32,6 @@ void user_disable_single_step(struct task_struct *child)
32} 32}
33 33
34/* 34/*
35 * This routine will put a word on the process's privileged stack.
36 */
37static void putreg(struct task_struct *task,
38 unsigned long addr, unsigned long value)
39{
40 unsigned int regno = addr / sizeof(unsigned long);
41 struct pt_regs *childregs = task_pt_regs(task);
42 childregs->regs[regno] = value;
43 childregs->flags |= PT_FLAGS_RESTORE_REGS;
44}
45
46static unsigned long getreg(struct task_struct *task, unsigned long addr)
47{
48 unsigned int regno = addr / sizeof(unsigned long);
49 struct pt_regs *childregs = task_pt_regs(task);
50 return childregs->regs[regno];
51}
52
53/*
54 * Called by kernel/ptrace.c when detaching. 35 * Called by kernel/ptrace.c when detaching.
55 */ 36 */
56void ptrace_disable(struct task_struct *child) 37void ptrace_disable(struct task_struct *child)
@@ -64,61 +45,80 @@ void ptrace_disable(struct task_struct *child)
64 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 45 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
65} 46}
66 47
67long arch_ptrace(struct task_struct *child, long request, long addr, long data) 48long arch_ptrace(struct task_struct *child, long request,
49 unsigned long addr, unsigned long data)
68{ 50{
69 unsigned long __user *datap; 51 unsigned long __user *datap = (long __user __force *)data;
70 unsigned long tmp; 52 unsigned long tmp;
71 int i;
72 long ret = -EIO; 53 long ret = -EIO;
73 54 char *childreg;
74#ifdef CONFIG_COMPAT 55 struct pt_regs copyregs;
75 if (task_thread_info(current)->status & TS_COMPAT) 56 int ex1_offset;
76 data = (u32)data;
77 if (task_thread_info(child)->status & TS_COMPAT)
78 addr = (u32)addr;
79#endif
80 datap = (unsigned long __user __force *)data;
81 57
82 switch (request) { 58 switch (request) {
83 59
84 case PTRACE_PEEKUSR: /* Read register from pt_regs. */ 60 case PTRACE_PEEKUSR: /* Read register from pt_regs. */
85 if (addr & (sizeof(data)-1)) 61 if (addr >= PTREGS_SIZE)
86 break;
87 if (addr < 0 || addr >= PTREGS_SIZE)
88 break; 62 break;
89 tmp = getreg(child, addr); /* Read register */ 63 childreg = (char *)task_pt_regs(child) + addr;
90 ret = put_user(tmp, datap); 64#ifdef CONFIG_COMPAT
65 if (is_compat_task()) {
66 if (addr & (sizeof(compat_long_t)-1))
67 break;
68 ret = put_user(*(compat_long_t *)childreg,
69 (compat_long_t __user *)datap);
70 } else
71#endif
72 {
73 if (addr & (sizeof(long)-1))
74 break;
75 ret = put_user(*(long *)childreg, datap);
76 }
91 break; 77 break;
92 78
93 case PTRACE_POKEUSR: /* Write register in pt_regs. */ 79 case PTRACE_POKEUSR: /* Write register in pt_regs. */
94 if (addr & (sizeof(data)-1)) 80 if (addr >= PTREGS_SIZE)
95 break; 81 break;
96 if (addr < 0 || addr >= PTREGS_SIZE) 82 childreg = (char *)task_pt_regs(child) + addr;
97 break; 83
98 putreg(child, addr, data); /* Write register */ 84 /* Guard against overwrites of the privilege level. */
85 ex1_offset = PTREGS_OFFSET_EX1;
86#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
87 if (is_compat_task()) /* point at low word */
88 ex1_offset += sizeof(compat_long_t);
89#endif
90 if (addr == ex1_offset)
91 data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
92
93#ifdef CONFIG_COMPAT
94 if (is_compat_task()) {
95 if (addr & (sizeof(compat_long_t)-1))
96 break;
97 *(compat_long_t *)childreg = data;
98 } else
99#endif
100 {
101 if (addr & (sizeof(long)-1))
102 break;
103 *(long *)childreg = data;
104 }
99 ret = 0; 105 ret = 0;
100 break; 106 break;
101 107
102 case PTRACE_GETREGS: /* Get all registers from the child. */ 108 case PTRACE_GETREGS: /* Get all registers from the child. */
103 if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE)) 109 if (copy_to_user(datap, task_pt_regs(child),
104 break; 110 sizeof(struct pt_regs)) == 0) {
105 for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) { 111 ret = 0;
106 ret = __put_user(getreg(child, i), datap);
107 if (ret != 0)
108 break;
109 datap++;
110 } 112 }
111 break; 113 break;
112 114
113 case PTRACE_SETREGS: /* Set all registers in the child. */ 115 case PTRACE_SETREGS: /* Set all registers in the child. */
114 if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE)) 116 if (copy_from_user(&copyregs, datap,
115 break; 117 sizeof(struct pt_regs)) == 0) {
116 for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) { 118 copyregs.ex1 =
117 ret = __get_user(tmp, datap); 119 PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
118 if (ret != 0) 120 *task_pt_regs(child) = copyregs;
119 break; 121 ret = 0;
120 putreg(child, i, tmp);
121 datap++;
122 } 122 }
123 break; 123 break;
124 124
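
The ex1 guard in the POKEUSR path above deserves a restatement: whatever a tracer writes to that slot, only the interrupt-critical-section bit is kept and the privilege level is forced back to USER_PL, so ptrace cannot escalate the child's protection level. As a sketch (PL_ICS_EX1() and EX1_ICS() are the macros used in the hunk; the wrapper itself is hypothetical):

	/* Sketch: sanitize a tracer-supplied ex1 value. */
	static unsigned long example_sanitize_ex1(unsigned long data)
	{
		/* Keep the ICS bit, force the user privilege level. */
		return PL_ICS_EX1(USER_PL, EX1_ICS(data));
	}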
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index acd86d20beba..baa3d905fee2 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -27,7 +27,7 @@
27void machine_halt(void) 27void machine_halt(void)
28{ 28{
29 warn_early_printk(); 29 warn_early_printk();
30 raw_local_irq_disable_all(); 30 arch_local_irq_disable_all();
31 smp_send_stop(); 31 smp_send_stop();
32 hv_halt(); 32 hv_halt();
33} 33}
@@ -35,14 +35,14 @@ void machine_halt(void)
35void machine_power_off(void) 35void machine_power_off(void)
36{ 36{
37 warn_early_printk(); 37 warn_early_printk();
38 raw_local_irq_disable_all(); 38 arch_local_irq_disable_all();
39 smp_send_stop(); 39 smp_send_stop();
40 hv_power_off(); 40 hv_power_off();
41} 41}
42 42
43void machine_restart(char *cmd) 43void machine_restart(char *cmd)
44{ 44{
45 raw_local_irq_disable_all(); 45 arch_local_irq_disable_all();
46 smp_send_stop(); 46 smp_send_stop();
47 hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd); 47 hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
48} 48}
diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S
index e88d6e122783..caa13101c264 100644
--- a/arch/tile/kernel/regs_32.S
+++ b/arch/tile/kernel/regs_32.S
@@ -85,7 +85,7 @@ STD_ENTRY_SECTION(__switch_to, .sched.text)
85 { 85 {
86 /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */ 86 /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
87 move sp, r13 87 move sp, r13
88 mtspr SYSTEM_SAVE_1_0, r2 88 mtspr SPR_SYSTEM_SAVE_K_0, r2
89 } 89 }
90 FOR_EACH_CALLEE_SAVED_REG(LOAD_REG) 90 FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
91.L__switch_to_pc: 91.L__switch_to_pc:
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 4dd21c1e6d5e..f18573643ed1 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -30,8 +30,6 @@
30#include <linux/timex.h> 30#include <linux/timex.h>
31#include <asm/setup.h> 31#include <asm/setup.h>
32#include <asm/sections.h> 32#include <asm/sections.h>
33#include <asm/sections.h>
34#include <asm/cacheflush.h>
35#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
36#include <asm/pgalloc.h> 34#include <asm/pgalloc.h>
37#include <asm/mmu_context.h> 35#include <asm/mmu_context.h>
@@ -187,11 +185,11 @@ early_param("vmalloc", parse_vmalloc);
187 185
188#ifdef CONFIG_HIGHMEM 186#ifdef CONFIG_HIGHMEM
189/* 187/*
190 * Determine for each controller where its lowmem is mapped and how 188 * Determine for each controller where its lowmem is mapped and how much of
191 * much of it is mapped there. On controller zero, the first few 189 * it is mapped there. On controller zero, the first few megabytes are
192 * megabytes are mapped at 0xfd000000 as code, so in principle we 190 * already mapped in as code at MEM_SV_INTRPT, so in principle we could
193 * could start our data mappings higher up, but for now we don't 191 * start our data mappings higher up, but for now we don't bother, to avoid
194 * bother, to avoid additional confusion. 192 * additional confusion.
195 * 193 *
196 * One question is whether, on systems with more than 768 Mb and 194 * One question is whether, on systems with more than 768 Mb and
197 * controllers of different sizes, to map in a proportionate amount of 195 * controllers of different sizes, to map in a proportionate amount of
@@ -311,7 +309,7 @@ static void __init setup_memory(void)
311#endif 309#endif
312 310
313 /* We are using a char to hold the cpu_2_node[] mapping */ 311 /* We are using a char to hold the cpu_2_node[] mapping */
314 BUG_ON(MAX_NUMNODES > 127); 312 BUILD_BUG_ON(MAX_NUMNODES > 127);
315 313
316 /* Discover the ranges of memory available to us */ 314 /* Discover the ranges of memory available to us */
317 for (i = 0; ; ++i) { 315 for (i = 0; ; ++i) {
@@ -842,7 +840,7 @@ static int __init topology_init(void)
842 for_each_online_node(i) 840 for_each_online_node(i)
843 register_one_node(i); 841 register_one_node(i);
844 842
845 for_each_present_cpu(i) 843 for (i = 0; i < smp_height * smp_width; ++i)
846 register_cpu(&cpu_devices[i], i); 844 register_cpu(&cpu_devices[i], i);
847 845
848 return 0; 846 return 0;
@@ -870,11 +868,14 @@ void __cpuinit setup_cpu(int boot)
870 868
871 /* Allow asynchronous TLB interrupts. */ 869 /* Allow asynchronous TLB interrupts. */
872#if CHIP_HAS_TILE_DMA() 870#if CHIP_HAS_TILE_DMA()
873 raw_local_irq_unmask(INT_DMATLB_MISS); 871 arch_local_irq_unmask(INT_DMATLB_MISS);
874 raw_local_irq_unmask(INT_DMATLB_ACCESS); 872 arch_local_irq_unmask(INT_DMATLB_ACCESS);
875#endif 873#endif
876#if CHIP_HAS_SN_PROC() 874#if CHIP_HAS_SN_PROC()
877 raw_local_irq_unmask(INT_SNITLB_MISS); 875 arch_local_irq_unmask(INT_SNITLB_MISS);
876#endif
877#ifdef __tilegx__
878 arch_local_irq_unmask(INT_SINGLE_STEP_K);
878#endif 879#endif
879 880
880 /* 881 /*
@@ -893,11 +894,12 @@ void __cpuinit setup_cpu(int boot)
893#endif 894#endif
894 895
895 /* 896 /*
896 * Set the MPL for interrupt control 0 to user level. 897 * Set the MPL for interrupt control 0 & 1 to the corresponding
897 * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs, 898 * values. This includes access to the SYSTEM_SAVE and EX_CONTEXT
898 * as well as the PL 0 interrupt mask. 899 * SPRs, as well as the interrupt mask.
899 */ 900 */
900 __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1); 901 __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
902 __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
901 903
902 /* Initialize IRQ support for this cpu. */ 904 /* Initialize IRQ support for this cpu. */
903 setup_irq_regs(); 905 setup_irq_regs();
@@ -953,7 +955,7 @@ static void __init load_hv_initrd(void)
953 if (rc != stat.size) { 955 if (rc != stat.size) {
954 pr_err("Error reading %d bytes from hvfs file '%s': %d\n", 956 pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
955 stat.size, initramfs_file, rc); 957 stat.size, initramfs_file, rc);
956 free_bootmem((unsigned long) initrd, stat.size); 958 free_initrd_mem((unsigned long) initrd, stat.size);
957 return; 959 return;
958 } 960 }
959 initrd_start = (unsigned long) initrd; 961 initrd_start = (unsigned long) initrd;
@@ -962,7 +964,7 @@ static void __init load_hv_initrd(void)
962 964
963void __init free_initrd_mem(unsigned long begin, unsigned long end) 965void __init free_initrd_mem(unsigned long begin, unsigned long end)
964{ 966{
965 free_bootmem(begin, end - begin); 967 free_bootmem(__pa(begin), end - begin);
966} 968}
967 969
968static void __init validate_hv(void) 970static void __init validate_hv(void)
@@ -1033,7 +1035,7 @@ static void __init validate_va(void)
1033 * In addition, make sure we CAN'T use the end of memory, since 1035 * In addition, make sure we CAN'T use the end of memory, since
1034 * we use the last chunk of each pgd for the pgd_list. 1036 * we use the last chunk of each pgd for the pgd_list.
1035 */ 1037 */
1036 int i, fc_fd_ok = 0; 1038 int i, user_kernel_ok = 0;
1037 unsigned long max_va = 0; 1039 unsigned long max_va = 0;
1038 unsigned long list_va = 1040 unsigned long list_va =
1039 ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT); 1041 ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
@@ -1044,13 +1046,13 @@ static void __init validate_va(void)
1044 break; 1046 break;
1045 if (range.start <= MEM_USER_INTRPT && 1047 if (range.start <= MEM_USER_INTRPT &&
1046 range.start + range.size >= MEM_HV_INTRPT) 1048 range.start + range.size >= MEM_HV_INTRPT)
1047 fc_fd_ok = 1; 1049 user_kernel_ok = 1;
1048 if (range.start == 0) 1050 if (range.start == 0)
1049 max_va = range.size; 1051 max_va = range.size;
1050 BUG_ON(range.start + range.size > list_va); 1052 BUG_ON(range.start + range.size > list_va);
1051 } 1053 }
1052 if (!fc_fd_ok) 1054 if (!user_kernel_ok)
1053 early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n"); 1055 early_panic("Hypervisor not configured for user/kernel VAs\n");
1054 if (max_va == 0) 1056 if (max_va == 0)
1055 early_panic("Hypervisor not configured for low VAs\n"); 1057 early_panic("Hypervisor not configured for low VAs\n");
1056 if (max_va < KERNEL_HIGH_VADDR) 1058 if (max_va < KERNEL_HIGH_VADDR)
@@ -1334,6 +1336,10 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
1334 pte_t *pte; 1336 pte_t *pte;
1335 1337
1336 BUG_ON(pgd_addr_invalid(addr)); 1338 BUG_ON(pgd_addr_invalid(addr));
1339 if (addr < VMALLOC_START || addr >= VMALLOC_END)
1340 panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
1341 " try increasing CONFIG_VMALLOC_RESERVE\n",
1342 addr, VMALLOC_START, VMALLOC_END);
1337 1343
1338 pgd = swapper_pg_dir + pgd_index(addr); 1344 pgd = swapper_pg_dir + pgd_index(addr);
1339 pud = pud_offset(pgd, addr); 1345 pud = pud_offset(pgd, addr);
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 45b66a3c991f..1260321155f1 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -16,7 +16,6 @@
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/signal.h> 20#include <linux/signal.h>
22#include <linux/errno.h> 21#include <linux/errno.h>
@@ -41,8 +40,8 @@
41#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 40#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
42 41
43 42
44long _sys_sigaltstack(const stack_t __user *uss, 43SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
45 stack_t __user *uoss, struct pt_regs *regs) 44 stack_t __user *, uoss, struct pt_regs *, regs)
46{ 45{
47 return do_sigaltstack(uss, uoss, regs->sp); 46 return do_sigaltstack(uss, uoss, regs->sp);
48} 47}
@@ -53,7 +52,7 @@ long _sys_sigaltstack(const stack_t __user *uss,
53 */ 52 */
54 53
55int restore_sigcontext(struct pt_regs *regs, 54int restore_sigcontext(struct pt_regs *regs,
56 struct sigcontext __user *sc, long *pr0) 55 struct sigcontext __user *sc)
57{ 56{
58 int err = 0; 57 int err = 0;
59 int i; 58 int i;
@@ -61,23 +60,30 @@ int restore_sigcontext(struct pt_regs *regs,
61 /* Always make any pending restarted system calls return -EINTR */ 60 /* Always make any pending restarted system calls return -EINTR */
62 current_thread_info()->restart_block.fn = do_no_restart_syscall; 61 current_thread_info()->restart_block.fn = do_no_restart_syscall;
63 62
63 /*
64 * Enforce that sigcontext is like pt_regs, and doesn't mess
65 * up our stack alignment rules.
66 */
67 BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
68 BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
69
64 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 70 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
65 err |= __get_user(((long *)regs)[i], 71 err |= __get_user(regs->regs[i], &sc->gregs[i]);
66 &((long __user *)(&sc->regs))[i]); 72
73 /* Ensure that the PL is always set to USER_PL. */
74 regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
67 75
68 regs->faultnum = INT_SWINT_1_SIGRETURN; 76 regs->faultnum = INT_SWINT_1_SIGRETURN;
69 77
70 err |= __get_user(*pr0, &sc->regs.regs[0]);
71 return err; 78 return err;
72} 79}
73 80
74/* sigreturn() returns long since it restores r0 in the interrupted code. */ 81/* The assembly shim for this function arranges to ignore the return value. */
75long _sys_rt_sigreturn(struct pt_regs *regs) 82SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
76{ 83{
77 struct rt_sigframe __user *frame = 84 struct rt_sigframe __user *frame =
78 (struct rt_sigframe __user *)(regs->sp); 85 (struct rt_sigframe __user *)(regs->sp);
79 sigset_t set; 86 sigset_t set;
80 long r0;
81 87
82 if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 88 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
83 goto badframe; 89 goto badframe;
@@ -90,13 +96,13 @@ long _sys_rt_sigreturn(struct pt_regs *regs)
90 recalc_sigpending(); 96 recalc_sigpending();
91 spin_unlock_irq(&current->sighand->siglock); 97 spin_unlock_irq(&current->sighand->siglock);
92 98
93 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) 99 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
94 goto badframe; 100 goto badframe;
95 101
96 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) 102 if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
97 goto badframe; 103 goto badframe;
98 104
99 return r0; 105 return 0;
100 106
101badframe: 107badframe:
102 force_sig(SIGSEGV, current); 108 force_sig(SIGSEGV, current);
@@ -112,8 +118,7 @@ int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
112 int i, err = 0; 118 int i, err = 0;
113 119
114 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 120 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
115 err |= __put_user(((long *)regs)[i], 121 err |= __put_user(regs->regs[i], &sc->gregs[i]);
116 &((long __user *)(&sc->regs))[i]);
117 122
118 return err; 123 return err;
119} 124}
@@ -203,19 +208,17 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
203 * Set up registers for signal handler. 208 * Set up registers for signal handler.
204 * Registers that we don't modify keep the value they had from 209 * Registers that we don't modify keep the value they had from
205 * user-space at the time we took the signal. 210 * user-space at the time we took the signal.
211 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
212 * since some things rely on this (e.g. glibc's debug/segfault.c).
206 */ 213 */
207 regs->pc = (unsigned long) ka->sa.sa_handler; 214 regs->pc = (unsigned long) ka->sa.sa_handler;
208 regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ 215 regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
209 regs->sp = (unsigned long) frame; 216 regs->sp = (unsigned long) frame;
210 regs->lr = restorer; 217 regs->lr = restorer;
211 regs->regs[0] = (unsigned long) usig; 218 regs->regs[0] = (unsigned long) usig;
212 219 regs->regs[1] = (unsigned long) &frame->info;
213 if (ka->sa.sa_flags & SA_SIGINFO) { 220 regs->regs[2] = (unsigned long) &frame->uc;
214 /* Need extra arguments, so mark to restore caller-saves. */ 221 regs->flags |= PT_FLAGS_CALLER_SAVES;
215 regs->regs[1] = (unsigned long) &frame->info;
216 regs->regs[2] = (unsigned long) &frame->uc;
217 regs->flags |= PT_FLAGS_CALLER_SAVES;
218 }
219 222
220 /* 223 /*
221 * Notify any tracer that was single-stepping it. 224 * Notify any tracer that was single-stepping it.
@@ -327,7 +330,7 @@ void do_signal(struct pt_regs *regs)
327 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 330 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
328 } 331 }
329 332
330 return; 333 goto done;
331 } 334 }
332 335
333 /* Did we come from a system call? */ 336 /* Did we come from a system call? */
@@ -355,4 +358,8 @@ void do_signal(struct pt_regs *regs)
355 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 358 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
356 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 359 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
357 } 360 }
361
362done:
363 /* Avoid double syscall restart if there are nested signals. */
364 regs->faultnum = INT_SWINT_1_SIGRETURN;
358} 365}
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 5ec4b9c651f2..1eb3b39e36c7 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -15,7 +15,7 @@
15 * Derived from iLib's single-stepping code. 15 * Derived from iLib's single-stepping code.
16 */ 16 */
17 17
18#ifndef __tilegx__ /* No support for single-step yet. */ 18#ifndef __tilegx__ /* Hardware support for single step unavailable. */
19 19
20/* These functions are only used on the TILE platform */ 20/* These functions are only used on the TILE platform */
21#include <linux/slab.h> 21#include <linux/slab.h>
@@ -660,4 +660,75 @@ void single_step_once(struct pt_regs *regs)
660 regs->pc += 8; 660 regs->pc += 8;
661} 661}
662 662
663#else
664#include <linux/smp.h>
665#include <linux/ptrace.h>
666#include <arch/spr_def.h>
667
668static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
669
670
671/*
672 * Called directly on the occasion of an interrupt.
673 *
674 * If the process doesn't have single step set, then we use this as an
675 * opportunity to turn single step off.
676 *
677 * It has been mentioned that we could conditionally turn off single stepping
678 * on each entry into the kernel and rely on single_step_once to turn it
679 * on for the processes that matter (as we already do), but this
680 * implementation is somewhat more efficient in that we muck with registers
681 * once on a bum interrupt rather than on every entry into the kernel.
682 *
683 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
684 * so we have to run through this process again before we can say that an
685 * instruction has executed.
686 *
687 * swint will set CANCELED, but it's a legitimate instruction. Fortunately
688 * it changes the PC. If it hasn't changed, then we know that the interrupt
689 * wasn't generated by swint and we'll need to run this process again before
690 * we can say an instruction has executed.
691 *
692 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
693 * on with our lives.
694 */
695
696void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
697{
698 unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
699 struct thread_info *info = (void *)current_thread_info();
700 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
701 unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
702
703 if (is_single_step == 0) {
704 __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
705
706 } else if ((*ss_pc != regs->pc) ||
707 (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
708
709 ptrace_notify(SIGTRAP);
710 control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
711 control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
712 __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
713 }
714}
715
716
717/*
718 * Called from need_singlestep. Set up the control registers and the enable
719 * register, then return back.
720 */
721
722void single_step_once(struct pt_regs *regs)
723{
724 unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
725 unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
726
727 *ss_pc = regs->pc;
728 control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
729 control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
730 __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
731 __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
732}
733
663#endif /* !__tilegx__ */ 734#endif /* !__tilegx__ */
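
The test in gx_singlestep_handle() condenses to a two-input predicate: a step counts as executed unless CANCELED is set while the PC is unchanged, since swint also sets CANCELED but always moves the PC. A sketch of just that predicate (helper name hypothetical):

	static int example_step_completed(int canceled, int pc_changed)
	{
		/* CANCELED with an unmoved PC means a stray interrupt
		 * cancelled the step: re-arm, don't signal. */
		return !canceled || pc_changed;
	}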
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 1cb5ec79de04..9575b37a8b75 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -115,7 +115,7 @@ static void smp_start_cpu_interrupt(void)
115static void smp_stop_cpu_interrupt(void) 115static void smp_stop_cpu_interrupt(void)
116{ 116{
117 set_cpu_online(smp_processor_id(), 0); 117 set_cpu_online(smp_processor_id(), 0);
118 raw_local_irq_disable_all(); 118 arch_local_irq_disable_all();
119 for (;;) 119 for (;;)
120 asm("nap"); 120 asm("nap");
121} 121}
@@ -212,7 +212,7 @@ void __init ipi_init(void)
212 212
213 tile.x = cpu_x(cpu); 213 tile.x = cpu_x(cpu);
214 tile.y = cpu_y(cpu); 214 tile.y = cpu_y(cpu);
215 if (hv_get_ipi_pte(tile, 1, &pte) != 0) 215 if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
216 panic("Failed to initialize IPI for cpu %d\n", cpu); 216 panic("Failed to initialize IPI for cpu %d\n", cpu);
217 217
218 offset = hv_pte_get_pfn(pte) << PAGE_SHIFT; 218 offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 74d62d098edf..b949edcec200 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -18,7 +18,6 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/kernel_stat.h> 20#include <linux/kernel_stat.h>
21#include <linux/smp_lock.h>
22#include <linux/bootmem.h> 21#include <linux/bootmem.h>
23#include <linux/notifier.h> 22#include <linux/notifier.h>
24#include <linux/cpu.h> 23#include <linux/cpu.h>
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index b6268d3ae869..0d54106be3d6 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -30,6 +30,10 @@
30#include <arch/abi.h> 30#include <arch/abi.h>
31#include <arch/interrupts.h> 31#include <arch/interrupts.h>
32 32
33#define KBT_ONGOING 0 /* Backtrace still ongoing */
34#define KBT_DONE 1 /* Backtrace cleanly completed */
35#define KBT_RUNNING 2 /* Can't run backtrace on a running task */
36#define KBT_LOOP 3 /* Backtrace entered a loop */
33 37
34/* Is address on the specified kernel stack? */ 38/* Is address on the specified kernel stack? */
35static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp) 39static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
@@ -108,7 +112,6 @@ static bool read_memory_func(void *result, VirtualAddress address,
108/* Return a pt_regs pointer for a valid fault handler frame */ 112/* Return a pt_regs pointer for a valid fault handler frame */
109static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) 113static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
110{ 114{
111#ifndef __tilegx__
112 const char *fault = NULL; /* happy compiler */ 115 const char *fault = NULL; /* happy compiler */
113 char fault_buf[64]; 116 char fault_buf[64];
114 VirtualAddress sp = kbt->it.sp; 117 VirtualAddress sp = kbt->it.sp;
@@ -146,7 +149,6 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
146 } 149 }
147 if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) 150 if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
148 return p; 151 return p;
149#endif
150 return NULL; 152 return NULL;
151} 153}
152 154
@@ -177,7 +179,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
177 pr_err(" <received signal %d>\n", 179 pr_err(" <received signal %d>\n",
178 frame->info.si_signo); 180 frame->info.si_signo);
179 } 181 }
180 return &frame->uc.uc_mcontext.regs; 182 return (struct pt_regs *)&frame->uc.uc_mcontext;
181 } 183 }
182 return NULL; 184 return NULL;
183} 185}
@@ -209,11 +211,11 @@ static int KBacktraceIterator_next_item_inclusive(
209 for (;;) { 211 for (;;) {
210 do { 212 do {
211 if (!KBacktraceIterator_is_sigreturn(kbt)) 213 if (!KBacktraceIterator_is_sigreturn(kbt))
212 return 1; 214 return KBT_ONGOING;
213 } while (backtrace_next(&kbt->it)); 215 } while (backtrace_next(&kbt->it));
214 216
215 if (!KBacktraceIterator_restart(kbt)) 217 if (!KBacktraceIterator_restart(kbt))
216 return 0; 218 return KBT_DONE;
217 } 219 }
218} 220}
219 221
@@ -266,7 +268,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
266 kbt->pgtable = NULL; 268 kbt->pgtable = NULL;
267 kbt->verbose = 0; /* override in caller if desired */ 269 kbt->verbose = 0; /* override in caller if desired */
268 kbt->profile = 0; /* override in caller if desired */ 270 kbt->profile = 0; /* override in caller if desired */
269 kbt->end = 0; 271 kbt->end = KBT_ONGOING;
270 kbt->new_context = 0; 272 kbt->new_context = 0;
271 if (is_current) { 273 if (is_current) {
272 HV_PhysAddr pgdir_pa = hv_inquire_context().page_table; 274 HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
@@ -292,7 +294,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
292 if (regs == NULL) { 294 if (regs == NULL) {
293 if (is_current || t->state == TASK_RUNNING) { 295 if (is_current || t->state == TASK_RUNNING) {
294 /* Can't do this; we need registers */ 296 /* Can't do this; we need registers */
295 kbt->end = 1; 297 kbt->end = KBT_RUNNING;
296 return; 298 return;
297 } 299 }
298 pc = get_switch_to_pc(); 300 pc = get_switch_to_pc();
@@ -307,26 +309,29 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
307 } 309 }
308 310
309 backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52); 311 backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
310 kbt->end = !KBacktraceIterator_next_item_inclusive(kbt); 312 kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
311} 313}
312EXPORT_SYMBOL(KBacktraceIterator_init); 314EXPORT_SYMBOL(KBacktraceIterator_init);
313 315
314int KBacktraceIterator_end(struct KBacktraceIterator *kbt) 316int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
315{ 317{
316 return kbt->end; 318 return kbt->end != KBT_ONGOING;
317} 319}
318EXPORT_SYMBOL(KBacktraceIterator_end); 320EXPORT_SYMBOL(KBacktraceIterator_end);
319 321
320void KBacktraceIterator_next(struct KBacktraceIterator *kbt) 322void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
321{ 323{
324 VirtualAddress old_pc = kbt->it.pc, old_sp = kbt->it.sp;
322 kbt->new_context = 0; 325 kbt->new_context = 0;
323 if (!backtrace_next(&kbt->it) && 326 if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
324 !KBacktraceIterator_restart(kbt)) { 327 kbt->end = KBT_DONE;
325 kbt->end = 1; 328 return;
326 return; 329 }
327 } 330 kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
328 331 if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
329 kbt->end = !KBacktraceIterator_next_item_inclusive(kbt); 332 /* Trapped in a loop; give up. */
333 kbt->end = KBT_LOOP;
334 }
330} 335}
331EXPORT_SYMBOL(KBacktraceIterator_next); 336EXPORT_SYMBOL(KBacktraceIterator_next);
332 337
@@ -351,12 +356,6 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
351 kbt->task->pid, kbt->task->tgid, kbt->task->comm, 356 kbt->task->pid, kbt->task->tgid, kbt->task->comm,
352 smp_processor_id(), get_cycles()); 357 smp_processor_id(), get_cycles());
353 } 358 }
354#ifdef __tilegx__
355 if (kbt->is_current) {
356 __insn_mtspr(SPR_SIM_CONTROL,
357 SIM_DUMP_SPR_ARG(SIM_DUMP_BACKTRACE));
358 }
359#endif
360 kbt->verbose = 1; 359 kbt->verbose = 1;
361 i = 0; 360 i = 0;
362 for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) { 361 for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
@@ -395,6 +394,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
395 break; 394 break;
396 } 395 }
397 } 396 }
397 if (kbt->end == KBT_LOOP)
398 pr_err("Stack dump stopped; next frame identical to this one\n");
398 if (headers) 399 if (headers)
399 pr_err("Stack dump complete\n"); 400 pr_err("Stack dump complete\n");
400} 401}
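
The iteration contract for callers such as tile_show_stack() is unchanged; only the terminal state is richer. A usage sketch (the init signature follows the hunks above; KBT_* are the states introduced at the top of this diff):

	struct KBacktraceIterator kbt;

	KBacktraceIterator_init(&kbt, task, NULL);	/* NULL: no regs */
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
		;	/* visit kbt.it.pc / kbt.it.sp for each frame */
	/* kbt.end now records KBT_DONE, KBT_RUNNING or KBT_LOOP, not 0/1. */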
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index f0f87eab8c39..e2187d24a9b4 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -20,7 +20,6 @@
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/smp.h> 22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/syscalls.h> 23#include <linux/syscalls.h>
25#include <linux/mman.h> 24#include <linux/mman.h>
26#include <linux/file.h> 25#include <linux/file.h>
@@ -110,6 +109,15 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
110#define sys_sync_file_range sys_sync_file_range2 109#define sys_sync_file_range sys_sync_file_range2
111#endif 110#endif
112 111
112/* Call the trampolines to manage pt_regs where necessary. */
113#define sys_execve _sys_execve
114#define sys_sigaltstack _sys_sigaltstack
115#define sys_rt_sigreturn _sys_rt_sigreturn
116#define sys_clone _sys_clone
117#ifndef __tilegx__
118#define sys_cmpxchg_badaddr _sys_cmpxchg_badaddr
119#endif
120
113/* 121/*
114 * Note that we can't include <linux/unistd.h> here since the header 122 * Note that we can't include <linux/unistd.h> here since the header
115 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well. 123 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index b9ab25a889b5..f2e156e44692 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -36,16 +36,6 @@
36/* How many cycles per second we are running at. */ 36/* How many cycles per second we are running at. */
37static cycles_t cycles_per_sec __write_once; 37static cycles_t cycles_per_sec __write_once;
38 38
39/*
40 * We set up shift and multiply values with a minsec of five seconds,
41 * since our timer counter counts down 31 bits at a frequency of
42 * no less than 500 MHz. See @minsec for clocks_calc_mult_shift().
43 * We could use a different value for the 64-bit free-running
44 * cycle counter, but we use the same one for consistency, and since
45 * we will be reasonably precise with this value anyway.
46 */
47#define TILE_MINSEC 5
48
49cycles_t get_clock_rate(void) 39cycles_t get_clock_rate(void)
50{ 40{
51 return cycles_per_sec; 41 return cycles_per_sec;
@@ -68,6 +58,14 @@ cycles_t get_cycles(void)
68} 58}
69#endif 59#endif
70 60
61/*
62 * We use a relatively small shift value so that sched_clock()
63 * won't wrap around very often.
64 */
65#define SCHED_CLOCK_SHIFT 10
66
67static unsigned long sched_clock_mult __write_once;
68
71static cycles_t clocksource_get_cycles(struct clocksource *cs) 69static cycles_t clocksource_get_cycles(struct clocksource *cs)
72{ 70{
73 return get_cycles(); 71 return get_cycles();
@@ -78,6 +76,7 @@ static struct clocksource cycle_counter_cs = {
78 .rating = 300, 76 .rating = 300,
79 .read = clocksource_get_cycles, 77 .read = clocksource_get_cycles,
80 .mask = CLOCKSOURCE_MASK(64), 78 .mask = CLOCKSOURCE_MASK(64),
79 .shift = 22, /* typical value, e.g. x86 tsc uses this */
81 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 80 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
82}; 81};
83 82
@@ -88,8 +87,10 @@ static struct clocksource cycle_counter_cs = {
88void __init setup_clock(void) 87void __init setup_clock(void)
89{ 88{
90 cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED); 89 cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED);
91 clocksource_calc_mult_shift(&cycle_counter_cs, cycles_per_sec, 90 sched_clock_mult =
92 TILE_MINSEC); 91 clocksource_hz2mult(cycles_per_sec, SCHED_CLOCK_SHIFT);
92 cycle_counter_cs.mult =
93 clocksource_hz2mult(cycles_per_sec, cycle_counter_cs.shift);
93} 94}
94 95
95void __init calibrate_delay(void) 96void __init calibrate_delay(void)
@@ -117,16 +118,21 @@ void __init time_init(void)
117 * counter, plus bit 31, which signifies that the counter has wrapped 118 * counter, plus bit 31, which signifies that the counter has wrapped
118 * from zero to (2**31) - 1. The INT_TILE_TIMER interrupt will be 119 * from zero to (2**31) - 1. The INT_TILE_TIMER interrupt will be
119 * raised as long as bit 31 is set. 120 * raised as long as bit 31 is set.
121 *
122 * The TILE_MINSEC value represents the largest range of real-time
123 * we can possibly cover with the timer, based on MAX_TICK combined
124 * with the slowest reasonable clock rate we might run at.
120 */ 125 */
121 126
122#define MAX_TICK 0x7fffffff /* we have 31 bits of countdown timer */ 127#define MAX_TICK 0x7fffffff /* we have 31 bits of countdown timer */
128#define TILE_MINSEC 5 /* timer covers no more than 5 seconds */
123 129
124static int tile_timer_set_next_event(unsigned long ticks, 130static int tile_timer_set_next_event(unsigned long ticks,
125 struct clock_event_device *evt) 131 struct clock_event_device *evt)
126{ 132{
127 BUG_ON(ticks > MAX_TICK); 133 BUG_ON(ticks > MAX_TICK);
128 __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks); 134 __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
129 raw_local_irq_unmask_now(INT_TILE_TIMER); 135 arch_local_irq_unmask_now(INT_TILE_TIMER);
130 return 0; 136 return 0;
131} 137}
132 138
@@ -137,7 +143,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
137static void tile_timer_set_mode(enum clock_event_mode mode, 143static void tile_timer_set_mode(enum clock_event_mode mode,
138 struct clock_event_device *evt) 144 struct clock_event_device *evt)
139{ 145{
140 raw_local_irq_mask_now(INT_TILE_TIMER); 146 arch_local_irq_mask_now(INT_TILE_TIMER);
141} 147}
142 148
143/* 149/*
@@ -166,7 +172,7 @@ void __cpuinit setup_tile_timer(void)
166 evt->cpumask = cpumask_of(smp_processor_id()); 172 evt->cpumask = cpumask_of(smp_processor_id());
167 173
168 /* Start out with timer not firing. */ 174 /* Start out with timer not firing. */
169 raw_local_irq_mask_now(INT_TILE_TIMER); 175 arch_local_irq_mask_now(INT_TILE_TIMER);
170 176
171 /* Register tile timer. */ 177 /* Register tile timer. */
172 clockevents_register_device(evt); 178 clockevents_register_device(evt);
@@ -182,7 +188,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
182 * Mask the timer interrupt here, since we are a oneshot timer 188 * Mask the timer interrupt here, since we are a oneshot timer
183 * and there are now by definition no events pending. 189 * and there are now by definition no events pending.
184 */ 190 */
185 raw_local_irq_mask(INT_TILE_TIMER); 191 arch_local_irq_mask(INT_TILE_TIMER);
186 192
187 /* Track time spent here in an interrupt context */ 193 /* Track time spent here in an interrupt context */
188 irq_enter(); 194 irq_enter();
@@ -211,8 +217,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
211unsigned long long sched_clock(void) 217unsigned long long sched_clock(void)
212{ 218{
213 return clocksource_cyc2ns(get_cycles(), 219 return clocksource_cyc2ns(get_cycles(),
214 cycle_counter_cs.mult, 220 sched_clock_mult, SCHED_CLOCK_SHIFT);
215 cycle_counter_cs.shift);
216} 221}
217 222
218int setup_profiling_timer(unsigned int multiplier) 223int setup_profiling_timer(unsigned int multiplier)
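
The mult/shift pair above is plain fixed-point arithmetic: clocksource_hz2mult(hz, shift) is approximately ((u64)NSEC_PER_SEC << shift) / hz, and clocksource_cyc2ns() computes (cyc * mult) >> shift. At an assumed 1 GHz clock with SCHED_CLOCK_SHIFT == 10, mult works out to 1024, so sched_clock() returns exactly one nanosecond per cycle. A sketch of the conversion (clock rate assumed for illustration):

	/* Sketch: the fixed-point cycles-to-ns conversion sched_clock() uses. */
	static unsigned long long example_cyc2ns(unsigned long long cyc,
						 unsigned long hz)
	{
		unsigned long mult = (1000000000ULL << 10) / hz; /* 1024 @1GHz */
		return (cyc * mult) >> 10;
	}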
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 3870abbeeaa2..5474fc2e77e8 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -128,7 +128,9 @@ static int special_ill(bundle_bits bundle, int *sigp, int *codep)
128#ifdef __tilegx__ 128#ifdef __tilegx__
129 if ((bundle & TILEGX_BUNDLE_MODE_MASK) != 0) 129 if ((bundle & TILEGX_BUNDLE_MODE_MASK) != 0)
130 return 0; 130 return 0;
131 if (get_Opcode_X1(bundle) != UNARY_OPCODE_X1) 131 if (get_Opcode_X1(bundle) != RRR_0_OPCODE_X1)
132 return 0;
133 if (get_RRROpcodeExtension_X1(bundle) != UNARY_RRR_0_OPCODE_X1)
132 return 0; 134 return 0;
133 if (get_UnaryOpcodeExtension_X1(bundle) != ILL_UNARY_OPCODE_X1) 135 if (get_UnaryOpcodeExtension_X1(bundle) != ILL_UNARY_OPCODE_X1)
134 return 0; 136 return 0;
@@ -258,7 +260,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
258 address = regs->pc; 260 address = regs->pc;
259 break; 261 break;
260 case INT_UNALIGN_DATA: 262 case INT_UNALIGN_DATA:
261#ifndef __tilegx__ /* FIXME: GX: no single-step yet */ 263#ifndef __tilegx__ /* Emulated support for single step debugging */
262 if (unaligned_fixup >= 0) { 264 if (unaligned_fixup >= 0) {
263 struct single_step_state *state = 265 struct single_step_state *state =
264 current_thread_info()->step_state; 266 current_thread_info()->step_state;
@@ -276,7 +278,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
276 case INT_DOUBLE_FAULT: 278 case INT_DOUBLE_FAULT:
277 /* 279 /*
278 * For double fault, "reason" is actually passed as 280 * For double fault, "reason" is actually passed as
279 * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so 281 * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so
280 * we can provide the original fault number rather than 282 * we can provide the original fault number rather than
281 * the uninteresting "INT_DOUBLE_FAULT" so the user can 283 * the uninteresting "INT_DOUBLE_FAULT" so the user can
282 * learn what actually struck while PL0 ICS was set. 284 * learn what actually struck while PL0 ICS was set.