author     Chris Metcalf <cmetcalf@tilera.com>    2010-06-25 17:04:17 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>    2010-07-06 13:41:51 -0400
commit     0707ad30d10110aebc01a5a64fb63f4b32d20b73 (patch)
tree       64d8ba73e605ac26e56808d1d77701b3f83cf8b2
parent     c78095bd8c77fca2619769ff8efb639fd100e373 (diff)
arch/tile: Miscellaneous cleanup changes.
This commit is primarily changes caused by reviewing "sparse" and
"checkpatch" output on our sources, so is somewhat noisy, since things
like "printk() -> pr_err()" (or whatever) throughout the codebase tend
to get tedious to read. Rather than trying to tease apart precisely
which things changed due to which type of code review, this commit
includes various cleanups in the code:

- sparse: Add declarations in headers for globals.
- sparse: Fix __user annotations.
- sparse: Use gfp_t consistently instead of int.
- sparse: Remove functions not actually used.
- checkpatch: Clean up printk() warnings by using pr_info(), etc.;
  also avoid partial-line printks except in bootup code.
- checkpatch: Use exposed structs rather than typedefs.
- checkpatch: Change some C99 comments to C89 comments.

In addition, a couple of minor other changes are rolled into this commit:

- Add support for a "raise" instruction to cause SIGFPE, etc., to be raised.
- Remove some compat code that is unnecessary when we fully eliminate
  some of the deprecated syscalls from the generic syscall ABI.
- Update the tile_defconfig to reflect current config contents.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
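As background for the two most common cleanup patterns in this commit, the
sketch below shows a bare printk() converted to the pr_err()/pr_info()
helpers and a userspace pointer annotated with __user so sparse can check
address spaces. It is an illustrative fragment only; the function name and
messages are not taken from the patch.

    #include <linux/kernel.h>   /* pr_err(), pr_info() */
    #include <linux/uaccess.h>  /* copy_from_user(), __user */

    /* Before: printk("bad value\n"); -- checkpatch warns: no KERN_* level. */
    /* After: pr_err() expands to printk(KERN_ERR ...) with the same format. */
    static int example_copy(void __user *ubuf, size_t len)
    {
            char kbuf[64];

            if (len > sizeof(kbuf))
                    return -EINVAL;

            /* The __user annotation lets sparse flag a direct dereference. */
            if (copy_from_user(kbuf, ubuf, len)) {
                    pr_err("example: copy_from_user failed (%zu bytes)\n", len);
                    return -EFAULT;
            }
            pr_info("example: copied %zu bytes\n", len);
            return 0;
    }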
-rw-r--r--  arch/tile/configs/tile_defconfig | 15
-rw-r--r--  arch/tile/include/arch/abi.h | 69
-rw-r--r--  arch/tile/include/arch/interrupts_32.h | 2
-rw-r--r--  arch/tile/include/asm/atomic_32.h | 17
-rw-r--r--  arch/tile/include/asm/compat.h | 55
-rw-r--r--  arch/tile/include/asm/elf.h | 6
-rw-r--r--  arch/tile/include/asm/futex.h | 17
-rw-r--r--  arch/tile/include/asm/page.h | 9
-rw-r--r--  arch/tile/include/asm/pgtable.h | 9
-rw-r--r--  arch/tile/include/asm/pgtable_32.h | 12
-rw-r--r--  arch/tile/include/asm/ptrace.h | 5
-rw-r--r--  arch/tile/include/asm/sections.h | 9
-rw-r--r--  arch/tile/include/asm/signal.h | 1
-rw-r--r--  arch/tile/include/asm/spinlock_32.h | 3
-rw-r--r--  arch/tile/include/asm/stack.h | 6
-rw-r--r--  arch/tile/include/asm/syscalls.h | 72
-rw-r--r--  arch/tile/include/asm/system.h | 30
-rw-r--r--  arch/tile/include/asm/thread_info.h | 8
-rw-r--r--  arch/tile/include/asm/traps.h | 26
-rw-r--r--  arch/tile/include/asm/uaccess.h | 22
-rw-r--r--  arch/tile/include/asm/unistd.h | 9
-rw-r--r--  arch/tile/kernel/backtrace.c | 81
-rw-r--r--  arch/tile/kernel/compat.c | 28
-rw-r--r--  arch/tile/kernel/compat_signal.c | 10
-rw-r--r--  arch/tile/kernel/early_printk.c | 2
-rw-r--r--  arch/tile/kernel/entry.S | 4
-rw-r--r--  arch/tile/kernel/machine_kexec.c | 38
-rw-r--r--  arch/tile/kernel/messaging.c | 5
-rw-r--r--  arch/tile/kernel/module.c | 16
-rw-r--r--  arch/tile/kernel/process.c | 110
-rw-r--r--  arch/tile/kernel/ptrace.c | 3
-rw-r--r--  arch/tile/kernel/reboot.c | 7
-rw-r--r--  arch/tile/kernel/setup.c | 132
-rw-r--r--  arch/tile/kernel/signal.c | 19
-rw-r--r--  arch/tile/kernel/single_step.c | 75
-rw-r--r--  arch/tile/kernel/smpboot.c | 37
-rw-r--r--  arch/tile/kernel/stack.c | 43
-rw-r--r--  arch/tile/kernel/sys.c | 18
-rw-r--r--  arch/tile/kernel/time.c | 7
-rw-r--r--  arch/tile/kernel/traps.c | 130
-rw-r--r--  arch/tile/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/tile/lib/atomic_32.c | 53
-rw-r--r--  arch/tile/lib/cpumask.c | 1
-rw-r--r--  arch/tile/lib/exports.c | 1
-rw-r--r--  arch/tile/lib/memcpy_tile64.c | 6
-rw-r--r--  arch/tile/lib/memmove_32.c | 2
-rw-r--r--  arch/tile/lib/memset_32.c | 3
-rw-r--r--  arch/tile/lib/spinlock_common.h | 2
-rw-r--r--  arch/tile/lib/uaccess.c | 3
-rw-r--r--  arch/tile/mm/elf.c | 4
-rw-r--r--  arch/tile/mm/fault.c | 64
-rw-r--r--  arch/tile/mm/highmem.c | 2
-rw-r--r--  arch/tile/mm/homecache.c | 18
-rw-r--r--  arch/tile/mm/hugetlbpage.c | 2
-rw-r--r--  arch/tile/mm/init.c | 99
-rw-r--r--  arch/tile/mm/pgtable.c | 46
56 files changed, 798 insertions, 679 deletions
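Several hunks below (e.g. in compat.c and compat_signal.c) silence sparse
address-space warnings by casting a kernel pointer with "__force __user"
when a syscall is invoked under set_fs(KERNEL_DS). A minimal sketch of that
pattern follows; the wrapper function itself is illustrative, not from the
patch.

    #include <linux/syscalls.h>  /* sys_sched_rr_get_interval() */
    #include <linux/uaccess.h>   /* get_fs(), set_fs(), KERNEL_DS */

    /* Temporarily widen the address limit so a kernel buffer can be
     * handed to a syscall that expects a __user pointer; __force tells
     * sparse that the address-space cast is intentional. */
    static long example_wrapper(pid_t pid, struct timespec *kts)
    {
            mm_segment_t old_fs = get_fs();
            long ret;

            set_fs(KERNEL_DS);
            ret = sys_sched_rr_get_interval(pid,
                            (struct timespec __force __user *)kts);
            set_fs(old_fs);
            return ret;
    }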
diff --git a/arch/tile/configs/tile_defconfig b/arch/tile/configs/tile_defconfig
index 74a5be39e8f2..f34c70b46c64 100644
--- a/arch/tile/configs/tile_defconfig
+++ b/arch/tile/configs/tile_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
 # Linux kernel version: 2.6.34
-# Fri May 28 17:51:43 2010
+# Thu Jun 3 13:20:05 2010
 #
 CONFIG_MMU=y
 CONFIG_GENERIC_CSUM=y
@@ -9,16 +9,13 @@ CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_GENERIC_PENDING_IRQ=y
-CONFIG_ZONE_DMA=y
 CONFIG_SEMAPHORE_SLEEPERS=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_HAVE_ARCH_ALLOC_REMAP=y
 CONFIG_HAVE_SETUP_PER_CPU_AREA=y
 CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
 CONFIG_SYS_SUPPORTS_HUGETLBFS=y
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_CLOCKSOURCE_WATCHDOG=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
 CONFIG_DEFAULT_MIGRATION_COST=10000000
 CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
@@ -32,7 +29,6 @@ CONFIG_STRICT_DEVMEM=y
 CONFIG_SMP=y
 CONFIG_WERROR=y
 # CONFIG_DEBUG_COPY_FROM_USER is not set
-CONFIG_SERIAL_CONSOLE=y
 CONFIG_HVC_TILE=y
 CONFIG_TILE=y
 # CONFIG_TILEGX is not set
@@ -86,6 +82,7 @@ CONFIG_INITRAMFS_COMPRESSION_NONE=y
 # CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set
 # CONFIG_INITRAMFS_COMPRESSION_LZMA is not set
 # CONFIG_INITRAMFS_COMPRESSION_LZO is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 CONFIG_ANON_INODES=y
 CONFIG_EMBEDDED=y
@@ -220,7 +217,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 CONFIG_MIGRATION=y
 CONFIG_PHYS_ADDR_T_64BIT=y
-CONFIG_ZONE_DMA_FLAG=1
+CONFIG_ZONE_DMA_FLAG=0
 CONFIG_BOUNCE=y
 CONFIG_VIRT_TO_BUS=y
 # CONFIG_KSM is not set
@@ -232,10 +229,11 @@ CONFIG_FEEDBACK_USE=""
 CONFIG_VMALLOC_RESERVE=0x1000000
 CONFIG_HARDWALL=y
 CONFIG_MEMPROF=y
-CONFIG_XGBE_MAIN=y
+CONFIG_XGBE=y
 CONFIG_NET_TILE=y
 CONFIG_PSEUDO_NAPI=y
 CONFIG_TILEPCI_ENDP=y
+CONFIG_TILEPCI_HOST_SUBSET=m
 CONFIG_TILE_IDE_GPIO=y
 CONFIG_TILE_SOFTUART=y
 
@@ -244,6 +242,8 @@ CONFIG_TILE_SOFTUART=y
 #
 CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
+# CONFIG_NO_IOMEM is not set
+# CONFIG_NO_IOPORT is not set
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 CONFIG_PCI_DEBUG=y
 # CONFIG_PCI_STUB is not set
@@ -742,6 +742,7 @@ CONFIG_HVC_DRIVER=y
 #
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
 CONFIG_I2C=y
 CONFIG_I2C_BOARDINFO=y
 CONFIG_I2C_COMPAT=y
diff --git a/arch/tile/include/arch/abi.h b/arch/tile/include/arch/abi.h
index 7cdc47b3e02a..da8df5b9d914 100644
--- a/arch/tile/include/arch/abi.h
+++ b/arch/tile/include/arch/abi.h
@@ -1,26 +1,29 @@
-// Copyright 2010 Tilera Corporation. All Rights Reserved.
-//
-// This program is free software; you can redistribute it and/or
-// modify it under the terms of the GNU General Public License
-// as published by the Free Software Foundation, version 2.
-//
-// This program is distributed in the hope that it will be useful, but
-// WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
-// NON INFRINGEMENT. See the GNU General Public License for
-// more details.
-
-//! @file
-//!
-//! ABI-related register definitions helpful when writing assembly code.
-//!
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file
+ *
+ * ABI-related register definitions helpful when writing assembly code.
+ */
 
 #ifndef __ARCH_ABI_H__
 #define __ARCH_ABI_H__
 
 #include <arch/chip.h>
 
-// Registers 0 - 55 are "normal", but some perform special roles.
+/* Registers 0 - 55 are "normal", but some perform special roles. */
 
 #define TREG_FP 52 /**< Frame pointer. */
 #define TREG_TP 53 /**< Thread pointer. */
@@ -30,7 +33,7 @@
 /** Index of last normal general-purpose register. */
 #define TREG_LAST_GPR 55
 
-// Registers 56 - 62 are "special" network registers.
+/* Registers 56 - 62 are "special" network registers. */
 
 #define TREG_SN 56 /**< Static network access. */
 #define TREG_IDN0 57 /**< IDN demux 0 access. */
@@ -40,7 +43,7 @@
 #define TREG_UDN2 61 /**< UDN demux 2 access. */
 #define TREG_UDN3 62 /**< UDN demux 3 access. */
 
-// Register 63 is the "special" zero register.
+/* Register 63 is the "special" zero register. */
 
 #define TREG_ZERO 63 /**< "Zero" register; always reads as "0". */
 
@@ -52,42 +55,44 @@
 #define TREG_SYSCALL_NR_NAME r10
 
 
-//! The ABI requires callers to allocate a caller state save area of
-//! this many bytes at the bottom of each stack frame.
-//!
+/**
+ * The ABI requires callers to allocate a caller state save area of
+ * this many bytes at the bottom of each stack frame.
+ */
 #ifdef __tile__
 #define C_ABI_SAVE_AREA_SIZE (2 * __SIZEOF_POINTER__)
 #endif
 
-//! The operand to an 'info' opcode directing the backtracer to not
-//! try to find the calling frame.
-//!
+/**
+ * The operand to an 'info' opcode directing the backtracer to not
+ * try to find the calling frame.
+ */
 #define INFO_OP_CANNOT_BACKTRACE 2
 
 #ifndef __ASSEMBLER__
 #if CHIP_WORD_SIZE() > 32
 
-//! Unsigned type that can hold a register.
+/** Unsigned type that can hold a register. */
 typedef unsigned long long uint_reg_t;
 
-//! Signed type that can hold a register.
+/** Signed type that can hold a register. */
 typedef long long int_reg_t;
 
-//! String prefix to use for printf().
+/** String prefix to use for printf(). */
 #define INT_REG_FMT "ll"
 
 #elif !defined(__LP64__) /* avoid confusion with LP64 cross-build tools */
 
-//! Unsigned type that can hold a register.
+/** Unsigned type that can hold a register. */
 typedef unsigned long uint_reg_t;
 
-//! Signed type that can hold a register.
+/** Signed type that can hold a register. */
 typedef long int_reg_t;
 
-//! String prefix to use for printf().
+/** String prefix to use for printf(). */
 #define INT_REG_FMT "l"
 
 #endif
 #endif /* __ASSEMBLER__ */
 
-#endif // !__ARCH_ABI_H__
+#endif /* !__ARCH_ABI_H__ */
diff --git a/arch/tile/include/arch/interrupts_32.h b/arch/tile/include/arch/interrupts_32.h
index feffada705f0..9d0bfa7e59be 100644
--- a/arch/tile/include/arch/interrupts_32.h
+++ b/arch/tile/include/arch/interrupts_32.h
@@ -301,4 +301,4 @@
              INT_MASK(INT_DOUBLE_FAULT) | \
              INT_MASK(INT_AUX_PERF_COUNT) | \
              0)
-#endif // !__ARCH_INTERRUPTS_H__
+#endif /* !__ARCH_INTERRUPTS_H__ */
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index e4f8b4f04895..40a5a3a876d9 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -348,6 +348,23 @@ void __init_atomic_per_cpu(void);
 /* Support releasing the atomic lock in do_page_fault_ics(). */
 void __atomic_fault_unlock(int *lock_ptr);
 #endif
+
+/* Private helper routines in lib/atomic_asm_32.S */
+extern struct __get_user __atomic_cmpxchg(volatile int *p,
+					  int *lock, int o, int n);
+extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
+						  int *lock, int o, int n);
+extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
+extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
+extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
+extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
+extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
+				      int *lock, u64 o, u64 n);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_ATOMIC_32_H */
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index b09292bcc19f..5a34da6cdd79 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -70,48 +70,7 @@ struct compat_timeval {
 	s32 tv_usec;
 };
 
-struct compat_stat {
-	unsigned int st_dev;
-	unsigned int st_ino;
-	unsigned int st_mode;
-	unsigned int st_nlink;
-	unsigned int st_uid;
-	unsigned int st_gid;
-	unsigned int st_rdev;
-	unsigned int __pad1;
-	int st_size;
-	int st_blksize;
-	int __pad2;
-	int st_blocks;
-	int st_atime;
-	unsigned int st_atime_nsec;
-	int st_mtime;
-	unsigned int st_mtime_nsec;
-	int st_ctime;
-	unsigned int st_ctime_nsec;
-	unsigned int __unused[2];
-};
-
-struct compat_stat64 {
-	unsigned long st_dev;
-	unsigned long st_ino;
-	unsigned int st_mode;
-	unsigned int st_nlink;
-	unsigned int st_uid;
-	unsigned int st_gid;
-	unsigned long st_rdev;
-	long st_size;
-	unsigned int st_blksize;
-	unsigned long st_blocks __attribute__((packed));
-	unsigned int st_atime;
-	unsigned int st_atime_nsec;
-	unsigned int st_mtime;
-	unsigned int st_mtime_nsec;
-	unsigned int st_ctime;
-	unsigned int st_ctime_nsec;
-	unsigned int __unused8;
-};
-
+#define compat_stat stat
 #define compat_statfs statfs
 
 struct compat_sysctl {
@@ -233,7 +192,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 /* Sign-extend when storing a kernel pointer to a user's ptregs. */
 static inline unsigned long ptr_to_compat_reg(void __user *uptr)
 {
-	return (long)(int)(long)uptr;
+	return (long)(int)(long __force)uptr;
 }
 
 static inline void __user *compat_alloc_user_space(long len)
@@ -278,17 +237,8 @@ long compat_sys_sync_file_range2(int fd, unsigned int flags,
 long compat_sys_fallocate(int fd, int mode,
 			  u32 offset_lo, u32 offset_hi,
 			  u32 len_lo, u32 len_hi);
-long compat_sys_stat64(char __user *filename,
-		       struct compat_stat64 __user *statbuf);
-long compat_sys_lstat64(char __user *filename,
-			struct compat_stat64 __user *statbuf);
-long compat_sys_fstat64(unsigned int fd, struct compat_stat64 __user *statbuf);
-long compat_sys_fstatat64(int dfd, char __user *filename,
-			  struct compat_stat64 __user *statbuf, int flag);
 long compat_sys_sched_rr_get_interval(compat_pid_t pid,
 				      struct compat_timespec __user *interval);
-ssize_t compat_sys_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
-			    size_t count);
 
 /* Versions of compat functions that differ from generic Linux. */
 struct compat_msgbuf;
@@ -302,7 +252,6 @@ long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 			   compat_long_t addr, compat_long_t data);
 
 /* Tilera Linux syscalls that don't have "compat" versions. */
-#define compat_sys_raise_fpe sys_raise_fpe
 #define compat_sys_flush_cache sys_flush_cache
 
 #endif /* _ASM_TILE_COMPAT_H */
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index 1bca0debdb0f..623a6bb741c1 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -59,8 +59,7 @@ enum { ELF_ARCH = CHIP_ELF_TYPE() };
  */
 #define elf_check_arch(x)  \
 	((x)->e_ident[EI_CLASS] == ELF_CLASS && \
-	 ((x)->e_machine == CHIP_ELF_TYPE() || \
-	  (x)->e_machine == CHIP_COMPAT_ELF_TYPE()))
+	 (x)->e_machine == CHIP_ELF_TYPE())
 
 /* The module loader only handles a few relocation types. */
 #ifndef __tilegx__
@@ -139,8 +138,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
  */
 #define compat_elf_check_arch(x)  \
 	((x)->e_ident[EI_CLASS] == ELFCLASS32 && \
-	 ((x)->e_machine == CHIP_ELF_TYPE() || \
-	  (x)->e_machine == CHIP_COMPAT_ELF_TYPE()))
+	 (x)->e_machine == CHIP_ELF_TYPE())
 
 #define compat_start_thread(regs, ip, usp) do { \
 	regs->pc = ptr_to_compat_reg((void *)(ip)); \
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index 9eaeb3c08786..fe0d10dcae57 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -29,14 +29,14 @@
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 
-extern struct __get_user futex_set(int *v, int i);
-extern struct __get_user futex_add(int *v, int n);
-extern struct __get_user futex_or(int *v, int n);
-extern struct __get_user futex_andn(int *v, int n);
-extern struct __get_user futex_cmpxchg(int *v, int o, int n);
+extern struct __get_user futex_set(int __user *v, int i);
+extern struct __get_user futex_add(int __user *v, int n);
+extern struct __get_user futex_or(int __user *v, int n);
+extern struct __get_user futex_andn(int __user *v, int n);
+extern struct __get_user futex_cmpxchg(int __user *v, int o, int n);
 
 #ifndef __tilegx__
-extern struct __get_user futex_xor(int *v, int n);
+extern struct __get_user futex_xor(int __user *v, int n);
 #else
 static inline struct __get_user futex_xor(int __user *uaddr, int n)
 {
@@ -131,6 +131,11 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
 	return asm_ret.err ? asm_ret.err : asm_ret.val;
 }
 
+#ifndef __tilegx__
+/* Return failure from the atomic wrappers. */
+struct __get_user __atomic_bad_address(int __user *addr);
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_FUTEX_H */
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index c8301c43d6d9..f894a9016da6 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -16,8 +16,6 @@
 #define _ASM_TILE_PAGE_H
 
 #include <linux/const.h>
-#include <hv/hypervisor.h>
-#include <arch/chip.h>
 
 /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
 #define PAGE_SHIFT 16
@@ -29,6 +27,11 @@
 #define PAGE_MASK (~(PAGE_SIZE - 1))
 #define HPAGE_MASK (~(HPAGE_SIZE - 1))
 
+#ifdef __KERNEL__
+
+#include <hv/hypervisor.h>
+#include <arch/chip.h>
+
 /*
  * The {,H}PAGE_SHIFT values must match the HV_LOG2_PAGE_SIZE_xxx
  * definitions in <hv/hypervisor.h>. We validate this at build time
@@ -331,4 +334,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 
+#endif /* __KERNEL__ */
+
 #endif /* _ASM_TILE_PAGE_H */
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index beb1504e9c10..b3367379d537 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -229,9 +229,9 @@ static inline void __pte_clear(pte_t *ptep)
 #define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))
 
 #define pte_ERROR(e) \
-	printk("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
+	pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
 #define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))
+	pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))
 
 /*
  * set_pte_order() sets the given PTE and also sanity-checks the
@@ -470,6 +470,11 @@ static inline int pmd_huge_page(pmd_t pmd)
 
 #include <asm-generic/pgtable.h>
 
+/* Support /proc/NN/pgtable API. */
+struct seq_file;
+int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
+			   unsigned long vaddr, pte_t *ptep, void **datap);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_PGTABLE_H */
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index b935fb2ad4f3..53ec34884744 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -89,15 +89,27 @@ static inline int pgd_addr_invalid(unsigned long addr)
 /*
  * Provide versions of these routines that can be used safely when
  * the hypervisor may be asynchronously modifying dirty/accessed bits.
+ * ptep_get_and_clear() matches the generic one but we provide it to
+ * be parallel with the 64-bit code.
  */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 
 extern int ptep_test_and_clear_young(struct vm_area_struct *,
 				     unsigned long addr, pte_t *);
 extern void ptep_set_wrprotect(struct mm_struct *,
 			       unsigned long addr, pte_t *);
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long addr, pte_t *ptep)
+{
+	pte_t pte = *ptep;
+	pte_clear(_mm, addr, ptep);
+	return pte;
+}
+
 /* Create a pmd from a PTFN. */
 static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
 {
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index 4d1d9953016a..acdae814e016 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -112,6 +112,9 @@ struct pt_regs {
 /* Fill in a struct pt_regs with the current kernel registers. */
 struct pt_regs *get_pt_regs(struct pt_regs *);
 
+/* Trace the current syscall. */
+extern void do_syscall_trace(void);
+
 extern void show_regs(struct pt_regs *);
 
 #define arch_has_single_step() (1)
@@ -123,7 +126,7 @@ extern void show_regs(struct pt_regs *);
  */
 struct single_step_state {
 	/* the page to which we will write hacked-up bundles */
-	void *buffer;
+	void __user *buffer;
 
 	union {
 		int flags;
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h
index 6c111491f0ed..d062d463fca9 100644
--- a/arch/tile/include/asm/sections.h
+++ b/arch/tile/include/asm/sections.h
@@ -25,7 +25,14 @@ extern char _sinitdata[], _einitdata[];
 /* Write-once data is writable only till the end of initialization. */
 extern char __w1data_begin[], __w1data_end[];
 
-extern char __feedback_section_start[], __feedback_section_end[];
+
+/* Not exactly sections, but PC comparison points in the code. */
+extern char __rt_sigreturn[], __rt_sigreturn_end[];
+#ifndef __tilegx__
+extern char sys_cmpxchg[], __sys_cmpxchg_end[];
+extern char __sys_cmpxchg_grab_lock[];
+extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
+#endif
 
 /* Handle the discontiguity between _sdata and _stext. */
 static inline int arch_is_kernel_data(unsigned long addr)
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h
index d20d326d201b..eb0253f32202 100644
--- a/arch/tile/include/asm/signal.h
+++ b/arch/tile/include/asm/signal.h
@@ -26,6 +26,7 @@
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
 int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
+void do_signal(struct pt_regs *regs);
 #endif
 
 #endif /* _ASM_TILE_SIGNAL_H */
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index f3a8473c68da..88efdde8dd2b 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -134,9 +134,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rwlock)
 {
 	int locked;
 	u32 val = __insn_tns((int *)&rwlock->lock);
-	if (unlikely(val & 1)) {
+	if (unlikely(val & 1))
 		return arch_read_trylock_slow(rwlock);
-	}
 	locked = (val << _RD_COUNT_WIDTH) == 0;
 	rwlock->lock = val + (locked << _RD_COUNT_SHIFT);
 	return locked;
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h
index 864913bcfbc9..f908473c322d 100644
--- a/arch/tile/include/asm/stack.h
+++ b/arch/tile/include/asm/stack.h
@@ -48,6 +48,10 @@ extern void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 /* Initialize iterator based on current stack. */
 extern void KBacktraceIterator_init_current(struct KBacktraceIterator *kbt);
 
+/* Helper method for above. */
+extern void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt,
+				ulong pc, ulong lr, ulong sp, ulong r52);
+
 /* No more frames? */
 extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt);
 
@@ -64,5 +68,7 @@ extern void tile_show_stack(struct KBacktraceIterator *, int headers);
 /* Dump stack of current process, with registers to seed the backtrace. */
 extern void dump_stack_regs(struct pt_regs *);
 
+/* Helper method for assembly dump_stack(). */
+extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
 
 #endif /* _ASM_TILE_STACK_H */
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index 9f2b8e2f69d5..af165a74537f 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -22,7 +22,19 @@
 #include <linux/linkage.h>
 #include <linux/signal.h>
 #include <linux/types.h>
-#include <asm-generic/syscalls.h>
+#include <linux/compat.h>
+
+/* The array of function pointers for syscalls. */
+extern void *sys_call_table[];
+#ifdef CONFIG_COMPAT
+extern void *compat_sys_call_table[];
+#endif
+
+/*
+ * Note that by convention, any syscall which requires the current
+ * register set takes an additional "struct pt_regs *" pointer; the
+ * sys_xxx() function just adds the pointer and tail-calls to _sys_xxx().
+ */
 
 /* kernel/sys.c */
 ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count);
@@ -31,10 +43,66 @@ long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
 int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
 		       u32 len_lo, u32 len_hi, int advice);
 long sys_flush_cache(void);
+long sys_mmap2(unsigned long addr, unsigned long len,
+	       unsigned long prot, unsigned long flags,
+	       unsigned long fd, unsigned long pgoff);
+#ifdef __tilegx__
+long sys_mmap(unsigned long addr, unsigned long len,
+	      unsigned long prot, unsigned long flags,
+	      unsigned long fd, off_t pgoff);
+#endif
+
+/* kernel/process.c */
+long sys_clone(unsigned long clone_flags, unsigned long newsp,
+	       void __user *parent_tid, void __user *child_tid);
+long _sys_clone(unsigned long clone_flags, unsigned long newsp,
+		void __user *parent_tid, void __user *child_tid,
+		struct pt_regs *regs);
+long sys_fork(void);
+long _sys_fork(struct pt_regs *regs);
+long sys_vfork(void);
+long _sys_vfork(struct pt_regs *regs);
+long sys_execve(char __user *filename, char __user * __user *argv,
+		char __user * __user *envp);
+long _sys_execve(char __user *filename, char __user * __user *argv,
+		 char __user * __user *envp, struct pt_regs *regs);
+
+/* kernel/signal.c */
+long sys_sigaltstack(const stack_t __user *, stack_t __user *);
+long _sys_sigaltstack(const stack_t __user *, stack_t __user *,
+		      struct pt_regs *);
+long sys_rt_sigreturn(void);
+long _sys_rt_sigreturn(struct pt_regs *regs);
+
+/* platform-independent functions */
+long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
+long sys_rt_sigaction(int sig, const struct sigaction __user *act,
+		      struct sigaction __user *oact, size_t sigsetsize);
 
 #ifndef __tilegx__
 /* mm/fault.c */
-int sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
+int sys_cmpxchg_badaddr(unsigned long address);
+int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
+#endif
+
+#ifdef CONFIG_COMPAT
+long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
+		       compat_uptr_t __user *envp);
+long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
+			compat_uptr_t __user *envp, struct pt_regs *regs);
+long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+			    struct compat_sigaltstack __user *uoss_ptr);
+long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
+			     struct compat_sigaltstack __user *uoss_ptr,
+			     struct pt_regs *regs);
+long compat_sys_rt_sigreturn(void);
+long _compat_sys_rt_sigreturn(struct pt_regs *regs);
+
+/* These four are not defined for 64-bit, but serve as "compat" syscalls. */
+long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg);
+long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
+long sys_truncate64(const char __user *path, loff_t length);
+long sys_ftruncate64(unsigned int fd, loff_t length);
 #endif
 
 #endif /* _ASM_TILE_SYSCALLS_H */
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h
index d6ca7f816c87..0935094f370a 100644
--- a/arch/tile/include/asm/system.h
+++ b/arch/tile/include/asm/system.h
@@ -160,6 +160,14 @@ struct task_struct;
 extern struct task_struct *_switch_to(struct task_struct *prev,
 				      struct task_struct *next);
 
+/* Helper function for _switch_to(). */
+extern struct task_struct *__switch_to(struct task_struct *prev,
+				       struct task_struct *next,
+				       unsigned long new_system_save_1_0);
+
+/* Address that switched-away from tasks are at. */
+extern unsigned long get_switch_to_pc(void);
+
 /*
  * On SMP systems, when the scheduler does migration-cost autodetection,
  * it needs a way to flush as much of the CPU's caches as possible:
@@ -187,10 +195,26 @@ extern int unaligned_printk;
 /* Number of unaligned fixups performed */
 extern unsigned int unaligned_fixup_count;
 
+/* Init-time routine to do tile-specific per-cpu setup. */
+void setup_cpu(int boot);
+
 /* User-level DMA management functions */
 void grant_dma_mpls(void);
 void restrict_dma_mpls(void);
 
+#ifdef CONFIG_HARDWALL
+/* User-level network management functions */
+void reset_network_state(void);
+void grant_network_mpls(void);
+void restrict_network_mpls(void);
+int hardwall_deactivate(struct task_struct *task);
+
+/* Hook hardwall code into changes in affinity. */
+#define arch_set_cpus_allowed(p, new_mask) do { \
+	if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
+		hardwall_deactivate(p); \
+} while (0)
+#endif
 
 /* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
 extern int _sim_syscall(int syscall_num, ...);
@@ -215,6 +239,12 @@ extern int _sim_syscall(int syscall_num, ...);
 		homecache_migrate_kthread(); \
 } while (0)
 
+/* Support function for forking a new task. */
+void ret_from_fork(void);
+
+/* Called from ret_from_fork() when a new process starts up. */
+struct task_struct *sim_notify_fork(struct task_struct *prev);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_SYSTEM_H */
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 9024bf3530aa..beec8729564a 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -55,7 +55,7 @@ struct thread_info {
 	.restart_block = {			\
 		.fn = do_no_restart_syscall,	\
 	},					\
-	.step_state = 0,			\
+	.step_state = NULL,			\
 }
 
 #define init_thread_info (init_thread_union.thread_info)
@@ -86,6 +86,12 @@ register unsigned long stack_pointer __asm__("sp");
 extern struct thread_info *alloc_thread_info(struct task_struct *task);
 extern void free_thread_info(struct thread_info *info);
 
+/* Sit on a nap instruction until interrupted. */
+extern void smp_nap(void);
+
+/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
+extern void _cpu_idle(void);
+
 /* Switch boot idle thread to a freshly-allocated stack and free old stack. */
 extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
 				  unsigned long new_sp,
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index eab33d4a917d..432a9c15c8a2 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -18,9 +18,28 @@
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *, int fault_num,
 		   unsigned long address, unsigned long write);
+void do_async_page_fault(struct pt_regs *);
+
+#ifndef __tilegx__
+/*
+ * We return this structure in registers to avoid having to write
+ * additional save/restore code in the intvec.S caller.
+ */
+struct intvec_state {
+	void *handler;
+	unsigned long vecnum;
+	unsigned long fault_num;
+	unsigned long info;
+	unsigned long retval;
+};
+struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
+				      unsigned long address,
+				      unsigned long info);
+#endif
 
 /* kernel/traps.c */
 void do_trap(struct pt_regs *, int fault_num, unsigned long reason);
+void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
 
 /* kernel/time.c */
 void do_timer_interrupt(struct pt_regs *, int fault_num);
@@ -31,6 +50,13 @@ void hv_message_intr(struct pt_regs *, int intnum);
 /* kernel/irq.c */
 void tile_dev_intr(struct pt_regs *, int intnum);
 
+#ifdef CONFIG_HARDWALL
+/* kernel/hardwall.c */
+void do_hardwall_trap(struct pt_regs *, int fault_num);
+#endif
+
+/* kernel/ptrace.c */
+void do_breakpoint(struct pt_regs *, int fault_num);
 
 
 #endif /* _ASM_TILE_SYSCALLS_H */
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index f3058afd5a88..ed17a80ec0ed 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -89,8 +89,10 @@ int __range_ok(unsigned long addr, unsigned long size);
  * checks that the pointer is in the user space range - after calling
  * this function, memory access functions may still return -EFAULT.
  */
-#define access_ok(type, addr, size) \
-	(likely(__range_ok((unsigned long)addr, size) == 0))
+#define access_ok(type, addr, size) ({ \
+	__chk_user_ptr(addr); \
+	likely(__range_ok((unsigned long)(addr), (size)) == 0); \
+})
 
 /*
  * The exception table consists of pairs of addresses: the first is the
@@ -134,14 +136,14 @@ struct __get_user {
  * such extended assembler routines, though we will have to use a
  * different return code in that case (1, 2, or 4, rather than -EFAULT).
  */
-extern struct __get_user __get_user_1(const void *);
-extern struct __get_user __get_user_2(const void *);
-extern struct __get_user __get_user_4(const void *);
-extern struct __get_user __get_user_8(const void *);
-extern int __put_user_1(long, void *);
-extern int __put_user_2(long, void *);
-extern int __put_user_4(long, void *);
-extern int __put_user_8(long long, void *);
+extern struct __get_user __get_user_1(const void __user *);
+extern struct __get_user __get_user_2(const void __user *);
+extern struct __get_user __get_user_4(const void __user *);
+extern struct __get_user __get_user_8(const void __user *);
+extern int __put_user_1(long, void __user *);
+extern int __put_user_2(long, void __user *);
+extern int __put_user_4(long, void __user *);
+extern int __put_user_8(long long, void __user *);
 
 /* Unimplemented routines to cause linker failures */
 extern struct __get_user __get_user_bad(void);
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index 03b3d5d665dd..f2e3ff485333 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -15,7 +15,6 @@
 #if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL)
 #define _ASM_TILE_UNISTD_H
 
-
 #ifndef __LP64__
 /* Use the flavor of this syscall that matches the 32-bit API better. */
 #define __ARCH_WANT_SYNC_FILE_RANGE2
@@ -24,6 +23,10 @@
 /* Use the standard ABI for syscalls. */
 #include <asm-generic/unistd.h>
 
+/* Additional Tilera-specific syscalls. */
+#define __NR_flush_cache (__NR_arch_specific_syscall + 1)
+__SYSCALL(__NR_flush_cache, sys_flush_cache)
+
 #ifndef __tilegx__
 /* "Fast" syscalls provide atomic support for 32-bit chips. */
 #define __NR_FAST_cmpxchg -1
@@ -33,10 +36,6 @@
 __SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr)
 #endif
 
-/* Additional Tilera-specific syscalls. */
-#define __NR_flush_cache (__NR_arch_specific_syscall + 1)
-__SYSCALL(__NR_flush_cache, sys_flush_cache)
-
 #ifdef __KERNEL__
 /* In compat mode, we use sys_llseek() for compat_sys_llseek(). */
 #ifdef CONFIG_COMPAT
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
index 1b0a410ef5e7..77265f3b58d6 100644
--- a/arch/tile/kernel/backtrace.c
+++ b/arch/tile/kernel/backtrace.c
@@ -30,18 +30,18 @@
 
 
 /** A decoded bundle used for backtracer analysis. */
-typedef struct {
+struct BacktraceBundle {
 	tile_bundle_bits bits;
 	int num_insns;
 	struct tile_decoded_instruction
 	insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
-} BacktraceBundle;
+};
 
 
 /* This implementation only makes sense for native tools. */
 /** Default function to read memory. */
-static bool
-bt_read_memory(void *result, VirtualAddress addr, size_t size, void *extra)
+static bool bt_read_memory(void *result, VirtualAddress addr,
+			   size_t size, void *extra)
 {
 	/* FIXME: this should do some horrible signal stuff to catch
 	 * SEGV cleanly and fail.
@@ -58,11 +58,11 @@ bt_read_memory(void *result, VirtualAddress addr, size_t size, void *extra)
  * has the specified mnemonic, and whose first 'num_operands_to_match'
  * operands exactly match those in 'operand_values'.
  */
-static const struct tile_decoded_instruction*
-find_matching_insn(const BacktraceBundle *bundle,
-		   tile_mnemonic mnemonic,
-		   const int *operand_values,
-		   int num_operands_to_match)
+static const struct tile_decoded_instruction *find_matching_insn(
+	const struct BacktraceBundle *bundle,
+	tile_mnemonic mnemonic,
+	const int *operand_values,
+	int num_operands_to_match)
 {
 	int i, j;
 	bool match;
@@ -90,8 +90,7 @@ find_matching_insn(const BacktraceBundle *bundle,
 }
 
 /** Does this bundle contain an 'iret' instruction? */
-static inline bool
-bt_has_iret(const BacktraceBundle *bundle)
+static inline bool bt_has_iret(const struct BacktraceBundle *bundle)
 {
 	return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
 }
@@ -99,8 +98,7 @@ bt_has_iret(const BacktraceBundle *bundle)
 /** Does this bundle contain an 'addi sp, sp, OFFSET' or
  * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
  */
-static bool
-bt_has_addi_sp(const BacktraceBundle *bundle, int *adjust)
+static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
 {
 	static const int vals[2] = { TREG_SP, TREG_SP };
 
@@ -120,8 +118,7 @@ bt_has_addi_sp(const BacktraceBundle *bundle, int *adjust)
  * as an unsigned value by this code since that's what the caller wants.
  * Returns the number of info ops found.
  */
-static int
-bt_get_info_ops(const BacktraceBundle *bundle,
+static int bt_get_info_ops(const struct BacktraceBundle *bundle,
 		int operands[MAX_INFO_OPS_PER_BUNDLE])
 {
 	int num_ops = 0;
@@ -143,8 +140,7 @@ bt_get_info_ops(const BacktraceBundle *bundle,
 /** Does this bundle contain a jrp instruction, and if so, to which
  * register is it jumping?
  */
-static bool
-bt_has_jrp(const BacktraceBundle *bundle, int *target_reg)
+static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg)
 {
 	const struct tile_decoded_instruction *insn =
 		find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0);
@@ -156,8 +152,7 @@ bt_has_jrp(const BacktraceBundle *bundle, int *target_reg)
 }
 
 /** Does this bundle modify the specified register in any way? */
-static bool
-bt_modifies_reg(const BacktraceBundle *bundle, int reg)
+static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg)
 {
 	int i, j;
 	for (i = 0; i < bundle->num_insns; i++) {
@@ -177,30 +172,26 @@ bt_modifies_reg(const BacktraceBundle *bundle, int reg)
 }
 
 /** Does this bundle modify sp? */
-static inline bool
-bt_modifies_sp(const BacktraceBundle *bundle)
+static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle)
 {
 	return bt_modifies_reg(bundle, TREG_SP);
 }
 
 /** Does this bundle modify lr? */
-static inline bool
-bt_modifies_lr(const BacktraceBundle *bundle)
+static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle)
 {
 	return bt_modifies_reg(bundle, TREG_LR);
 }
 
 /** Does this bundle contain the instruction 'move fp, sp'? */
-static inline bool
-bt_has_move_r52_sp(const BacktraceBundle *bundle)
+static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle)
 {
 	static const int vals[2] = { 52, TREG_SP };
 	return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
 }
 
 /** Does this bundle contain the instruction 'sw sp, lr'? */
-static inline bool
-bt_has_sw_sp_lr(const BacktraceBundle *bundle)
+static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
 {
 	static const int vals[2] = { TREG_SP, TREG_LR };
 	return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL;
@@ -209,11 +200,10 @@ bt_has_sw_sp_lr(const BacktraceBundle *bundle)
 /** Locates the caller's PC and SP for a program starting at the
  * given address.
  */
-static void
-find_caller_pc_and_caller_sp(CallerLocation *location,
-			     const VirtualAddress start_pc,
-			     BacktraceMemoryReader read_memory_func,
-			     void *read_memory_func_extra)
+static void find_caller_pc_and_caller_sp(CallerLocation *location,
+					 const VirtualAddress start_pc,
+					 BacktraceMemoryReader read_memory_func,
+					 void *read_memory_func_extra)
 {
 	/* Have we explicitly decided what the sp is,
 	 * rather than just the default?
@@ -253,7 +243,7 @@ find_caller_pc_and_caller_sp(CallerLocation *location,
 
 	for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) {
 
-		BacktraceBundle bundle;
+		struct BacktraceBundle bundle;
 		int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE];
 		int one_ago, jrp_reg;
 		bool has_jrp;
@@ -475,12 +465,11 @@ find_caller_pc_and_caller_sp(CallerLocation *location,
 	}
 }
 
-void
-backtrace_init(BacktraceIterator *state,
-	       BacktraceMemoryReader read_memory_func,
-	       void *read_memory_func_extra,
-	       VirtualAddress pc, VirtualAddress lr,
-	       VirtualAddress sp, VirtualAddress r52)
+void backtrace_init(BacktraceIterator *state,
+		    BacktraceMemoryReader read_memory_func,
+		    void *read_memory_func_extra,
+		    VirtualAddress pc, VirtualAddress lr,
+		    VirtualAddress sp, VirtualAddress r52)
 {
 	CallerLocation location;
 	VirtualAddress fp, initial_frame_caller_pc;
@@ -558,8 +547,7 @@ backtrace_init(BacktraceIterator *state,
 	state->read_memory_func_extra = read_memory_func_extra;
 }
 
-bool
-backtrace_next(BacktraceIterator *state)
+bool backtrace_next(BacktraceIterator *state)
 {
 	VirtualAddress next_fp, next_pc, next_frame[2];
 
@@ -614,12 +602,11 @@ backtrace_next(BacktraceIterator *state)
 
 #else /* TILE_CHIP < 10 */
 
-void
-backtrace_init(BacktraceIterator *state,
-	       BacktraceMemoryReader read_memory_func,
-	       void *read_memory_func_extra,
-	       VirtualAddress pc, VirtualAddress lr,
-	       VirtualAddress sp, VirtualAddress r52)
+void backtrace_init(BacktraceIterator *state,
+		    BacktraceMemoryReader read_memory_func,
+		    void *read_memory_func_extra,
+		    VirtualAddress pc, VirtualAddress lr,
+		    VirtualAddress sp, VirtualAddress r52)
 {
 	state->pc = pc;
 	state->sp = sp;
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index a374c99deeb6..b1e06d041555 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -88,34 +88,14 @@ long compat_sys_sched_rr_get_interval(compat_pid_t pid,
 	mm_segment_t old_fs = get_fs();
 
 	set_fs(KERNEL_DS);
-	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
+	ret = sys_sched_rr_get_interval(pid,
+					(struct timespec __force __user *)&t);
 	set_fs(old_fs);
 	if (put_compat_timespec(&t, interval))
 		return -EFAULT;
 	return ret;
 }
 
-ssize_t compat_sys_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
-			    size_t count)
-{
-	mm_segment_t old_fs = get_fs();
-	int ret;
-	off_t of;
-
-	if (offset && get_user(of, offset))
-		return -EFAULT;
-
-	set_fs(KERNEL_DS);
-	ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
-			   count);
-	set_fs(old_fs);
-
-	if (offset && put_user(of, offset))
-		return -EFAULT;
-	return ret;
-}
-
-
 /*
  * The usual compat_sys_msgsnd() and _msgrcv() seem to be assuming
  * some different calling convention than our normal 32-bit tile code.
@@ -177,6 +157,10 @@ long tile_compat_sys_msgrcv(int msqid,
177/* Pass full 64-bit values through ptrace. */ 157/* Pass full 64-bit values through ptrace. */
178#define compat_sys_ptrace tile_compat_sys_ptrace 158#define compat_sys_ptrace tile_compat_sys_ptrace
179 159
160/*
161 * Note that we can't include <linux/unistd.h> here since the header
162 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
163 */
180void *compat_sys_call_table[__NR_syscalls] = { 164void *compat_sys_call_table[__NR_syscalls] = {
181 [0 ... __NR_syscalls-1] = sys_ni_syscall, 165 [0 ... __NR_syscalls-1] = sys_ni_syscall,
182#include <asm/unistd.h> 166#include <asm/unistd.h>
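
The new comment above points at the asm-generic/unistd.h convention: if __SYSCALL(nr, call) is defined when <asm/unistd.h> is included, the header expands to one entry per syscall instead of the usual plain __NR_xxx definitions, and the designated initializer fills every slot with sys_ni_syscall first so unimplemented numbers fall through safely. A sketch of the generic pattern (the actual __SYSCALL definition sits just above this hunk and is not shown in the diff, so treat this as illustrative):

    #define __SYSCALL(nr, call) [nr] = (call),

    void *compat_sys_call_table[__NR_syscalls] = {
            [0 ... __NR_syscalls-1] = sys_ni_syscall, /* default every slot */
    #include <asm/unistd.h> /* expands to one [nr] = (call), per syscall */
    };
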
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 9fa4ba8ed5f4..d5efb215dd5f 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -32,13 +32,14 @@
32#include <asm/processor.h> 32#include <asm/processor.h>
33#include <asm/ucontext.h> 33#include <asm/ucontext.h>
34#include <asm/sigframe.h> 34#include <asm/sigframe.h>
35#include <asm/syscalls.h>
35#include <arch/interrupts.h> 36#include <arch/interrupts.h>
36 37
37struct compat_sigaction { 38struct compat_sigaction {
38 compat_uptr_t sa_handler; 39 compat_uptr_t sa_handler;
39 compat_ulong_t sa_flags; 40 compat_ulong_t sa_flags;
40 compat_uptr_t sa_restorer; 41 compat_uptr_t sa_restorer;
41 sigset_t sa_mask; /* mask last for extensibility */ 42 sigset_t sa_mask __packed;
42}; 43};
43 44
44struct compat_sigaltstack { 45struct compat_sigaltstack {
@@ -170,7 +171,7 @@ long compat_sys_rt_sigqueueinfo(int pid, int sig,
170 if (copy_siginfo_from_user32(&info, uinfo)) 171 if (copy_siginfo_from_user32(&info, uinfo))
171 return -EFAULT; 172 return -EFAULT;
172 set_fs(KERNEL_DS); 173 set_fs(KERNEL_DS);
173 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info); 174 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *)&info);
174 set_fs(old_fs); 175 set_fs(old_fs);
175 return ret; 176 return ret;
176} 177}
@@ -274,7 +275,8 @@ long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
274 } 275 }
275 seg = get_fs(); 276 seg = get_fs();
276 set_fs(KERNEL_DS); 277 set_fs(KERNEL_DS);
277 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, 278 ret = do_sigaltstack(uss_ptr ? (stack_t __user __force *)&uss : NULL,
279 (stack_t __user __force *)&uoss,
278 (unsigned long)compat_ptr(regs->sp)); 280 (unsigned long)compat_ptr(regs->sp));
279 set_fs(seg); 281 set_fs(seg);
280 if (ret >= 0 && uoss_ptr) { 282 if (ret >= 0 && uoss_ptr) {
@@ -336,7 +338,7 @@ static inline void __user *compat_get_sigframe(struct k_sigaction *ka,
336 * will die with SIGSEGV. 338 * will die with SIGSEGV.
337 */ 339 */
338 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) 340 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
339 return (void __user *) -1L; 341 return (void __user __force *)-1UL;
340 342
341 /* This is the X/Open sanctioned signal stack switching. */ 343 /* This is the X/Open sanctioned signal stack switching. */
342 if (ka->sa.sa_flags & SA_ONSTACK) { 344 if (ka->sa.sa_flags & SA_ONSTACK) {
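
Several hunks above add __force to casts between address spaces: the set_fs(KERNEL_DS) pattern in compat.c and compat_signal.c, and the -1 error sentinel in compat_get_sigframe(). Under sparse these annotations are real attributes; in normal builds they compile away. Abridged from include/linux/compiler.h:

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force)) /* cast may strip attributes */
    #else
    # define __user
    # define __force
    #endif

With set_fs(KERNEL_DS) in effect the uaccess routines accept kernel addresses at run time, but sparse still sees a kernel pointer passed where a __user pointer is expected, so the cast has to say (type __force __user *) to mark the conversion as deliberate rather than a bug.
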
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index e44d441e3f3f..2c54fd43a8a0 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -32,7 +32,7 @@ static struct console early_hv_console = {
32}; 32};
33 33
34/* Direct interface for emergencies */ 34/* Direct interface for emergencies */
35struct console *early_console = &early_hv_console; 35static struct console *early_console = &early_hv_console;
36static int early_console_initialized; 36static int early_console_initialized;
37static int early_console_complete; 37static int early_console_complete;
38 38
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 136261f7d7f9..3d01383b1b0e 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -13,9 +13,9 @@
13 */ 13 */
14 14
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <arch/abi.h> 16#include <linux/unistd.h>
17#include <asm/unistd.h>
18#include <asm/irqflags.h> 17#include <asm/irqflags.h>
18#include <arch/abi.h>
19 19
20#ifdef __tilegx__ 20#ifdef __tilegx__
21#define bnzt bnezt 21#define bnzt bnezt
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index ed3e1cb8dcc4..ba7a265d6179 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -75,13 +75,13 @@ void machine_crash_shutdown(struct pt_regs *regs)
75int machine_kexec_prepare(struct kimage *image) 75int machine_kexec_prepare(struct kimage *image)
76{ 76{
77 if (num_online_cpus() > 1) { 77 if (num_online_cpus() > 1) {
78 printk(KERN_WARNING "%s: detected attempt to kexec " 78 pr_warning("%s: detected attempt to kexec "
79 "with num_online_cpus() > 1\n", 79 "with num_online_cpus() > 1\n",
80 __func__); 80 __func__);
81 return -ENOSYS; 81 return -ENOSYS;
82 } 82 }
83 if (image->type != KEXEC_TYPE_DEFAULT) { 83 if (image->type != KEXEC_TYPE_DEFAULT) {
84 printk(KERN_WARNING "%s: detected attempt to kexec " 84 pr_warning("%s: detected attempt to kexec "
85 "with unsupported type: %d\n", 85 "with unsupported type: %d\n",
86 __func__, 86 __func__,
87 image->type); 87 image->type);
@@ -124,22 +124,13 @@ static unsigned char *kexec_bn2cl(void *pg)
124 return 0; 124 return 0;
125 125
126 /* 126 /*
127 * If we get a checksum mismatch, it's possible that this is 127 * If we get a checksum mismatch, warn with the checksum
128 * just a false positive, but relatively unlikely. We dump 128 * so we can diagnose better.
129 * out the contents of the section so we can diagnose better.
130 */ 129 */
131 csum = ip_compute_csum(pg, bhdrp->b_size); 130 csum = ip_compute_csum(pg, bhdrp->b_size);
132 if (csum != 0) { 131 if (csum != 0) {
133 int i; 132 pr_warning("%s: bad checksum %#x (size %d)\n",
134 unsigned char *p = pg; 133 __func__, csum, bhdrp->b_size);
135 int nbytes = min((Elf32_Word)1000, bhdrp->b_size);
136 printk(KERN_INFO "%s: bad checksum %#x\n", __func__, csum);
137 printk(KERN_INFO "bytes (%d):", bhdrp->b_size);
138 for (i = 0; i < nbytes; ++i)
139 printk(" %02x", p[i]);
140 if (bhdrp->b_size != nbytes)
141 printk(" ...");
142 printk("\n");
143 return 0; 134 return 0;
144 } 135 }
145 136
@@ -156,7 +147,7 @@ static unsigned char *kexec_bn2cl(void *pg)
156 if ((unsigned char *) (nhdrp + 1) > 147 if ((unsigned char *) (nhdrp + 1) >
157 ((unsigned char *) pg) + bhdrp->b_size) { 148 ((unsigned char *) pg) + bhdrp->b_size) {
158 149
159 printk(KERN_INFO "%s: out of bounds\n", __func__); 150 pr_info("%s: out of bounds\n", __func__);
160 return 0; 151 return 0;
161 } 152 }
162 } 153 }
@@ -167,7 +158,7 @@ static unsigned char *kexec_bn2cl(void *pg)
167 while (*desc != '\0') { 158 while (*desc != '\0') {
168 desc++; 159 desc++;
169 if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) { 160 if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
170 printk(KERN_INFO "%s: ran off end of page\n", 161 pr_info("%s: ran off end of page\n",
171 __func__); 162 __func__);
172 return 0; 163 return 0;
173 } 164 }
@@ -202,23 +193,20 @@ static void kexec_find_and_set_command_line(struct kimage *image)
202 } 193 }
203 194
204 if (command_line != 0) { 195 if (command_line != 0) {
205 printk(KERN_INFO "setting new command line to \"%s\"\n", 196 pr_info("setting new command line to \"%s\"\n",
206 command_line); 197 command_line);
207 198
208 hverr = hv_set_command_line( 199 hverr = hv_set_command_line(
209 (HV_VirtAddr) command_line, strlen(command_line)); 200 (HV_VirtAddr) command_line, strlen(command_line));
210 kunmap_atomic(command_line, KM_USER0); 201 kunmap_atomic(command_line, KM_USER0);
211 } else { 202 } else {
212 printk(KERN_INFO "%s: no command line found; making empty\n", 203 pr_info("%s: no command line found; making empty\n",
213 __func__); 204 __func__);
214 hverr = hv_set_command_line((HV_VirtAddr) command_line, 0); 205 hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
215 } 206 }
216 if (hverr) { 207 if (hverr)
217 printk(KERN_WARNING 208 pr_warning("%s: hv_set_command_line returned error: %d\n",
218 "%s: call to hv_set_command_line returned error: %d\n", 209 __func__, hverr);
219 __func__, hverr);
220
221 }
222} 210}
223 211
224/* 212/*
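
The printk(KERN_XXX ...) to pr_xxx(...) conversion seen here recurs through the rest of the diff. The pr_* macros are one-line wrappers that prepend the log level and an optional per-file pr_fmt() prefix, so these hunks are behavior-preserving. Roughly, from include/linux/kernel.h of this era:

    #ifndef pr_fmt
    #define pr_fmt(fmt) fmt
    #endif

    #define pr_err(fmt, ...) \
            printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
    #define pr_warning(fmt, ...) \
            printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
    #define pr_info(fmt, ...) \
            printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
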
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index f991f5285d8a..6d23ed271d10 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -18,13 +18,14 @@
18#include <linux/ptrace.h> 18#include <linux/ptrace.h>
19#include <asm/hv_driver.h> 19#include <asm/hv_driver.h>
20#include <asm/irq_regs.h> 20#include <asm/irq_regs.h>
21#include <asm/traps.h>
21#include <hv/hypervisor.h> 22#include <hv/hypervisor.h>
22#include <arch/interrupts.h> 23#include <arch/interrupts.h>
23 24
24/* All messages are stored here */ 25/* All messages are stored here */
25static DEFINE_PER_CPU(HV_MsgState, msg_state); 26static DEFINE_PER_CPU(HV_MsgState, msg_state);
26 27
27void __cpuinit init_messaging() 28void __cpuinit init_messaging(void)
28{ 29{
29 /* Allocate storage for messages in kernel space */ 30 /* Allocate storage for messages in kernel space */
30 HV_MsgState *state = &__get_cpu_var(msg_state); 31 HV_MsgState *state = &__get_cpu_var(msg_state);
@@ -58,7 +59,7 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
58 { 59 {
59 long sp = stack_pointer - (long) current_thread_info(); 60 long sp = stack_pointer - (long) current_thread_info();
60 if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { 61 if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
61 printk(KERN_EMERG "hv_message_intr: " 62 pr_emerg("hv_message_intr: "
62 "stack overflow: %ld\n", 63 "stack overflow: %ld\n",
63 sp - sizeof(struct thread_info)); 64 sp - sizeof(struct thread_info));
64 dump_stack(); 65 dump_stack();
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index ed3e91161f88..e2ab82b7c7e7 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -107,7 +107,7 @@ int apply_relocate(Elf_Shdr *sechdrs,
107 unsigned int relsec, 107 unsigned int relsec,
108 struct module *me) 108 struct module *me)
109{ 109{
110 printk(KERN_ERR "module %s: .rel relocation unsupported\n", me->name); 110 pr_err("module %s: .rel relocation unsupported\n", me->name);
111 return -ENOEXEC; 111 return -ENOEXEC;
112} 112}
113 113
@@ -119,8 +119,8 @@ int apply_relocate(Elf_Shdr *sechdrs,
119static int validate_hw2_last(long value, struct module *me) 119static int validate_hw2_last(long value, struct module *me)
120{ 120{
121 if (((value << 16) >> 16) != value) { 121 if (((value << 16) >> 16) != value) {
122 printk("module %s: Out of range HW2_LAST value %#lx\n", 122 pr_warning("module %s: Out of range HW2_LAST value %#lx\n",
123 me->name, value); 123 me->name, value);
124 return 0; 124 return 0;
125 } 125 }
126 return 1; 126 return 1;
@@ -223,10 +223,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
223 value -= (unsigned long) location; /* pc-relative */ 223 value -= (unsigned long) location; /* pc-relative */
224 value = (long) value >> 3; /* count by instrs */ 224 value = (long) value >> 3; /* count by instrs */
225 if (!validate_jumpoff(value)) { 225 if (!validate_jumpoff(value)) {
226 printk("module %s: Out of range jump to" 226 pr_warning("module %s: Out of range jump to"
227 " %#llx at %#llx (%p)\n", me->name, 227 " %#llx at %#llx (%p)\n", me->name,
228 sym->st_value + rel[i].r_addend, 228 sym->st_value + rel[i].r_addend,
229 rel[i].r_offset, location); 229 rel[i].r_offset, location);
230 return -ENOEXEC; 230 return -ENOEXEC;
231 } 231 }
232 MUNGE(create_JumpOff_X1); 232 MUNGE(create_JumpOff_X1);
@@ -236,7 +236,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
236#undef MUNGE 236#undef MUNGE
237 237
238 default: 238 default:
239 printk(KERN_ERR "module %s: Unknown relocation: %d\n", 239 pr_err("module %s: Unknown relocation: %d\n",
240 me->name, (int) ELF_R_TYPE(rel[i].r_info)); 240 me->name, (int) ELF_R_TYPE(rel[i].r_info));
241 return -ENOEXEC; 241 return -ENOEXEC;
242 } 242 }
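
The validate_hw2_last() test above, ((value << 16) >> 16) != value, is the usual shift-based range check: shifting a signed long up 16 bits and back down sign-extends from the narrower width, so the round trip changes the value exactly when the top 17 bits are not all copies of the field's sign bit, i.e. when the HW2_LAST relocation would be truncated. (It relies on arithmetic right shift and wrapping left shift of signed values, which kernel compiler flags guarantee.) A standalone illustration, assuming a 64-bit long:

    #include <stdbool.h>
    #include <stdio.h>

    /* True iff value is representable in 64 - 16 = 48 signed bits. */
    static bool fits_48_signed(long value)
    {
            return ((value << 16) >> 16) == value;
    }

    int main(void)
    {
            printf("%d\n", fits_48_signed(0x00007fffffffffffL)); /* 1 */
            printf("%d\n", fits_48_signed(0x0000800000000000L)); /* 0 */
            printf("%d\n", fits_48_signed(-1L));                 /* 1 */
            return 0;
    }
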
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index c70ff14a48e4..ed590ad0acdc 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -24,9 +24,14 @@
24#include <linux/compat.h> 24#include <linux/compat.h>
25#include <linux/hardirq.h> 25#include <linux/hardirq.h>
26#include <linux/syscalls.h> 26#include <linux/syscalls.h>
27#include <linux/kernel.h>
27#include <asm/system.h> 28#include <asm/system.h>
28#include <asm/stack.h> 29#include <asm/stack.h>
29#include <asm/homecache.h> 30#include <asm/homecache.h>
31#include <asm/syscalls.h>
32#ifdef CONFIG_HARDWALL
33#include <asm/hardwall.h>
34#endif
30#include <arch/chip.h> 35#include <arch/chip.h>
31#include <arch/abi.h> 36#include <arch/abi.h>
32 37
@@ -43,7 +48,7 @@ static int __init idle_setup(char *str)
43 return -EINVAL; 48 return -EINVAL;
44 49
45 if (!strcmp(str, "poll")) { 50 if (!strcmp(str, "poll")) {
46 printk("using polling idle threads.\n"); 51 pr_info("using polling idle threads.\n");
47 no_idle_nap = 1; 52 no_idle_nap = 1;
48 } else if (!strcmp(str, "halt")) 53 } else if (!strcmp(str, "halt"))
49 no_idle_nap = 0; 54 no_idle_nap = 0;
@@ -62,7 +67,6 @@ early_param("idle", idle_setup);
62 */ 67 */
63void cpu_idle(void) 68void cpu_idle(void)
64{ 69{
65 extern void _cpu_idle(void);
66 int cpu = smp_processor_id(); 70 int cpu = smp_processor_id();
67 71
68 72
@@ -108,7 +112,7 @@ void cpu_idle(void)
108struct thread_info *alloc_thread_info(struct task_struct *task) 112struct thread_info *alloc_thread_info(struct task_struct *task)
109{ 113{
110 struct page *page; 114 struct page *page;
111 int flags = GFP_KERNEL; 115 gfp_t flags = GFP_KERNEL;
112 116
113#ifdef CONFIG_DEBUG_STACK_USAGE 117#ifdef CONFIG_DEBUG_STACK_USAGE
114 flags |= __GFP_ZERO; 118 flags |= __GFP_ZERO;
@@ -116,7 +120,7 @@ struct thread_info *alloc_thread_info(struct task_struct *task)
116 120
117 page = alloc_pages(flags, THREAD_SIZE_ORDER); 121 page = alloc_pages(flags, THREAD_SIZE_ORDER);
118 if (!page) 122 if (!page)
119 return 0; 123 return NULL;
120 124
121 return (struct thread_info *)page_address(page); 125 return (struct thread_info *)page_address(page);
122} 126}
@@ -129,6 +133,18 @@ void free_thread_info(struct thread_info *info)
129{ 133{
130 struct single_step_state *step_state = info->step_state; 134 struct single_step_state *step_state = info->step_state;
131 135
136#ifdef CONFIG_HARDWALL
137 /*
138 * We free a thread_info from the context of the task that has
139 * been scheduled next, so the original task is already dead.
140 * Calling deactivate here just frees up the data structures.
141 * If the task we're freeing held the last reference to a
142 * hardwall fd, it would have been released prior to this point
143 * anyway via exit_files(), and "hardwall" would be NULL by now.
144 */
145 if (info->task->thread.hardwall)
146 hardwall_deactivate(info->task);
147#endif
132 148
133 if (step_state) { 149 if (step_state) {
134 150
@@ -154,8 +170,6 @@ void free_thread_info(struct thread_info *info)
154 170
155static void save_arch_state(struct thread_struct *t); 171static void save_arch_state(struct thread_struct *t);
156 172
157extern void ret_from_fork(void);
158
159int copy_thread(unsigned long clone_flags, unsigned long sp, 173int copy_thread(unsigned long clone_flags, unsigned long sp,
160 unsigned long stack_size, 174 unsigned long stack_size,
161 struct task_struct *p, struct pt_regs *regs) 175 struct task_struct *p, struct pt_regs *regs)
@@ -235,6 +249,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
235 p->thread.proc_status = 0; 249 p->thread.proc_status = 0;
236#endif 250#endif
237 251
252#ifdef CONFIG_HARDWALL
253 /* New thread does not own any networks. */
254 p->thread.hardwall = NULL;
255#endif
238 256
239 257
240 /* 258 /*
@@ -257,7 +275,7 @@ struct task_struct *validate_current(void)
257 if (unlikely((unsigned long)tsk < PAGE_OFFSET || 275 if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
258 (void *)tsk > high_memory || 276 (void *)tsk > high_memory ||
259 ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) { 277 ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
260 printk("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer); 278 pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
261 tsk = &corrupt; 279 tsk = &corrupt;
262 } 280 }
263 return tsk; 281 return tsk;
@@ -447,10 +465,6 @@ void _prepare_arch_switch(struct task_struct *next)
447} 465}
448 466
449 467
450extern struct task_struct *__switch_to(struct task_struct *prev,
451 struct task_struct *next,
452 unsigned long new_system_save_1_0);
453
454struct task_struct *__sched _switch_to(struct task_struct *prev, 468struct task_struct *__sched _switch_to(struct task_struct *prev,
455 struct task_struct *next) 469 struct task_struct *next)
456{ 470{
@@ -486,6 +500,15 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
486 } 500 }
487#endif 501#endif
488 502
503#ifdef CONFIG_HARDWALL
504 /* Enable or disable access to the network registers appropriately. */
505 if (prev->thread.hardwall != NULL) {
506 if (next->thread.hardwall == NULL)
507 restrict_network_mpls();
508 } else if (next->thread.hardwall != NULL) {
509 grant_network_mpls();
510 }
511#endif
489 512
490 /* 513 /*
491 * Switch kernel SP, PC, and callee-saved registers. 514 * Switch kernel SP, PC, and callee-saved registers.
@@ -496,14 +519,14 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
496 return __switch_to(prev, next, next_current_ksp0(next)); 519 return __switch_to(prev, next, next_current_ksp0(next));
497} 520}
498 521
499int _sys_fork(struct pt_regs *regs) 522long _sys_fork(struct pt_regs *regs)
500{ 523{
501 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); 524 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
502} 525}
503 526
504int _sys_clone(unsigned long clone_flags, unsigned long newsp, 527long _sys_clone(unsigned long clone_flags, unsigned long newsp,
505 void __user *parent_tidptr, void __user *child_tidptr, 528 void __user *parent_tidptr, void __user *child_tidptr,
506 struct pt_regs *regs) 529 struct pt_regs *regs)
507{ 530{
508 if (!newsp) 531 if (!newsp)
509 newsp = regs->sp; 532 newsp = regs->sp;
@@ -511,7 +534,7 @@ int _sys_clone(unsigned long clone_flags, unsigned long newsp,
511 parent_tidptr, child_tidptr); 534 parent_tidptr, child_tidptr);
512} 535}
513 536
514int _sys_vfork(struct pt_regs *regs) 537long _sys_vfork(struct pt_regs *regs)
515{ 538{
516 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, 539 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
517 regs, 0, NULL, NULL); 540 regs, 0, NULL, NULL);
@@ -520,10 +543,10 @@ int _sys_vfork(struct pt_regs *regs)
520/* 543/*
521 * sys_execve() executes a new program. 544 * sys_execve() executes a new program.
522 */ 545 */
523int _sys_execve(char __user *path, char __user *__user *argv, 546long _sys_execve(char __user *path, char __user *__user *argv,
524 char __user *__user *envp, struct pt_regs *regs) 547 char __user *__user *envp, struct pt_regs *regs)
525{ 548{
526 int error; 549 long error;
527 char *filename; 550 char *filename;
528 551
529 filename = getname(path); 552 filename = getname(path);
@@ -537,10 +560,10 @@ out:
537} 560}
538 561
539#ifdef CONFIG_COMPAT 562#ifdef CONFIG_COMPAT
540int _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, 563long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
541 compat_uptr_t __user *envp, struct pt_regs *regs) 564 compat_uptr_t __user *envp, struct pt_regs *regs)
542{ 565{
543 int error; 566 long error;
544 char *filename; 567 char *filename;
545 568
546 filename = getname(path); 569 filename = getname(path);
@@ -616,31 +639,32 @@ void exit_thread(void)
616 /* Nothing */ 639 /* Nothing */
617} 640}
618 641
619#ifdef __tilegx__
620# define LINECOUNT 3
621# define EXTRA_NL "\n"
622#else
623# define LINECOUNT 4
624# define EXTRA_NL ""
625#endif
626
627void show_regs(struct pt_regs *regs) 642void show_regs(struct pt_regs *regs)
628{ 643{
629 struct task_struct *tsk = validate_current(); 644 struct task_struct *tsk = validate_current();
630 int i, linebreak; 645 int i;
631 printk("\n"); 646
632 printk(" Pid: %d, comm: %20s, CPU: %d\n", 647 pr_err("\n");
648 pr_err(" Pid: %d, comm: %20s, CPU: %d\n",
633 tsk->pid, tsk->comm, smp_processor_id()); 649 tsk->pid, tsk->comm, smp_processor_id());
634 for (i = linebreak = 0; i < 53; ++i) { 650#ifdef __tilegx__
635 printk(" r%-2d: "REGFMT, i, regs->regs[i]); 651 for (i = 0; i < 51; i += 3)
636 if (++linebreak == LINECOUNT) { 652 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
637 linebreak = 0; 653 i, regs->regs[i], i+1, regs->regs[i+1],
638 printk("\n"); 654 i+2, regs->regs[i+2]);
639 } 655 pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n",
640 } 656 regs->regs[51], regs->regs[52], regs->tp);
641 printk(" tp : "REGFMT EXTRA_NL " sp : "REGFMT" lr : "REGFMT"\n", 657 pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
642 regs->tp, regs->sp, regs->lr); 658#else
643 printk(" pc : "REGFMT" ex1: %ld faultnum: %ld\n", 659 for (i = 0; i < 52; i += 3)
660 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
661 " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
662 i, regs->regs[i], i+1, regs->regs[i+1],
663 i+2, regs->regs[i+2], i+3, regs->regs[i+3]);
664 pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
665 regs->regs[52], regs->tp, regs->sp, regs->lr);
666#endif
667 pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
644 regs->pc, regs->ex1, regs->faultnum); 668 regs->pc, regs->ex1, regs->faultnum);
645 669
646 dump_stack_regs(regs); 670 dump_stack_regs(regs);
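
The alloc_thread_info() fix above (int to gfp_t, and returning NULL rather than 0) is another sparse-driven change: gfp_t is a __bitwise type, so mixing it with plain integers is flagged even though both are an unsigned int under the hood. Abridged from include/linux/types.h and linux/gfp.h of this era (the flag value is for illustration):

    typedef unsigned __bitwise__ gfp_t;

    #define __GFP_ZERO ((__force gfp_t)0x8000u) /* return zeroed pages */

    gfp_t flags = GFP_KERNEL; /* ok: types match */
    int wrong   = GFP_KERNEL; /* sparse: incorrect type in initializer */
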
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 468054928e7d..e5701d1a52d7 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -19,6 +19,7 @@
19#include <linux/kprobes.h> 19#include <linux/kprobes.h>
20#include <linux/compat.h> 20#include <linux/compat.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <asm/traps.h>
22 23
23void user_enable_single_step(struct task_struct *child) 24void user_enable_single_step(struct task_struct *child)
24{ 25{
@@ -76,7 +77,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
76 if (task_thread_info(child)->status & TS_COMPAT) 77 if (task_thread_info(child)->status & TS_COMPAT)
77 addr = (u32)addr; 78 addr = (u32)addr;
78#endif 79#endif
79 datap = (unsigned long __user *)data; 80 datap = (unsigned long __user __force *)data;
80 81
81 switch (request) { 82 switch (request) {
82 83
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index a4523923605e..acd86d20beba 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -15,6 +15,7 @@
15#include <linux/stddef.h> 15#include <linux/stddef.h>
16#include <linux/reboot.h> 16#include <linux/reboot.h>
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/pm.h>
18#include <asm/page.h> 19#include <asm/page.h>
19#include <asm/setup.h> 20#include <asm/setup.h>
20#include <hv/hypervisor.h> 21#include <hv/hypervisor.h>
@@ -46,7 +47,5 @@ void machine_restart(char *cmd)
46 hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd); 47 hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
47} 48}
48 49
49/* 50/* No interesting distinction to be made here. */
50 * Power off function, if any 51void (*pm_power_off)(void) = NULL;
51 */
52void (*pm_power_off)(void) = machine_power_off;
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 934136b61ceb..4dd21c1e6d5e 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -20,6 +20,7 @@
20#include <linux/node.h> 20#include <linux/node.h>
21#include <linux/cpu.h> 21#include <linux/cpu.h>
22#include <linux/ioport.h> 22#include <linux/ioport.h>
23#include <linux/irq.h>
23#include <linux/kexec.h> 24#include <linux/kexec.h>
24#include <linux/pci.h> 25#include <linux/pci.h>
25#include <linux/initrd.h> 26#include <linux/initrd.h>
@@ -109,7 +110,7 @@ static int __init setup_maxmem(char *str)
109 110
110 maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) << 111 maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
111 (HPAGE_SHIFT - PAGE_SHIFT); 112 (HPAGE_SHIFT - PAGE_SHIFT);
112 printk("Forcing RAM used to no more than %dMB\n", 113 pr_info("Forcing RAM used to no more than %dMB\n",
113 maxmem_pfn >> (20 - PAGE_SHIFT)); 114 maxmem_pfn >> (20 - PAGE_SHIFT));
114 return 0; 115 return 0;
115} 116}
@@ -127,7 +128,7 @@ static int __init setup_maxnodemem(char *str)
127 128
128 maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) << 129 maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
129 (HPAGE_SHIFT - PAGE_SHIFT); 130 (HPAGE_SHIFT - PAGE_SHIFT);
130 printk("Forcing RAM used on node %ld to no more than %dMB\n", 131 pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
131 node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); 132 node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
132 return 0; 133 return 0;
133} 134}
@@ -140,7 +141,7 @@ static int __init setup_isolnodes(char *str)
140 return -EINVAL; 141 return -EINVAL;
141 142
142 nodelist_scnprintf(buf, sizeof(buf), isolnodes); 143 nodelist_scnprintf(buf, sizeof(buf), isolnodes);
143 printk("Set isolnodes value to '%s'\n", buf); 144 pr_info("Set isolnodes value to '%s'\n", buf);
144 return 0; 145 return 0;
145} 146}
146early_param("isolnodes", setup_isolnodes); 147early_param("isolnodes", setup_isolnodes);
@@ -155,7 +156,7 @@ static int __init setup_pci_reserve(char* str)
155 return -EINVAL; 156 return -EINVAL;
156 157
157 pci_reserve_mb = mb; 158 pci_reserve_mb = mb;
158 printk("Reserving %dMB for PCIE root complex mappings\n", 159 pr_info("Reserving %dMB for PCIE root complex mappings\n",
159 pci_reserve_mb); 160 pci_reserve_mb);
160 return 0; 161 return 0;
161} 162}
@@ -269,7 +270,7 @@ static void *__init setup_pa_va_mapping(void)
269 * This is up to 4 mappings for lowmem, one mapping per memory 270 * This is up to 4 mappings for lowmem, one mapping per memory
270 * controller, plus one for our text segment. 271 * controller, plus one for our text segment.
271 */ 272 */
272void __cpuinit store_permanent_mappings(void) 273static void __cpuinit store_permanent_mappings(void)
273{ 274{
274 int i; 275 int i;
275 276
@@ -320,14 +321,14 @@ static void __init setup_memory(void)
320 break; 321 break;
321#ifdef CONFIG_FLATMEM 322#ifdef CONFIG_FLATMEM
322 if (i > 0) { 323 if (i > 0) {
323 printk("Can't use discontiguous PAs: %#llx..%#llx\n", 324 pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
324 range.size, range.start + range.size); 325 range.size, range.start + range.size);
325 continue; 326 continue;
326 } 327 }
327#endif 328#endif
328#ifndef __tilegx__ 329#ifndef __tilegx__
329 if ((unsigned long)range.start) { 330 if ((unsigned long)range.start) {
330 printk("Range not at 4GB multiple: %#llx..%#llx\n", 331 pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
331 range.start, range.start + range.size); 332 range.start, range.start + range.size);
332 continue; 333 continue;
333 } 334 }
@@ -335,51 +336,51 @@ static void __init setup_memory(void)
335 if ((range.start & (HPAGE_SIZE-1)) != 0 || 336 if ((range.start & (HPAGE_SIZE-1)) != 0 ||
336 (range.size & (HPAGE_SIZE-1)) != 0) { 337 (range.size & (HPAGE_SIZE-1)) != 0) {
337 unsigned long long start_pa = range.start; 338 unsigned long long start_pa = range.start;
338 unsigned long long size = range.size; 339 unsigned long long orig_size = range.size;
339 range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK; 340 range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
340 range.size -= (range.start - start_pa); 341 range.size -= (range.start - start_pa);
341 range.size &= HPAGE_MASK; 342 range.size &= HPAGE_MASK;
342 printk("Range not hugepage-aligned: %#llx..%#llx:" 343 pr_err("Range not hugepage-aligned: %#llx..%#llx:"
343 " now %#llx-%#llx\n", 344 " now %#llx-%#llx\n",
344 start_pa, start_pa + size, 345 start_pa, start_pa + orig_size,
345 range.start, range.start + range.size); 346 range.start, range.start + range.size);
346 } 347 }
347 highbits = __pa_to_highbits(range.start); 348 highbits = __pa_to_highbits(range.start);
348 if (highbits >= NR_PA_HIGHBIT_VALUES) { 349 if (highbits >= NR_PA_HIGHBIT_VALUES) {
349 printk("PA high bits too high: %#llx..%#llx\n", 350 pr_err("PA high bits too high: %#llx..%#llx\n",
350 range.start, range.start + range.size); 351 range.start, range.start + range.size);
351 continue; 352 continue;
352 } 353 }
353 if (highbits_seen[highbits]) { 354 if (highbits_seen[highbits]) {
354 printk("Range overlaps in high bits: %#llx..%#llx\n", 355 pr_err("Range overlaps in high bits: %#llx..%#llx\n",
355 range.start, range.start + range.size); 356 range.start, range.start + range.size);
356 continue; 357 continue;
357 } 358 }
358 highbits_seen[highbits] = 1; 359 highbits_seen[highbits] = 1;
359 if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) { 360 if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
360 int size = maxnodemem_pfn[i]; 361 int max_size = maxnodemem_pfn[i];
361 if (size > 0) { 362 if (max_size > 0) {
362 printk("Maxnodemem reduced node %d to" 363 pr_err("Maxnodemem reduced node %d to"
363 " %d pages\n", i, size); 364 " %d pages\n", i, max_size);
364 range.size = (HV_PhysAddr)size << PAGE_SHIFT; 365 range.size = PFN_PHYS(max_size);
365 } else { 366 } else {
366 printk("Maxnodemem disabled node %d\n", i); 367 pr_err("Maxnodemem disabled node %d\n", i);
367 continue; 368 continue;
368 } 369 }
369 } 370 }
370 if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) { 371 if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) {
371 int size = maxmem_pfn - num_physpages; 372 int max_size = maxmem_pfn - num_physpages;
372 if (size > 0) { 373 if (max_size > 0) {
373 printk("Maxmem reduced node %d to %d pages\n", 374 pr_err("Maxmem reduced node %d to %d pages\n",
374 i, size); 375 i, max_size);
375 range.size = (HV_PhysAddr)size << PAGE_SHIFT; 376 range.size = PFN_PHYS(max_size);
376 } else { 377 } else {
377 printk("Maxmem disabled node %d\n", i); 378 pr_err("Maxmem disabled node %d\n", i);
378 continue; 379 continue;
379 } 380 }
380 } 381 }
381 if (i >= MAX_NUMNODES) { 382 if (i >= MAX_NUMNODES) {
382 printk("Too many PA nodes (#%d): %#llx...%#llx\n", 383 pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
383 i, range.size, range.size + range.start); 384 i, range.size, range.size + range.start);
384 continue; 385 continue;
385 } 386 }
@@ -391,7 +392,7 @@ static void __init setup_memory(void)
391#ifndef __tilegx__ 392#ifndef __tilegx__
392 if (((HV_PhysAddr)end << PAGE_SHIFT) != 393 if (((HV_PhysAddr)end << PAGE_SHIFT) !=
393 (range.start + range.size)) { 394 (range.start + range.size)) {
394 printk("PAs too high to represent: %#llx..%#llx\n", 395 pr_err("PAs too high to represent: %#llx..%#llx\n",
395 range.start, range.start + range.size); 396 range.start, range.start + range.size);
396 continue; 397 continue;
397 } 398 }
@@ -412,7 +413,7 @@ static void __init setup_memory(void)
412 NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT); 413 NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
413 if (end < pci_reserve_end_pfn + percpu_pages) { 414 if (end < pci_reserve_end_pfn + percpu_pages) {
414 end = pci_reserve_start_pfn; 415 end = pci_reserve_start_pfn;
415 printk("PCI mapping region reduced node %d to" 416 pr_err("PCI mapping region reduced node %d to"
416 " %ld pages\n", i, end - start); 417 " %ld pages\n", i, end - start);
417 } 418 }
418 } 419 }
@@ -456,11 +457,11 @@ static void __init setup_memory(void)
456 } 457 }
457 } 458 }
458 num_physpages -= dropped_pages; 459 num_physpages -= dropped_pages;
459 printk(KERN_WARNING "Only using %ldMB memory;" 460 pr_warning("Only using %ldMB memory;"
460 " ignoring %ldMB.\n", 461 " ignoring %ldMB.\n",
461 num_physpages >> (20 - PAGE_SHIFT), 462 num_physpages >> (20 - PAGE_SHIFT),
462 dropped_pages >> (20 - PAGE_SHIFT)); 463 dropped_pages >> (20 - PAGE_SHIFT));
463 printk(KERN_WARNING "Consider using a larger page size.\n"); 464 pr_warning("Consider using a larger page size.\n");
464 } 465 }
465#endif 466#endif
466 467
@@ -478,9 +479,9 @@ static void __init setup_memory(void)
478 MAXMEM_PFN : mappable_physpages; 479 MAXMEM_PFN : mappable_physpages;
479 highmem_pages = (long) (num_physpages - lowmem_pages); 480 highmem_pages = (long) (num_physpages - lowmem_pages);
480 481
481 printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", 482 pr_notice("%ldMB HIGHMEM available.\n",
482 pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); 483 pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
483 printk(KERN_NOTICE "%ldMB LOWMEM available.\n", 484 pr_notice("%ldMB LOWMEM available.\n",
484 pages_to_mb(lowmem_pages)); 485 pages_to_mb(lowmem_pages));
485#else 486#else
486 /* Set max_low_pfn based on what node 0 can directly address. */ 487 /* Set max_low_pfn based on what node 0 can directly address. */
@@ -488,15 +489,15 @@ static void __init setup_memory(void)
488 489
489#ifndef __tilegx__ 490#ifndef __tilegx__
490 if (node_end_pfn[0] > MAXMEM_PFN) { 491 if (node_end_pfn[0] > MAXMEM_PFN) {
491 printk(KERN_WARNING "Only using %ldMB LOWMEM.\n", 492 pr_warning("Only using %ldMB LOWMEM.\n",
492 MAXMEM>>20); 493 MAXMEM>>20);
493 printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); 494 pr_warning("Use a HIGHMEM enabled kernel.\n");
494 max_low_pfn = MAXMEM_PFN; 495 max_low_pfn = MAXMEM_PFN;
495 max_pfn = MAXMEM_PFN; 496 max_pfn = MAXMEM_PFN;
496 num_physpages = MAXMEM_PFN; 497 num_physpages = MAXMEM_PFN;
497 node_end_pfn[0] = MAXMEM_PFN; 498 node_end_pfn[0] = MAXMEM_PFN;
498 } else { 499 } else {
499 printk(KERN_NOTICE "%ldMB memory available.\n", 500 pr_notice("%ldMB memory available.\n",
500 pages_to_mb(node_end_pfn[0])); 501 pages_to_mb(node_end_pfn[0]));
501 } 502 }
502 for (i = 1; i < MAX_NUMNODES; ++i) { 503 for (i = 1; i < MAX_NUMNODES; ++i) {
@@ -512,7 +513,7 @@ static void __init setup_memory(void)
512 if (pages) 513 if (pages)
513 high_memory = pfn_to_kaddr(node_end_pfn[i]); 514 high_memory = pfn_to_kaddr(node_end_pfn[i]);
514 } 515 }
515 printk(KERN_NOTICE "%ldMB memory available.\n", 516 pr_notice("%ldMB memory available.\n",
516 pages_to_mb(lowmem_pages)); 517 pages_to_mb(lowmem_pages));
517#endif 518#endif
518#endif 519#endif
@@ -744,7 +745,7 @@ static void __init setup_numa_mapping(void)
744 nodes_andnot(default_nodes, node_online_map, isolnodes); 745 nodes_andnot(default_nodes, node_online_map, isolnodes);
745 if (nodes_empty(default_nodes)) { 746 if (nodes_empty(default_nodes)) {
746 BUG_ON(!node_isset(0, node_online_map)); 747 BUG_ON(!node_isset(0, node_online_map));
747 printk("Forcing NUMA node zero available as a default node\n"); 748 pr_err("Forcing NUMA node zero available as a default node\n");
748 node_set(0, default_nodes); 749 node_set(0, default_nodes);
749 } 750 }
750 751
@@ -822,13 +823,13 @@ static void __init setup_numa_mapping(void)
822 printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y); 823 printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
823 for (x = 0; x < smp_width; ++x, ++cpu) { 824 for (x = 0; x < smp_width; ++x, ++cpu) {
824 if (cpu_to_node(cpu) < 0) { 825 if (cpu_to_node(cpu) < 0) {
825 printk(" -"); 826 pr_cont(" -");
826 cpu_2_node[cpu] = first_node(default_nodes); 827 cpu_2_node[cpu] = first_node(default_nodes);
827 } else { 828 } else {
828 printk(" %d", cpu_to_node(cpu)); 829 pr_cont(" %d", cpu_to_node(cpu));
829 } 830 }
830 } 831 }
831 printk("\n"); 832 pr_cont("\n");
832 } 833 }
833} 834}
834 835
@@ -856,12 +857,17 @@ subsys_initcall(topology_init);
856#endif /* CONFIG_NUMA */ 857#endif /* CONFIG_NUMA */
857 858
858/** 859/**
859 * setup_mpls() - Allow the user-space code to access various SPRs. 860 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
861 * @boot: Is this the boot cpu?
860 * 862 *
861 * Also called from online_secondary(). 863 * Called from setup_arch() on the boot cpu, or online_secondary().
862 */ 864 */
863void __cpuinit setup_mpls(void) 865void __cpuinit setup_cpu(int boot)
864{ 866{
867 /* The boot cpu sets up its permanent mappings much earlier. */
868 if (!boot)
869 store_permanent_mappings();
870
865 /* Allow asynchronous TLB interrupts. */ 871 /* Allow asynchronous TLB interrupts. */
866#if CHIP_HAS_TILE_DMA() 872#if CHIP_HAS_TILE_DMA()
867 raw_local_irq_unmask(INT_DMATLB_MISS); 873 raw_local_irq_unmask(INT_DMATLB_MISS);
@@ -892,6 +898,14 @@ void __cpuinit setup_mpls(void)
892 * as well as the PL 0 interrupt mask. 898 * as well as the PL 0 interrupt mask.
893 */ 899 */
894 __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1); 900 __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
901
902 /* Initialize IRQ support for this cpu. */
903 setup_irq_regs();
904
905#ifdef CONFIG_HARDWALL
906 /* Reset the network state on this cpu. */
907 reset_network_state();
908#endif
895} 909}
896 910
897static int __initdata set_initramfs_file; 911static int __initdata set_initramfs_file;
@@ -922,22 +936,22 @@ static void __init load_hv_initrd(void)
922 fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); 936 fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
923 if (fd == HV_ENOENT) { 937 if (fd == HV_ENOENT) {
924 if (set_initramfs_file) 938 if (set_initramfs_file)
925 printk("No such hvfs initramfs file '%s'\n", 939 pr_warning("No such hvfs initramfs file '%s'\n",
926 initramfs_file); 940 initramfs_file);
927 return; 941 return;
928 } 942 }
929 BUG_ON(fd < 0); 943 BUG_ON(fd < 0);
930 stat = hv_fs_fstat(fd); 944 stat = hv_fs_fstat(fd);
931 BUG_ON(stat.size < 0); 945 BUG_ON(stat.size < 0);
932 if (stat.flags & HV_FS_ISDIR) { 946 if (stat.flags & HV_FS_ISDIR) {
933 printk("Ignoring hvfs file '%s': it's a directory.\n", 947 pr_warning("Ignoring hvfs file '%s': it's a directory.\n",
934 initramfs_file); 948 initramfs_file);
935 return; 949 return;
936 } 950 }
937 initrd = alloc_bootmem_pages(stat.size); 951 initrd = alloc_bootmem_pages(stat.size);
938 rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0); 952 rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
939 if (rc != stat.size) { 953 if (rc != stat.size) {
940 printk("Error reading %d bytes from hvfs file '%s': %d\n", 954 pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
941 stat.size, initramfs_file, rc); 955 stat.size, initramfs_file, rc);
942 free_bootmem((unsigned long) initrd, stat.size); 956 free_bootmem((unsigned long) initrd, stat.size);
943 return; 957 return;
@@ -966,9 +980,9 @@ static void __init validate_hv(void)
966 HV_Topology topology = hv_inquire_topology(); 980 HV_Topology topology = hv_inquire_topology();
967 BUG_ON(topology.coord.x != 0 || topology.coord.y != 0); 981 BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
968 if (topology.width != 1 || topology.height != 1) { 982 if (topology.width != 1 || topology.height != 1) {
969 printk("Warning: booting UP kernel on %dx%d grid;" 983 pr_warning("Warning: booting UP kernel on %dx%d grid;"
970 " will ignore all but first tile.\n", 984 " will ignore all but first tile.\n",
971 topology.width, topology.height); 985 topology.width, topology.height);
972 } 986 }
973#endif 987#endif
974 988
@@ -1004,7 +1018,7 @@ static void __init validate_hv(void)
1004 1018
1005 if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model, 1019 if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
1006 sizeof(chip_model)) < 0) { 1020 sizeof(chip_model)) < 0) {
1007 printk("Warning: HV_CONFSTR_CHIP_MODEL not available\n"); 1021 pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
1008 strlcpy(chip_model, "unknown", sizeof(chip_model)); 1022 strlcpy(chip_model, "unknown", sizeof(chip_model));
1009 } 1023 }
1010} 1024}
@@ -1096,7 +1110,7 @@ static int __init disabled_cpus(char *str)
1096 if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0) 1110 if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
1097 return -EINVAL; 1111 return -EINVAL;
1098 if (cpumask_test_cpu(boot_cpu, &disabled_map)) { 1112 if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
1099 printk("disabled_cpus: can't disable boot cpu %d\n", boot_cpu); 1113 pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
1100 cpumask_clear_cpu(boot_cpu, &disabled_map); 1114 cpumask_clear_cpu(boot_cpu, &disabled_map);
1101 } 1115 }
1102 return 0; 1116 return 0;
@@ -1104,12 +1118,12 @@ static int __init disabled_cpus(char *str)
1104 1118
1105early_param("disabled_cpus", disabled_cpus); 1119early_param("disabled_cpus", disabled_cpus);
1106 1120
1107void __init print_disabled_cpus() 1121void __init print_disabled_cpus(void)
1108{ 1122{
1109 if (!cpumask_empty(&disabled_map)) { 1123 if (!cpumask_empty(&disabled_map)) {
1110 char buf[100]; 1124 char buf[100];
1111 cpulist_scnprintf(buf, sizeof(buf), &disabled_map); 1125 cpulist_scnprintf(buf, sizeof(buf), &disabled_map);
1112 printk(KERN_INFO "CPUs not available for Linux: %s\n", buf); 1126 pr_info("CPUs not available for Linux: %s\n", buf);
1113 } 1127 }
1114} 1128}
1115 1129
@@ -1162,7 +1176,7 @@ static void __init setup_cpu_maps(void)
1162 (HV_VirtAddr) cpumask_bits(&cpu_lotar_map), 1176 (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
1163 sizeof(cpu_lotar_map)); 1177 sizeof(cpu_lotar_map));
1164 if (rc < 0) { 1178 if (rc < 0) {
1165 printk("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n"); 1179 pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
1166 cpu_lotar_map = cpu_possible_map; 1180 cpu_lotar_map = cpu_possible_map;
1167 } 1181 }
1168 1182
@@ -1182,7 +1196,7 @@ static void __init setup_cpu_maps(void)
1182 1196
1183static int __init dataplane(char *str) 1197static int __init dataplane(char *str)
1184{ 1198{
1185 printk("WARNING: dataplane support disabled in this kernel\n"); 1199 pr_warning("WARNING: dataplane support disabled in this kernel\n");
1186 return 0; 1200 return 0;
1187} 1201}
1188 1202
@@ -1200,8 +1214,8 @@ void __init setup_arch(char **cmdline_p)
1200 len = hv_get_command_line((HV_VirtAddr) boot_command_line, 1214 len = hv_get_command_line((HV_VirtAddr) boot_command_line,
1201 COMMAND_LINE_SIZE); 1215 COMMAND_LINE_SIZE);
1202 if (boot_command_line[0]) 1216 if (boot_command_line[0])
1203 printk("WARNING: ignoring dynamic command line \"%s\"\n", 1217 pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
1204 boot_command_line); 1218 boot_command_line);
1205 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); 1219 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
1206#else 1220#else
1207 char *hv_cmdline; 1221 char *hv_cmdline;
@@ -1269,7 +1283,7 @@ void __init setup_arch(char **cmdline_p)
1269 setup_numa_mapping(); 1283 setup_numa_mapping();
1270 zone_sizes_init(); 1284 zone_sizes_init();
1271 set_page_homes(); 1285 set_page_homes();
1272 setup_mpls(); 1286 setup_cpu(1);
1273 setup_clock(); 1287 setup_clock();
1274 load_hv_initrd(); 1288 load_hv_initrd();
1275} 1289}
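
One subtlety in the setup.c conversion: where a console line really is built piecewise, as in the NUMA cpu-to-node rows above, the fragments now go through pr_cont(), whose KERN_CONT marker tells the log layer to append to the current record rather than start a new one:

    #define pr_cont(fmt, ...) \
            printk(KERN_CONT fmt, ##__VA_ARGS__)

    /* Pattern from setup_numa_mapping() above: one row per console line. */
    printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
    for (x = 0; x < smp_width; ++x, ++cpu)
            pr_cont(" %d", cpu_to_node(cpu));
    pr_cont("\n");
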
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 45835cfad407..45b66a3c991f 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -33,6 +33,7 @@
33#include <asm/processor.h> 33#include <asm/processor.h>
34#include <asm/ucontext.h> 34#include <asm/ucontext.h>
35#include <asm/sigframe.h> 35#include <asm/sigframe.h>
36#include <asm/syscalls.h>
36#include <arch/interrupts.h> 37#include <arch/interrupts.h>
37 38
38#define DEBUG_SIG 0 39#define DEBUG_SIG 0
@@ -40,11 +41,8 @@
40#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 41#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
41 42
42 43
43/* Caller before callee in this file; other callee is in assembler */
44void do_signal(struct pt_regs *regs);
45
46long _sys_sigaltstack(const stack_t __user *uss, 44long _sys_sigaltstack(const stack_t __user *uss,
47 stack_t __user *uoss, struct pt_regs *regs) 45 stack_t __user *uoss, struct pt_regs *regs)
48{ 46{
49 return do_sigaltstack(uss, uoss, regs->sp); 47 return do_sigaltstack(uss, uoss, regs->sp);
50} 48}
@@ -65,7 +63,7 @@ int restore_sigcontext(struct pt_regs *regs,
65 63
66 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 64 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
67 err |= __get_user(((long *)regs)[i], 65 err |= __get_user(((long *)regs)[i],
68 &((long *)(&sc->regs))[i]); 66 &((long __user *)(&sc->regs))[i]);
69 67
70 regs->faultnum = INT_SWINT_1_SIGRETURN; 68 regs->faultnum = INT_SWINT_1_SIGRETURN;
71 69
@@ -73,7 +71,8 @@ int restore_sigcontext(struct pt_regs *regs,
73 return err; 71 return err;
74} 72}
75 73
76int _sys_rt_sigreturn(struct pt_regs *regs) 74/* sigreturn() returns long since it restores r0 in the interrupted code. */
75long _sys_rt_sigreturn(struct pt_regs *regs)
77{ 76{
78 struct rt_sigframe __user *frame = 77 struct rt_sigframe __user *frame =
79 (struct rt_sigframe __user *)(regs->sp); 78 (struct rt_sigframe __user *)(regs->sp);
@@ -114,7 +113,7 @@ int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
114 113
115 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 114 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
116 err |= __put_user(((long *)regs)[i], 115 err |= __put_user(((long *)regs)[i],
117 &((long *)(&sc->regs))[i]); 116 &((long __user *)(&sc->regs))[i]);
118 117
119 return err; 118 return err;
120} 119}
@@ -137,7 +136,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka,
137 * will die with SIGSEGV. 136 * will die with SIGSEGV.
138 */ 137 */
139 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) 138 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
140 return (void __user *) -1L; 139 return (void __user __force *)-1UL;
141 140
142 /* This is the X/Open sanctioned signal stack switching. */ 141 /* This is the X/Open sanctioned signal stack switching. */
143 if (ka->sa.sa_flags & SA_ONSTACK) { 142 if (ka->sa.sa_flags & SA_ONSTACK) {
@@ -185,8 +184,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
185 /* Create the ucontext. */ 184 /* Create the ucontext. */
186 err |= __clear_user(&frame->save_area, sizeof(frame->save_area)); 185 err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
187 err |= __put_user(0, &frame->uc.uc_flags); 186 err |= __put_user(0, &frame->uc.uc_flags);
188 err |= __put_user(0, &frame->uc.uc_link); 187 err |= __put_user(NULL, &frame->uc.uc_link);
189 err |= __put_user((void *)(current->sas_ss_sp), 188 err |= __put_user((void __user *)(current->sas_ss_sp),
190 &frame->uc.uc_stack.ss_sp); 189 &frame->uc.uc_stack.ss_sp);
191 err |= __put_user(sas_ss_flags(regs->sp), 190 err |= __put_user(sas_ss_flags(regs->sp),
192 &frame->uc.uc_stack.ss_flags); 191 &frame->uc.uc_stack.ss_flags);
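
The sigcontext hunks above keep sparse's address-space tracking through the word-by-word register copy: sc is a struct sigcontext __user pointer, so the old (long *) cast silently stripped the __user tag from &sc->regs, and the fix reapplies it. Likewise __put_user(NULL, &frame->uc.uc_link) replaces a plain 0 because uc_link is a pointer field. Condensed, with the two sides commented (names from the hunk):

    struct sigcontext __user *sc; /* user-space save area */
    struct pt_regs *regs;         /* kernel-side registers */
    int i, err = 0;

    for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
            err |= __put_user(((long *)regs)[i],               /* kernel side */
                              &((long __user *)&sc->regs)[i]); /* keeps __user */
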
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 266aae123632..5ec4b9c651f2 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -23,6 +23,7 @@
23#include <linux/uaccess.h> 23#include <linux/uaccess.h>
24#include <linux/mman.h> 24#include <linux/mman.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/err.h>
26#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
27#include <asm/opcode-tile.h> 28#include <asm/opcode-tile.h>
28#include <asm/opcode_constants.h> 29#include <asm/opcode_constants.h>
@@ -39,8 +40,8 @@ static int __init setup_unaligned_printk(char *str)
39 if (strict_strtol(str, 0, &val) != 0) 40 if (strict_strtol(str, 0, &val) != 0)
40 return 0; 41 return 0;
41 unaligned_printk = val; 42 unaligned_printk = val;
42 printk("Printk for each unaligned data accesses is %s\n", 43 pr_info("Printk for each unaligned data accesses is %s\n",
43 unaligned_printk ? "enabled" : "disabled"); 44 unaligned_printk ? "enabled" : "disabled");
44 return 1; 45 return 1;
45} 46}
46__setup("unaligned_printk=", setup_unaligned_printk); 47__setup("unaligned_printk=", setup_unaligned_printk);
@@ -113,7 +114,7 @@ static tile_bundle_bits rewrite_load_store_unaligned(
113 enum mem_op mem_op, 114 enum mem_op mem_op,
114 int size, int sign_ext) 115 int size, int sign_ext)
115{ 116{
116 unsigned char *addr; 117 unsigned char __user *addr;
117 int val_reg, addr_reg, err, val; 118 int val_reg, addr_reg, err, val;
118 119
119 /* Get address and value registers */ 120 /* Get address and value registers */
@@ -148,7 +149,7 @@ static tile_bundle_bits rewrite_load_store_unaligned(
148 return bundle; 149 return bundle;
149 150
150 /* If it's aligned, don't handle it specially */ 151 /* If it's aligned, don't handle it specially */
151 addr = (void *)regs->regs[addr_reg]; 152 addr = (void __user *)regs->regs[addr_reg];
152 if (((unsigned long)addr % size) == 0) 153 if (((unsigned long)addr % size) == 0)
153 return bundle; 154 return bundle;
154 155
@@ -183,7 +184,7 @@ static tile_bundle_bits rewrite_load_store_unaligned(
183 siginfo_t info = { 184 siginfo_t info = {
184 .si_signo = SIGSEGV, 185 .si_signo = SIGSEGV,
185 .si_code = SEGV_MAPERR, 186 .si_code = SEGV_MAPERR,
186 .si_addr = (void __user *)addr 187 .si_addr = addr
187 }; 188 };
188 force_sig_info(info.si_signo, &info, current); 189 force_sig_info(info.si_signo, &info, current);
189 return (tile_bundle_bits) 0; 190 return (tile_bundle_bits) 0;
@@ -193,30 +194,33 @@ static tile_bundle_bits rewrite_load_store_unaligned(
193 siginfo_t info = { 194 siginfo_t info = {
194 .si_signo = SIGBUS, 195 .si_signo = SIGBUS,
195 .si_code = BUS_ADRALN, 196 .si_code = BUS_ADRALN,
196 .si_addr = (void __user *)addr 197 .si_addr = addr
197 }; 198 };
198 force_sig_info(info.si_signo, &info, current); 199 force_sig_info(info.si_signo, &info, current);
199 return (tile_bundle_bits) 0; 200 return (tile_bundle_bits) 0;
200 } 201 }
201 202
202 if (unaligned_printk || unaligned_fixup_count == 0) { 203 if (unaligned_printk || unaligned_fixup_count == 0) {
203 printk("Process %d/%s: PC %#lx: Fixup of" 204 pr_info("Process %d/%s: PC %#lx: Fixup of"
204 " unaligned %s at %#lx.\n", 205 " unaligned %s at %#lx.\n",
205 current->pid, current->comm, regs->pc, 206 current->pid, current->comm, regs->pc,
206 (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) ? 207 (mem_op == MEMOP_LOAD ||
207 "load" : "store", 208 mem_op == MEMOP_LOAD_POSTINCR) ?
208 (unsigned long)addr); 209 "load" : "store",
210 (unsigned long)addr);
209 if (!unaligned_printk) { 211 if (!unaligned_printk) {
210 printk("\n" 212#define P pr_info
211"Unaligned fixups in the kernel will slow your application considerably.\n" 213P("\n");
212"You can find them by writing \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n" 214P("Unaligned fixups in the kernel will slow your application considerably.\n");
213"which requests the kernel show all unaligned fixups, or writing a \"0\"\n" 215P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
214"to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n" 216P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
215"access will become a SIGBUS you can debug. No further warnings will be\n" 217P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
216"shown so as to avoid additional slowdown, but you can track the number\n" 218P("access will become a SIGBUS you can debug. No further warnings will be\n");
217"of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n" 219P("shown so as to avoid additional slowdown, but you can track the number\n");
218"Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n" 220P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
219 "\n"); 221P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
222P("\n");
223#undef P
220 } 224 }
221 } 225 }
222 ++unaligned_fixup_count; 226 ++unaligned_fixup_count;
@@ -276,7 +280,7 @@ void single_step_once(struct pt_regs *regs)
276 struct thread_info *info = (void *)current_thread_info(); 280 struct thread_info *info = (void *)current_thread_info();
277 struct single_step_state *state = info->step_state; 281 struct single_step_state *state = info->step_state;
278 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP); 282 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
279 tile_bundle_bits *buffer, *pc; 283 tile_bundle_bits __user *buffer, *pc;
280 tile_bundle_bits bundle; 284 tile_bundle_bits bundle;
281 int temp_reg; 285 int temp_reg;
282 int target_reg = TREG_LR; 286 int target_reg = TREG_LR;
@@ -306,21 +310,21 @@ void single_step_once(struct pt_regs *regs)
306 /* allocate a page of writable, executable memory */ 310 /* allocate a page of writable, executable memory */
307 state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL); 311 state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
308 if (state == NULL) { 312 if (state == NULL) {
309 printk("Out of kernel memory trying to single-step\n"); 313 pr_err("Out of kernel memory trying to single-step\n");
310 return; 314 return;
311 } 315 }
312 316
313 /* allocate a cache line of writable, executable memory */ 317 /* allocate a cache line of writable, executable memory */
314 down_write(&current->mm->mmap_sem); 318 down_write(&current->mm->mmap_sem);
315 buffer = (void *) do_mmap(0, 0, 64, 319 buffer = (void __user *) do_mmap(NULL, 0, 64,
316 PROT_EXEC | PROT_READ | PROT_WRITE, 320 PROT_EXEC | PROT_READ | PROT_WRITE,
317 MAP_PRIVATE | MAP_ANONYMOUS, 321 MAP_PRIVATE | MAP_ANONYMOUS,
318 0); 322 0);
319 up_write(&current->mm->mmap_sem); 323 up_write(&current->mm->mmap_sem);
320 324
321 if ((int)buffer < 0 && (int)buffer > -PAGE_SIZE) { 325 if (IS_ERR((void __force *)buffer)) {
322 kfree(state); 326 kfree(state);
323 printk("Out of kernel pages trying to single-step\n"); 327 pr_err("Out of kernel pages trying to single-step\n");
324 return; 328 return;
325 } 329 }
326 330
@@ -349,11 +353,14 @@ void single_step_once(struct pt_regs *regs)
349 if (regs->faultnum == INT_SWINT_1) 353 if (regs->faultnum == INT_SWINT_1)
350 regs->pc -= 8; 354 regs->pc -= 8;
351 355
352 pc = (tile_bundle_bits *)(regs->pc); 356 pc = (tile_bundle_bits __user *)(regs->pc);
353 bundle = pc[0]; 357 if (get_user(bundle, pc) != 0) {
358 pr_err("Couldn't read instruction at %p trying to step\n", pc);
359 return;
360 }
354 361
355 /* We'll follow the instruction with 2 ill op bundles */ 362 /* We'll follow the instruction with 2 ill op bundles */
356 state->orig_pc = (unsigned long) pc; 363 state->orig_pc = (unsigned long)pc;
357 state->next_pc = (unsigned long)(pc + 1); 364 state->next_pc = (unsigned long)(pc + 1);
358 state->branch_next_pc = 0; 365 state->branch_next_pc = 0;
359 state->update = 0; 366 state->update = 0;
@@ -633,7 +640,7 @@ void single_step_once(struct pt_regs *regs)
633 } 640 }
634 641
635 if (err) { 642 if (err) {
636 printk("Fault when writing to single-step buffer\n"); 643 pr_err("Fault when writing to single-step buffer\n");
637 return; 644 return;
638 } 645 }
639 646
@@ -641,12 +648,12 @@ void single_step_once(struct pt_regs *regs)
641 * Flush the buffer. 648 * Flush the buffer.
642 * We do a local flush only, since this is a thread-specific buffer. 649 * We do a local flush only, since this is a thread-specific buffer.
643 */ 650 */
644 __flush_icache_range((unsigned long) state->buffer, 651 __flush_icache_range((unsigned long)state->buffer,
645 (unsigned long) buffer); 652 (unsigned long)buffer);
646 653
647 /* Indicate enabled */ 654 /* Indicate enabled */
648 state->is_enabled = is_single_step; 655 state->is_enabled = is_single_step;
649 regs->pc = (unsigned long) state->buffer; 656 regs->pc = (unsigned long)state->buffer;
650 657
651 /* Fault immediately if we are coming back from a syscall. */ 658 /* Fault immediately if we are coming back from a syscall. */
652 if (regs->faultnum == INT_SWINT_1) 659 if (regs->faultnum == INT_SWINT_1)
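
Two fixes above go beyond annotation. The old mmap error test, (int)buffer < 0 && (int)buffer > -PAGE_SIZE, truncated the pointer to 32 bits and open-coded what <linux/err.h> already provides; do_mmap() returns either a mapping address or a small negative errno, which the err.h helpers decode. And the old bundle = pc[0] dereferenced a user address directly, where get_user() validates the access and fails cleanly. A sketch of the canonical forms, with names and locking as in the hunk:

    unsigned long addr;
    tile_bundle_bits bundle;

    down_write(&current->mm->mmap_sem);
    addr = do_mmap(NULL, 0, 64, PROT_EXEC | PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, 0);
    up_write(&current->mm->mmap_sem);
    if (IS_ERR_VALUE(addr))   /* addr in [-MAX_ERRNO, -1]: an errno, not a mapping */
            return;           /* single_step_once() returns void */

    if (get_user(bundle, pc)) /* returns -EFAULT on a bad user address */
            return;
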
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index aa3aafdb4b93..74d62d098edf 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -25,19 +25,13 @@
25#include <linux/percpu.h> 25#include <linux/percpu.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/irq.h>
28#include <asm/mmu_context.h> 29#include <asm/mmu_context.h>
29#include <asm/tlbflush.h> 30#include <asm/tlbflush.h>
30#include <asm/sections.h> 31#include <asm/sections.h>
31 32
32/*
33 * This assembly function is provided in entry.S.
34 * When called, it loops on a nap instruction forever.
35 * FIXME: should be in a header somewhere.
36 */
37extern void smp_nap(void);
38
39/* State of each CPU. */ 33/* State of each CPU. */
40DEFINE_PER_CPU(int, cpu_state) = { 0 }; 34static DEFINE_PER_CPU(int, cpu_state) = { 0 };
41 35
42/* The messaging code jumps to this pointer during boot-up */ 36/* The messaging code jumps to this pointer during boot-up */
43unsigned long start_cpu_function_addr; 37unsigned long start_cpu_function_addr;
@@ -74,7 +68,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
74 */ 68 */
75 rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu)); 69 rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
76 if (rc != 0) 70 if (rc != 0)
77 printk("Couldn't set init affinity to boot cpu (%ld)\n", rc); 71 pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);
78 72
79 /* Print information about disabled and dataplane cpus. */ 73 /* Print information about disabled and dataplane cpus. */
80 print_disabled_cpus(); 74 print_disabled_cpus();
@@ -134,13 +128,13 @@ static __init int reset_init_affinity(void)
134{ 128{
135 long rc = sched_setaffinity(current->pid, &init_affinity); 129 long rc = sched_setaffinity(current->pid, &init_affinity);
136 if (rc != 0) 130 if (rc != 0)
137 printk(KERN_WARNING "couldn't reset init affinity (%ld)\n", 131 pr_warning("couldn't reset init affinity (%ld)\n",
138 rc); 132 rc);
139 return 0; 133 return 0;
140} 134}
141late_initcall(reset_init_affinity); 135late_initcall(reset_init_affinity);
142 136
143struct cpumask cpu_started __cpuinitdata; 137static struct cpumask cpu_started __cpuinitdata;
144 138
145/* 139/*
146 * Activate a secondary processor. Very minimal; don't add anything 140 * Activate a secondary processor. Very minimal; don't add anything
@@ -172,9 +166,6 @@ static void __cpuinit start_secondary(void)
172 BUG(); 166 BUG();
173 enter_lazy_tlb(&init_mm, current); 167 enter_lazy_tlb(&init_mm, current);
174 168
175 /* Enable IRQs. */
176 init_per_tile_IRQs();
177
178 /* Allow hypervisor messages to be received */ 169 /* Allow hypervisor messages to be received */
179 init_messaging(); 170 init_messaging();
180 local_irq_enable(); 171 local_irq_enable();
@@ -182,7 +173,7 @@ static void __cpuinit start_secondary(void)
182 /* Indicate that we're ready to come up. */ 173 /* Indicate that we're ready to come up. */
183 /* Must not do this before we're ready to receive messages */ 174 /* Must not do this before we're ready to receive messages */
184 if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) { 175 if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
185 printk(KERN_WARNING "CPU#%d already started!\n", cpuid); 176 pr_warning("CPU#%d already started!\n", cpuid);
186 for (;;) 177 for (;;)
187 local_irq_enable(); 178 local_irq_enable();
188 } 179 }
@@ -190,13 +181,10 @@ static void __cpuinit start_secondary(void)
190 smp_nap(); 181 smp_nap();
191} 182}
192 183
193void setup_mpls(void); /* from kernel/setup.c */
194void store_permanent_mappings(void);
195
196/* 184/*
197 * Bring a secondary processor online. 185 * Bring a secondary processor online.
198 */ 186 */
199void __cpuinit online_secondary() 187void __cpuinit online_secondary(void)
200{ 188{
201 /* 189 /*
202 * low-memory mappings have been cleared, flush them from 190 * low-memory mappings have been cleared, flush them from
@@ -222,17 +210,14 @@ void __cpuinit online_secondary()
222 ipi_call_unlock(); 210 ipi_call_unlock();
223 __get_cpu_var(cpu_state) = CPU_ONLINE; 211 __get_cpu_var(cpu_state) = CPU_ONLINE;
224 212
225 /* Set up MPLs for this processor */ 213 /* Set up tile-specific state for this cpu. */
226 setup_mpls(); 214 setup_cpu(0);
227
228 215
229 /* Set up tile-timer clock-event device on this cpu */ 216 /* Set up tile-timer clock-event device on this cpu */
230 setup_tile_timer(); 217 setup_tile_timer();
231 218
232 preempt_enable(); 219 preempt_enable();
233 220
234 store_permanent_mappings();
235
236 cpu_idle(); 221 cpu_idle();
237} 222}
238 223
@@ -242,7 +227,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
242 static int timeout; 227 static int timeout;
243 for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) { 228 for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
244 if (timeout >= 50000) { 229 if (timeout >= 50000) {
245 printk(KERN_INFO "skipping unresponsive cpu%d\n", cpu); 230 pr_info("skipping unresponsive cpu%d\n", cpu);
246 local_irq_enable(); 231 local_irq_enable();
247 return -EIO; 232 return -EIO;
248 } 233 }
@@ -289,5 +274,5 @@ void __init smp_cpus_done(unsigned int max_cpus)
289 ; 274 ;
290 rc = sched_setaffinity(current->pid, cpumask_of(cpu)); 275 rc = sched_setaffinity(current->pid, cpumask_of(cpu));
291 if (rc != 0) 276 if (rc != 0)
292 printk("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc); 277 pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc);
293} 278}
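
The printk()-to-pr_*() conversions throughout this file are mechanical: each pr_* helper is a thin macro over printk() that prepends the matching KERN_* level (plus an optional per-file pr_fmt() prefix). Roughly, following <linux/kernel.h>:

        #ifndef pr_fmt
        #define pr_fmt(fmt) fmt
        #endif

        #define pr_err(fmt, ...)     printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
        #define pr_warning(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
        #define pr_info(fmt, ...)    printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

The checkpatch benefit is that the level can never be forgotten, which is why the bare printk() calls above get flagged.
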
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 382170b4b40a..b6268d3ae869 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -56,13 +56,16 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
56 HV_PTE pte; 56 HV_PTE pte;
57 struct page *page; 57 struct page *page;
58 58
59 if (l1_pgtable == NULL)
60 return 0; /* can't read user space in other tasks */
61
59 pte = l1_pgtable[HV_L1_INDEX(address)]; 62 pte = l1_pgtable[HV_L1_INDEX(address)];
60 if (!hv_pte_get_present(pte)) 63 if (!hv_pte_get_present(pte))
61 return 0; 64 return 0;
62 pfn = hv_pte_get_pfn(pte); 65 pfn = hv_pte_get_pfn(pte);
63 if (pte_huge(pte)) { 66 if (pte_huge(pte)) {
64 if (!pfn_valid(pfn)) { 67 if (!pfn_valid(pfn)) {
65 printk(KERN_ERR "huge page has bad pfn %#lx\n", pfn); 68 pr_err("huge page has bad pfn %#lx\n", pfn);
66 return 0; 69 return 0;
67 } 70 }
68 return hv_pte_get_present(pte) && hv_pte_get_readable(pte); 71 return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
@@ -70,7 +73,7 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
70 73
71 page = pfn_to_page(pfn); 74 page = pfn_to_page(pfn);
72 if (PageHighMem(page)) { 75 if (PageHighMem(page)) {
73 printk(KERN_ERR "L2 page table not in LOWMEM (%#llx)\n", 76 pr_err("L2 page table not in LOWMEM (%#llx)\n",
74 HV_PFN_TO_CPA(pfn)); 77 HV_PFN_TO_CPA(pfn));
75 return 0; 78 return 0;
76 } 79 }
@@ -91,13 +94,12 @@ static bool read_memory_func(void *result, VirtualAddress address,
91 /* We only tolerate kernel-space reads of this task's stack */ 94 /* We only tolerate kernel-space reads of this task's stack */
92 if (!in_kernel_stack(kbt, address)) 95 if (!in_kernel_stack(kbt, address))
93 return 0; 96 return 0;
94 } else if (kbt->pgtable == NULL) {
95 return 0; /* can't read user space in other tasks */
96 } else if (!valid_address(kbt, address)) { 97 } else if (!valid_address(kbt, address)) {
97 return 0; /* invalid user-space address */ 98 return 0; /* invalid user-space address */
98 } 99 }
99 pagefault_disable(); 100 pagefault_disable();
100 retval = __copy_from_user_inatomic(result, (const void *)address, 101 retval = __copy_from_user_inatomic(result,
102 (void __user __force *)address,
101 size); 103 size);
102 pagefault_enable(); 104 pagefault_enable();
103 return (retval == 0); 105 return (retval == 0);
@@ -131,14 +133,14 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
131 in_kernel_stack(kbt, p->sp) && 133 in_kernel_stack(kbt, p->sp) &&
132 p->sp >= sp) { 134 p->sp >= sp) {
133 if (kbt->verbose) 135 if (kbt->verbose)
134 printk(KERN_ERR " <%s while in kernel mode>\n", fault); 136 pr_err(" <%s while in kernel mode>\n", fault);
135 } else if (EX1_PL(p->ex1) == USER_PL && 137 } else if (EX1_PL(p->ex1) == USER_PL &&
136 p->pc < PAGE_OFFSET && 138 p->pc < PAGE_OFFSET &&
137 p->sp < PAGE_OFFSET) { 139 p->sp < PAGE_OFFSET) {
138 if (kbt->verbose) 140 if (kbt->verbose)
139 printk(KERN_ERR " <%s while in user mode>\n", fault); 141 pr_err(" <%s while in user mode>\n", fault);
140 } else if (kbt->verbose) { 142 } else if (kbt->verbose) {
141 printk(KERN_ERR " (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n", 143 pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
142 p->pc, p->sp, p->ex1); 144 p->pc, p->sp, p->ex1);
143 p = NULL; 145 p = NULL;
144 } 146 }
@@ -166,13 +168,13 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
166 if (!valid_address(kbt, b->sp) || 168 if (!valid_address(kbt, b->sp) ||
167 !valid_address(kbt, sigframe_top)) { 169 !valid_address(kbt, sigframe_top)) {
168 if (kbt->verbose) 170 if (kbt->verbose)
169 printk(" (odd signal: sp %#lx?)\n", 171 pr_err(" (odd signal: sp %#lx?)\n",
170 (unsigned long)(b->sp)); 172 (unsigned long)(b->sp));
171 return NULL; 173 return NULL;
172 } 174 }
173 frame = (struct rt_sigframe *)b->sp; 175 frame = (struct rt_sigframe *)b->sp;
174 if (kbt->verbose) { 176 if (kbt->verbose) {
175 printk(KERN_ERR " <received signal %d>\n", 177 pr_err(" <received signal %d>\n",
176 frame->info.si_signo); 178 frame->info.si_signo);
177 } 179 }
178 return &frame->uc.uc_mcontext.regs; 180 return &frame->uc.uc_mcontext.regs;
@@ -180,7 +182,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
180 return NULL; 182 return NULL;
181} 183}
182 184
183int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt) 185static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
184{ 186{
185 return is_sigreturn(kbt->it.pc); 187 return is_sigreturn(kbt->it.pc);
186} 188}
@@ -231,13 +233,13 @@ static void validate_stack(struct pt_regs *regs)
231 unsigned long sp = stack_pointer; 233 unsigned long sp = stack_pointer;
232 234
233 if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) { 235 if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
234 printk("WARNING: cpu %d: kernel stack page %#lx underrun!\n" 236 pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
235 " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", 237 " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
236 cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); 238 cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
237 } 239 }
238 240
239 else if (sp < ksp0_base + sizeof(struct thread_info)) { 241 else if (sp < ksp0_base + sizeof(struct thread_info)) {
240 printk("WARNING: cpu %d: kernel stack page %#lx overrun!\n" 242 pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
241 " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", 243 " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
242 cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); 244 cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
243 } 245 }
@@ -280,7 +282,7 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
280 if (!PageHighMem(page)) 282 if (!PageHighMem(page))
281 kbt->pgtable = __va(pgdir_pa); 283 kbt->pgtable = __va(pgdir_pa);
282 else 284 else
283 printk(KERN_ERR "page table not in LOWMEM" 285 pr_err("page table not in LOWMEM"
284 " (%#llx)\n", pgdir_pa); 286 " (%#llx)\n", pgdir_pa);
285 } 287 }
286 local_flush_tlb_all(); 288 local_flush_tlb_all();
@@ -288,13 +290,12 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
288 } 290 }
289 291
290 if (regs == NULL) { 292 if (regs == NULL) {
291 extern const void *get_switch_to_pc(void);
292 if (is_current || t->state == TASK_RUNNING) { 293 if (is_current || t->state == TASK_RUNNING) {
293 /* Can't do this; we need registers */ 294 /* Can't do this; we need registers */
294 kbt->end = 1; 295 kbt->end = 1;
295 return; 296 return;
296 } 297 }
297 pc = (ulong) get_switch_to_pc(); 298 pc = get_switch_to_pc();
298 lr = t->thread.pc; 299 lr = t->thread.pc;
299 sp = t->thread.ksp; 300 sp = t->thread.ksp;
300 r52 = 0; 301 r52 = 0;
@@ -344,8 +345,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
344 * then bust_spinlocks() spit out a space in front of us 345 * then bust_spinlocks() spit out a space in front of us
345 * and it will mess up our KERN_ERR. 346 * and it will mess up our KERN_ERR.
346 */ 347 */
347 printk("\n"); 348 pr_err("\n");
348 printk(KERN_ERR "Starting stack dump of tid %d, pid %d (%s)" 349 pr_err("Starting stack dump of tid %d, pid %d (%s)"
349 " on cpu %d at cycle %lld\n", 350 " on cpu %d at cycle %lld\n",
350 kbt->task->pid, kbt->task->tgid, kbt->task->comm, 351 kbt->task->pid, kbt->task->tgid, kbt->task->comm,
351 smp_processor_id(), get_cycles()); 352 smp_processor_id(), get_cycles());
@@ -385,17 +386,17 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
385 namebuf[sizeof(namebuf)-1] = '\0'; 386 namebuf[sizeof(namebuf)-1] = '\0';
386 } 387 }
387 388
388 printk(KERN_ERR " frame %d: 0x%lx %s(sp 0x%lx)\n", 389 pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n",
389 i++, address, namebuf, (unsigned long)(kbt->it.sp)); 390 i++, address, namebuf, (unsigned long)(kbt->it.sp));
390 391
391 if (i >= 100) { 392 if (i >= 100) {
392 printk(KERN_ERR "Stack dump truncated" 393 pr_err("Stack dump truncated"
393 " (%d frames)\n", i); 394 " (%d frames)\n", i);
394 break; 395 break;
395 } 396 }
396 } 397 }
397 if (headers) 398 if (headers)
398 printk(KERN_ERR "Stack dump complete\n"); 399 pr_err("Stack dump complete\n");
399} 400}
400EXPORT_SYMBOL(tile_show_stack); 401EXPORT_SYMBOL(tile_show_stack);
401 402
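
Folding the NULL-pgtable check into valid_address() leaves read_memory_func() showing the one pattern that matters here: probing memory that may be unmapped without triggering a sleeping fault. A hedged sketch of that pattern (the helper name is invented; the two kernel calls are real):

        /* Probe a possibly-unmapped user address without sleeping. */
        static int probe_read(void *dst, const void __user *src, size_t size)
        {
                unsigned long left;

                pagefault_disable();    /* faults fail fast, no handling */
                left = __copy_from_user_inatomic(dst, src, size);
                pagefault_enable();

                return left ? -EFAULT : 0;  /* nonzero = bytes not copied */
        }
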
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index 0427978cea0a..f0f87eab8c39 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -27,11 +27,10 @@
27#include <linux/mempolicy.h> 27#include <linux/mempolicy.h>
28#include <linux/binfmts.h> 28#include <linux/binfmts.h>
29#include <linux/fs.h> 29#include <linux/fs.h>
30#include <linux/syscalls.h> 30#include <linux/compat.h>
31#include <linux/uaccess.h> 31#include <linux/uaccess.h>
32#include <linux/signal.h> 32#include <linux/signal.h>
33#include <asm/syscalls.h> 33#include <asm/syscalls.h>
34
35#include <asm/pgtable.h> 34#include <asm/pgtable.h>
36#include <asm/homecache.h> 35#include <asm/homecache.h>
37#include <arch/chip.h> 36#include <arch/chip.h>
@@ -74,10 +73,7 @@ int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
74 73
75#endif /* 32-bit syscall wrappers */ 74#endif /* 32-bit syscall wrappers */
76 75
77/* 76/* Note: used by the compat code even in 64-bit Linux. */
78 * This API uses a 4KB-page-count offset into the file descriptor.
79 * It is likely not the right API to use on a 64-bit platform.
80 */
81SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, 77SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
82 unsigned long, prot, unsigned long, flags, 78 unsigned long, prot, unsigned long, flags,
83 unsigned long, fd, unsigned long, off_4k) 79 unsigned long, fd, unsigned long, off_4k)
@@ -89,10 +85,7 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
89 off_4k >> PAGE_ADJUST); 85 off_4k >> PAGE_ADJUST);
90} 86}
91 87
92/* 88#ifdef __tilegx__
93 * This API uses a byte offset into the file descriptor.
94 * It is likely not the right API to use on a 32-bit platform.
95 */
96SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, 89SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
97 unsigned long, prot, unsigned long, flags, 90 unsigned long, prot, unsigned long, flags,
98 unsigned long, fd, off_t, offset) 91 unsigned long, fd, off_t, offset)
@@ -102,6 +95,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
102 return sys_mmap_pgoff(addr, len, prot, flags, fd, 95 return sys_mmap_pgoff(addr, len, prot, flags, fd,
103 offset >> PAGE_SHIFT); 96 offset >> PAGE_SHIFT);
104} 97}
98#endif
105 99
106 100
107/* Provide the actual syscall number to call mapping. */ 101/* Provide the actual syscall number to call mapping. */
@@ -116,6 +110,10 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
116#define sys_sync_file_range sys_sync_file_range2 110#define sys_sync_file_range sys_sync_file_range2
117#endif 111#endif
118 112
113/*
114 * Note that we can't include <linux/unistd.h> here since the header
115 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
116 */
119void *sys_call_table[__NR_syscalls] = { 117void *sys_call_table[__NR_syscalls] = {
120 [0 ... __NR_syscalls-1] = sys_ni_syscall, 118 [0 ... __NR_syscalls-1] = sys_ni_syscall,
121#include <asm/unistd.h> 119#include <asm/unistd.h>
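
The sys_call_table definition combines two tricks: a GCC designated range initializer points every slot at sys_ni_syscall, and re-including <asm/unistd.h> with __SYSCALL defined as an array-initializer macro then overrides the implemented slots. The new comment explains why <linux/unistd.h> cannot be used: its include guard would suppress that second expansion. In outline (simplified; the file defines __SYSCALL along these lines before the include):

        #define __SYSCALL(nr, call) [nr] = (call),

        void *sys_call_table[__NR_syscalls] = {
                [0 ... __NR_syscalls - 1] = sys_ni_syscall, /* default: -ENOSYS */
        #include <asm/unistd.h> /* expands to [nr] = sys_xxx, per syscall */
        };
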
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 47500a324e32..b9ab25a889b5 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -23,6 +23,7 @@
23#include <linux/smp.h> 23#include <linux/smp.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <asm/irq_regs.h> 25#include <asm/irq_regs.h>
26#include <asm/traps.h>
26#include <hv/hypervisor.h> 27#include <hv/hypervisor.h>
27#include <arch/interrupts.h> 28#include <arch/interrupts.h>
28#include <arch/spr_def.h> 29#include <arch/spr_def.h>
@@ -45,13 +46,13 @@ static cycles_t cycles_per_sec __write_once;
45 */ 46 */
46#define TILE_MINSEC 5 47#define TILE_MINSEC 5
47 48
48cycles_t get_clock_rate() 49cycles_t get_clock_rate(void)
49{ 50{
50 return cycles_per_sec; 51 return cycles_per_sec;
51} 52}
52 53
53#if CHIP_HAS_SPLIT_CYCLE() 54#if CHIP_HAS_SPLIT_CYCLE()
54cycles_t get_cycles() 55cycles_t get_cycles(void)
55{ 56{
56 unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH); 57 unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH);
57 unsigned int low = __insn_mfspr(SPR_CYCLE_LOW); 58 unsigned int low = __insn_mfspr(SPR_CYCLE_LOW);
@@ -67,7 +68,7 @@ cycles_t get_cycles()
67} 68}
68#endif 69#endif
69 70
70cycles_t clocksource_get_cycles(struct clocksource *cs) 71static cycles_t clocksource_get_cycles(struct clocksource *cs)
71{ 72{
72 return get_cycles(); 73 return get_cycles();
73} 74}
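
The bodies elided from this hunk deal with the split 64-bit counter: with the count in two 32-bit SPRs, the low word can wrap between the two reads, so the usual shape is to re-read the high word and retry on mismatch. A sketch of that technique (matching the standard pattern, not a verbatim copy of the tile code):

        static cycles_t get_cycles_sketch(void)
        {
                unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH);
                unsigned int low = __insn_mfspr(SPR_CYCLE_LOW);
                unsigned int high2 = __insn_mfspr(SPR_CYCLE_HIGH);

                while (high != high2) {         /* low word wrapped mid-read */
                        low = __insn_mfspr(SPR_CYCLE_LOW);
                        high = high2;
                        high2 = __insn_mfspr(SPR_CYCLE_HIGH);
                }
                return (((cycles_t)high) << 32) | low;
        }
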
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 12cb10f38527..3870abbeeaa2 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -20,6 +20,9 @@
20#include <linux/uaccess.h> 20#include <linux/uaccess.h>
21#include <linux/ptrace.h> 21#include <linux/ptrace.h>
22#include <asm/opcode-tile.h> 22#include <asm/opcode-tile.h>
23#include <asm/opcode_constants.h>
24#include <asm/stack.h>
25#include <asm/traps.h>
23 26
24#include <arch/interrupts.h> 27#include <arch/interrupts.h>
25#include <arch/spr_def.h> 28#include <arch/spr_def.h>
@@ -42,7 +45,7 @@ static int __init setup_unaligned_fixup(char *str)
42 if (strict_strtol(str, 0, &val) != 0) 45 if (strict_strtol(str, 0, &val) != 0)
43 return 0; 46 return 0;
44 unaligned_fixup = val; 47 unaligned_fixup = val;
45 printk("Fixups for unaligned data accesses are %s\n", 48 pr_info("Fixups for unaligned data accesses are %s\n",
46 unaligned_fixup >= 0 ? 49 unaligned_fixup >= 0 ?
47 (unaligned_fixup ? "enabled" : "disabled") : 50 (unaligned_fixup ? "enabled" : "disabled") :
48 "completely disabled"); 51 "completely disabled");
@@ -56,7 +59,7 @@ static int dma_disabled;
56 59
57static int __init nodma(char *str) 60static int __init nodma(char *str)
58{ 61{
59 printk("User-space DMA is disabled\n"); 62 pr_info("User-space DMA is disabled\n");
60 dma_disabled = 1; 63 dma_disabled = 1;
61 return 1; 64 return 1;
62} 65}
@@ -97,20 +100,106 @@ static int retry_gpv(unsigned int gpv_reason)
97 100
98#endif /* CHIP_HAS_TILE_DMA() */ 101#endif /* CHIP_HAS_TILE_DMA() */
99 102
100/* Defined inside do_trap(), below. */
101#ifdef __tilegx__ 103#ifdef __tilegx__
102extern tilegx_bundle_bits bpt_code; 104#define bundle_bits tilegx_bundle_bits
103#else 105#else
104extern tile_bundle_bits bpt_code; 106#define bundle_bits tile_bundle_bits
105#endif 107#endif
106 108
109extern bundle_bits bpt_code;
110
111asm(".pushsection .rodata.bpt_code,\"a\";"
112 ".align 8;"
113 "bpt_code: bpt;"
114 ".size bpt_code,.-bpt_code;"
115 ".popsection");
116
117static int special_ill(bundle_bits bundle, int *sigp, int *codep)
118{
119 int sig, code, maxcode;
120
121 if (bundle == bpt_code) {
122 *sigp = SIGTRAP;
123 *codep = TRAP_BRKPT;
124 return 1;
125 }
126
127 /* If it's a "raise" bundle, then "ill" must be in pipe X1. */
128#ifdef __tilegx__
129 if ((bundle & TILEGX_BUNDLE_MODE_MASK) != 0)
130 return 0;
131 if (get_Opcode_X1(bundle) != UNARY_OPCODE_X1)
132 return 0;
133 if (get_UnaryOpcodeExtension_X1(bundle) != ILL_UNARY_OPCODE_X1)
134 return 0;
135#else
136 if (bundle & TILE_BUNDLE_Y_ENCODING_MASK)
137 return 0;
138 if (get_Opcode_X1(bundle) != SHUN_0_OPCODE_X1)
139 return 0;
140 if (get_UnShOpcodeExtension_X1(bundle) != UN_0_SHUN_0_OPCODE_X1)
141 return 0;
142 if (get_UnOpcodeExtension_X1(bundle) != ILL_UN_0_SHUN_0_OPCODE_X1)
143 return 0;
144#endif
145
146 /* Check that the magic distinguishers are set to mean "raise". */
147 if (get_Dest_X1(bundle) != 29 || get_SrcA_X1(bundle) != 37)
148 return 0;
149
150 /* There must be an "addli zero, zero, VAL" in X0. */
151 if (get_Opcode_X0(bundle) != ADDLI_OPCODE_X0)
152 return 0;
153 if (get_Dest_X0(bundle) != TREG_ZERO)
154 return 0;
155 if (get_SrcA_X0(bundle) != TREG_ZERO)
156 return 0;
157
158 /*
159 * Validate the proposed signal number and si_code value.
160 * Note that we embed these in the static instruction itself
161 * so that we perturb the register state as little as possible
162 * at the time of the actual fault; it's unlikely you'd ever
163 * need to dynamically choose which kind of fault to raise
164 * from user space.
165 */
166 sig = get_Imm16_X0(bundle) & 0x3f;
167 switch (sig) {
168 case SIGILL:
169 maxcode = NSIGILL;
170 break;
171 case SIGFPE:
172 maxcode = NSIGFPE;
173 break;
174 case SIGSEGV:
175 maxcode = NSIGSEGV;
176 break;
177 case SIGBUS:
178 maxcode = NSIGBUS;
179 break;
180 case SIGTRAP:
181 maxcode = NSIGTRAP;
182 break;
183 default:
184 return 0;
185 }
186 code = (get_Imm16_X0(bundle) >> 6) & 0xf;
187 if (code <= 0 || code > maxcode)
188 return 0;
189
190 /* Make it the requested signal. */
191 *sigp = sig;
192 *codep = code | __SI_FAULT;
193 return 1;
194}
195
107void __kprobes do_trap(struct pt_regs *regs, int fault_num, 196void __kprobes do_trap(struct pt_regs *regs, int fault_num,
108 unsigned long reason) 197 unsigned long reason)
109{ 198{
110 siginfo_t info = { 0 }; 199 siginfo_t info = { 0 };
111 int signo, code; 200 int signo, code;
112 unsigned long address; 201 unsigned long address;
113 __typeof__(bpt_code) instr; 202 bundle_bits instr;
114 203
115 /* Re-enable interrupts. */ 204 /* Re-enable interrupts. */
116 local_irq_enable(); 205 local_irq_enable();
@@ -122,10 +211,10 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
122 if (!user_mode(regs)) { 211 if (!user_mode(regs)) {
123 if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */ 212 if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */
124 return; 213 return;
125 printk(KERN_ALERT "Kernel took bad trap %d at PC %#lx\n", 214 pr_alert("Kernel took bad trap %d at PC %#lx\n",
126 fault_num, regs->pc); 215 fault_num, regs->pc);
127 if (fault_num == INT_GPV) 216 if (fault_num == INT_GPV)
128 printk(KERN_ALERT "GPV_REASON is %#lx\n", reason); 217 pr_alert("GPV_REASON is %#lx\n", reason);
129 show_regs(regs); 218 show_regs(regs);
130 do_exit(SIGKILL); /* FIXME: implement i386 die() */ 219 do_exit(SIGKILL); /* FIXME: implement i386 die() */
131 return; 220 return;
@@ -133,22 +222,14 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
133 222
134 switch (fault_num) { 223 switch (fault_num) {
135 case INT_ILL: 224 case INT_ILL:
136 asm(".pushsection .rodata.bpt_code,\"a\";" 225 if (copy_from_user(&instr, (void __user *)regs->pc,
137 ".align 8;" 226 sizeof(instr))) {
138 "bpt_code: bpt;" 227 pr_err("Unreadable instruction for INT_ILL:"
139 ".size bpt_code,.-bpt_code;"
140 ".popsection");
141
142 if (copy_from_user(&instr, (void *)regs->pc, sizeof(instr))) {
143 printk(KERN_ERR "Unreadable instruction for INT_ILL:"
144 " %#lx\n", regs->pc); 228 " %#lx\n", regs->pc);
145 do_exit(SIGKILL); 229 do_exit(SIGKILL);
146 return; 230 return;
147 } 231 }
148 if (instr == bpt_code) { 232 if (!special_ill(instr, &signo, &code)) {
149 signo = SIGTRAP;
150 code = TRAP_BRKPT;
151 } else {
152 signo = SIGILL; 233 signo = SIGILL;
153 code = ILL_ILLOPC; 234 code = ILL_ILLOPC;
154 } 235 }
@@ -181,7 +262,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
181 if (unaligned_fixup >= 0) { 262 if (unaligned_fixup >= 0) {
182 struct single_step_state *state = 263 struct single_step_state *state =
183 current_thread_info()->step_state; 264 current_thread_info()->step_state;
184 if (!state || (void *)(regs->pc) != state->buffer) { 265 if (!state ||
266 (void __user *)(regs->pc) != state->buffer) {
185 single_step_once(regs); 267 single_step_once(regs);
186 return; 268 return;
187 } 269 }
@@ -221,17 +303,15 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
221 303
222 info.si_signo = signo; 304 info.si_signo = signo;
223 info.si_code = code; 305 info.si_code = code;
224 info.si_addr = (void *)address; 306 info.si_addr = (void __user *)address;
225 if (signo == SIGILL) 307 if (signo == SIGILL)
226 info.si_trapno = fault_num; 308 info.si_trapno = fault_num;
227 force_sig_info(signo, &info, current); 309 force_sig_info(signo, &info, current);
228} 310}
229 311
230extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
231
232void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52) 312void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
233{ 313{
234 _dump_stack(dummy, pc, lr, sp, r52); 314 _dump_stack(dummy, pc, lr, sp, r52);
235 printk("Double fault: exiting\n"); 315 pr_emerg("Double fault: exiting\n");
236 machine_halt(); 316 machine_halt();
237} 317}
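
The new special_ill() decoder gives user space a way to raise an arbitrary signal: an "ill" bundle with magic Dest/SrcA fields, paired with an addli into the zero register whose 16-bit immediate packs the signal number into bits 0-5 and the si_code into bits 6-9. The encode side, as a userspace library emitting the bundle might write it, is just the mirror of the decode above:

        /* Pack/unpack the raise immediate; mirrors special_ill(). */
        #define RAISE_IMM(sig, code)    ((((code) & 0xf) << 6) | ((sig) & 0x3f))

        static int raise_sig(unsigned int imm16)  { return imm16 & 0x3f; }
        static int raise_code(unsigned int imm16) { return (imm16 >> 6) & 0xf; }

        /* e.g. addli zero, zero, RAISE_IMM(SIGFPE, FPE_INTDIV) in pipe X0 */

Embedding sig and code in the instruction itself, rather than in registers, keeps the faulting register state undisturbed, as the long comment in the hunk notes.
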
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 77388c1415bd..25fdc0c1839a 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -36,8 +36,8 @@ SECTIONS
36 36
37 /* Now the real code */ 37 /* Now the real code */
38 . = ALIGN(0x20000); 38 . = ALIGN(0x20000);
39 HEAD_TEXT_SECTION :text =0
40 .text : AT (ADDR(.text) - LOAD_OFFSET) { 39 .text : AT (ADDR(.text) - LOAD_OFFSET) {
40 HEAD_TEXT
41 SCHED_TEXT 41 SCHED_TEXT
42 LOCK_TEXT 42 LOCK_TEXT
43 __fix_text_end = .; /* tile-cpack won't rearrange before this */ 43 __fix_text_end = .; /* tile-cpack won't rearrange before this */
@@ -46,7 +46,7 @@ SECTIONS
46 *(.coldtext*) 46 *(.coldtext*)
47 *(.fixup) 47 *(.fixup)
48 *(.gnu.warning) 48 *(.gnu.warning)
49 } 49 } :text =0
50 _etext = .; 50 _etext = .;
51 51
52 /* "Init" is divided into two areas with very different virtual addresses. */ 52 /* "Init" is divided into two areas with very different virtual addresses. */
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index be1e8acd105d..8040b42a8eea 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -18,27 +18,10 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <asm/atomic.h> 20#include <asm/atomic.h>
21#include <asm/futex.h>
21#include <arch/chip.h> 22#include <arch/chip.h>
22 23
23/* The routines in atomic_asm.S are private, so we only declare them here. */ 24/* See <asm/atomic_32.h> */
24extern struct __get_user __atomic_cmpxchg(volatile int *p,
25 int *lock, int o, int n);
26extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
27extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
28extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
29 int *lock, int o, int n);
30extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
31extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
32extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
33
34extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
35extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
36extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
37extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
38 int *lock, u64 o, u64 n);
39
40
41/* See <asm/atomic.h> */
42#if ATOMIC_LOCKS_FOUND_VIA_TABLE() 25#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
43 26
44/* 27/*
@@ -209,7 +192,7 @@ u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
209EXPORT_SYMBOL(_atomic64_cmpxchg); 192EXPORT_SYMBOL(_atomic64_cmpxchg);
210 193
211 194
212static inline int *__futex_setup(__user int *v) 195static inline int *__futex_setup(int __user *v)
213{ 196{
214 /* 197 /*
215 * Issue a prefetch to the counter to bring it into cache. 198 * Issue a prefetch to the counter to bring it into cache.
@@ -217,37 +200,37 @@ static inline int *__futex_setup(__user int *v)
217 * since it might fault; instead we do a prefetch into the L2. 200 * since it might fault; instead we do a prefetch into the L2.
218 */ 201 */
219 __insn_prefetch(v); 202 __insn_prefetch(v);
220 return __atomic_hashed_lock(v); 203 return __atomic_hashed_lock((int __force *)v);
221} 204}
222 205
223struct __get_user futex_set(int *v, int i) 206struct __get_user futex_set(int __user *v, int i)
224{ 207{
225 return __atomic_xchg(v, __futex_setup(v), i); 208 return __atomic_xchg((int __force *)v, __futex_setup(v), i);
226} 209}
227 210
228struct __get_user futex_add(int *v, int n) 211struct __get_user futex_add(int __user *v, int n)
229{ 212{
230 return __atomic_xchg_add(v, __futex_setup(v), n); 213 return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
231} 214}
232 215
233struct __get_user futex_or(int *v, int n) 216struct __get_user futex_or(int __user *v, int n)
234{ 217{
235 return __atomic_or(v, __futex_setup(v), n); 218 return __atomic_or((int __force *)v, __futex_setup(v), n);
236} 219}
237 220
238struct __get_user futex_andn(int *v, int n) 221struct __get_user futex_andn(int __user *v, int n)
239{ 222{
240 return __atomic_andn(v, __futex_setup(v), n); 223 return __atomic_andn((int __force *)v, __futex_setup(v), n);
241} 224}
242 225
243struct __get_user futex_xor(int *v, int n) 226struct __get_user futex_xor(int __user *v, int n)
244{ 227{
245 return __atomic_xor(v, __futex_setup(v), n); 228 return __atomic_xor((int __force *)v, __futex_setup(v), n);
246} 229}
247 230
248struct __get_user futex_cmpxchg(int *v, int o, int n) 231struct __get_user futex_cmpxchg(int __user *v, int o, int n)
249{ 232{
250 return __atomic_cmpxchg(v, __futex_setup(v), o, n); 233 return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
251} 234}
252 235
253/* 236/*
@@ -260,7 +243,7 @@ struct __get_user futex_cmpxchg(int *v, int o, int n)
260 * invoked in is the context of the "_atomic_xxx()" routines called 243 * invoked in is the context of the "_atomic_xxx()" routines called
261 * by the functions in this file. 244 * by the functions in this file.
262 */ 245 */
263struct __get_user __atomic_bad_address(int *addr) 246struct __get_user __atomic_bad_address(int __user *addr)
264{ 247{
265 if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int)))) 248 if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
266 panic("Bad address used for kernel atomic op: %p\n", addr); 249 panic("Bad address used for kernel atomic op: %p\n", addr);
@@ -271,7 +254,7 @@ struct __get_user __atomic_bad_address(int *addr)
271#if CHIP_HAS_CBOX_HOME_MAP() 254#if CHIP_HAS_CBOX_HOME_MAP()
272static int __init noatomichash(char *str) 255static int __init noatomichash(char *str)
273{ 256{
274 printk("noatomichash is deprecated.\n"); 257 pr_warning("noatomichash is deprecated.\n");
275 return 1; 258 return 1;
276} 259}
277__setup("noatomichash", noatomichash); 260__setup("noatomichash", noatomichash);
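
The __user/__force churn in the futex wrappers is purely for sparse; under a normal compile both annotations expand to nothing. Approximately, per <linux/compiler.h>:

        #ifdef __CHECKER__
        # define __user   __attribute__((noderef, address_space(1)))
        # define __force  __attribute__((force))
        #else
        # define __user
        # define __force
        #endif

So (int __force *)v tells sparse "yes, I mean to strip the address space here", while the generated code is identical either way.
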
diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c
index af745b3b2559..fdc403614d12 100644
--- a/arch/tile/lib/cpumask.c
+++ b/arch/tile/lib/cpumask.c
@@ -15,6 +15,7 @@
15#include <linux/cpumask.h> 15#include <linux/cpumask.h>
16#include <linux/ctype.h> 16#include <linux/ctype.h>
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/smp.h>
18 19
19/* 20/*
20 * Allow cropping out bits beyond the end of the array. 21 * Allow cropping out bits beyond the end of the array.
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index af8e70e2a0ce..6bc7b52b4aa0 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -21,6 +21,7 @@
21EXPORT_SYMBOL(__get_user_1); 21EXPORT_SYMBOL(__get_user_1);
22EXPORT_SYMBOL(__get_user_2); 22EXPORT_SYMBOL(__get_user_2);
23EXPORT_SYMBOL(__get_user_4); 23EXPORT_SYMBOL(__get_user_4);
24EXPORT_SYMBOL(__get_user_8);
24EXPORT_SYMBOL(__put_user_1); 25EXPORT_SYMBOL(__put_user_1);
25EXPORT_SYMBOL(__put_user_2); 26EXPORT_SYMBOL(__put_user_2);
26EXPORT_SYMBOL(__put_user_4); 27EXPORT_SYMBOL(__put_user_4);
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index 4f0047342469..dfedea7b266b 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -60,8 +60,8 @@ typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long);
60static void memcpy_multicache(void *dest, const void *source, 60static void memcpy_multicache(void *dest, const void *source,
61 pte_t dst_pte, pte_t src_pte, int len) 61 pte_t dst_pte, pte_t src_pte, int len)
62{ 62{
63 int idx, i; 63 int idx;
64 unsigned long flags, newsrc, newdst, endsrc; 64 unsigned long flags, newsrc, newdst;
65 pmd_t *pmdp; 65 pmd_t *pmdp;
66 pte_t *ptep; 66 pte_t *ptep;
67 int cpu = get_cpu(); 67 int cpu = get_cpu();
@@ -121,7 +121,7 @@ static void memcpy_multicache(void *dest, const void *source,
121 */ 121 */
122 sim_allow_multiple_caching(0); 122 sim_allow_multiple_caching(0);
123 local_irq_restore(flags); 123 local_irq_restore(flags);
124 put_cpu_no_resched(); 124 put_cpu();
125} 125}
126 126
127/* 127/*
diff --git a/arch/tile/lib/memmove_32.c b/arch/tile/lib/memmove_32.c
index f09d8c4523ec..fd615ae6ade7 100644
--- a/arch/tile/lib/memmove_32.c
+++ b/arch/tile/lib/memmove_32.c
@@ -42,7 +42,7 @@ void *memmove(void *dest, const void *src, size_t n)
42 in = (const uint8_t *)src; 42 in = (const uint8_t *)src;
43 out = (uint8_t *)dest; 43 out = (uint8_t *)dest;
44 stride = 1; 44 stride = 1;
45 } 45 }
46 46
47 /* Manually software-pipeline this loop. */ 47 /* Manually software-pipeline this loop. */
48 x = *in; 48 x = *in;
diff --git a/arch/tile/lib/memset_32.c b/arch/tile/lib/memset_32.c
index 8593bc82398a..bfde5d864df1 100644
--- a/arch/tile/lib/memset_32.c
+++ b/arch/tile/lib/memset_32.c
@@ -245,7 +245,8 @@ void *memset(void *s, int c, size_t n)
245 wh += CACHE_LINE_SIZE_IN_WORDS; 245 wh += CACHE_LINE_SIZE_IN_WORDS;
246 } while (--i); 246 } while (--i);
247 247
248 for (j = x * (CACHE_LINE_SIZE_IN_WORDS / 4); j != 0; j--) { 248 for (j = x * (CACHE_LINE_SIZE_IN_WORDS / 4);
249 j != 0; j--) {
249 *out32++ = v32; 250 *out32++ = v32;
250 *out32++ = v32; 251 *out32++ = v32;
251 *out32++ = v32; 252 *out32++ = v32;
diff --git a/arch/tile/lib/spinlock_common.h b/arch/tile/lib/spinlock_common.h
index 8dffebde6630..c10109809132 100644
--- a/arch/tile/lib/spinlock_common.h
+++ b/arch/tile/lib/spinlock_common.h
@@ -35,7 +35,7 @@ relax(int iterations)
35} 35}
36 36
37/* Perform bounded exponential backoff.*/ 37/* Perform bounded exponential backoff.*/
38void delay_backoff(int iterations) 38static void delay_backoff(int iterations)
39{ 39{
40 u32 exponent, loops; 40 u32 exponent, loops;
41 41
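
For reference, the bounded exponential backoff that delay_backoff() implements is only a few lines; a hedged sketch (the cap is illustrative, not the tile constant, and relax() is the busy-wait helper declared earlier in this header):

        static void delay_backoff_sketch(int iterations)
        {
                u32 exponent = iterations + 1;

                if (exponent > 8)       /* illustrative cap bounds the wait */
                        exponent = 8;
                relax(1u << exponent);
        }

Making it static is safe because this header is only included by the spinlock implementations that use it.
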
diff --git a/arch/tile/lib/uaccess.c b/arch/tile/lib/uaccess.c
index 9ae182568b77..f8d398c9ee7f 100644
--- a/arch/tile/lib/uaccess.c
+++ b/arch/tile/lib/uaccess.c
@@ -18,14 +18,15 @@
18int __range_ok(unsigned long addr, unsigned long size) 18int __range_ok(unsigned long addr, unsigned long size)
19{ 19{
20 unsigned long limit = current_thread_info()->addr_limit.seg; 20 unsigned long limit = current_thread_info()->addr_limit.seg;
21 __chk_user_ptr(addr);
22 return !((addr < limit && size <= limit - addr) || 21 return !((addr < limit && size <= limit - addr) ||
23 is_arch_mappable_range(addr, size)); 22 is_arch_mappable_range(addr, size));
24} 23}
25EXPORT_SYMBOL(__range_ok); 24EXPORT_SYMBOL(__range_ok);
26 25
26#ifdef CONFIG_DEBUG_COPY_FROM_USER
27void copy_from_user_overflow(void) 27void copy_from_user_overflow(void)
28{ 28{
29 WARN(1, "Buffer overflow detected!\n"); 29 WARN(1, "Buffer overflow detected!\n");
30} 30}
31EXPORT_SYMBOL(copy_from_user_overflow); 31EXPORT_SYMBOL(copy_from_user_overflow);
32#endif
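
Wrapping copy_from_user_overflow() in CONFIG_DEBUG_COPY_FROM_USER matches how it is reached: the uaccess wrappers only call it when the option is on and a compile-time-constant copy size exceeds the destination object. A sketch of the caller side (shape per mainline of this era, not the exact tile header):

        static inline unsigned long copy_from_user_sketch(void *to,
                        const void __user *from, unsigned long n)
        {
                int sz = __compiletime_object_size(to); /* -1 if unknown */

                if (likely(sz == -1 || sz >= n))
                        n = _copy_from_user(to, from, n);
                else
                        copy_from_user_overflow();      /* WARN: caller bug */
                return n;
        }
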
diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c
index 818c9bef060c..55e58e93bfc5 100644
--- a/arch/tile/mm/elf.c
+++ b/arch/tile/mm/elf.c
@@ -20,6 +20,7 @@
20#include <linux/elf.h> 20#include <linux/elf.h>
21#include <asm/pgtable.h> 21#include <asm/pgtable.h>
22#include <asm/pgalloc.h> 22#include <asm/pgalloc.h>
23#include <asm/sections.h>
23 24
24/* Notify a running simulator, if any, that an exec just occurred. */ 25/* Notify a running simulator, if any, that an exec just occurred. */
25static void sim_notify_exec(const char *binary_name) 26static void sim_notify_exec(const char *binary_name)
@@ -77,9 +78,8 @@ static void *vdso_page;
77/* One-entry array used for install_special_mapping. */ 78/* One-entry array used for install_special_mapping. */
78static struct page *vdso_pages[1]; 79static struct page *vdso_pages[1];
79 80
80int __init vdso_setup(void) 81static int __init vdso_setup(void)
81{ 82{
82 extern char __rt_sigreturn[], __rt_sigreturn_end[];
83 vdso_page = (void *)get_zeroed_page(GFP_ATOMIC); 83 vdso_page = (void *)get_zeroed_page(GFP_ATOMIC);
84 memcpy(vdso_page, __rt_sigreturn, __rt_sigreturn_end - __rt_sigreturn); 84 memcpy(vdso_page, __rt_sigreturn, __rt_sigreturn_end - __rt_sigreturn);
85 vdso_pages[0] = virt_to_page(vdso_page); 85 vdso_pages[0] = virt_to_page(vdso_page);
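
vdso_setup() leaves vdso_pages[] holding the single trampoline page; at exec time the arch code hands that array to install_special_mapping() so every process sees the sigreturn stub at a fixed, executable address. A sketch of the consumer side (the flag set here is a plausible guess, not copied from the tile sources):

        static int map_vdso_sketch(struct mm_struct *mm, unsigned long addr)
        {
                return install_special_mapping(mm, addr, PAGE_SIZE,
                                               VM_READ | VM_EXEC |
                                               VM_MAYREAD | VM_MAYEXEC,
                                               vdso_pages);
        }
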
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 9b6b92f07def..0011f06b4fe2 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -39,32 +39,11 @@
39#include <asm/system.h> 39#include <asm/system.h>
40#include <asm/pgalloc.h> 40#include <asm/pgalloc.h>
41#include <asm/sections.h> 41#include <asm/sections.h>
42#include <asm/traps.h>
43#include <asm/syscalls.h>
42 44
43#include <arch/interrupts.h> 45#include <arch/interrupts.h>
44 46
45/*
46 * Unlock any spinlocks which will prevent us from getting the
47 * message out
48 */
49void bust_spinlocks(int yes)
50{
51 int loglevel_save = console_loglevel;
52
53 if (yes) {
54 oops_in_progress = 1;
55 return;
56 }
57 oops_in_progress = 0;
58 /*
59 * OK, the message is on the console. Now we call printk()
60 * without oops_in_progress set so that printk will give klogd
61 * a poke. Hold onto your hats...
62 */
63 console_loglevel = 15; /* NMI oopser may have shut the console up */
64 printk(" ");
65 console_loglevel = loglevel_save;
66}
67
68static noinline void force_sig_info_fault(int si_signo, int si_code, 47static noinline void force_sig_info_fault(int si_signo, int si_code,
69 unsigned long address, int fault_num, struct task_struct *tsk) 48 unsigned long address, int fault_num, struct task_struct *tsk)
70{ 49{
@@ -301,10 +280,10 @@ static int handle_page_fault(struct pt_regs *regs,
301 */ 280 */
302 stack_offset = stack_pointer & (THREAD_SIZE-1); 281 stack_offset = stack_pointer & (THREAD_SIZE-1);
303 if (stack_offset < THREAD_SIZE / 8) { 282 if (stack_offset < THREAD_SIZE / 8) {
304 printk(KERN_ALERT "Potential stack overrun: sp %#lx\n", 283 pr_alert("Potential stack overrun: sp %#lx\n",
305 stack_pointer); 284 stack_pointer);
306 show_regs(regs); 285 show_regs(regs);
307 printk(KERN_ALERT "Killing current process %d/%s\n", 286 pr_alert("Killing current process %d/%s\n",
308 tsk->pid, tsk->comm); 287 tsk->pid, tsk->comm);
309 do_group_exit(SIGKILL); 288 do_group_exit(SIGKILL);
310 } 289 }
@@ -422,7 +401,7 @@ good_area:
422 } else if (write) { 401 } else if (write) {
423#ifdef TEST_VERIFY_AREA 402#ifdef TEST_VERIFY_AREA
424 if (!is_page_fault && regs->cs == KERNEL_CS) 403 if (!is_page_fault && regs->cs == KERNEL_CS)
425 printk("WP fault at "REGFMT"\n", regs->eip); 404 pr_err("WP fault at "REGFMT"\n", regs->eip);
426#endif 405#endif
427 if (!(vma->vm_flags & VM_WRITE)) 406 if (!(vma->vm_flags & VM_WRITE))
428 goto bad_area; 407 goto bad_area;
@@ -450,6 +429,7 @@ good_area:
450 else 429 else
451 tsk->min_flt++; 430 tsk->min_flt++;
452 431
432#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
453 /* 433 /*
454 * If this was an asynchronous fault, 434 * If this was an asynchronous fault,
455 * restart the appropriate engine. 435 * restart the appropriate engine.
@@ -472,6 +452,7 @@ good_area:
472 break; 452 break;
473#endif 453#endif
474 } 454 }
455#endif
475 456
476 up_read(&mm->mmap_sem); 457 up_read(&mm->mmap_sem);
477 return 1; 458 return 1;
@@ -514,17 +495,17 @@ no_context:
514 pte_t *pte = lookup_address(address); 495 pte_t *pte = lookup_address(address);
515 496
516 if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) 497 if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
517 printk(KERN_CRIT "kernel tried to execute" 498 pr_crit("kernel tried to execute"
518 " non-executable page - exploit attempt?" 499 " non-executable page - exploit attempt?"
519 " (uid: %d)\n", current->uid); 500 " (uid: %d)\n", current->uid);
520 } 501 }
521#endif 502#endif
522 if (address < PAGE_SIZE) 503 if (address < PAGE_SIZE)
523 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference\n"); 504 pr_alert("Unable to handle kernel NULL pointer dereference\n");
524 else 505 else
525 printk(KERN_ALERT "Unable to handle kernel paging request\n"); 506 pr_alert("Unable to handle kernel paging request\n");
526 printk(" at virtual address "REGFMT", pc "REGFMT"\n", 507 pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
527 address, regs->pc); 508 address, regs->pc);
528 509
529 show_regs(regs); 510 show_regs(regs);
530 511
@@ -555,7 +536,7 @@ out_of_memory:
555 down_read(&mm->mmap_sem); 536 down_read(&mm->mmap_sem);
556 goto survive; 537 goto survive;
557 } 538 }
558 printk("VM: killing process %s\n", tsk->comm); 539 pr_alert("VM: killing process %s\n", tsk->comm);
559 if (!is_kernel_mode) 540 if (!is_kernel_mode)
560 do_group_exit(SIGKILL); 541 do_group_exit(SIGKILL);
561 goto no_context; 542 goto no_context;
@@ -573,31 +554,12 @@ do_sigbus:
573 554
574#ifndef __tilegx__ 555#ifndef __tilegx__
575 556
576extern char sys_cmpxchg[], __sys_cmpxchg_end[];
577extern char __sys_cmpxchg_grab_lock[];
578extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
579
580/*
581 * We return this structure in registers to avoid having to write
582 * additional save/restore code in the intvec.S caller.
583 */
584struct intvec_state {
585 void *handler;
586 unsigned long vecnum;
587 unsigned long fault_num;
588 unsigned long info;
589 unsigned long retval;
590};
591
592/* We must release ICS before panicking or we won't get anywhere. */ 557/* We must release ICS before panicking or we won't get anywhere. */
593#define ics_panic(fmt, ...) do { \ 558#define ics_panic(fmt, ...) do { \
594 __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \ 559 __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
595 panic(fmt, __VA_ARGS__); \ 560 panic(fmt, __VA_ARGS__); \
596} while (0) 561} while (0)
597 562
598void do_page_fault(struct pt_regs *regs, int fault_num,
599 unsigned long address, unsigned long write);
600
601/* 563/*
602 * When we take an ITLB or DTLB fault or access violation in the 564 * When we take an ITLB or DTLB fault or access violation in the
603 * supervisor while the critical section bit is set, the hypervisor is 565 * supervisor while the critical section bit is set, the hypervisor is
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 1fcecc5b9e03..ff1cdff5114d 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -121,7 +121,7 @@ static struct list_head amp_list = LIST_HEAD_INIT(amp_list);
121struct kmap_amps { 121struct kmap_amps {
122 struct atomic_mapped_page per_type[KM_TYPE_NR]; 122 struct atomic_mapped_page per_type[KM_TYPE_NR];
123}; 123};
124DEFINE_PER_CPU(struct kmap_amps, amps); 124static DEFINE_PER_CPU(struct kmap_amps, amps);
125 125
126/* 126/*
127 * Add a page and va, on this cpu, to the list of kmap_atomic pages, 127 * Add a page and va, on this cpu, to the list of kmap_atomic pages,
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 52feb77133ce..97c478e7be27 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -46,7 +46,7 @@
46 * locally from a remote home. There's no point in using it if we 46 * locally from a remote home. There's no point in using it if we
47 * don't have coherent local caching, though. 47 * don't have coherent local caching, though.
48 */ 48 */
49int __write_once noallocl2; 49static int __write_once noallocl2;
50static int __init set_noallocl2(char *str) 50static int __init set_noallocl2(char *str)
51{ 51{
52 noallocl2 = 1; 52 noallocl2 = 1;
@@ -60,15 +60,11 @@ early_param("noallocl2", set_noallocl2);
60 60
61#endif 61#endif
62 62
63
64
65/* Provide no-op versions of these routines to keep flush_remote() cleaner. */ 63/* Provide no-op versions of these routines to keep flush_remote() cleaner. */
66#define mark_caches_evicted_start() 0 64#define mark_caches_evicted_start() 0
67#define mark_caches_evicted_finish(mask, timestamp) do {} while (0) 65#define mark_caches_evicted_finish(mask, timestamp) do {} while (0)
68 66
69 67
70
71
72/* 68/*
73 * Update the irq_stat for cpus that we are going to interrupt 69 * Update the irq_stat for cpus that we are going to interrupt
74 * with TLB or cache flushes. Also handle removing dataplane cpus 70 * with TLB or cache flushes. Also handle removing dataplane cpus
@@ -171,20 +167,12 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
171 cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy); 167 cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
172 cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy); 168 cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);
173 169
174 printk("hv_flush_remote(%#llx, %#lx, %p [%s]," 170 pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
175 " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n", 171 " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
176 cache_pa, cache_control, cache_cpumask, cache_buf, 172 cache_pa, cache_control, cache_cpumask, cache_buf,
177 (unsigned long)tlb_va, tlb_length, tlb_pgsize, 173 (unsigned long)tlb_va, tlb_length, tlb_pgsize,
178 tlb_cpumask, tlb_buf, 174 tlb_cpumask, tlb_buf,
179 asids, asidcount, rc); 175 asids, asidcount, rc);
180 if (asidcount > 0) {
181 int i;
182 printk(" asids:");
183 for (i = 0; i < asidcount; ++i)
184 printk(" %d,%d,%d",
185 asids[i].x, asids[i].y, asids[i].asid);
186 printk("\n");
187 }
188 panic("Unsafe to continue."); 176 panic("Unsafe to continue.");
189} 177}
190 178
@@ -293,7 +281,7 @@ pte_t pte_set_home(pte_t pte, int home)
293 */ 281 */
294 if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) { 282 if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
295 pte = hv_pte_clear_nc(pte); 283 pte = hv_pte_clear_nc(pte);
296 printk("non-immutable page incoherently referenced: %#llx\n", 284 pr_err("non-immutable page incoherently referenced: %#llx\n",
297 pte.val); 285 pte.val);
298 } 286 }
299 287
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index c38570f8f0d0..24688b697a8d 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -332,7 +332,7 @@ static __init int setup_hugepagesz(char *opt)
332 } else if (ps == PUD_SIZE) { 332 } else if (ps == PUD_SIZE) {
333 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); 333 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
334 } else { 334 } else {
335 printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n", 335 pr_err("hugepagesz: Unsupported page size %lu M\n",
336 ps >> 20); 336 ps >> 20);
337 return 0; 337 return 0;
338 } 338 }
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 125ac53b60fc..d89c9eacd162 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -67,7 +67,9 @@
67 67
68#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0)) 68#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))
69 69
70#ifndef __tilegx__
70unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE; 71unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
72#endif
71 73
72DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 74DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
73 75
@@ -282,9 +284,9 @@ static pgprot_t __init init_pgprot(ulong address)
282 /* 284 /*
283 * Everything else that isn't data or bss is heap, so mark it 285 * Everything else that isn't data or bss is heap, so mark it
284 * with the initial heap home (hash-for-home, or this cpu). This 286 * with the initial heap home (hash-for-home, or this cpu). This
285 * includes any addresses after the loaded image; any address before 287 * includes any addresses after the loaded image and any address before
286 * _einittext (since we already captured the case of text before 288 * _einitdata, since we already captured the case of text before
287 * _sinittext); and any init-data pages. 289 * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
288 * 290 *
289 * All the LOWMEM pages that we mark this way will get their 291 * All the LOWMEM pages that we mark this way will get their
290 * struct page homecache properly marked later, in set_page_homes(). 292 * struct page homecache properly marked later, in set_page_homes().
@@ -292,9 +294,7 @@ static pgprot_t __init init_pgprot(ulong address)
292 * homes, but with a zero free_time we don't have to actually 294 * homes, but with a zero free_time we don't have to actually
293 * do a flush action the first time we use them, either. 295 * do a flush action the first time we use them, either.
294 */ 296 */
295 if (address >= (ulong) _end || address < (ulong) _sdata || 297 if (address >= (ulong) _end || address < (ulong) _einitdata)
296 (address >= (ulong) _sinitdata &&
297 address < (ulong) _einitdata))
298 return construct_pgprot(PAGE_KERNEL, initial_heap_home()); 298 return construct_pgprot(PAGE_KERNEL, initial_heap_home());
299 299
300#if CHIP_HAS_CBOX_HOME_MAP() 300#if CHIP_HAS_CBOX_HOME_MAP()
@@ -304,35 +304,38 @@ static pgprot_t __init init_pgprot(ulong address)
304#endif 304#endif
305 305
306 /* 306 /*
307 * Make the w1data homed like heap to start with, to avoid
308 * making it part of the page-striped data area when we're just
309 * going to convert it to read-only soon anyway.
310 */
311 if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
312 return construct_pgprot(PAGE_KERNEL, initial_heap_home());
313
314 /*
307 * Otherwise we just hand out consecutive cpus. To avoid 315 * Otherwise we just hand out consecutive cpus. To avoid
308 * requiring this function to hold state, we just walk forward from 316 * requiring this function to hold state, we just walk forward from
309 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach 317 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
310 * the requested address, while walking cpu home around kdata_mask. 318 * the requested address, while walking cpu home around kdata_mask.
311 * This is typically no more than a dozen or so iterations. 319 * This is typically no more than a dozen or so iterations.
312 */ 320 */
313 BUG_ON(_einitdata != __bss_start); 321 page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK;
314 for (page = (ulong)_sdata, cpu = NR_CPUS; ; ) { 322 BUG_ON(address < page || address >= (ulong)_end);
315 cpu = cpumask_next(cpu, &kdata_mask); 323 cpu = cpumask_first(&kdata_mask);
316 if (cpu == NR_CPUS) 324 for (; page < address; page += PAGE_SIZE) {
317 cpu = cpumask_first(&kdata_mask); 325 if (page >= (ulong)&init_thread_union &&
318 if (page >= address) 326 page < (ulong)&init_thread_union + THREAD_SIZE)
319 break; 327 continue;
320 page += PAGE_SIZE;
321 if (page == (ulong)__start_rodata)
322 page = (ulong)__end_rodata;
323 if (page == (ulong)&init_thread_union)
324 page += THREAD_SIZE;
325 if (page == (ulong)_sinitdata)
326 page = (ulong)_einitdata;
327 if (page == (ulong)empty_zero_page) 328 if (page == (ulong)empty_zero_page)
328 page += PAGE_SIZE; 329 continue;
329#ifndef __tilegx__ 330#ifndef __tilegx__
330#if !ATOMIC_LOCKS_FOUND_VIA_TABLE() 331#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
331 if (page == (ulong)atomic_locks) 332 if (page == (ulong)atomic_locks)
332 page += PAGE_SIZE; 333 continue;
333#endif 334#endif
334#endif 335#endif
335 336 cpu = cpumask_next(cpu, &kdata_mask);
337 if (cpu == NR_CPUS)
338 cpu = cpumask_first(&kdata_mask);
336 } 339 }
337 return construct_pgprot(PAGE_KERNEL, cpu); 340 return construct_pgprot(PAGE_KERNEL, cpu);
338} 341}
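
The rewritten loop above trades the old "jump over each special region" arithmetic for a straight page walk: start at the first page boundary after __w1data_end, skip the few pages that get dedicated homes (the init_thread_union stack, empty_zero_page, atomic_locks), and rotate the candidate cpu around kdata_mask once per remaining page. Stripped of the skips, the rotation is:

        /* Round-robin page homes across the cpus in kdata_mask. */
        int cpu = cpumask_first(&kdata_mask);

        for (page = start; page < address; page += PAGE_SIZE) {
                if (page_has_fixed_home(page))  /* stand-in for the skips */
                        continue;
                cpu = cpumask_next(cpu, &kdata_mask);
                if (cpu == NR_CPUS)             /* wrapped past the last bit */
                        cpu = cpumask_first(&kdata_mask);
        }
        /* 'cpu' now homes the page at 'address' */
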
@@ -362,7 +365,7 @@ static int __init setup_ktext(char *str)
362 /* If you have a leading "nocache", turn off ktext caching */ 365 /* If you have a leading "nocache", turn off ktext caching */
363 if (strncmp(str, "nocache", 7) == 0) { 366 if (strncmp(str, "nocache", 7) == 0) {
364 ktext_nocache = 1; 367 ktext_nocache = 1;
365 printk("ktext: disabling local caching of kernel text\n"); 368 pr_info("ktext: disabling local caching of kernel text\n");
366 str += 7; 369 str += 7;
367 if (*str == ',') 370 if (*str == ',')
368 ++str; 371 ++str;
@@ -374,20 +377,20 @@ static int __init setup_ktext(char *str)
374 377
375 /* Default setting on Tile64: use a huge page */ 378 /* Default setting on Tile64: use a huge page */
376 if (strcmp(str, "huge") == 0) 379 if (strcmp(str, "huge") == 0)
377 printk("ktext: using one huge locally cached page\n"); 380 pr_info("ktext: using one huge locally cached page\n");
378 381
379 /* Pay TLB cost but get no cache benefit: cache small pages locally */ 382 /* Pay TLB cost but get no cache benefit: cache small pages locally */
380 else if (strcmp(str, "local") == 0) { 383 else if (strcmp(str, "local") == 0) {
381 ktext_small = 1; 384 ktext_small = 1;
382 ktext_local = 1; 385 ktext_local = 1;
383 printk("ktext: using small pages with local caching\n"); 386 pr_info("ktext: using small pages with local caching\n");
384 } 387 }
385 388
386 /* Neighborhood cache ktext pages on all cpus. */ 389 /* Neighborhood cache ktext pages on all cpus. */
387 else if (strcmp(str, "all") == 0) { 390 else if (strcmp(str, "all") == 0) {
388 ktext_small = 1; 391 ktext_small = 1;
389 ktext_all = 1; 392 ktext_all = 1;
390 printk("ktext: using maximal caching neighborhood\n"); 393 pr_info("ktext: using maximal caching neighborhood\n");
391 } 394 }
392 395
393 396
@@ -397,10 +400,10 @@ static int __init setup_ktext(char *str)
397 cpulist_scnprintf(buf, sizeof(buf), &ktext_mask); 400 cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
398 if (cpumask_weight(&ktext_mask) > 1) { 401 if (cpumask_weight(&ktext_mask) > 1) {
399 ktext_small = 1; 402 ktext_small = 1;
400 printk("ktext: using caching neighborhood %s " 403 pr_info("ktext: using caching neighborhood %s "
401 "with small pages\n", buf); 404 "with small pages\n", buf);
402 } else { 405 } else {
403 printk("ktext: caching on cpu %s with one huge page\n", 406 pr_info("ktext: caching on cpu %s with one huge page\n",
404 buf); 407 buf);
405 } 408 }
406 } 409 }
@@ -470,19 +473,19 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
470 473
471#if CHIP_HAS_CBOX_HOME_MAP() 474#if CHIP_HAS_CBOX_HOME_MAP()
472 if (ktext_arg_seen && ktext_hash) { 475 if (ktext_arg_seen && ktext_hash) {
473 printk("warning: \"ktext\" boot argument ignored" 476 pr_warning("warning: \"ktext\" boot argument ignored"
474 " if \"kcache_hash\" sets up text hash-for-home\n"); 477 " if \"kcache_hash\" sets up text hash-for-home\n");
475 ktext_small = 0; 478 ktext_small = 0;
476 } 479 }
477 480
478 if (kdata_arg_seen && kdata_hash) { 481 if (kdata_arg_seen && kdata_hash) {
479 printk("warning: \"kdata\" boot argument ignored" 482 pr_warning("warning: \"kdata\" boot argument ignored"
480 " if \"kcache_hash\" sets up data hash-for-home\n"); 483 " if \"kcache_hash\" sets up data hash-for-home\n");
481 } 484 }
482 485
483 if (kdata_huge && !hash_default) { 486 if (kdata_huge && !hash_default) {
484 printk("warning: disabling \"kdata=huge\"; requires" 487 pr_warning("warning: disabling \"kdata=huge\"; requires"
485 " kcache_hash=all or =allbutstack\n"); 488 " kcache_hash=all or =allbutstack\n");
486 kdata_huge = 0; 489 kdata_huge = 0;
487 } 490 }
488#endif 491#endif
@@ -556,11 +559,11 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	if (!cpumask_empty(&bad)) {
 		char buf[NR_CPUS * 5];
 		cpulist_scnprintf(buf, sizeof(buf), &bad);
-		printk("ktext: not using unavailable cpus %s\n", buf);
+		pr_info("ktext: not using unavailable cpus %s\n", buf);
 	}
 	if (cpumask_empty(&ktext_mask)) {
-		printk("ktext: no valid cpus; caching on %d.\n",
+		pr_warning("ktext: no valid cpus; caching on %d.\n",
 		       smp_processor_id());
 		cpumask_copy(&ktext_mask,
 			     cpumask_of(smp_processor_id()));
 	}
@@ -737,17 +740,18 @@ static void __init set_non_bootmem_pages_init(void)
 	for_each_zone(z) {
 		unsigned long start, end;
 		int nid = z->zone_pgdat->node_id;
+		int idx = zone_idx(z);
 
 		start = z->zone_start_pfn;
 		if (start == 0)
 			continue;  /* bootmem */
 		end = start + z->spanned_pages;
-		if (zone_idx(z) == ZONE_NORMAL) {
+		if (idx == ZONE_NORMAL) {
 			BUG_ON(start != node_start_pfn[nid]);
 			start = node_free_pfn[nid];
 		}
 #ifdef CONFIG_HIGHMEM
-		if (zone_idx(z) == ZONE_HIGHMEM)
+		if (idx == ZONE_HIGHMEM)
 			totalhigh_pages += z->spanned_pages;
 #endif
 		if (kdata_huge) {
@@ -841,9 +845,9 @@ void __init mem_init(void)
 #ifdef CONFIG_HIGHMEM
 	/* check that fixmap and pkmap do not overlap */
 	if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
-		printk(KERN_ERR "fixmap and kmap areas overlap"
+		pr_err("fixmap and kmap areas overlap"
 		       " - this will crash\n");
-		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
+		pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
 		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
 		       FIXADDR_START);
 		BUG();
@@ -863,7 +867,7 @@ void __init mem_init(void)
 	initsize = (unsigned long)&_einittext - (unsigned long)&_sinittext;
 	initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata;
 
-	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
+	pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
 	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
 	       num_physpages << (PAGE_SHIFT-10),
 	       codesize >> 10,
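
Aside: the `<< (PAGE_SHIFT-10)` shifts in the pr_info() above convert page counts to kilobytes: one page is 1 << PAGE_SHIFT bytes, i.e. 1 << (PAGE_SHIFT-10) KiB. A runnable sketch with an illustrative 4 KiB page size and a hypothetical page count:

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* illustrative; the real value is per-arch */

	int main(void)
	{
		unsigned long nr_pages = 65536;	/* hypothetical free pages */

		/* page count -> KiB: multiply by 2^(PAGE_SHIFT-10), i.e. 4 here */
		printf("Memory: %luk available\n",
		       nr_pages << (PAGE_SHIFT - 10));	/* prints 262144k */
		return 0;
	}
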
@@ -968,7 +972,6 @@ static void mark_w1data_ro(void)
 	BUG_ON((addr & (PAGE_SIZE-1)) != 0);
 	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
 		unsigned long pfn = kaddr_to_pfn((void *)addr);
-		struct page *page = pfn_to_page(pfn);
 		pte_t *ptep = virt_to_pte(NULL, addr);
 		BUG_ON(pte_huge(*ptep));	/* not relevant for kdata_huge */
 		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
@@ -986,7 +989,7 @@ static long __write_once initfree = 1;
 static int __init set_initfree(char *str)
 {
 	strict_strtol(str, 0, &initfree);
-	printk("initfree: %s free init pages\n", initfree ? "will" : "won't");
+	pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
 	return 1;
 }
 __setup("initfree=", set_initfree);
@@ -996,8 +999,8 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	unsigned long addr = (unsigned long) begin;
 
 	if (kdata_huge && !initfree) {
-		printk("Warning: ignoring initfree=0:"
+		pr_warning("Warning: ignoring initfree=0:"
 		       " incompatible with kdata=huge\n");
 		initfree = 1;
 	}
 	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
@@ -1033,7 +1036,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		free_page(addr);
 		totalram_pages++;
 	}
-	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
 }
 
 void free_initmem(void)
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 289e729bbd76..28c23140c947 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -46,7 +46,7 @@ void show_mem(void)
 {
 	struct zone *zone;
 
-	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
+	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
 	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
 	       " pagecache:%lu swap:%lu\n",
 	       (global_page_state(NR_ACTIVE_ANON) +
@@ -71,7 +71,6 @@ void show_mem(void)
 		if (!populated_zone(zone))
 			continue;
 
-		printk("Node %d %7s: ", zone_to_nid(zone), zone->name);
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < MAX_ORDER; order++) {
 			int nr = zone->free_area[order].nr_free;
@@ -80,7 +79,8 @@ void show_mem(void)
 				largest_order = order;
 		}
 		spin_unlock_irqrestore(&zone->lock, flags);
-		printk("%lukB (largest %luKb)\n",
+		pr_err("Node %d %7s: %lukB (largest %luKb)\n",
+		       zone_to_nid(zone), zone->name,
 		       K(total), largest_order ? K(1UL) << largest_order : 0);
 	}
 }
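
Aside: this show_mem() hunk also implements the commit's "avoid partial-line printks" rule: the old code printed the "Node %d %7s: " prefix in one printk() and the sizes in a later one, so a concurrent message from another CPU could land between the two calls and split the line. Folding everything into a single pr_err() emits the line in one piece. A sketch of the hazard (nid, name, total and largest are stand-ins for the values computed above):

	/* Two partial-line calls can interleave with other CPUs' output:
	 *	printk("Node %d %7s: ", nid, name);
	 *	    <- another CPU's message may be printed here ->
	 *	printk("%lukB (largest %luKb)\n", total, largest);
	 * One complete line cannot:
	 */
	pr_err("Node %d %7s: %lukB (largest %luKb)\n",
	       nid, name, total, largest);
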
@@ -123,42 +123,6 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 	local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
 }
 
-/*
- * Associate a huge virtual page frame with a given physical page frame
- * and protection flags for that frame. pfn is for the base of the page,
- * vaddr is what the page gets mapped to - both must be properly aligned.
- * The pmd must already be instantiated.
- */
-void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-
-	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
-		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
-		return; /* BUG(); */
-	}
-	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
-		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
-		return; /* BUG(); */
-	}
-	pgd = swapper_pg_dir + pgd_index(vaddr);
-	if (pgd_none(*pgd)) {
-		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
-		return; /* BUG(); */
-	}
-	pud = pud_offset(pgd, vaddr);
-	pmd = pmd_offset(pud, vaddr);
-	set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(pfn), flags));
-	/*
-	 * It's enough to flush this one mapping.
-	 * We flush both small and huge TSBs to be sure.
-	 */
-	local_flush_tlb_page(NULL, vaddr, HPAGE_SIZE);
-	local_flush_tlb_pages(NULL, vaddr, PAGE_SIZE, HPAGE_SIZE);
-}
-
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
 {
 	unsigned long address = __fix_to_virt(idx);
@@ -257,7 +221,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	int flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO|__GFP_COMP;
+	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO|__GFP_COMP;
 	struct page *p;
 
 #ifdef CONFIG_HIGHPTE
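
Aside: the int-to-gfp_t change above is one of the sparse fixes named in the commit message. Under sparse (when __CHECKER__ is defined), gfp_t is a __bitwise "restricted" type, so storing GFP_KERNEL|... in a plain int draws a warning about mixing restricted and ordinary integers. A simplified sketch of the machinery (condensed from include/linux/types.h and compiler.h of this era):

	#ifdef __CHECKER__
	#define __bitwise	__attribute__((bitwise))
	#else
	#define __bitwise
	#endif

	typedef unsigned __bitwise gfp_t;

	/* Under sparse:
	 *	int   flags = GFP_KERNEL|__GFP_ZERO;	warns: restricted gfp_t
	 *	gfp_t flags = GFP_KERNEL|__GFP_ZERO;	clean
	 */
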
@@ -550,7 +514,7 @@ void iounmap(volatile void __iomem *addr_in)
 	read_unlock(&vmlist_lock);
 
 	if (!p) {
-		printk("iounmap: bad address %p\n", addr);
+		pr_err("iounmap: bad address %p\n", addr);
 		dump_stack();
 		return;
 	}