author    Linus Torvalds <torvalds@linux-foundation.org>  2010-08-15 20:31:43 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-08-15 20:31:43 -0400
commit    7a1b29a82ba76427de791098c095ce31dab9333d (patch)
tree      077a1563ce243b6ac619397a0b7904623a28de50
parent    d7824370e26325c881b665350ce64fb0a4fde24a (diff)
parent    a5854dd7f30c3849edf9b9711362e2dd51d3f855 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  arch/tile: don't validate CROSS_COMPILE needlessly
  arch/tile: export only COMMAND_LINE_SIZE to userspace.
  arch/tile: rename ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN
  arch/tile: Rename the hweight() implementations to __arch_hweight()
  arch/tile: extend syscall ABI to set r1 on return as well.
  arch/tile: Various cleanups.
  arch/tile: support backtracing on TILE-Gx
  arch/tile: Fix a couple of issues with the COMPAT code for TILE-Gx.
  arch/tile: Use separate, better minsec values for clocksource and sched_clock.
  arch/tile: correct a bug in freeing bootmem by VA for the optional second initrd.
  arch: tile: mm: pgtable.c: Removed duplicated #include
  arch: tile: kernel/proc.c Removed duplicated #include
  Add fanotify syscalls to <asm-generic/unistd.h>.
  arch/tile: support new kunmap_atomic() naming convention.
  tile: remove unused ISA_DMA_THRESHOLD define

Conflicts in arch/tile/configs/tile_defconfig (pick the mainline version
with the reduced defconfig).
-rw-r--r--  arch/tile/Makefile                   |  20
-rw-r--r--  arch/tile/include/arch/abi.h         |   4
-rw-r--r--  arch/tile/include/asm/atomic_32.h    |  37
-rw-r--r--  arch/tile/include/asm/backtrace.h    |   4
-rw-r--r--  arch/tile/include/asm/bitops.h       |   9
-rw-r--r--  arch/tile/include/asm/cache.h        |   5
-rw-r--r--  arch/tile/include/asm/highmem.h      |   2
-rw-r--r--  arch/tile/include/asm/page.h         |   6
-rw-r--r--  arch/tile/include/asm/scatterlist.h  |  21
-rw-r--r--  arch/tile/include/asm/setup.h        |   8
-rw-r--r--  arch/tile/include/asm/siginfo.h      |   4
-rw-r--r--  arch/tile/include/asm/uaccess.h      |   4
-rw-r--r--  arch/tile/include/hv/hypervisor.h    |   8
-rw-r--r--  arch/tile/kernel/backtrace.c         | 137
-rw-r--r--  arch/tile/kernel/compat_signal.c     |   4
-rw-r--r--  arch/tile/kernel/intvec_32.S         |  14
-rw-r--r--  arch/tile/kernel/proc.c              |   1
-rw-r--r--  arch/tile/kernel/setup.c             |   4
-rw-r--r--  arch/tile/kernel/stack.c             |   8
-rw-r--r--  arch/tile/kernel/time.c              |  33
-rw-r--r--  arch/tile/kernel/traps.c             |   4
-rw-r--r--  arch/tile/lib/Makefile               |   4
-rw-r--r--  arch/tile/lib/exports.c              |  16
-rw-r--r--  arch/tile/lib/memcpy_32.S            |  20
-rw-r--r--  arch/tile/lib/memset_32.c            |  25
-rw-r--r--  arch/tile/mm/fault.c                 |   8
-rw-r--r--  arch/tile/mm/highmem.c               |   4
-rw-r--r--  arch/tile/mm/homecache.c             |   3
-rw-r--r--  arch/tile/mm/pgtable.c               |   1
-rw-r--r--  include/asm-generic/unistd.h         |   6
30 files changed, 218 insertions, 206 deletions
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 07c4318c0629..fd8f6bb5face 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -8,20 +8,22 @@
 # for "archclean" and "archdep" for cleaning up and making dependencies for
 # this architecture
 
-ifeq ($(CROSS_COMPILE),)
 # If building with TILERA_ROOT set (i.e. using the Tilera Multicore
 # Development Environment) we can set CROSS_COMPILE based on that.
-ifdef TILERA_ROOT
-CROSS_COMPILE = $(TILERA_ROOT)/bin/tile-
-endif
-endif
-
 # If we're not cross-compiling, make sure we're on the right architecture.
+# Only bother to test for a few common targets, to avoid useless errors.
 ifeq ($(CROSS_COMPILE),)
-HOST_ARCH = $(shell uname -m)
-ifneq ($(HOST_ARCH),$(ARCH))
+  ifdef TILERA_ROOT
+    CROSS_COMPILE := $(TILERA_ROOT)/bin/tile-
+  else
+    goals := $(if $(MAKECMDGOALS), $(MAKECMDGOALS), all)
+    ifneq ($(strip $(filter vmlinux modules all,$(goals))),)
+      HOST_ARCH := $(shell uname -m)
+      ifneq ($(HOST_ARCH),$(ARCH))
 $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
-endif
+      endif
+    endif
+  endif
 endif
 
 
diff --git a/arch/tile/include/arch/abi.h b/arch/tile/include/arch/abi.h
index da8df5b9d914..8affc76f771a 100644
--- a/arch/tile/include/arch/abi.h
+++ b/arch/tile/include/arch/abi.h
@@ -59,9 +59,7 @@
  * The ABI requires callers to allocate a caller state save area of
  * this many bytes at the bottom of each stack frame.
  */
-#ifdef __tile__
-#define C_ABI_SAVE_AREA_SIZE (2 * __SIZEOF_POINTER__)
-#endif
+#define C_ABI_SAVE_AREA_SIZE (2 * (CHIP_WORD_SIZE() / 8))
 
 /**
  * The operand to an 'info' opcode directing the backtracer to not
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 40a5a3a876d9..ed359aee8837 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -255,43 +255,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
 #define smp_mb__after_atomic_dec()	do { } while (0)
 #define smp_mb__after_atomic_inc()	do { } while (0)
 
-
-/*
- * Support "tns" atomic integers.  These are atomic integers that can
- * hold any value but "1".  They are more efficient than regular atomic
- * operations because the "lock" (aka acquire) step is a single "tns"
- * in the uncontended case, and the "unlock" (aka release) step is a
- * single "store" without an mf.  (However, note that on tilepro the
- * "tns" will evict the local cache line, so it's not all upside.)
- *
- * Note that you can ONLY observe the value stored in the pointer
- * using these operations; a direct read of the value may confusingly
- * return the special value "1".
- */
-
-int __tns_atomic_acquire(atomic_t *);
-void __tns_atomic_release(atomic_t *p, int v);
-
-static inline void tns_atomic_set(atomic_t *v, int i)
-{
-	__tns_atomic_acquire(v);
-	__tns_atomic_release(v, i);
-}
-
-static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n)
-{
-	int ret = __tns_atomic_acquire(v);
-	__tns_atomic_release(v, (ret == o) ? n : ret);
-	return ret;
-}
-
-static inline int tns_atomic_xchg(atomic_t *v, int n)
-{
-	int ret = __tns_atomic_acquire(v);
-	__tns_atomic_release(v, n);
-	return ret;
-}
-
 #endif /* !__ASSEMBLY__ */
 
 /*
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h
index 6970bfcad549..758ca4619d50 100644
--- a/arch/tile/include/asm/backtrace.h
+++ b/arch/tile/include/asm/backtrace.h
@@ -21,7 +21,9 @@
 
 #include <arch/chip.h>
 
-#if CHIP_VA_WIDTH() > 32
+#if defined(__tile__)
+typedef unsigned long VirtualAddress;
+#elif CHIP_VA_WIDTH() > 32
 typedef unsigned long long VirtualAddress;
 #else
 typedef unsigned int VirtualAddress;
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 84600f3514da..6832b4be8990 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -98,26 +98,27 @@ static inline int fls64(__u64 w)
 	return (sizeof(__u64) * 8) - __builtin_clzll(w);
 }
 
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
 {
 	return __builtin_popcount(w);
 }
 
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
 {
 	return __builtin_popcount(w & 0xffff);
 }
 
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
 {
 	return __builtin_popcount(w & 0xff);
 }
 
-static inline unsigned long hweight64(__u64 w)
+static inline unsigned long __arch_hweight64(__u64 w)
 {
 	return __builtin_popcountll(w);
 }
 
+#include <asm-generic/bitops/const_hweight.h>
 #include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/ext2-non-atomic.h>
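
[Editor's note: the renamed __arch_hweight helpers above map directly onto GCC's popcount builtins; the masks are what distinguish the 8/16-bit variants. A minimal user-space sketch (hypothetical test harness, not part of the kernel tree) confirming the masking behavior:]

	#include <assert.h>

	/* Standalone copies of the helpers above, renamed so they don't
	 * clash with libc; the masks mirror the kernel versions exactly. */
	static unsigned int hweight32_demo(unsigned int w) { return __builtin_popcount(w); }
	static unsigned int hweight16_demo(unsigned int w) { return __builtin_popcount(w & 0xffff); }
	static unsigned int hweight8_demo(unsigned int w)  { return __builtin_popcount(w & 0xff); }

	int main(void)
	{
		assert(hweight32_demo(0xf0f0f0f0) == 16);
		assert(hweight16_demo(0xffff0003) == 2);   /* high bits masked off */
		assert(hweight8_demo(0x1ff) == 8);         /* only the low byte counts */
		return 0;
	}
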
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index f6101840c9e7..08a2815b5e4e 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -27,11 +27,10 @@
 #define L2_CACHE_ALIGN(x)	(((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES)
 
 /*
- * TILE-Gx is fully coherents so we don't need to define
- * ARCH_KMALLOC_MINALIGN.
+ * TILE-Gx is fully coherent so we don't need to define ARCH_DMA_MINALIGN.
  */
 #ifndef __tilegx__
-#define ARCH_KMALLOC_MINALIGN	L2_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L2_CACHE_BYTES
 #endif
 
 /* use the cache line size for the L2, which is where it counts */
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index efdd12e91020..d155db6fa9bd 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -60,7 +60,7 @@ void *kmap_fix_kpte(struct page *page, int finished);
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)
 
-void kunmap_atomic(void *kvaddr, enum km_type type);
+void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index f894a9016da6..7d90641cf18d 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -129,6 +129,11 @@ static inline u64 pmd_val(pmd_t pmd)
 
 #endif
 
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+	return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
@@ -332,7 +337,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
 	(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #include <asm-generic/memory_model.h>
-#include <asm-generic/getorder.h>
 
 #endif /* __KERNEL__ */
 
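
[Editor's note: the new inline get_order() rounds the size up to whole pages and returns ceil(log2(pages)), replacing the asm-generic version. A hedged user-space check of the same expression; the PAGE_SHIFT value is assumed for illustration, and sizes of one page or less are avoided because they feed 0 to the clz builtin, which tile's clz instruction defines but the host builtin does not:]

	#include <assert.h>

	#define PAGE_SHIFT_DEMO 12              /* assume 4 KB pages for the demo */
	#define BITS_PER_LONG_DEMO (8 * (int)sizeof(long))

	static int get_order_demo(unsigned long size)
	{
		/* Same expression as the patch above. */
		return BITS_PER_LONG_DEMO - __builtin_clzl((size - 1) >> PAGE_SHIFT_DEMO);
	}

	int main(void)
	{
		assert(get_order_demo(4097) == 1);      /* just over one page */
		assert(get_order_demo(8192) == 1);      /* two pages */
		assert(get_order_demo(16384) == 2);     /* four pages */
		assert(get_order_demo(65536) == 4);     /* sixteen pages */
		return 0;
	}
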
diff --git a/arch/tile/include/asm/scatterlist.h b/arch/tile/include/asm/scatterlist.h
index c5604242c0d5..35d786fe93ae 100644
--- a/arch/tile/include/asm/scatterlist.h
+++ b/arch/tile/include/asm/scatterlist.h
@@ -1,22 +1 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT.  See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_SCATTERLIST_H
-#define _ASM_TILE_SCATTERLIST_H
-
-#define ISA_DMA_THRESHOLD (~0UL)
-
 #include <asm-generic/scatterlist.h>
-
-#endif /* _ASM_TILE_SCATTERLIST_H */
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
index 823ddd47ff6e..7caf0f36b030 100644
--- a/arch/tile/include/asm/setup.h
+++ b/arch/tile/include/asm/setup.h
@@ -15,6 +15,10 @@
 #ifndef _ASM_TILE_SETUP_H
 #define _ASM_TILE_SETUP_H
 
+#define COMMAND_LINE_SIZE	2048
+
+#ifdef __KERNEL__
+
 #include <linux/pfn.h>
 #include <linux/init.h>
 
@@ -23,10 +27,10 @@
  */
 #define MAXMEM_PFN	PFN_DOWN(MAXMEM)
 
-#define COMMAND_LINE_SIZE	2048
-
 void early_panic(const char *fmt, ...);
 void warn_early_printk(void);
 void __init disable_early_printk(void);
 
+#endif /* __KERNEL__ */
+
 #endif /* _ASM_TILE_SETUP_H */
diff --git a/arch/tile/include/asm/siginfo.h b/arch/tile/include/asm/siginfo.h
index 0c12d1b9ddf2..56d661bb010b 100644
--- a/arch/tile/include/asm/siginfo.h
+++ b/arch/tile/include/asm/siginfo.h
@@ -17,6 +17,10 @@
 
 #define __ARCH_SI_TRAPNO
 
+#ifdef __LP64__
+# define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
+#endif
+
 #include <asm-generic/siginfo.h>
 
 /*
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index ed17a80ec0ed..ef34d2caa5b1 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -389,14 +389,14 @@ static inline unsigned long __must_check copy_from_user(void *to,
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-extern unsigned long __copy_in_user_asm(
+extern unsigned long __copy_in_user_inatomic(
 	void __user *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check
 __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
-	return __copy_in_user_asm(to, from, n);
+	return __copy_in_user_inatomic(to, from, n);
 }
 
 static inline unsigned long __must_check
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 59b46dc53994..9bd303a141b2 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -532,11 +532,11 @@ void hv_disable_intr(HV_IntrMask disab_mask);
  */
 void hv_clear_intr(HV_IntrMask clear_mask);
 
-/** Assert a set of device interrupts.
+/** Raise a set of device interrupts.
  *
- * @param assert_mask Bitmap of interrupts to clear.
+ * @param raise_mask Bitmap of interrupts to raise.
  */
-void hv_assert_intr(HV_IntrMask assert_mask);
+void hv_raise_intr(HV_IntrMask raise_mask);
 
 /** Trigger a one-shot interrupt on some tile
  *
@@ -1712,7 +1712,7 @@ typedef struct
  * @param cache_control This argument allows you to specify a length of
  * physical address space to flush (maximum HV_FLUSH_MAX_CACHE_LEN).
  * You can "or" in HV_FLUSH_EVICT_L2 to flush the whole L2 cache.
- * You can "or" in HV_FLUSH_EVICT_LI1 to flush the whole LII cache.
+ * You can "or" in HV_FLUSH_EVICT_L1I to flush the whole L1I cache.
  * HV_FLUSH_ALL flushes all caches.
  * @param cache_cpumask Bitmask (in row-major order, supervisor-relative) of
  * tile indices to perform cache flush on.  The low bit of the first
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
index 77265f3b58d6..d3c41c1ff6bd 100644
--- a/arch/tile/kernel/backtrace.c
+++ b/arch/tile/kernel/backtrace.c
@@ -19,9 +19,6 @@
 
 #include <arch/chip.h>
 
-#if TILE_CHIP < 10
-
-
 #include <asm/opcode-tile.h>
 
 
@@ -29,6 +26,27 @@
 #define TREG_LR 55
 
 
+#if TILE_CHIP >= 10
+#define tile_bundle_bits tilegx_bundle_bits
+#define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE
+#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
+#define tile_decoded_instruction tilegx_decoded_instruction
+#define tile_mnemonic tilegx_mnemonic
+#define parse_insn_tile parse_insn_tilegx
+#define TILE_OPC_IRET TILEGX_OPC_IRET
+#define TILE_OPC_ADDI TILEGX_OPC_ADDI
+#define TILE_OPC_ADDLI TILEGX_OPC_ADDLI
+#define TILE_OPC_INFO TILEGX_OPC_INFO
+#define TILE_OPC_INFOL TILEGX_OPC_INFOL
+#define TILE_OPC_JRP TILEGX_OPC_JRP
+#define TILE_OPC_MOVE TILEGX_OPC_MOVE
+#define OPCODE_STORE TILEGX_OPC_ST
+typedef long long bt_int_reg_t;
+#else
+#define OPCODE_STORE TILE_OPC_SW
+typedef int bt_int_reg_t;
+#endif
+
 /** A decoded bundle used for backtracer analysis. */
 struct BacktraceBundle {
 	tile_bundle_bits bits;
@@ -41,7 +59,7 @@ struct BacktraceBundle {
 /* This implementation only makes sense for native tools. */
 /** Default function to read memory. */
 static bool bt_read_memory(void *result, VirtualAddress addr,
-			   size_t size, void *extra)
+			   unsigned int size, void *extra)
 {
 	/* FIXME: this should do some horrible signal stuff to catch
 	 * SEGV cleanly and fail.
@@ -106,6 +124,12 @@ static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
 		find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
 	if (insn == NULL)
 		insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
+#if TILE_CHIP >= 10
+	if (insn == NULL)
+		insn = find_matching_insn(bundle, TILEGX_OPC_ADDXLI, vals, 2);
+	if (insn == NULL)
+		insn = find_matching_insn(bundle, TILEGX_OPC_ADDXI, vals, 2);
+#endif
 	if (insn == NULL)
 		return false;
 
@@ -190,13 +214,52 @@ static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle)
 	return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
 }
 
-/** Does this bundle contain the instruction 'sw sp, lr'? */
+/** Does this bundle contain a store of lr to sp? */
 static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
 {
 	static const int vals[2] = { TREG_SP, TREG_LR };
-	return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL;
+	return find_matching_insn(bundle, OPCODE_STORE, vals, 2) != NULL;
+}
+
+#if TILE_CHIP >= 10
+/** Track moveli values placed into registers. */
+static inline void bt_update_moveli(const struct BacktraceBundle *bundle,
+				    int moveli_args[])
+{
+	int i;
+	for (i = 0; i < bundle->num_insns; i++) {
+		const struct tile_decoded_instruction *insn =
+			&bundle->insns[i];
+
+		if (insn->opcode->mnemonic == TILEGX_OPC_MOVELI) {
+			int reg = insn->operand_values[0];
+			moveli_args[reg] = insn->operand_values[1];
+		}
+	}
 }
 
+/** Does this bundle contain an 'add sp, sp, reg' instruction
+ * from a register that we saw a moveli into, and if so, what
+ * is the value in the register?
+ */
+static bool bt_has_add_sp(const struct BacktraceBundle *bundle, int *adjust,
+			  int moveli_args[])
+{
+	static const int vals[2] = { TREG_SP, TREG_SP };
+
+	const struct tile_decoded_instruction *insn =
+		find_matching_insn(bundle, TILEGX_OPC_ADDX, vals, 2);
+	if (insn) {
+		int reg = insn->operand_values[2];
+		if (moveli_args[reg]) {
+			*adjust = moveli_args[reg];
+			return true;
+		}
+	}
+	return false;
+}
+#endif
+
 /** Locates the caller's PC and SP for a program starting at the
  * given address.
  */
@@ -227,6 +290,11 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
 	int next_bundle = 0;
 	VirtualAddress pc;
 
+#if TILE_CHIP >= 10
+	/* Naively try to track moveli values to support addx for -m32. */
+	int moveli_args[TILEGX_NUM_REGISTERS] = { 0 };
+#endif
+
 	/* Default to assuming that the caller's sp is the current sp.
 	 * This is necessary to handle the case where we start backtracing
 	 * right at the end of the epilog.
@@ -380,7 +448,11 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
 
 		if (!sp_determined) {
 			int adjust;
-			if (bt_has_addi_sp(&bundle, &adjust)) {
+			if (bt_has_addi_sp(&bundle, &adjust)
+#if TILE_CHIP >= 10
+			    || bt_has_add_sp(&bundle, &adjust, moveli_args)
+#endif
+			    ) {
 				location->sp_location = SP_LOC_OFFSET;
 
 				if (adjust <= 0) {
@@ -427,6 +499,11 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
 				sp_determined = true;
 			}
 		}
+
+#if TILE_CHIP >= 10
+		/* Track moveli arguments for -m32 mode. */
+		bt_update_moveli(&bundle, moveli_args);
+#endif
 	}
 
 	if (bt_has_iret(&bundle)) {
@@ -502,11 +579,10 @@ void backtrace_init(BacktraceIterator *state,
 			break;
 	}
 
-	/* The frame pointer should theoretically be aligned mod 8. If
-	 * it's not even aligned mod 4 then something terrible happened
-	 * and we should mark it as invalid.
+	/* If the frame pointer is not aligned to the basic word size
+	 * something terrible happened and we should mark it as invalid.
 	 */
-	if (fp % 4 != 0)
+	if (fp % sizeof(bt_int_reg_t) != 0)
 		fp = -1;
 
 	/* -1 means "don't know initial_frame_caller_pc". */
@@ -547,9 +623,16 @@ void backtrace_init(BacktraceIterator *state,
 	state->read_memory_func_extra = read_memory_func_extra;
 }
 
+/* Handle the case where the register holds more bits than the VA. */
+static bool valid_addr_reg(bt_int_reg_t reg)
+{
+	return ((VirtualAddress)reg == reg);
+}
+
 bool backtrace_next(BacktraceIterator *state)
 {
-	VirtualAddress next_fp, next_pc, next_frame[2];
+	VirtualAddress next_fp, next_pc;
+	bt_int_reg_t next_frame[2];
 
 	if (state->fp == -1) {
 		/* No parent frame. */
@@ -563,11 +646,9 @@ bool backtrace_next(BacktraceIterator *state)
 	}
 
 	next_fp = next_frame[1];
-	if (next_fp % 4 != 0) {
-		/* Caller's frame pointer is suspect, so give up.
-		 * Technically it should be aligned mod 8, but we will
-		 * be forgiving here.
-		 */
+	if (!valid_addr_reg(next_frame[1]) ||
+	    next_fp % sizeof(bt_int_reg_t) != 0) {
+		/* Caller's frame pointer is suspect, so give up. */
 		return false;
 	}
 
@@ -585,7 +666,7 @@ bool backtrace_next(BacktraceIterator *state)
 	} else {
 		/* Get the caller PC from the frame linkage area. */
 		next_pc = next_frame[0];
-		if (next_pc == 0 ||
+		if (!valid_addr_reg(next_frame[0]) || next_pc == 0 ||
 		    next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
 			/* The PC is suspect, so give up. */
 			return false;
@@ -599,23 +680,3 @@ bool backtrace_next(BacktraceIterator *state)
 
 	return true;
 }
-
-#else /* TILE_CHIP < 10 */
-
-void backtrace_init(BacktraceIterator *state,
-		    BacktraceMemoryReader read_memory_func,
-		    void *read_memory_func_extra,
-		    VirtualAddress pc, VirtualAddress lr,
-		    VirtualAddress sp, VirtualAddress r52)
-{
-	state->pc = pc;
-	state->sp = sp;
-	state->fp = -1;
-	state->initial_frame_caller_pc = -1;
-	state->read_memory_func = read_memory_func;
-	state->read_memory_func_extra = read_memory_func_extra;
-}
-
-bool backtrace_next(BacktraceIterator *state) { return false; }
-
-#endif /* TILE_CHIP < 10 */
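
[Editor's note: on TILE-Gx a 64-bit bt_int_reg_t can hold more bits than a narrower VirtualAddress, so valid_addr_reg() simply checks that the value round-trips through the VA type. A minimal sketch of the same check, with 32-bit addresses and 64-bit registers assumed for the demo:]

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	typedef uint32_t va_demo_t;    /* stand-in for a 32-bit VirtualAddress */
	typedef int64_t  reg_demo_t;   /* stand-in for bt_int_reg_t on TILE-Gx */

	/* Mirrors valid_addr_reg(): true iff truncation to the VA width is lossless. */
	static bool valid_addr_reg_demo(reg_demo_t reg)
	{
		return (reg_demo_t)(va_demo_t)reg == reg;
	}

	int main(void)
	{
		assert(valid_addr_reg_demo(0x10000));          /* fits in 32 bits */
		assert(!valid_addr_reg_demo(0x100000000LL));   /* bit 32 set: rejected */
		assert(!valid_addr_reg_demo(-1));              /* sign-extended junk: rejected */
		return 0;
	}
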
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index d5efb215dd5f..9c710db43f13 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -56,13 +56,15 @@ struct compat_ucontext {
 	sigset_t	uc_sigmask;	/* mask last for extensibility */
 };
 
+#define COMPAT_SI_PAD_SIZE	((SI_MAX_SIZE - 3 * sizeof(int)) / sizeof(int))
+
 struct compat_siginfo {
 	int si_signo;
 	int si_errno;
 	int si_code;
 
 	union {
-		int _pad[SI_PAD_SIZE];
+		int _pad[COMPAT_SI_PAD_SIZE];
 
 		/* kill() */
 		struct {
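
[Editor's note: the compat siginfo must stay SI_MAX_SIZE bytes overall, but the native SI_PAD_SIZE is computed with the 64-bit preamble, so the pad is recomputed from scratch here. A quick arithmetic check; the SI_MAX_SIZE value of 128 is the asm-generic one and a 4-byte int is assumed:]

	#include <assert.h>

	#define SI_MAX_SIZE_DEMO 128   /* asm-generic/siginfo.h value, assumed */

	int main(void)
	{
		/* Same formula as COMPAT_SI_PAD_SIZE with 32-bit ints. */
		int pad = (SI_MAX_SIZE_DEMO - 3 * (int)sizeof(int)) / (int)sizeof(int);

		assert(pad == 29);  /* 128-byte struct minus 12-byte header = 29 pad ints */

		/* Header (3 ints) plus pad fills the structure exactly. */
		assert((3 + pad) * (int)sizeof(int) == SI_MAX_SIZE_DEMO);
		return 0;
	}
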
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 3404c75f8e64..84f296ca9e63 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -952,7 +952,7 @@ STD_ENTRY(interrupt_return)
 	 * able to safely read all the remaining words on those cache
 	 * lines without waiting for the memory subsystem.
 	 */
-	pop_reg_zero r0, r1, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
+	pop_reg_zero r0, r28, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
 	pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30)
 	pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
 	pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
@@ -1017,7 +1017,17 @@ STD_ENTRY(interrupt_return)
 	{ move r22, zero; move r23, zero }
 	{ move r24, zero; move r25, zero }
 	{ move r26, zero; move r27, zero }
-	{ move r28, zero; move r29, zero }
+
+	/* Set r1 to errno if we are returning an error, otherwise zero. */
+	{
+	 moveli r29, 1024
+	 sub r1, zero, r0
+	}
+	slt_u r29, r1, r29
+	{
+	 mnz r1, r29, r1
+	 move r29, zero
+	}
 	iret
 
 	/*
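
[Editor's note: the new bundle above is branch-free: it sets r1 = -r0 when -r0 falls in the small range that marks an error return (the 1024 bound comes from the asm above), and r1 = 0 otherwise. A C rendering of the same dataflow, with registers as plain variables:]

	#include <assert.h>
	#include <stdint.h>

	/* Branch-free equivalent of the moveli/sub/slt_u/mnz sequence. */
	static int32_t errno_in_r1_demo(int32_t r0)
	{
		uint32_t r29 = 1024;            /* moveli r29, 1024 */
		int32_t r1 = 0 - r0;            /* sub r1, zero, r0 */
		r29 = ((uint32_t)r1 < r29);     /* slt_u r29, r1, r29 */
		r1 = r29 ? r1 : 0;              /* mnz r1, r29, r1 */
		return r1;
	}

	int main(void)
	{
		assert(errno_in_r1_demo(-2) == 2);   /* negative errno: magnitude in r1 */
		assert(errno_in_r1_demo(0) == 0);    /* success: r1 is zero */
		assert(errno_in_r1_demo(42) == 0);   /* positive return: not an error */
		return 0;
	}
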
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 92ef925d2f8d..2e02c41ddf3b 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -23,7 +23,6 @@
 #include <linux/sysctl.h>
 #include <linux/hardirq.h>
 #include <linux/mman.h>
-#include <linux/smp.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 4dd21c1e6d5e..e7d54c73d5c1 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -953,7 +953,7 @@ static void __init load_hv_initrd(void)
 	if (rc != stat.size) {
 		pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
 		       stat.size, initramfs_file, rc);
-		free_bootmem((unsigned long) initrd, stat.size);
+		free_initrd_mem((unsigned long) initrd, stat.size);
 		return;
 	}
 	initrd_start = (unsigned long) initrd;
@@ -962,7 +962,7 @@ static void __init load_hv_initrd(void)
 
 void __init free_initrd_mem(unsigned long begin, unsigned long end)
 {
-	free_bootmem(begin, end - begin);
+	free_bootmem(__pa(begin), end - begin);
 }
 
 static void __init validate_hv(void)
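
[Editor's note: the bug fixed above is passing a kernel virtual address where free_bootmem() expects a physical one. Under a linear kernel mapping, __pa() is just the VA minus the mapping base; a toy illustration with an assumed PAGE_OFFSET, purely for clarity:]

	#include <assert.h>

	#define PAGE_OFFSET_DEMO 0xc0000000UL  /* assumed linear-map base, illustrative */

	/* Toy __pa(): valid only for addresses inside the linear kernel mapping. */
	static unsigned long pa_demo(unsigned long kvaddr)
	{
		return kvaddr - PAGE_OFFSET_DEMO;
	}

	int main(void)
	{
		unsigned long initrd_va = PAGE_OFFSET_DEMO + 0x400000; /* hypothetical */

		/* The fix: hand the allocator the physical address, not the VA. */
		assert(pa_demo(initrd_va) == 0x400000);
		return 0;
	}
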
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index b6268d3ae869..38a68b0b4581 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -108,7 +108,6 @@ static bool read_memory_func(void *result, VirtualAddress address,
 /* Return a pt_regs pointer for a valid fault handler frame */
 static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 {
-#ifndef __tilegx__
 	const char *fault = NULL;  /* happy compiler */
 	char fault_buf[64];
 	VirtualAddress sp = kbt->it.sp;
@@ -146,7 +145,6 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 	}
 	if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
 		return p;
-#endif
 	return NULL;
 }
 
@@ -351,12 +349,6 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
 		       smp_processor_id(), get_cycles());
 	}
-#ifdef __tilegx__
-	if (kbt->is_current) {
-		__insn_mtspr(SPR_SIM_CONTROL,
-			     SIM_DUMP_SPR_ARG(SIM_DUMP_BACKTRACE));
-	}
-#endif
 	kbt->verbose = 1;
 	i = 0;
 	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index b9ab25a889b5..6bed820e1421 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -36,16 +36,6 @@
 /* How many cycles per second we are running at. */
 static cycles_t cycles_per_sec __write_once;
 
-/*
- * We set up shift and multiply values with a minsec of five seconds,
- * since our timer counter counts down 31 bits at a frequency of
- * no less than 500 MHz.  See @minsec for clocks_calc_mult_shift().
- * We could use a different value for the 64-bit free-running
- * cycle counter, but we use the same one for consistency, and since
- * we will be reasonably precise with this value anyway.
- */
-#define TILE_MINSEC 5
-
 cycles_t get_clock_rate(void)
 {
 	return cycles_per_sec;
@@ -68,6 +58,14 @@ cycles_t get_cycles(void)
 }
 #endif
 
+/*
+ * We use a relatively small shift value so that sched_clock()
+ * won't wrap around very often.
+ */
+#define SCHED_CLOCK_SHIFT 10
+
+static unsigned long sched_clock_mult __write_once;
+
 static cycles_t clocksource_get_cycles(struct clocksource *cs)
 {
 	return get_cycles();
@@ -78,6 +76,7 @@ static struct clocksource cycle_counter_cs = {
 	.rating = 300,
 	.read = clocksource_get_cycles,
 	.mask = CLOCKSOURCE_MASK(64),
+	.shift = 22,   /* typical value, e.g. x86 tsc uses this */
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -88,8 +87,10 @@ static struct clocksource cycle_counter_cs = {
 void __init setup_clock(void)
 {
 	cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED);
-	clocksource_calc_mult_shift(&cycle_counter_cs, cycles_per_sec,
-				    TILE_MINSEC);
+	sched_clock_mult =
+		clocksource_hz2mult(cycles_per_sec, SCHED_CLOCK_SHIFT);
+	cycle_counter_cs.mult =
+		clocksource_hz2mult(cycles_per_sec, cycle_counter_cs.shift);
 }
 
 void __init calibrate_delay(void)
@@ -117,9 +118,14 @@ void __init time_init(void)
 	 * counter, plus bit 31, which signifies that the counter has wrapped
 	 * from zero to (2**31) - 1.  The INT_TILE_TIMER interrupt will be
 	 * raised as long as bit 31 is set.
+	 *
+	 * The TILE_MINSEC value represents the largest range of real-time
+	 * we can possibly cover with the timer, based on MAX_TICK combined
+	 * with the slowest reasonable clock rate we might run at.
 	 */
 
 #define MAX_TICK 0x7fffffff   /* we have 31 bits of countdown timer */
+#define TILE_MINSEC 5         /* timer covers no more than 5 seconds */
 
 static int tile_timer_set_next_event(unsigned long ticks,
 				     struct clock_event_device *evt)
@@ -211,8 +217,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
 unsigned long long sched_clock(void)
 {
 	return clocksource_cyc2ns(get_cycles(),
-				  cycle_counter_cs.mult,
-				  cycle_counter_cs.shift);
+				  sched_clock_mult, SCHED_CLOCK_SHIFT);
 }
 
 int setup_profiling_timer(unsigned int multiplier)
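
[Editor's note: sched_clock() now has its own mult/shift pair instead of borrowing the clocksource's. The arithmetic is ns = (cycles * mult) >> shift with mult chosen so the pair cancels to nanoseconds, and the small shift of 10 delays 64-bit overflow of the product. A hedged model of that math, with a 1 GHz clock assumed and hz2mult rounding ignored for brevity:]

	#include <assert.h>
	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL
	#define SCHED_CLOCK_SHIFT_DEMO 10

	/* Same idea as clocksource_hz2mult(): mult = (NSEC_PER_SEC << shift) / hz. */
	static uint64_t hz2mult_demo(uint64_t hz, int shift)
	{
		return (NSEC_PER_SEC << shift) / hz;
	}

	/* What clocksource_cyc2ns() computes. */
	static uint64_t cyc2ns_demo(uint64_t cycles, uint64_t mult, int shift)
	{
		return (cycles * mult) >> shift;
	}

	int main(void)
	{
		uint64_t hz = 1000000000ULL;   /* assume a 1 GHz cycle counter */
		uint64_t mult = hz2mult_demo(hz, SCHED_CLOCK_SHIFT_DEMO);

		/* At 1 GHz, one cycle is one nanosecond: a million cycles ~ 1 ms. */
		assert(cyc2ns_demo(1000000, mult, SCHED_CLOCK_SHIFT_DEMO) == 1000000);
		return 0;
	}
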
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 3870abbeeaa2..0f362dc2c57f 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -128,7 +128,9 @@ static int special_ill(bundle_bits bundle, int *sigp, int *codep)
 #ifdef __tilegx__
 	if ((bundle & TILEGX_BUNDLE_MODE_MASK) != 0)
 		return 0;
-	if (get_Opcode_X1(bundle) != UNARY_OPCODE_X1)
+	if (get_Opcode_X1(bundle) != RRR_0_OPCODE_X1)
+		return 0;
+	if (get_RRROpcodeExtension_X1(bundle) != UNARY_RRR_0_OPCODE_X1)
 		return 0;
 	if (get_UnaryOpcodeExtension_X1(bundle) != ILL_UNARY_OPCODE_X1)
 		return 0;
diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile
index 438af38bc9eb..746dc81ed3c4 100644
--- a/arch/tile/lib/Makefile
+++ b/arch/tile/lib/Makefile
@@ -7,7 +7,9 @@ lib-y = cacheflush.o checksum.o cpumask.o delay.o \
 	memcpy_$(BITS).o memchr_$(BITS).o memmove_$(BITS).o memset_$(BITS).o \
 	strchr_$(BITS).o strlen_$(BITS).o
 
-ifneq ($(CONFIG_TILEGX),y)
+ifeq ($(CONFIG_TILEGX),y)
+lib-y += memcpy_user_64.o
+else
 lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o
 endif
 
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index 6bc7b52b4aa0..ce5dbf56578f 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -36,21 +36,29 @@ EXPORT_SYMBOL(clear_user_asm);
 EXPORT_SYMBOL(current_text_addr);
 EXPORT_SYMBOL(dump_stack);
 
-/* arch/tile/lib/__memcpy.S */
-/* NOTE: on TILE64, these symbols appear in arch/tile/lib/memcpy_tile64.c */
+/* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(__copy_to_user_inatomic);
 EXPORT_SYMBOL(__copy_from_user_inatomic);
 EXPORT_SYMBOL(__copy_from_user_zeroing);
+#ifdef __tilegx__
+EXPORT_SYMBOL(__copy_in_user_inatomic);
+#endif
 
 /* hypervisor glue */
 #include <hv/hypervisor.h>
 EXPORT_SYMBOL(hv_dev_open);
 EXPORT_SYMBOL(hv_dev_pread);
 EXPORT_SYMBOL(hv_dev_pwrite);
+EXPORT_SYMBOL(hv_dev_preada);
+EXPORT_SYMBOL(hv_dev_pwritea);
+EXPORT_SYMBOL(hv_dev_poll);
+EXPORT_SYMBOL(hv_dev_poll_cancel);
 EXPORT_SYMBOL(hv_dev_close);
+EXPORT_SYMBOL(hv_sysconf);
+EXPORT_SYMBOL(hv_confstr);
 
-/* -ltile-cc */
+/* libgcc.a */
 uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
 EXPORT_SYMBOL(__udivsi3);
 int32_t __divsi3(int32_t dividend, int32_t divisor);
@@ -70,8 +78,6 @@ EXPORT_SYMBOL(__moddi3);
 #ifndef __tilegx__
 uint64_t __ll_mul(uint64_t n0, uint64_t n1);
 EXPORT_SYMBOL(__ll_mul);
-#endif
-#ifndef __tilegx__
 int64_t __muldi3(int64_t, int64_t);
 EXPORT_SYMBOL(__muldi3);
 uint64_t __lshrdi3(uint64_t, unsigned int);
diff --git a/arch/tile/lib/memcpy_32.S b/arch/tile/lib/memcpy_32.S
index f92984bf60ec..30c3b7ebb55d 100644
--- a/arch/tile/lib/memcpy_32.S
+++ b/arch/tile/lib/memcpy_32.S
@@ -17,10 +17,6 @@
 
 #include <arch/chip.h>
 
-#if CHIP_HAS_WH64() || defined(MEMCPY_TEST_WH64)
-#define MEMCPY_USE_WH64
-#endif
-
 
 #include <linux/linkage.h>
 
@@ -160,7 +156,7 @@ EX: { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
 
 	{ addi r3, r1, 60; andi r9, r9, -64 }
 
-#ifdef MEMCPY_USE_WH64
+#if CHIP_HAS_WH64()
 	/* No need to prefetch dst, we'll just do the wh64
 	 * right before we copy a line.
 	 */
@@ -173,7 +169,7 @@ EX: { lw r6, r3; addi r3, r3, 64 }
 	/* Intentionally stall for a few cycles to leave L2 cache alone. */
 	{ bnzt zero, . }
 EX:	{ lw r7, r3; addi r3, r3, 64 }
-#ifndef MEMCPY_USE_WH64
+#if !CHIP_HAS_WH64()
 	/* Prefetch the dest */
 	/* Intentionally stall for a few cycles to leave L2 cache alone. */
 	{ bnzt zero, . }
@@ -288,15 +284,7 @@ EX: { lw r7, r3; addi r3, r3, 64 }
 	/* Fill second L1D line. */
 EX:	{ lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */
 
-#ifdef MEMCPY_TEST_WH64
-	/* Issue a fake wh64 that clobbers the destination words
-	 * with random garbage, for testing.
-	 */
-	{ movei r19, 64; crc32_32 r10, r2, r9 }
-.Lwh64_test_loop:
-EX:	{ sw r9, r10; addi r9, r9, 4; addi r19, r19, -4 }
-	{ bnzt r19, .Lwh64_test_loop; crc32_32 r10, r10, r19 }
-#elif CHIP_HAS_WH64()
+#if CHIP_HAS_WH64()
 	/* Prepare destination line for writing. */
 EX:	{ wh64 r9; addi r9, r9, 64 }
 #else
@@ -340,7 +328,7 @@ EX: { lw r18, r1; addi r1, r1, 4 } /* r18 = WORD_8 */
 EX:	{ sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */
 EX:	{ sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
 EX:	{ sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
-#ifdef MEMCPY_USE_WH64
+#if CHIP_HAS_WH64()
 EX:	{ sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */
 #else
 	/* Back up the r9 to a cache line we are already storing to
diff --git a/arch/tile/lib/memset_32.c b/arch/tile/lib/memset_32.c
index bfde5d864df1..d014c1fbcbc2 100644
--- a/arch/tile/lib/memset_32.c
+++ b/arch/tile/lib/memset_32.c
@@ -141,7 +141,6 @@ void *memset(void *s, int c, size_t n)
 		 */
 		__insn_prefetch(&out32[ahead32]);
 
-#if 1
 #if CACHE_LINE_SIZE_IN_WORDS % 4 != 0
 #error "Unhandled CACHE_LINE_SIZE_IN_WORDS"
 #endif
@@ -157,30 +156,6 @@ void *memset(void *s, int c, size_t n)
 			*out32++ = v32;
 			*out32++ = v32;
 		}
-#else
-		/* Unfortunately, due to a code generator flaw this
-		 * allocates a separate register for each of these
-		 * stores, which requires a large number of spills,
-		 * which makes this procedure enormously bigger
-		 * (something like 70%)
-		 */
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		*out32++ = v32;
-		n32 -= 16;
-#endif
 
 		/* To save compiled code size, reuse this loop even
 		 * when we run out of prefetching to do by dropping
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 0011f06b4fe2..704f3e8a4385 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -567,6 +567,14 @@ do_sigbus:
  * since that might indicate we have not yet squirreled the SPR
  * contents away and can thus safely take a recursive interrupt.
  * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
+ *
+ * Note that this routine is called before homecache_tlb_defer_enter(),
+ * which means that we can properly unlock any atomics that might
+ * be used there (good), but also means we must be very sensitive
+ * to not touch any data structures that might be located in memory
+ * that could migrate, as we could be entering the kernel on a dataplane
+ * cpu that has been deferring kernel TLB updates.  This means, for
+ * example, that we can't migrate init_mm or its pgd.
  */
 struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
 				      unsigned long address,
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index ff1cdff5114d..12ab137e7d4f 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -276,7 +276,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
 }
 EXPORT_SYMBOL(kmap_atomic);
 
-void kunmap_atomic(void *kvaddr, enum km_type type)
+void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
@@ -300,7 +300,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_notypecheck);
 
 /*
  * This API is supposed to allow us to map memory without a "struct page".
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 97c478e7be27..fb3b4a55cec4 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -29,6 +29,7 @@
 #include <linux/timex.h>
 #include <linux/cache.h>
 #include <linux/smp.h>
+#include <linux/module.h>
 
 #include <asm/page.h>
 #include <asm/sections.h>
@@ -348,6 +349,7 @@ pte_t pte_set_home(pte_t pte, int home)
 
 	return pte;
 }
+EXPORT_SYMBOL(pte_set_home);
 
 /*
  * The routines in this section are the "static" versions of the normal
@@ -403,6 +405,7 @@ struct page *homecache_alloc_pages(gfp_t gfp_mask,
 	homecache_change_page_home(page, order, home);
 	return page;
 }
+EXPORT_SYMBOL(homecache_alloc_pages);
 
 struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
 					unsigned int order, int home)
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 28c23140c947..335c24621c41 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -17,7 +17,6 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
-#include <linux/smp.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
 #include <linux/pagemap.h>
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index e1898090f22c..b969770196c2 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -642,9 +642,13 @@ __SYSCALL(__NR_recvmmsg, sys_recvmmsg)
 __SYSCALL(__NR_wait4, sys_wait4)
 #define __NR_prlimit64 261
 __SYSCALL(__NR_prlimit64, sys_prlimit64)
+#define __NR_fanotify_init 262
+__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
+#define __NR_fanotify_mark 263
+__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
 
 #undef __NR_syscalls
-#define __NR_syscalls 262
+#define __NR_syscalls 264
 
 /*
  * All syscalls below here should go away really,
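
[Editor's note: asm-generic/unistd.h works by having each architecture define __SYSCALL before including it; when defined as a designated initializer, the list of __SYSCALL(nr, call) lines above expands into a syscall table. A reduced sketch of that pattern; the handler names and 264-entry size here are illustrative stand-ins, not the real kernel table:]

	#include <assert.h>
	#include <stddef.h>

	typedef long (*syscall_fn_demo)(void);

	static long sys_fanotify_init_demo(void) { return 0; }  /* stand-in handler */
	static long sys_fanotify_mark_demo(void) { return 0; }  /* stand-in handler */

	/* The trick used by arch code: each __SYSCALL line becomes
	 * a designated array initializer. */
	#define __SYSCALL(nr, call) [nr] = (call),

	static const syscall_fn_demo sys_call_table_demo[264] = {
		__SYSCALL(262, sys_fanotify_init_demo)
		__SYSCALL(263, sys_fanotify_mark_demo)
	};
	#undef __SYSCALL

	int main(void)
	{
		assert(sys_call_table_demo[262] == sys_fanotify_init_demo);
		assert(sys_call_table_demo[263] == sys_fanotify_mark_demo);
		assert(sys_call_table_demo[0] == NULL);  /* unset entries stay NULL */
		return 0;
	}
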