Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/Kbuild               8
-rw-r--r--  arch/arm64/include/asm/barrier.h            1
-rw-r--r--  arch/arm64/include/asm/cacheflush.h         7
-rw-r--r--  arch/arm64/include/asm/compat.h             2
-rw-r--r--  arch/arm64/include/asm/cpufeature.h        29
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h    64
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h        7
-rw-r--r--  arch/arm64/include/asm/hwcap.h              9
-rw-r--r--  arch/arm64/include/asm/io.h                 2
-rw-r--r--  arch/arm64/include/asm/irqflags.h          23
-rw-r--r--  arch/arm64/include/asm/kgdb.h              84
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h           15
-rw-r--r--  arch/arm64/include/asm/percpu.h             8
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h      5
-rw-r--r--  arch/arm64/include/asm/pgtable.h           70
-rw-r--r--  arch/arm64/include/asm/psci.h               2
-rw-r--r--  arch/arm64/include/asm/ptrace.h             5
-rw-r--r--  arch/arm64/include/asm/tlb.h              136
-rw-r--r--  arch/arm64/include/asm/topology.h          39
-rw-r--r--  arch/arm64/include/asm/uaccess.h            4
-rw-r--r--  arch/arm64/include/asm/unistd.h             1
-rw-r--r--  arch/arm64/include/uapi/asm/Kbuild          1
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h           9
-rw-r--r--  arch/arm64/include/uapi/asm/perf_regs.h    40
24 files changed, 377 insertions, 194 deletions
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 71c53ecfcc3a..4bca4923fc0b 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -12,6 +12,7 @@ generic-y += dma.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += ftrace.h
+generic-y += hash.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ioctls.h
@@ -22,13 +23,16 @@ generic-y += kmap_types.h
 generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += mman.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += pci.h
 generic-y += poll.h
 generic-y += posix_types.h
+generic-y += preempt.h
 generic-y += resource.h
+generic-y += rwsem.h
 generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += segment.h
@@ -38,8 +42,8 @@ generic-y += shmbuf.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
-generic-y += switch_to.h
 generic-y += swab.h
+generic-y += switch_to.h
 generic-y += termbits.h
 generic-y += termios.h
 generic-y += topology.h
@@ -49,5 +53,3 @@ generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
 generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 409ca370cfe2..66eb7648043b 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,6 +25,7 @@
 #define wfi()		asm volatile("wfi" : : : "memory")

 #define isb()		asm volatile("isb" : : : "memory")
+#define dmb(opt)	asm volatile("dmb sy" : : : "memory")
 #define dsb(opt)	asm volatile("dsb sy" : : : "memory")

 #define mb()		dsb()
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 889324981aa4..4c60e64a801c 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -85,6 +85,13 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
 }

 /*
+ * Cache maintenance functions used by the DMA API. Not to be used directly.
+ */
+extern void __dma_map_area(const void *, size_t, int);
+extern void __dma_unmap_area(const void *, size_t, int);
+extern void __dma_flush_range(const void *, const void *);
+
+/*
  * Copy user data from/to a page which is mapped into a different
  * processes address space. Really, we want to allow our "user
  * space" model to handle this.
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index fda2704b3f9f..e71f81fe127a 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -228,7 +228,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }

-#define compat_user_stack_pointer() (current_pt_regs()->compat_sp)
+#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))

 static inline void __user *arch_compat_alloc_user_space(long len)
 {
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
new file mode 100644
index 000000000000..cd4ac0516488
--- /dev/null
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <asm/hwcap.h>
+
+/*
+ * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
+ * in the kernel and for user space to keep track of which optional features
+ * are supported by the current system. So let's map feature 'x' to HWCAP_x.
+ * Note that HWCAP_x constants are bit fields so we need to take the log.
+ */
+
+#define MAX_CPU_FEATURES	(8 * sizeof(elf_hwcap))
+#define cpu_feature(x)		ilog2(HWCAP_ ## x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+	return elf_hwcap & (1UL << num);
+}
+
+#endif
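The HWCAP-to-feature-number mapping above is worth a worked example. A minimal standalone sketch, with ilog2() reduced to a builtin and the HWCAP_AES bit position shown for illustration:

/* Standalone sketch of the cpufeature.h mapping; compile with gcc. */
#include <stdio.h>

#define HWCAP_FP	(1UL << 0)
#define HWCAP_AES	(1UL << 3)	/* illustrative bit position */

static unsigned long elf_hwcap = HWCAP_FP | HWCAP_AES;

#define ilog2(x)	(63 - __builtin_clzl(x))	/* stand-in for the kernel helper */
#define cpu_feature(x)	ilog2(HWCAP_ ## x)

static int cpu_have_feature(unsigned int num)
{
	return !!(elf_hwcap & (1UL << num));
}

int main(void)
{
	/* cpu_feature(AES) == ilog2(HWCAP_AES) == 3 */
	printf("AES feature number: %d, present: %d\n",
	       cpu_feature(AES), cpu_have_feature(cpu_feature(AES)));
	return 0;
}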
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 62314791570c..6e9b5b36921c 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -26,6 +26,53 @@
 #define DBG_ESR_EVT_HWWP	0x2
 #define DBG_ESR_EVT_BRK	0x6

+/*
+ * Breakpoint instruction encoding
+ */
+#define BREAK_INSTR_SIZE		4
+
+/*
+ * ESR values expected for dynamic and compile-time BRK instructions
+ */
+#define DBG_ESR_VAL_BRK(x)	(0xf2000000 | ((x) & 0xfffff))
+
+/*
+ * #imm16 values used for BRK instruction generation
+ * Allowed values for kgdb are 0x400 - 0x7ff
+ * 0x400: for dynamic BRK instruction
+ * 0x401: for compile time BRK instruction
+ */
+#define KGDB_DYN_DGB_BRK_IMM		0x400
+#define KDBG_COMPILED_DBG_BRK_IMM	0x401
+
+/*
+ * BRK instruction encoding
+ * The #imm16 value should be placed at bits[20:5] within the BRK instruction
+ */
+#define AARCH64_BREAK_MON	0xd4200000
+
+/*
+ * Extract byte from BRK instruction
+ */
+#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
+	((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
+
+/*
+ * Extract byte from BRK #imm16
+ */
+#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
+	(((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
+
+#define KGDB_DYN_DGB_BRK_BYTE(x) \
+	(KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
+
+#define KGDB_DYN_BRK_INS_BYTE0	KGDB_DYN_DGB_BRK_BYTE(0)
+#define KGDB_DYN_BRK_INS_BYTE1	KGDB_DYN_DGB_BRK_BYTE(1)
+#define KGDB_DYN_BRK_INS_BYTE2	KGDB_DYN_DGB_BRK_BYTE(2)
+#define KGDB_DYN_BRK_INS_BYTE3	KGDB_DYN_DGB_BRK_BYTE(3)
+
+#define CACHE_FLUSH_IS_SAFE		1
+
 enum debug_el {
 	DBG_ACTIVE_EL0 = 0,
 	DBG_ACTIVE_EL1,
@@ -43,23 +90,6 @@ enum debug_el {
 #ifndef __ASSEMBLY__
 struct task_struct;

-#define local_dbg_save(flags)						\
-	do {								\
-		typecheck(unsigned long, flags);			\
-		asm volatile(						\
-		"mrs	%0, daif		// local_dbg_save\n"	\
-		"msr	daifset, #8"					\
-		: "=r" (flags) : : "memory");				\
-	} while (0)
-
-#define local_dbg_restore(flags)					\
-	do {								\
-		typecheck(unsigned long, flags);			\
-		asm volatile(						\
-		"msr	daif, %0		// local_dbg_restore\n"	\
-		: : "r" (flags) : "memory");				\
-	} while (0)
-
 #define DBG_ARCH_ID_RESERVED	0	/* In case of ptrace ABI updates. */

 #define DBG_HOOK_HANDLED	0
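The byte-extraction macros added above are easiest to verify with a worked example. A standalone sketch that composes the in-memory (little-endian) image of BRK #0x400 both directly and via the per-byte macros; both paths yield 0xd4208000:

/* Standalone check of the kgdb BRK byte-extraction macros. */
#include <stdio.h>
#include <stdint.h>

#define AARCH64_BREAK_MON	0xd4200000u
#define KGDB_DYN_DGB_BRK_IMM	0x400

#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
	((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
	(((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
#define KGDB_DYN_DGB_BRK_BYTE(x) \
	(KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))

int main(void)
{
	/* Direct encoding: opcode with #imm16 placed at bits[20:5]. */
	uint32_t insn = AARCH64_BREAK_MON | (KGDB_DYN_DGB_BRK_IMM << 5);

	/* Reassemble the word from the per-byte macros and compare. */
	uint32_t from_bytes = 0;
	for (int i = 0; i < 4; i++)
		from_bytes |= (uint32_t)KGDB_DYN_DGB_BRK_BYTE(i) << (i * 8);

	printf("insn=%#x from_bytes=%#x\n",
	       (unsigned)insn, (unsigned)from_bytes);	/* both 0xd4208000 */
	return 0;
}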
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index fd0c0c0e447a..3a4572ec3273 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -30,6 +30,8 @@

 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops coherent_swiotlb_dma_ops;
+extern struct dma_map_ops noncoherent_swiotlb_dma_ops;

 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
@@ -47,6 +49,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return __generic_dma_ops(dev);
 }

+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+	dev->archdata.dma_ops = ops;
+}
+
 #include <asm-generic/dma-mapping-common.h>

 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
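A hedged kernel-context sketch of how the new set_dma_ops() hook and the two exported ops tables could be wired together at device setup time; dev_is_dma_coherent_from_fw() is a hypothetical stand-in for whatever coherence probe the platform uses (e.g. a "dma-coherent" firmware property):

/* Kernel-context sketch; dev_is_dma_coherent_from_fw() is hypothetical. */
static void example_setup_dma(struct device *dev)
{
	if (dev_is_dma_coherent_from_fw(dev))
		set_dma_ops(dev, &coherent_swiotlb_dma_ops);
	else
		set_dma_ops(dev, &noncoherent_swiotlb_dma_ops);
}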
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 6cddbb0c9f54..024c46183c3c 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -32,6 +32,12 @@
 #define COMPAT_HWCAP_IDIV	(COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
 #define COMPAT_HWCAP_EVTSTRM	(1 << 21)

+#define COMPAT_HWCAP2_AES	(1 << 0)
+#define COMPAT_HWCAP2_PMULL	(1 << 1)
+#define COMPAT_HWCAP2_SHA1	(1 << 2)
+#define COMPAT_HWCAP2_SHA2	(1 << 3)
+#define COMPAT_HWCAP2_CRC32	(1 << 4)
+
 #ifndef __ASSEMBLY__
 /*
  * This yields a mask that user programs can use to figure out what
@@ -41,7 +47,8 @@
 #ifdef CONFIG_COMPAT
 #define COMPAT_ELF_HWCAP	(compat_elf_hwcap)
+#define COMPAT_ELF_HWCAP2	(compat_elf_hwcap2)
-extern unsigned int compat_elf_hwcap;
+extern unsigned int compat_elf_hwcap, compat_elf_hwcap2;
 #endif

 extern unsigned long elf_hwcap;
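Assuming the kernel exports compat_elf_hwcap2 via the auxiliary vector (which is what COMPAT_ELF_HWCAP2 is for), a 32-bit compat process can test these bits with getauxval(). A minimal userspace sketch; build it as an AArch32 binary so the COMPAT_HWCAP2_* values line up:

/* Userspace sketch for a 32-bit (compat) process. */
#include <stdio.h>
#include <sys/auxv.h>

#define COMPAT_HWCAP2_AES	(1 << 0)
#define COMPAT_HWCAP2_CRC32	(1 << 4)

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);
	printf("AES: %d, CRC32: %d\n",
	       !!(hwcap2 & COMPAT_HWCAP2_AES),
	       !!(hwcap2 & COMPAT_HWCAP2_CRC32));
	return 0;
}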
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4cc813eddacb..7846a6bb0833 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -121,7 +121,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
  * I/O port access primitives.
  */
 #define IO_SPACE_LIMIT		0xffff
-#define PCI_IOBASE		((void __iomem *)(MODULES_VADDR - SZ_2M))
+#define PCI_IOBASE		((void __iomem *)(MODULES_VADDR - SZ_32M))

 static inline u8 inb(unsigned long addr)
 {
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index b2fcfbc51ecc..11cc941bd107 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -90,5 +90,28 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 	return flags & PSR_I_BIT;
 }

+/*
+ * save and restore debug state
+ */
+#define local_dbg_save(flags)						\
+	do {								\
+		typecheck(unsigned long, flags);			\
+		asm volatile(						\
+		"mrs	%0, daif		// local_dbg_save\n"	\
+		"msr	daifset, #8"					\
+		: "=r" (flags) : : "memory");				\
+	} while (0)
+
+#define local_dbg_restore(flags)					\
+	do {								\
+		typecheck(unsigned long, flags);			\
+		asm volatile(						\
+		"msr	daif, %0		// local_dbg_restore\n"	\
+		: : "r" (flags) : "memory");				\
+	} while (0)
+
+#define local_dbg_enable()	asm("msr	daifclr, #8" : : : "memory")
+#define local_dbg_disable()	asm("msr	daifset, #8" : : : "memory")
+
 #endif
 #endif
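A short kernel-context sketch of the intended pairing of the relocated macros, around a section that must not take debug exceptions:

/* Kernel-context sketch of the save/restore pairing. */
static void example_debug_quiesce(void)
{
	unsigned long flags;

	local_dbg_save(flags);		/* read DAIF, then mask the D bit */
	/* ... touch debug registers here ... */
	local_dbg_restore(flags);	/* put DAIF back exactly as it was */
}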
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
new file mode 100644
index 000000000000..3c8aafc1082f
--- /dev/null
+++ b/arch/arm64/include/asm/kgdb.h
@@ -0,0 +1,84 @@
+/*
+ * AArch64 KGDB support
+ *
+ * Based on arch/arm/include/kgdb.h
+ *
+ * Copyright (C) 2013 Cavium Inc.
+ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KGDB_H
+#define __ARM_KGDB_H
+
+#include <linux/ptrace.h>
+#include <asm/debug-monitors.h>
+
+#ifndef __ASSEMBLY__
+
+static inline void arch_kgdb_breakpoint(void)
+{
+	asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
+}
+
+extern void kgdb_handle_bus_error(void);
+extern int kgdb_fault_expected;
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * gdb expects the following register layout.
+ *
+ * General purpose regs:
+ *     r0-r30: 64 bit
+ *     sp,pc : 64 bit
+ *     pstate: 64 bit
+ *     Total: 34
+ * FPU regs:
+ *     f0-f31: 128 bit
+ *     Total: 32
+ * Extra regs
+ *     fpsr & fpcr: 32 bit
+ *     Total: 2
+ */
+
+#define _GP_REGS		34
+#define _FP_REGS		32
+#define _EXTRA_REGS		2
+/*
+ * general purpose registers size in bytes.
+ * pstate is only 4 bytes. subtract 4 bytes
+ */
+#define GP_REG_BYTES		(_GP_REGS * 8)
+#define DBG_MAX_REG_NUM		(_GP_REGS + _FP_REGS + _EXTRA_REGS)
+
+/*
+ * Size of I/O buffer for the gdb packet.
+ * Sized to hold the contents of all registers.
+ */
+#define BUFMAX			2048
+
+/*
+ * Number of bytes required for gdb_regs buffer.
+ * _GP_REGS: 8 bytes, _FP_REGS: 16 bytes and _EXTRA_REGS: 4 bytes each
+ * GDB fails to connect for size beyond this with error
+ * "'g' packet reply is too long"
+ */
+#define NUMREGBYTES	((_GP_REGS * 8) + (_FP_REGS * 16) + \
+			(_EXTRA_REGS * 4))
+
+#endif /* __ARM_KGDB_H */
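The size macros work out to GP_REG_BYTES = 34 * 8 = 272 and NUMREGBYTES = 34*8 + 32*16 + 2*4 = 792; hex-encoded in a 'g' packet reply that is 1584 characters, which fits inside BUFMAX (2048). A standalone check of that arithmetic:

/* Standalone check of the kgdb buffer sizing above. */
#include <stdio.h>

#define _GP_REGS	34
#define _FP_REGS	32
#define _EXTRA_REGS	2
#define GP_REG_BYTES	(_GP_REGS * 8)
#define NUMREGBYTES	((_GP_REGS * 8) + (_FP_REGS * 16) + (_EXTRA_REGS * 4))

int main(void)
{
	/* 272, 792, and 1584 hex chars per 'g' reply (two per byte). */
	printf("GP_REG_BYTES=%d NUMREGBYTES=%d hex_chars=%d\n",
	       GP_REG_BYTES, NUMREGBYTES, NUMREGBYTES * 2);
	return 0;
}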
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 0eb398655378..21ef48d32ff2 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -106,7 +106,6 @@

 /* VTCR_EL2 Registers bits */
 #define VTCR_EL2_PS_MASK	(7 << 16)
-#define VTCR_EL2_PS_40B		(2 << 16)
 #define VTCR_EL2_TG0_MASK	(1 << 14)
 #define VTCR_EL2_TG0_4K		(0 << 14)
 #define VTCR_EL2_TG0_64K	(1 << 14)
@@ -129,10 +128,9 @@
  * 64kB pages (TG0 = 1)
  * 2 level page tables (SL = 1)
  */
-#define VTCR_EL2_FLAGS		(VTCR_EL2_PS_40B | VTCR_EL2_TG0_64K | \
-				 VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
-				 VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
-				 VTCR_EL2_T0SZ_40B)
+#define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
+				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
+				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
 #define VTTBR_X		(38 - VTCR_EL2_T0SZ_40B)
 #else
 /*
@@ -142,10 +140,9 @@
  * 4kB pages (TG0 = 0)
  * 3 level page tables (SL = 1)
  */
-#define VTCR_EL2_FLAGS		(VTCR_EL2_PS_40B | VTCR_EL2_TG0_4K | \
-				 VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
-				 VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
-				 VTCR_EL2_T0SZ_40B)
+#define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
+				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
+				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
 #define VTTBR_X		(37 - VTCR_EL2_T0SZ_40B)
 #endif

diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 13fb0b3efc5f..453a179469a3 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H

+#ifdef CONFIG_SMP
+
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void)
 }
 #define __my_cpu_offset __my_cpu_offset()

+#else	/* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x)	do { } while (0)
+
+#endif	/* CONFIG_SMP */
+
 #include <asm-generic/percpu.h>

 #endif /* __ASM_PERCPU_H */
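A kernel-context sketch of the SMP side, assuming the generic per_cpu_offset() helper: each CPU stashes its own per-cpu delta in tpidr_el1 so that later __my_cpu_offset() reads cost a single system-register access instead of a memory load:

/* Kernel-context sketch; per_cpu_offset() is the generic percpu helper. */
static void example_init_this_cpu(unsigned int cpu)
{
	set_my_cpu_offset(per_cpu_offset(cpu));	/* written to tpidr_el1 */
}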
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index b1d2e26c3c88..f7af66b54cb2 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -100,9 +100,9 @@
 #define PTE_HYP			PTE_USER

 /*
- * 40-bit physical address supported.
+ * Highest possible physical address supported.
  */
-#define PHYS_MASK_SHIFT		(40)
+#define PHYS_MASK_SHIFT		(48)
 #define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)

 /*
@@ -122,7 +122,6 @@
 #define TCR_SHARED		((UL(3) << 12) | (UL(3) << 28))
 #define TCR_TG0_64K		(UL(1) << 14)
 #define TCR_TG1_64K		(UL(1) << 30)
-#define TCR_IPS_40BIT		(UL(2) << 32)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b524dcd17243..90c811f05a2e 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -136,11 +136,11 @@ extern struct page *empty_zero_page;
 /*
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
-#define pte_present(pte)	(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
-#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
-#define pte_young(pte)		(pte_val(pte) & PTE_AF)
-#define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
+#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_dirty(pte)		(!!(pte_val(pte) & PTE_DIRTY))
+#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
+#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
+#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))

 #define pte_valid_user(pte) \
146#define pte_valid_user(pte) \ 146#define pte_valid_user(pte) \
@@ -199,7 +199,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
 	if (pte_valid_user(pte)) {
-		if (pte_exec(pte))
+		if (!pte_special(pte) && pte_exec(pte))
 			__sync_icache_dcache(pte, addr);
 		if (pte_dirty(pte) && pte_write(pte))
 			pte_val(pte) &= ~PTE_RDONLY;
@@ -227,36 +227,36 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,

 #define __HAVE_ARCH_PTE_SPECIAL

-/*
- * Software PMD bits for THP
- */
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+	return __pte(pmd_val(pmd));
+}

-#define PMD_SECT_DIRTY		(_AT(pmdval_t, 1) << 55)
-#define PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 57)
+static inline pmd_t pte_pmd(pte_t pte)
+{
+	return __pmd(pte_val(pte));
+}

 /*
  * THP definitions.
  */
-#define pmd_young(pmd)		(pmd_val(pmd) & PMD_SECT_AF)
-
-#define __HAVE_ARCH_PMD_WRITE
-#define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))

 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
-#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+#define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd))
 #endif

-#define PMD_BIT_FUNC(fn,op) \
-static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
+#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mksplitting(pmd)	pte_pmd(pte_mkspecial(pmd_pte(pmd)))
+#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))

-PMD_BIT_FUNC(wrprotect,	|= PMD_SECT_RDONLY);
-PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
-PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
-PMD_BIT_FUNC(mkwrite,   &= ~PMD_SECT_RDONLY);
-PMD_BIT_FUNC(mkdirty,   |= PMD_SECT_DIRTY);
-PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
-PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

 #define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

@@ -266,15 +266,6 @@ PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);

 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

-static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
-{
-	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN |
-			      PMD_SECT_RDONLY | PMD_SECT_PROT_NONE |
-			      PMD_SECT_VALID;
-	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
-	return pmd;
-}
-
 #define set_pmd_at(mm, addr, pmdp, pmd)	set_pmd(pmdp, pmd)

 static inline int has_transparent_hugepage(void)
@@ -286,11 +277,9 @@ static inline int has_transparent_hugepage(void)
  * Mark the prot value as uncacheable and unbufferable.
  */
 #define pgprot_noncached(prot) \
-	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
 #define pgprot_writecombine(prot) \
-	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
-#define pgprot_dmacoherent(prot) \
-	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -383,6 +372,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	return pte;
 }

+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
+}
+
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

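The pmd_pte()/pte_pmd() round trip is the whole trick in this file: every pmd helper becomes a one-liner over the corresponding pte helper. A standalone sketch of the pattern, with illustrative types and bit values rather than the real arm64 layout:

/* Standalone sketch of routing pmd helpers through pte helpers. */
#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define __pte(x)	((pte_t) { (x) })
#define __pmd(x)	((pmd_t) { (x) })

#define PTE_DIRTY	(1UL << 55)	/* illustrative software bit */

static pte_t pmd_pte(pmd_t pmd) { return __pte(pmd_val(pmd)); }
static pmd_t pte_pmd(pte_t pte) { return __pmd(pte_val(pte)); }

static pte_t pte_mkdirty(pte_t pte) { pte.pte |= PTE_DIRTY; return pte; }
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))

int main(void)
{
	pmd_t pmd = __pmd(0x200000UL);	/* illustrative huge-page entry */
	pmd = pmd_mkdirty(pmd);		/* one-liner via the pte helper */
	printf("dirty bit set: %d\n", !!(pmd_val(pmd) & PTE_DIRTY));
	return 0;
}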
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index e5312ea0ec1a..d15ab8b46336 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,6 +14,6 @@
 #ifndef __ASM_PSCI_H
 #define __ASM_PSCI_H

-int psci_init(void);
+void psci_init(void);

 #endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 0e7fa4963735..c7ba261dd4b3 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -68,6 +68,7 @@

 /* Architecturally defined mapping between AArch32 and AArch64 registers */
 #define compat_usr(x)	regs[(x)]
+#define compat_fp	regs[11]
 #define compat_sp	regs[13]
 #define compat_lr	regs[14]
 #define compat_sp_hyp	regs[15]
@@ -132,7 +133,7 @@ struct pt_regs {
 	(!((regs)->pstate & PSR_F_BIT))

 #define user_stack_pointer(regs) \
-	((regs)->sp)
+	(!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)

 /*
  * Are the current registers suitable for user mode? (used to maintain
@@ -164,7 +165,7 @@ static inline int valid_user_regs(struct user_pt_regs *regs)
 	return 0;
 }

-#define instruction_pointer(regs)	(regs)->pc
+#define instruction_pointer(regs)	((unsigned long)(regs)->pc)

 #ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 717031a762c2..72cadf52ca80 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -19,115 +19,44 @@
 #ifndef __ASM_TLB_H
 #define __ASM_TLB_H

-#include <linux/pagemap.h>
-#include <linux/swap.h>

-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-#define MMU_GATHER_BUNDLE	8
-
-/*
- * TLB handling. This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
-	struct mm_struct	*mm;
-	unsigned int		fullmm;
-	struct vm_area_struct	*vma;
-	unsigned long		start, end;
-	unsigned long		range_start;
-	unsigned long		range_end;
-	unsigned int		nr;
-	unsigned int		max;
-	struct page		**pages;
-	struct page		*local[MMU_GATHER_BUNDLE];
-};
+#include <asm-generic/tlb.h>

 /*
- * This is unnecessarily complex. There's three ways the TLB shootdown
- * code is used:
+ * There's three ways the TLB shootdown code is used:
  * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
  *    tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- *    tlb->vma will be non-NULL.
  * 2. Unmapping all vmas. See exit_mmap().
  *    tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- *    tlb->vma will be non-NULL. Additionally, page tables will be freed.
+ *    Page tables will be freed.
  * 3. Unmapping argument pages. See shift_arg_pages().
  *    tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- *    tlb->vma will be NULL.
  */
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	if (tlb->fullmm || !tlb->vma)
+	if (tlb->fullmm) {
 		flush_tlb_mm(tlb->mm);
-	else if (tlb->range_end > 0) {
-		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
-		tlb->range_start = TASK_SIZE;
-		tlb->range_end = 0;
+	} else if (tlb->end > 0) {
+		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+		flush_tlb_range(&vma, tlb->start, tlb->end);
+		tlb->start = TASK_SIZE;
+		tlb->end = 0;
 	}
 }

 static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
 {
 	if (!tlb->fullmm) {
-		if (addr < tlb->range_start)
-			tlb->range_start = addr;
-		if (addr + PAGE_SIZE > tlb->range_end)
-			tlb->range_end = addr + PAGE_SIZE;
-	}
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
-	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
-	if (addr) {
-		tlb->pages = (void *)addr;
-		tlb->max = PAGE_SIZE / sizeof(struct page *);
+		tlb->start = min(tlb->start, addr);
+		tlb->end = max(tlb->end, addr + PAGE_SIZE);
 	}
 }

-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	tlb_flush(tlb);
-	free_pages_and_swap_cache(tlb->pages, tlb->nr);
-	tlb->nr = 0;
-	if (tlb->pages == tlb->local)
-		__tlb_alloc_page(tlb);
-}
-
-static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-	tlb->mm = mm;
-	tlb->fullmm = !(start | (end+1));
-	tlb->start = start;
-	tlb->end = end;
-	tlb->vma = NULL;
-	tlb->max = ARRAY_SIZE(tlb->local);
-	tlb->pages = tlb->local;
-	tlb->nr = 0;
-	__tlb_alloc_page(tlb);
-}
-
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	if (tlb->pages != tlb->local)
-		free_pages((unsigned long)tlb->pages, 0);
-}
-
 /*
  * Memorize the range for the TLB flush.
  */
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+					  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
 }
@@ -137,38 +66,24 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
  * case where we're doing a full MM flush. When we're doing a munmap,
  * the vmas are adjusted to only cover the region to be torn down.
  */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+static inline void tlb_start_vma(struct mmu_gather *tlb,
+				 struct vm_area_struct *vma)
 {
 	if (!tlb->fullmm) {
-		tlb->vma = vma;
-		tlb->range_start = TASK_SIZE;
-		tlb->range_end = 0;
+		tlb->start = TASK_SIZE;
+		tlb->end = 0;
 	}
 }

-static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+static inline void tlb_end_vma(struct mmu_gather *tlb,
+			       struct vm_area_struct *vma)
 {
 	if (!tlb->fullmm)
 		tlb_flush(tlb);
 }

-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	tlb->pages[tlb->nr++] = page;
-	VM_BUG_ON(tlb->nr > tlb->max);
-	return tlb->max - tlb->nr;
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	if (!__tlb_remove_page(tlb, page))
-		tlb_flush_mmu(tlb);
-}
-
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
 	pgtable_page_dtor(pte);
 	tlb_add_flush(tlb, addr);
@@ -184,16 +99,5 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 }
 #endif

-#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
-#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm)	do { } while (0)
-
-static inline void
-tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
-{
-	tlb_add_flush(tlb, addr);
-}

 #endif
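The new min/max accumulation in tlb_add_flush() is easy to see in isolation: the flush window simply grows to cover every address fed in. A standalone sketch (the TASK_SIZE value is illustrative):

/* Standalone sketch of the tlb_add_flush() range accumulation. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define TASK_SIZE	(1UL << 39)	/* illustrative user VA limit */

struct mmu_gather_sketch { unsigned long start, end; };

static unsigned long min(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max(unsigned long a, unsigned long b) { return a > b ? a : b; }

static void tlb_add_flush(struct mmu_gather_sketch *tlb, unsigned long addr)
{
	tlb->start = min(tlb->start, addr);
	tlb->end = max(tlb->end, addr + PAGE_SIZE);
}

int main(void)
{
	/* Initial state set by tlb_start_vma(): empty window. */
	struct mmu_gather_sketch tlb = { .start = TASK_SIZE, .end = 0 };

	tlb_add_flush(&tlb, 0x400000);
	tlb_add_flush(&tlb, 0x7fb000);
	printf("flush [%#lx, %#lx)\n", tlb.start, tlb.end); /* 0x400000..0x7fc000 */
	return 0;
}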
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
new file mode 100644
index 000000000000..0172e6d76bf3
--- /dev/null
+++ b/arch/arm64/include/asm/topology.h
@@ -0,0 +1,39 @@
+#ifndef __ASM_TOPOLOGY_H
+#define __ASM_TOPOLOGY_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/cpumask.h>
+
+struct cpu_topology {
+	int thread_id;
+	int core_id;
+	int cluster_id;
+	cpumask_t thread_sibling;
+	cpumask_t core_sibling;
+};
+
+extern struct cpu_topology cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu)	(cpu_topology[cpu].cluster_id)
+#define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
+#define topology_thread_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
+
+#define mc_capable()	(cpu_topology[0].cluster_id != -1)
+#define smt_capable()	(cpu_topology[0].thread_id != -1)
+
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(int cpu);
+
+#else
+
+static inline void init_cpu_topology(void) { }
+static inline void store_cpu_topology(unsigned int cpuid) { }
+
+#endif
+
+#include <asm-generic/topology.h>
+
+#endif /* __ASM_TOPOLOGY_H */
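A hedged kernel-context sketch of consuming the new header, walking one CPU's sibling mask:

/* Kernel-context sketch using the accessors the header exposes. */
static void example_print_siblings(unsigned int cpu)
{
	unsigned int sib;

	pr_info("CPU%u: package %d, core %d\n", cpu,
		topology_physical_package_id(cpu), topology_core_id(cpu));
	for_each_cpu(sib, topology_core_cpumask(cpu))
		pr_info("  shares a package with CPU%u\n", sib);
}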
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 6c0f684aca81..3bf8f4e99a51 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -83,7 +83,7 @@ static inline void set_fs(mm_segment_t fs)
  * Returns 1 if the range is valid, 0 otherwise.
  *
  * This is equivalent to the following test:
- * (u65)addr + (u65)size < (u65)current->addr_limit
+ * (u65)addr + (u65)size <= current->addr_limit
  *
  * This needs 65-bit arithmetic.
  */
@@ -91,7 +91,7 @@ static inline void set_fs(mm_segment_t fs)
 ({									\
 	unsigned long flag, roksum;					\
 	__chk_user_ptr(addr);						\
-	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, cc"		\
+	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
 	: "=&r" (flag), "=&r" (roksum)					\
 	: "1" (addr), "Ir" (size),					\
 	"r" (current_thread_info()->addr_limit)				\
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 82ce217e94cf..a4654c656a1e 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -14,6 +14,7 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #ifdef CONFIG_COMPAT
+#define __ARCH_WANT_COMPAT_SYS_GETDENTS64
 #define __ARCH_WANT_COMPAT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_PAUSE
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index e4b78bdca19e..942376d37d22 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -9,6 +9,7 @@ header-y += byteorder.h
 header-y += fcntl.h
 header-y += hwcap.h
 header-y += kvm_para.h
+header-y += perf_regs.h
 header-y += param.h
 header-y += ptrace.h
 header-y += setup.h
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 495ab6f84a61..eaf54a30bedc 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -148,6 +148,15 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_TIMER_CNT	ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL	ARM64_SYS_REG(3, 3, 14, 0, 2)

+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR	0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS	2
+#define KVM_DEV_ARM_VGIC_CPUID_SHIFT	32
+#define KVM_DEV_ARM_VGIC_CPUID_MASK	(0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
+#define KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
 #define KVM_ARM_IRQ_TYPE_MASK		0xff
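Userspace composes a 64-bit attr from these shift/mask pairs to address one register of one vcpu through the device-attr ioctls on the vgic device fd. A standalone sketch of just the encoding (cpuid and offset values are illustrative):

/* Standalone sketch of the VGIC device-attr encoding. */
#include <stdio.h>
#include <stdint.h>

#define KVM_DEV_ARM_VGIC_CPUID_SHIFT	32
#define KVM_DEV_ARM_VGIC_CPUID_MASK	(0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
#define KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)

int main(void)
{
	uint64_t cpuid = 1, offset = 0x10;	/* illustrative values */
	uint64_t attr =
		((cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) & KVM_DEV_ARM_VGIC_CPUID_MASK) |
		((offset << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) & KVM_DEV_ARM_VGIC_OFFSET_MASK);

	printf("attr = %#llx\n", (unsigned long long)attr);
	return 0;
}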
diff --git a/arch/arm64/include/uapi/asm/perf_regs.h b/arch/arm64/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..172b8317ee49
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/perf_regs.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_ARM64_PERF_REGS_H
+#define _ASM_ARM64_PERF_REGS_H
+
+enum perf_event_arm_regs {
+	PERF_REG_ARM64_X0,
+	PERF_REG_ARM64_X1,
+	PERF_REG_ARM64_X2,
+	PERF_REG_ARM64_X3,
+	PERF_REG_ARM64_X4,
+	PERF_REG_ARM64_X5,
+	PERF_REG_ARM64_X6,
+	PERF_REG_ARM64_X7,
+	PERF_REG_ARM64_X8,
+	PERF_REG_ARM64_X9,
+	PERF_REG_ARM64_X10,
+	PERF_REG_ARM64_X11,
+	PERF_REG_ARM64_X12,
+	PERF_REG_ARM64_X13,
+	PERF_REG_ARM64_X14,
+	PERF_REG_ARM64_X15,
+	PERF_REG_ARM64_X16,
+	PERF_REG_ARM64_X17,
+	PERF_REG_ARM64_X18,
+	PERF_REG_ARM64_X19,
+	PERF_REG_ARM64_X20,
+	PERF_REG_ARM64_X21,
+	PERF_REG_ARM64_X22,
+	PERF_REG_ARM64_X23,
+	PERF_REG_ARM64_X24,
+	PERF_REG_ARM64_X25,
+	PERF_REG_ARM64_X26,
+	PERF_REG_ARM64_X27,
+	PERF_REG_ARM64_X28,
+	PERF_REG_ARM64_X29,
+	PERF_REG_ARM64_LR,
+	PERF_REG_ARM64_SP,
+	PERF_REG_ARM64_PC,
+	PERF_REG_ARM64_MAX,
+};
+#endif /* _ASM_ARM64_PERF_REGS_H */
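A minimal userspace sketch of how the enum is meant to be used: each index selects one bit in the sample_regs_user mask passed to perf_event_open() (the perf_event_open() plumbing itself is omitted):

/* Standalone sketch: request SP and PC in perf samples. */
#include <stdio.h>
#include <stdint.h>

/* Subset of the enum above, spelled out for a standalone build:
 * X0-X29 occupy indices 0-29, then LR=30, SP=31, PC=32. */
enum { PERF_REG_ARM64_SP = 31, PERF_REG_ARM64_PC = 32 };

int main(void)
{
	uint64_t mask = (1ULL << PERF_REG_ARM64_SP) |
			(1ULL << PERF_REG_ARM64_PC);

	/* This value would go into perf_event_attr.sample_regs_user. */
	printf("sample_regs_user = %#llx\n", (unsigned long long)mask);
	return 0;
}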