Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig                        |   5
-rw-r--r--  arch/arm64/Makefile                       |  17
-rw-r--r--  arch/arm64/boot/Makefile                  |   5
-rw-r--r--  arch/arm64/boot/dts/.gitignore            |   1
-rw-r--r--  arch/arm64/boot/dts/Makefile              |   5
-rw-r--r--  arch/arm64/include/asm/Kbuild             |   2
-rw-r--r--  arch/arm64/include/asm/arm_generic.h      |   8
-rw-r--r--  arch/arm64/include/asm/assembler.h        |   8
-rw-r--r--  arch/arm64/include/asm/cacheflush.h       |  11
-rw-r--r--  arch/arm64/include/asm/fpsimdmacros.h     |  64
-rw-r--r--  arch/arm64/include/asm/io.h               |   2
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h    |   6
-rw-r--r--  arch/arm64/include/asm/pgtable.h          |  42
-rw-r--r--  arch/arm64/include/asm/processor.h        |   5
-rw-r--r--  arch/arm64/include/asm/ptrace.h           |  31
-rw-r--r--  arch/arm64/include/asm/syscalls.h         |   8
-rw-r--r--  arch/arm64/include/asm/unistd.h           |   4
-rw-r--r--  arch/arm64/include/asm/unistd32.h         |  12
-rw-r--r--  arch/arm64/include/asm/virt.h             |  54
-rw-r--r--  arch/arm64/kernel/Makefile                |   3
-rw-r--r--  arch/arm64/kernel/entry-fpsimd.S          |  43
-rw-r--r--  arch/arm64/kernel/entry.S                 |  21
-rw-r--r--  arch/arm64/kernel/head.S                  |  33
-rw-r--r--  arch/arm64/kernel/hyp-stub.S              | 109
-rw-r--r--  arch/arm64/kernel/process.c               |  86
-rw-r--r--  arch/arm64/kernel/signal.c                |  49
-rw-r--r--  arch/arm64/kernel/signal32.c              |  20
-rw-r--r--  arch/arm64/kernel/sys.c                   |  81
-rw-r--r--  arch/arm64/kernel/sys32.S                 |  19
-rw-r--r--  arch/arm64/kernel/sys_compat.c            |  39
-rw-r--r--  arch/arm64/kernel/vdso.c                  |   2
-rw-r--r--  arch/arm64/kernel/vdso/gettimeofday.S     | 100
-rw-r--r--  arch/arm64/mm/fault.c                     |  13
-rw-r--r--  arch/arm64/mm/flush.c                     |   9
-rw-r--r--  arch/arm64/mm/init.c                      |   4
35 files changed, 504 insertions(+), 417 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 15ac18a56c93..f9ccff915918 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2,11 +2,14 @@ config ARM64
 	def_bool y
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+	select COMMON_CLK
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_HARDIRQS_NO_DEPRECATED
 	select GENERIC_IOMAP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
+	select GENERIC_KERNEL_EXECVE
+	select GENERIC_KERNEL_THREAD
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
 	select HARDIRQS_SW_RESEND
@@ -21,7 +24,6 @@ config ARM64
 	select HAVE_IRQ_WORK
 	select HAVE_MEMBLOCK
 	select HAVE_PERF_EVENTS
-	select HAVE_SPARSE_IRQ
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
 	select NO_BOOTMEM
@@ -31,6 +33,7 @@ config ARM64
 	select RTC_LIB
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
+	select CLONE_BACKWARDS
 	help
 	  ARM 64-bit (AArch64) Linux support.
 
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 364191f3be43..c95c5cb212fd 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -41,20 +41,24 @@ libs-y := arch/arm64/lib/ $(libs-y)
 libs-y		+= $(LIBGCC)
 
 # Default target when executing plain make
 KBUILD_IMAGE	:= Image.gz
+KBUILD_DTBS	:= dtbs
 
-all:	$(KBUILD_IMAGE)
+all:	$(KBUILD_IMAGE) $(KBUILD_DTBS)
 
 boot := arch/arm64/boot
 
 Image Image.gz: vmlinux
-	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 zinstall install: vmlinux
-	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
+	$(Q)$(MAKE) $(build)=$(boot) $@
 
-%.dtb:
-	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+%.dtb: scripts
+	$(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
+
+dtbs: scripts
+	$(Q)$(MAKE) $(build)=$(boot)/dts dtbs
 
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
@@ -63,6 +67,7 @@ archclean:
 define archhelp
   echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
   echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
+  echo  '* dtbs          - Build device tree blobs for enabled boards'
   echo  '  install       - Install uncompressed kernel'
   echo  '  zinstall      - Install compressed kernel'
   echo  '                  Install using (your) ~/bin/installkernel or'
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index eca209b2b0bf..5a0e3ab854a5 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -22,9 +22,6 @@ $(obj)/Image: vmlinux FORCE
 $(obj)/Image.gz: $(obj)/Image FORCE
 	$(call if_changed,gzip)
 
-$(obj)/%.dtb: $(src)/dts/%.dts
-	$(call cmd,dtc)
-
 install: $(obj)/Image
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
@@ -32,5 +29,3 @@ install: $(obj)/Image
 zinstall: $(obj)/Image.gz
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/Image.gz System.map "$(INSTALL_PATH)"
-
-clean-files += *.dtb
diff --git a/arch/arm64/boot/dts/.gitignore b/arch/arm64/boot/dts/.gitignore
new file mode 100644
index 000000000000..b60ed208c779
--- /dev/null
+++ b/arch/arm64/boot/dts/.gitignore
@@ -0,0 +1 @@
+*.dtb
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
new file mode 100644
index 000000000000..801e2d7fcbc6
--- /dev/null
+++ b/arch/arm64/boot/dts/Makefile
@@ -0,0 +1,5 @@
+targets += dtbs
+
+dtbs: $(addprefix $(obj)/, $(dtb-y))
+
+clean-files := *.dtb
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index a581a2205938..14a9d5a2b85b 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -3,6 +3,7 @@
 generic-y += bug.h
 generic-y += bugs.h
 generic-y += checksum.h
+generic-y += clkdev.h
 generic-y += cputime.h
 generic-y += current.h
 generic-y += delay.h
@@ -43,6 +44,7 @@ generic-y += swab.h
 generic-y += termbits.h
 generic-y += termios.h
 generic-y += topology.h
+generic-y += trace_clock.h
 generic-y += types.h
 generic-y += unaligned.h
 generic-y += user.h
diff --git a/arch/arm64/include/asm/arm_generic.h b/arch/arm64/include/asm/arm_generic.h
index e4cec9d30f27..df2aeb82f74e 100644
--- a/arch/arm64/include/asm/arm_generic.h
+++ b/arch/arm64/include/asm/arm_generic.h
@@ -70,12 +70,12 @@ static inline void __cpuinit arch_counter_enable_user_access(void)
 {
 	u32 cntkctl;
 
-	/* Disable user access to the timers and the virtual counter. */
+	/* Disable user access to the timers and the physical counter. */
 	asm volatile("mrs	%0, cntkctl_el1" : "=r" (cntkctl));
-	cntkctl &= ~((3 << 8) | (1 << 1));
+	cntkctl &= ~((3 << 8) | (1 << 0));
 
-	/* Enable user access to the physical counter and frequency. */
-	cntkctl |= 1;
+	/* Enable user access to the virtual counter and frequency. */
+	cntkctl |= (1 << 1);
 	asm volatile("msr	cntkctl_el1, %0" : : "r" (cntkctl));
 }
 
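The two hunks above are a straight bit-swap fix: in CNTKCTL_EL1, bit 0 gates EL0 access to the physical counter, bit 1 the virtual counter, and bits 9:8 the EL0 timers. The old code cleared bit 1 and set bit 0, the exact opposite of what its comments claimed. A sketch of the corrected logic with named constants (the CNTKCTL_* names are illustrative, not taken from this header):

	#define CNTKCTL_EL0PCTEN	(1 << 0)	/* EL0 access to physical counter */
	#define CNTKCTL_EL0VCTEN	(1 << 1)	/* EL0 access to virtual counter */
	#define CNTKCTL_EL0_TIMERS	(3 << 8)	/* EL0 access to the EL1 timers */

	cntkctl &= ~(CNTKCTL_EL0_TIMERS | CNTKCTL_EL0PCTEN);	/* timers + physical counter off */
	cntkctl |= CNTKCTL_EL0VCTEN;				/* virtual counter (and frequency) on */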
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index da2a13e8f1e6..c8eedc604984 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -107,3 +107,11 @@
  * Register aliases.
  */
 lr	.req	x30		// link register
+
+/*
+ * Vector entry
+ */
+	.macro	ventry	label
+	.align	7
+	b	\label
+	.endm
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index aa3132ab7f29..3300cbd18a89 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -70,13 +70,20 @@
  * - size   - region size
  */
 extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __flush_cache_user_range(unsigned long start, unsigned long end);
 
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long user_addr, unsigned long pfn)
+{
+}
+
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space. Really, we want to allow our "user
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
new file mode 100644
index 000000000000..bbec599c96bd
--- /dev/null
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -0,0 +1,64 @@
+/*
+ * FP/SIMD state saving and restoring macros
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+.macro fpsimd_save state, tmpnr
+	stp	q0, q1, [\state, #16 * 0]
+	stp	q2, q3, [\state, #16 * 2]
+	stp	q4, q5, [\state, #16 * 4]
+	stp	q6, q7, [\state, #16 * 6]
+	stp	q8, q9, [\state, #16 * 8]
+	stp	q10, q11, [\state, #16 * 10]
+	stp	q12, q13, [\state, #16 * 12]
+	stp	q14, q15, [\state, #16 * 14]
+	stp	q16, q17, [\state, #16 * 16]
+	stp	q18, q19, [\state, #16 * 18]
+	stp	q20, q21, [\state, #16 * 20]
+	stp	q22, q23, [\state, #16 * 22]
+	stp	q24, q25, [\state, #16 * 24]
+	stp	q26, q27, [\state, #16 * 26]
+	stp	q28, q29, [\state, #16 * 28]
+	stp	q30, q31, [\state, #16 * 30]!
+	mrs	x\tmpnr, fpsr
+	str	w\tmpnr, [\state, #16 * 2]
+	mrs	x\tmpnr, fpcr
+	str	w\tmpnr, [\state, #16 * 2 + 4]
+.endm
+
+.macro fpsimd_restore state, tmpnr
+	ldp	q0, q1, [\state, #16 * 0]
+	ldp	q2, q3, [\state, #16 * 2]
+	ldp	q4, q5, [\state, #16 * 4]
+	ldp	q6, q7, [\state, #16 * 6]
+	ldp	q8, q9, [\state, #16 * 8]
+	ldp	q10, q11, [\state, #16 * 10]
+	ldp	q12, q13, [\state, #16 * 12]
+	ldp	q14, q15, [\state, #16 * 14]
+	ldp	q16, q17, [\state, #16 * 16]
+	ldp	q18, q19, [\state, #16 * 18]
+	ldp	q20, q21, [\state, #16 * 20]
+	ldp	q22, q23, [\state, #16 * 22]
+	ldp	q24, q25, [\state, #16 * 24]
+	ldp	q26, q27, [\state, #16 * 26]
+	ldp	q28, q29, [\state, #16 * 28]
+	ldp	q30, q31, [\state, #16 * 30]!
+	ldr	w\tmpnr, [\state, #16 * 2]
+	msr	fpsr, x\tmpnr
+	ldr	w\tmpnr, [\state, #16 * 2 + 4]
+	msr	fpcr, x\tmpnr
+.endm
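The offsets look odd at first glance but work out: the final stp uses pre-index writeback ([\state, #16 * 30]!), so after it \state points 480 bytes into the save area, and the following str at #16 * 2 lands at byte 512, immediately after the thirty-two 16-byte Q registers. A sketch of the layout the macros assume (field names here are illustrative; the kernel's actual definition is struct fpsimd_state):

	struct fpsimd_layout {			/* illustrative only */
		__uint128_t vregs[32];		/* bytes 0..511: q0-q31, saved in pairs */
		u32 fpsr;			/* byte 512: 16*30 (after writeback) + 16*2 */
		u32 fpcr;			/* byte 516: ... + 16*2 + 4 */
	};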
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 54f6116697f7..d2f05a608274 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -222,7 +222,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
 extern void __iounmap(volatile void __iomem *addr);
 
 #define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
 
 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 0f3b4581d925..75fd13d289b9 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -38,7 +38,8 @@
 #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
 #define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
 #define PMD_SECT_NG		(_AT(pmdval_t, 1) << 11)
-#define PMD_SECT_XN		(_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_PXN		(_AT(pmdval_t, 1) << 53)
+#define PMD_SECT_UXN		(_AT(pmdval_t, 1) << 54)
 
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@@ -57,7 +58,8 @@
 #define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
 #define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
 #define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
-#define PTE_XN			(_AT(pteval_t, 1) << 54)	/* XN */
+#define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
+#define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
 
 /*
  * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 8960239be722..64b133949502 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -62,23 +62,23 @@ extern pgprot_t pgprot_default;
 
 #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
 
-#define PAGE_NONE		_MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG)
-#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
-#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_DIRTY)
+#define PAGE_NONE		_MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
+#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
 
-#define __PAGE_NONE		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN)
-#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG)
-#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
-#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
+#define __PAGE_NONE		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
 
 #endif /* __ASSEMBLY__ */
 
@@ -130,10 +130,10 @@ extern struct page *empty_zero_page;
 #define pte_young(pte)		(pte_val(pte) & PTE_AF)
 #define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
 #define pte_write(pte)		(!(pte_val(pte) & PTE_RDONLY))
-#define pte_exec(pte)		(!(pte_val(pte) & PTE_XN))
+#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
 #define pte_present_exec_user(pte)	\
-	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \
+	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
 	 (PTE_VALID | PTE_USER))
 
 #define PTE_BIT_FUNC(fn,op) \
@@ -159,6 +159,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
 	if (pte_present_exec_user(pte))
 		__sync_icache_dcache(pte, addr);
+	if (!pte_dirty(pte))
+		pte = pte_wrprotect(pte);
 	set_pte(ptep, pte);
 }
 
@@ -262,7 +264,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY;
+	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
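The pattern across the new protection macros: every user mapping sets PTE_PXN, so the kernel can never be tricked into executing user pages, and only the *_EXEC variants leave PTE_UXN clear; pte_exec() correspondingly tests UXN alone. A quick illustrative check (not kernel code):

	pte_t pte = pfn_pte(pfn, PAGE_READONLY_EXEC);
	/* PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY: UXN clear, PXN set */

	BUG_ON(!pte_exec(pte));	/* EL0 may execute this page...          */
				/* ...but EL1 may not, thanks to PTE_PXN */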
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 77f696c14339..ab239b2c456f 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -128,11 +128,6 @@ unsigned long get_wchan(struct task_struct *p);
128extern struct task_struct *cpu_switch_to(struct task_struct *prev, 128extern struct task_struct *cpu_switch_to(struct task_struct *prev,
129 struct task_struct *next); 129 struct task_struct *next);
130 130
131/*
132 * Create a new kernel thread
133 */
134extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
135
136#define task_pt_regs(p) \ 131#define task_pt_regs(p) \
137 ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) 132 ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
138 133
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index b04d3404f0d1..4ce845f8ee1c 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -30,7 +30,17 @@
 #define COMPAT_PTRACE_SETVFPREGS	28
 #define COMPAT_PTRACE_GETHBPREGS	29
 #define COMPAT_PTRACE_SETHBPREGS	30
+
+/* AArch32 CPSR bits */
+#define COMPAT_PSR_MODE_MASK	0x0000001f
 #define COMPAT_PSR_MODE_USR	0x00000010
+#define COMPAT_PSR_MODE_FIQ	0x00000011
+#define COMPAT_PSR_MODE_IRQ	0x00000012
+#define COMPAT_PSR_MODE_SVC	0x00000013
+#define COMPAT_PSR_MODE_ABT	0x00000017
+#define COMPAT_PSR_MODE_HYP	0x0000001a
+#define COMPAT_PSR_MODE_UND	0x0000001b
+#define COMPAT_PSR_MODE_SYS	0x0000001f
 #define COMPAT_PSR_T_BIT	0x00000020
 #define COMPAT_PSR_IT_MASK	0x0600fc00	/* If-Then execution state mask */
 /*
@@ -44,10 +54,27 @@
 
 /* sizeof(struct user) for AArch32 */
 #define COMPAT_USER_SZ	296
-/* AArch32 uses x13 as the stack pointer... */
+
+/* Architecturally defined mapping between AArch32 and AArch64 registers */
+#define compat_usr(x)	regs[(x)]
 #define compat_sp	regs[13]
-/* ... and x14 as the link register. */
 #define compat_lr	regs[14]
+#define compat_sp_hyp	regs[15]
+#define compat_sp_irq	regs[16]
+#define compat_lr_irq	regs[17]
+#define compat_sp_svc	regs[18]
+#define compat_lr_svc	regs[19]
+#define compat_sp_abt	regs[20]
+#define compat_lr_abt	regs[21]
+#define compat_sp_und	regs[22]
+#define compat_lr_und	regs[23]
+#define compat_r8_fiq	regs[24]
+#define compat_r9_fiq	regs[25]
+#define compat_r10_fiq	regs[26]
+#define compat_r11_fiq	regs[27]
+#define compat_r12_fiq	regs[28]
+#define compat_sp_fiq	regs[29]
+#define compat_lr_fiq	regs[30]
 
 /*
  * This struct defines the way the registers are stored on the stack during an
diff --git a/arch/arm64/include/asm/syscalls.h b/arch/arm64/include/asm/syscalls.h
index 09ff33572aab..20d63b290665 100644
--- a/arch/arm64/include/asm/syscalls.h
+++ b/arch/arm64/include/asm/syscalls.h
@@ -23,14 +23,6 @@
 /*
  * System call wrappers implemented in kernel/entry.S.
  */
-asmlinkage long sys_execve_wrapper(const char __user *filename,
-				   const char __user *const __user *argv,
-				   const char __user *const __user *envp);
-asmlinkage long sys_clone_wrapper(unsigned long clone_flags,
-				  unsigned long newsp,
-				  void __user *parent_tid,
-				  unsigned long tls_val,
-				  void __user *child_tid);
 asmlinkage long sys_rt_sigreturn_wrapper(void);
 asmlinkage long sys_sigaltstack_wrapper(const stack_t __user *uss,
 					stack_t __user *uoss);
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 68aff2816e86..d69aeea6da1e 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -24,5 +24,9 @@
24#define __ARCH_WANT_SYS_SIGPROCMASK 24#define __ARCH_WANT_SYS_SIGPROCMASK
25#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND 25#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
26#define __ARCH_WANT_COMPAT_SYS_SENDFILE 26#define __ARCH_WANT_COMPAT_SYS_SENDFILE
27#define __ARCH_WANT_SYS_FORK
28#define __ARCH_WANT_SYS_VFORK
27#endif 29#endif
30#define __ARCH_WANT_SYS_EXECVE
31#define __ARCH_WANT_SYS_CLONE
28#include <uapi/asm/unistd.h> 32#include <uapi/asm/unistd.h>
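These __ARCH_WANT_SYS_* defines opt in to the generic fork/vfork/execve/clone entry points, replacing the arch-specific wrappers deleted below; CLONE_BACKWARDS (selected in the Kconfig hunk above) preserves the argument order the old arm64 sys_clone used. Roughly what the generic clone body reduces to under these options in a 3.7-era tree (an approximation, not a verbatim quote of kernel/fork.c):

	SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
			int __user *, parent_tidptr, int, tls_val,
			int __user *, child_tidptr)
	{
		return do_fork(clone_flags, newsp, current_pt_regs(), 0,
			       parent_tidptr, child_tidptr);
	}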
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 6d909faebf28..58432625fdb3 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -23,7 +23,7 @@
 
 __SYSCALL(0, sys_restart_syscall)
 __SYSCALL(1, sys_exit)
-__SYSCALL(2, compat_sys_fork_wrapper)
+__SYSCALL(2, sys_fork)
 __SYSCALL(3, sys_read)
 __SYSCALL(4, sys_write)
 __SYSCALL(5, compat_sys_open)
@@ -32,7 +32,7 @@ __SYSCALL(7, sys_ni_syscall) /* 7 was sys_waitpid */
 __SYSCALL(8, sys_creat)
 __SYSCALL(9, sys_link)
 __SYSCALL(10, sys_unlink)
-__SYSCALL(11, compat_sys_execve_wrapper)
+__SYSCALL(11, compat_sys_execve)
 __SYSCALL(12, sys_chdir)
 __SYSCALL(13, sys_ni_syscall)			/* 13 was sys_time */
 __SYSCALL(14, sys_mknod)
@@ -141,7 +141,7 @@ __SYSCALL(116, compat_sys_sysinfo)
 __SYSCALL(117, sys_ni_syscall)			/* 117 was sys_ipc */
 __SYSCALL(118, sys_fsync)
 __SYSCALL(119, compat_sys_sigreturn_wrapper)
-__SYSCALL(120, compat_sys_clone_wrapper)
+__SYSCALL(120, sys_clone)
 __SYSCALL(121, sys_setdomainname)
 __SYSCALL(122, sys_newuname)
 __SYSCALL(123, sys_ni_syscall)			/* 123 was sys_modify_ldt */
@@ -211,7 +211,7 @@ __SYSCALL(186, compat_sys_sigaltstack_wrapper)
 __SYSCALL(187, compat_sys_sendfile)
 __SYSCALL(188, sys_ni_syscall)			/* 188 reserved */
 __SYSCALL(189, sys_ni_syscall)			/* 189 reserved */
-__SYSCALL(190, compat_sys_vfork_wrapper)
+__SYSCALL(190, sys_vfork)
 __SYSCALL(191, compat_sys_getrlimit)		/* SuS compliant getrlimit */
 __SYSCALL(192, sys_mmap_pgoff)
 __SYSCALL(193, compat_sys_truncate64_wrapper)
@@ -392,8 +392,8 @@ __SYSCALL(367, sys_fanotify_init)
 __SYSCALL(368, compat_sys_fanotify_mark_wrapper)
 __SYSCALL(369, sys_prlimit64)
 __SYSCALL(370, sys_name_to_handle_at)
-__SYSCALL(371, sys_open_by_handle_at)
-__SYSCALL(372, sys_clock_adjtime)
+__SYSCALL(371, compat_sys_open_by_handle_at)
+__SYSCALL(372, compat_sys_clock_adjtime)
 __SYSCALL(373, sys_syncfs)
 
 #define __NR_compat_syscalls		374
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
new file mode 100644
index 000000000000..439827271e3d
--- /dev/null
+++ b/arch/arm64/include/asm/virt.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software: you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ASM__VIRT_H
19#define __ASM__VIRT_H
20
21#define BOOT_CPU_MODE_EL2 (0x0e12b007)
22
23#ifndef __ASSEMBLY__
24
25/*
26 * __boot_cpu_mode records what mode CPUs were booted in.
27 * A correctly-implemented bootloader must start all CPUs in the same mode:
28 * In this case, both 32bit halves of __boot_cpu_mode will contain the
29 * same value (either 0 if booted in EL1, BOOT_CPU_MODE_EL2 if booted in EL2).
30 *
31 * Should the bootloader fail to do this, the two values will be different.
32 * This allows the kernel to flag an error when the secondaries have come up.
33 */
34extern u32 __boot_cpu_mode[2];
35
36void __hyp_set_vectors(phys_addr_t phys_vector_base);
37phys_addr_t __hyp_get_vectors(void);
38
39/* Reports the availability of HYP mode */
40static inline bool is_hyp_mode_available(void)
41{
42 return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
43 __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
44}
45
46/* Check if the bootloader has booted CPUs in different modes */
47static inline bool is_hyp_mode_mismatched(void)
48{
49 return __boot_cpu_mode[0] != __boot_cpu_mode[1];
50}
51
52#endif /* __ASSEMBLY__ */
53
54#endif /* ! __ASM__VIRT_H */
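A sketch of how a hypervisor loader would be expected to use this interface once all CPUs are up (the caller is hypothetical; only the three virt.h symbols are real):

	static int install_hyp_vectors(phys_addr_t vectors)	/* hypothetical caller */
	{
		if (is_hyp_mode_mismatched())
			return -EINVAL;		/* CPUs booted in different modes */
		if (!is_hyp_mode_available())
			return -ENODEV;		/* no EL2 anywhere, nothing to do */

		__hyp_set_vectors(vectors);	/* must be 2KB-aligned (see hyp-stub.S) */
		return 0;
	}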
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index e2caff1b812a..74239c31e25a 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -8,7 +8,8 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 # Object file lists.
 arm64-obj-y		:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
-			   sys.o stacktrace.o time.o traps.o io.o vdso.o
+			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
+			   hyp-stub.o
 
 arm64-obj-$(CONFIG_COMPAT)	+= sys32.o kuser32.o signal32.o		\
 					   sys_compat.o
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 17988a6e7ea2..6a27cd6dbfa6 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -20,6 +20,7 @@
 #include <linux/linkage.h>
 
 #include <asm/assembler.h>
+#include <asm/fpsimdmacros.h>
 
 /*
  * Save the FP registers.
@@ -27,26 +28,7 @@
  * x0 - pointer to struct fpsimd_state
  */
 ENTRY(fpsimd_save_state)
-	stp	q0, q1, [x0, #16 * 0]
-	stp	q2, q3, [x0, #16 * 2]
-	stp	q4, q5, [x0, #16 * 4]
-	stp	q6, q7, [x0, #16 * 6]
-	stp	q8, q9, [x0, #16 * 8]
-	stp	q10, q11, [x0, #16 * 10]
-	stp	q12, q13, [x0, #16 * 12]
-	stp	q14, q15, [x0, #16 * 14]
-	stp	q16, q17, [x0, #16 * 16]
-	stp	q18, q19, [x0, #16 * 18]
-	stp	q20, q21, [x0, #16 * 20]
-	stp	q22, q23, [x0, #16 * 22]
-	stp	q24, q25, [x0, #16 * 24]
-	stp	q26, q27, [x0, #16 * 26]
-	stp	q28, q29, [x0, #16 * 28]
-	stp	q30, q31, [x0, #16 * 30]!
-	mrs	x8, fpsr
-	str	w8, [x0, #16 * 2]
-	mrs	x8, fpcr
-	str	w8, [x0, #16 * 2 + 4]
+	fpsimd_save x0, 8
 	ret
 ENDPROC(fpsimd_save_state)
 
@@ -56,25 +38,6 @@ ENDPROC(fpsimd_save_state)
  * x0 - pointer to struct fpsimd_state
  */
 ENTRY(fpsimd_load_state)
-	ldp	q0, q1, [x0, #16 * 0]
-	ldp	q2, q3, [x0, #16 * 2]
-	ldp	q4, q5, [x0, #16 * 4]
-	ldp	q6, q7, [x0, #16 * 6]
-	ldp	q8, q9, [x0, #16 * 8]
-	ldp	q10, q11, [x0, #16 * 10]
-	ldp	q12, q13, [x0, #16 * 12]
-	ldp	q14, q15, [x0, #16 * 14]
-	ldp	q16, q17, [x0, #16 * 16]
-	ldp	q18, q19, [x0, #16 * 18]
-	ldp	q20, q21, [x0, #16 * 20]
-	ldp	q22, q23, [x0, #16 * 22]
-	ldp	q24, q25, [x0, #16 * 24]
-	ldp	q26, q27, [x0, #16 * 26]
-	ldp	q28, q29, [x0, #16 * 28]
-	ldp	q30, q31, [x0, #16 * 30]!
-	ldr	w8, [x0, #16 * 2]
-	ldr	w9, [x0, #16 * 2 + 4]
-	msr	fpsr, x8
-	msr	fpcr, x9
+	fpsimd_restore x0, 8
 	ret
 ENDPROC(fpsimd_load_state)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a6f3f7da6880..9c94f404ded6 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -148,10 +148,6 @@ tsk .req x28 // current thread_info
 /*
  * Exception vectors.
  */
-	.macro	ventry	label
-	.align	7
-	b	\label
-	.endm
 
 	.align	11
 ENTRY(vectors)
@@ -594,7 +590,7 @@ work_resched:
 /*
  * "slow" syscall return path.
  */
-ENTRY(ret_to_user)
+ret_to_user:
 	disable_irq				// disable interrupts
 	ldr	x1, [tsk, #TI_FLAGS]
 	and	x2, x1, #_TIF_WORK_MASK
@@ -611,7 +607,10 @@ ENDPROC(ret_to_user)
  */
 ENTRY(ret_from_fork)
 	bl	schedule_tail
-	get_thread_info tsk
+	cbz	x19, 1f				// not a kernel thread
+	mov	x0, x20
+	blr	x19
+1:	get_thread_info tsk
 	b	ret_to_user
 ENDPROC(ret_from_fork)
 
@@ -673,16 +672,6 @@ __sys_trace_return:
 /*
  * Special system call wrappers.
  */
-ENTRY(sys_execve_wrapper)
-	mov	x3, sp
-	b	sys_execve
-ENDPROC(sys_execve_wrapper)
-
-ENTRY(sys_clone_wrapper)
-	mov	x5, sp
-	b	sys_clone
-ENDPROC(sys_clone_wrapper)
-
 ENTRY(sys_rt_sigreturn_wrapper)
 	mov	x0, sp
 	b	sys_rt_sigreturn
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a2f02b63eae9..368ad1f7c36c 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -31,6 +31,7 @@
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
+#include <asm/virt.h>
 
 /*
  * swapper_pg_dir is the virtual address of the initial page table. We place
@@ -115,13 +116,13 @@
 
 ENTRY(stext)
 	mov	x21, x0				// x21=FDT
+	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
 	bl	el2_setup			// Drop to EL1
 	mrs	x22, midr_el1			// x22=cpuid
 	mov	x0, x22
 	bl	lookup_processor_type
 	mov	x23, x0				// x23=current cpu_table
 	cbz	x23, __error_p			// invalid processor (x23=0)?
-	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
 	bl	__vet_fdt
 	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
 	/*
@@ -147,17 +148,23 @@ ENTRY(el2_setup)
 	mrs	x0, CurrentEL
 	cmp	x0, #PSR_MODE_EL2t
 	ccmp	x0, #PSR_MODE_EL2h, #0x4, ne
+	ldr	x0, =__boot_cpu_mode		// Compute __boot_cpu_mode
+	add	x0, x0, x28
 	b.eq	1f
+	str	wzr, [x0]			// Remember we don't have EL2...
 	ret
 
 	/* Hyp configuration. */
-1:	mov	x0, #(1 << 31)			// 64-bit EL1
+1:	ldr	w1, =BOOT_CPU_MODE_EL2
+	str	w1, [x0, #4]			// This CPU has EL2
+	mov	x0, #(1 << 31)			// 64-bit EL1
 	msr	hcr_el2, x0
 
 	/* Generic timers. */
 	mrs	x0, cnthctl_el2
 	orr	x0, x0, #3			// Enable EL1 physical timers
 	msr	cnthctl_el2, x0
+	msr	cntvoff_el2, xzr		// Clear virtual offset
 
 	/* Populate ID registers. */
 	mrs	x0, midr_el1
@@ -178,6 +185,13 @@ ENTRY(el2_setup)
 	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
 #endif
 
+	/* Stage-2 translation */
+	msr	vttbr_el2, xzr
+
+	/* Hypervisor stub */
+	adr	x0, __hyp_stub_vectors
+	msr	vbar_el2, x0
+
 	/* spsr */
 	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
 		      PSR_MODE_EL1h)
@@ -186,6 +200,19 @@ ENTRY(el2_setup)
 	eret
 ENDPROC(el2_setup)
 
+/*
+ * We need to find out the CPU boot mode long after boot, so we need to
+ * store it in a writable variable.
+ *
+ * This is not in .bss, because we set it sufficiently early that the boot-time
+ * zeroing of .bss would clobber it.
+ */
+	.pushsection	.data
+ENTRY(__boot_cpu_mode)
+	.long	BOOT_CPU_MODE_EL2
+	.long	0
+	.popsection
+
 	.align	3
 2:	.quad	.
 	.quad	PAGE_OFFSET
@@ -201,6 +228,7 @@ ENDPROC(el2_setup)
  * cores are held until we're ready for them to initialise.
  */
 ENTRY(secondary_holding_pen)
+	bl	__calc_phys_offset		// x24=phys offset
 	bl	el2_setup			// Drop to EL1
 	mrs	x0, mpidr_el1
 	and	x0, x0, #15			// CPU number
@@ -226,7 +254,6 @@ ENTRY(secondary_startup)
 	mov	x23, x0				// x23=current cpu_table
 	cbz	x23, __error_p			// invalid processor (x23=0)?
 
-	bl	__calc_phys_offset		// x24=phys offset
 	pgtbl	x25, x26, x24			// x25=TTBR0, x26=TTBR1
 	ldr	x12, [x23, #CPU_INFO_SETUP]
 	add	x12, x12, x28			// __virt_to_phys
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
new file mode 100644
index 000000000000..0959611d9ff1
--- /dev/null
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -0,0 +1,109 @@
1/*
2 * Hypervisor stub
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Author: Marc Zyngier <marc.zyngier@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/init.h>
21#include <linux/linkage.h>
22
23#include <asm/assembler.h>
24#include <asm/ptrace.h>
25#include <asm/virt.h>
26
27 .text
28 .align 11
29
30ENTRY(__hyp_stub_vectors)
31 ventry el2_sync_invalid // Synchronous EL2t
32 ventry el2_irq_invalid // IRQ EL2t
33 ventry el2_fiq_invalid // FIQ EL2t
34 ventry el2_error_invalid // Error EL2t
35
36 ventry el2_sync_invalid // Synchronous EL2h
37 ventry el2_irq_invalid // IRQ EL2h
38 ventry el2_fiq_invalid // FIQ EL2h
39 ventry el2_error_invalid // Error EL2h
40
41 ventry el1_sync // Synchronous 64-bit EL1
42 ventry el1_irq_invalid // IRQ 64-bit EL1
43 ventry el1_fiq_invalid // FIQ 64-bit EL1
44 ventry el1_error_invalid // Error 64-bit EL1
45
46 ventry el1_sync_invalid // Synchronous 32-bit EL1
47 ventry el1_irq_invalid // IRQ 32-bit EL1
48 ventry el1_fiq_invalid // FIQ 32-bit EL1
49 ventry el1_error_invalid // Error 32-bit EL1
50ENDPROC(__hyp_stub_vectors)
51
52 .align 11
53
54el1_sync:
55 mrs x1, esr_el2
56 lsr x1, x1, #26
57 cmp x1, #0x16
58 b.ne 2f // Not an HVC trap
59 cbz x0, 1f
60 msr vbar_el2, x0 // Set vbar_el2
61 b 2f
621: mrs x0, vbar_el2 // Return vbar_el2
632: eret
64ENDPROC(el1_sync)
65
66.macro invalid_vector label
67\label:
68 b \label
69ENDPROC(\label)
70.endm
71
72 invalid_vector el2_sync_invalid
73 invalid_vector el2_irq_invalid
74 invalid_vector el2_fiq_invalid
75 invalid_vector el2_error_invalid
76 invalid_vector el1_sync_invalid
77 invalid_vector el1_irq_invalid
78 invalid_vector el1_fiq_invalid
79 invalid_vector el1_error_invalid
80
81/*
82 * __hyp_set_vectors: Call this after boot to set the initial hypervisor
83 * vectors as part of hypervisor installation. On an SMP system, this should
84 * be called on each CPU.
85 *
86 * x0 must be the physical address of the new vector table, and must be
87 * 2KB aligned.
88 *
89 * Before calling this, you must check that the stub hypervisor is installed
90 * everywhere, by waiting for any secondary CPUs to be brought up and then
91 * checking that is_hyp_mode_available() is true.
92 *
93 * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
94 * something else went wrong... in such cases, trying to install a new
95 * hypervisor is unlikely to work as desired.
96 *
97 * When you call into your shiny new hypervisor, sp_el2 will contain junk,
98 * so you will need to set that to something sensible at the new hypervisor's
99 * initialisation entry point.
100 */
101
102ENTRY(__hyp_get_vectors)
103 mov x0, xzr
104 // fall through
105ENTRY(__hyp_set_vectors)
106 hvc #0
107 ret
108ENDPROC(__hyp_get_vectors)
109ENDPROC(__hyp_set_vectors)
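The el1_sync handler above packs both entry points into one HVC: ESR_EL2 bits [31:26] carry the exception class, 0x16 being an HVC from AArch64, and x0 selects the operation. As C-style pseudocode (illustrative only; the real logic is the assembly above):

	/* pseudocode for el1_sync */
	if ((esr_el2 >> 26) != 0x16)	/* not an HVC trap: ignore */
		return_from_exception();
	if (x0 == 0)
		x0 = vbar_el2;		/* __hyp_get_vectors */
	else
		vbar_el2 = x0;		/* __hyp_set_vectors */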
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index e04cebdbb47f..cb0956bc96ed 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -234,33 +234,46 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 asmlinkage void ret_from_fork(void) asm("ret_from_fork");
 
 int copy_thread(unsigned long clone_flags, unsigned long stack_start,
-		unsigned long stk_sz, struct task_struct *p,
-		struct pt_regs *regs)
+		unsigned long stk_sz, struct task_struct *p)
 {
 	struct pt_regs *childregs = task_pt_regs(p);
 	unsigned long tls = p->thread.tp_value;
 
-	*childregs = *regs;
-	childregs->regs[0] = 0;
+	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 
-	if (is_compat_thread(task_thread_info(p)))
-		childregs->compat_sp = stack_start;
-	else {
+	if (likely(!(p->flags & PF_KTHREAD))) {
+		*childregs = *current_pt_regs();
+		childregs->regs[0] = 0;
+		if (is_compat_thread(task_thread_info(p))) {
+			if (stack_start)
+				childregs->compat_sp = stack_start;
+		} else {
+			/*
+			 * Read the current TLS pointer from tpidr_el0 as it may be
+			 * out-of-sync with the saved value.
+			 */
+			asm("mrs %0, tpidr_el0" : "=r" (tls));
+			if (stack_start) {
+				/* 16-byte aligned stack mandatory on AArch64 */
+				if (stack_start & 15)
+					return -EINVAL;
+				childregs->sp = stack_start;
+			}
+		}
 		/*
-		 * Read the current TLS pointer from tpidr_el0 as it may be
-		 * out-of-sync with the saved value.
+		 * If a TLS pointer was passed to clone (4th argument), use it
+		 * for the new thread.
 		 */
-		asm("mrs %0, tpidr_el0" : "=r" (tls));
-		childregs->sp = stack_start;
+		if (clone_flags & CLONE_SETTLS)
+			tls = childregs->regs[3];
+	} else {
+		memset(childregs, 0, sizeof(struct pt_regs));
+		childregs->pstate = PSR_MODE_EL1h;
+		p->thread.cpu_context.x19 = stack_start;
+		p->thread.cpu_context.x20 = stk_sz;
 	}
-
-	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
-	p->thread.cpu_context.sp = (unsigned long)childregs;
 	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
-
-	/* If a TLS pointer was passed to clone, use that for the new thread. */
-	if (clone_flags & CLONE_SETTLS)
-		tls = regs->regs[3];
+	p->thread.cpu_context.sp = (unsigned long)childregs;
 	p->thread.tp_value = tls;
 
 	ptrace_hw_copy_thread(p);
@@ -309,43 +322,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	return last;
 }
 
-/*
- * Shuffle the argument into the correct register before calling the
- * thread function.  x1 is the thread argument, x2 is the pointer to
- * the thread function, and x3 points to the exit function.
- */
-extern void kernel_thread_helper(void);
-asm(	".section .text\n"
-"	.align\n"
-"	.type	kernel_thread_helper, #function\n"
-"kernel_thread_helper:\n"
-"	mov	x0, x1\n"
-"	mov	x30, x3\n"
-"	br	x2\n"
-"	.size	kernel_thread_helper, . - kernel_thread_helper\n"
-"	.previous");
-
-#define kernel_thread_exit	do_exit
-
-/*
- * Create a kernel thread.
- */
-pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
-	struct pt_regs regs;
-
-	memset(&regs, 0, sizeof(regs));
-
-	regs.regs[1] = (unsigned long)arg;
-	regs.regs[2] = (unsigned long)fn;
-	regs.regs[3] = (unsigned long)kernel_thread_exit;
-	regs.pc = (unsigned long)kernel_thread_helper;
-	regs.pstate = PSR_MODE_EL1h;
-
-	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
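With GENERIC_KERNEL_THREAD selected, the core kernel passes the thread function and its argument through copy_thread()'s stack_start and stk_sz parameters for PF_KTHREAD children; the hunk above parks them in callee-saved x19/x20, and the ret_from_fork change in entry.S earlier performs the call. Approximately what the generic helper in kernel/fork.c provided at the time (paraphrased, assuming the 3.7-era do_fork() signature):

	pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
	{
		/* stack_start <- fn, stack_size <- arg; the child gets PF_KTHREAD */
		return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
			       (unsigned long)fn, NULL, (unsigned long)arg,
			       NULL, NULL);
	}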
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 8807ba2cf262..abd756315cb5 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -41,6 +41,8 @@
 struct rt_sigframe {
 	struct siginfo info;
 	struct ucontext uc;
+	u64 fp;
+	u64 lr;
 };
 
 static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
@@ -175,6 +177,10 @@ static int setup_sigframe(struct rt_sigframe __user *sf,
 	struct aux_context __user *aux =
 		(struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
 
+	/* set up the stack frame for unwinding */
+	__put_user_error(regs->regs[29], &sf->fp, err);
+	__put_user_error(regs->regs[30], &sf->lr, err);
+
 	for (i = 0; i < 31; i++)
 		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
 				 err);
@@ -196,11 +202,11 @@
 	return err;
 }
 
-static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
-				 int framesize)
+static struct rt_sigframe __user *get_sigframe(struct k_sigaction *ka,
+					       struct pt_regs *regs)
 {
 	unsigned long sp, sp_top;
-	void __user *frame;
+	struct rt_sigframe __user *frame;
 
 	sp = sp_top = regs->sp;
 
@@ -210,11 +216,8 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
 	if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
 		sp = sp_top = current->sas_ss_sp + current->sas_ss_size;
 
-	/* room for stack frame (FP, LR) */
-	sp -= 16;
-
-	sp = (sp - framesize) & ~15;
-	frame = (void __user *)sp;
+	sp = (sp - sizeof(struct rt_sigframe)) & ~15;
+	frame = (struct rt_sigframe __user *)sp;
 
 	/*
 	 * Check that we can actually write to the signal frame.
@@ -225,20 +228,14 @@
 	return frame;
 }
 
-static int setup_return(struct pt_regs *regs, struct k_sigaction *ka,
-			void __user *frame, int usig)
+static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
+			 void __user *frame, int usig)
 {
-	int err = 0;
 	__sigrestore_t sigtramp;
-	unsigned long __user *sp = (unsigned long __user *)regs->sp;
-
-	/* set up the stack frame */
-	__put_user_error(regs->regs[29], sp - 2, err);
-	__put_user_error(regs->regs[30], sp - 1, err);
 
 	regs->regs[0] = usig;
-	regs->regs[29] = regs->sp - 16;
 	regs->sp = (unsigned long)frame;
+	regs->regs[29] = regs->sp + offsetof(struct rt_sigframe, fp);
 	regs->pc = (unsigned long)ka->sa.sa_handler;
 
 	if (ka->sa.sa_flags & SA_RESTORER)
@@ -247,8 +244,6 @@ static int setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
 
 	regs->regs[30] = (unsigned long)sigtramp;
-
-	return err;
 }
 
 static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
@@ -258,7 +253,7 @@ static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 	stack_t stack;
 	int err = 0;
 
-	frame = get_sigframe(ka, regs, sizeof(*frame));
+	frame = get_sigframe(ka, regs);
 	if (!frame)
 		return 1;
 
@@ -272,13 +267,13 @@ static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 	err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));
 
 	err |= setup_sigframe(frame, regs, set);
-	if (err == 0)
-		err = setup_return(regs, ka, frame, usig);
-
-	if (err == 0 && ka->sa.sa_flags & SA_SIGINFO) {
-		err |= copy_siginfo_to_user(&frame->info, info);
-		regs->regs[1] = (unsigned long)&frame->info;
-		regs->regs[2] = (unsigned long)&frame->uc;
+	if (err == 0) {
+		setup_return(regs, ka, frame, usig);
+		if (ka->sa.sa_flags & SA_SIGINFO) {
+			err |= copy_siginfo_to_user(&frame->info, info);
+			regs->regs[1] = (unsigned long)&frame->info;
+			regs->regs[2] = (unsigned long)&frame->uc;
+		}
 	}
 
 	return err;
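Moving the FP/LR pair into struct rt_sigframe and pointing regs[29] at &frame->fp gives the signal handler a well-formed AArch64 frame record, so unwinding from the handler back into the interrupted code is one ordinary step (illustrative):

	struct frame_record { u64 fp, lr; };	/* what x29 points at, per the AAPCS64 */

	const struct frame_record *rec =
		(const struct frame_record *)regs->regs[29];	/* == &frame->fp */
	unsigned long interrupted_fp = rec->fp;	/* x29 at the point of the signal */
	unsigned long interrupted_lr = rec->lr;	/* x30 at the point of the signal */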
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 4654824747a4..a4db3d22aac4 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -578,9 +578,9 @@ badframe:
 	return 0;
 }
 
-static inline void __user *compat_get_sigframe(struct k_sigaction *ka,
-					       struct pt_regs *regs,
-					       int framesize)
+static void __user *compat_get_sigframe(struct k_sigaction *ka,
+					struct pt_regs *regs,
+					int framesize)
 {
 	compat_ulong_t sp = regs->compat_sp;
 	void __user *frame;
@@ -605,9 +605,9 @@ static inline void __user *compat_get_sigframe(struct k_sigaction *ka,
 	return frame;
 }
 
-static int compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
-			       compat_ulong_t __user *rc, void __user *frame,
-			       int usig)
+static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
+				compat_ulong_t __user *rc, void __user *frame,
+				int usig)
 {
 	compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
 	compat_ulong_t retcode;
@@ -643,8 +643,6 @@ static int compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	regs->compat_lr = retcode;
 	regs->pc = handler;
 	regs->pstate = spsr;
-
-	return 0;
 }
 
 static int compat_setup_sigframe(struct compat_sigframe __user *sf,
@@ -714,11 +712,9 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 	err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));
 
 	err |= compat_setup_sigframe(&frame->sig, regs, set);
-	if (err == 0)
-		err = compat_setup_return(regs, ka, frame->sig.retcode, frame,
-					  usig);
 
 	if (err == 0) {
+		compat_setup_return(regs, ka, frame->sig.retcode, frame, usig);
 		regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info;
 		regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc;
 	}
@@ -741,7 +737,7 @@ int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set,
 
 	err |= compat_setup_sigframe(frame, regs, set);
 	if (err == 0)
-		err = compat_setup_return(regs, ka, frame->retcode, frame, usig);
+		compat_setup_return(regs, ka, frame->retcode, frame, usig);
 
 	return err;
 }
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index b120df37de35..8292a9b090f8 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -26,85 +26,6 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/syscalls.h> 27#include <linux/syscalls.h>
28 28
29/*
30 * Clone a task - this clones the calling program thread.
31 */
32asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
33 int __user *parent_tidptr, unsigned long tls_val,
34 int __user *child_tidptr, struct pt_regs *regs)
35{
36 if (!newsp)
37 newsp = regs->sp;
38 /* 16-byte aligned stack mandatory on AArch64 */
39 if (newsp & 15)
40 return -EINVAL;
41 return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
42}
43
44/*
45 * sys_execve() executes a new program.
46 */
47asmlinkage long sys_execve(const char __user *filenamei,
48 const char __user *const __user *argv,
49 const char __user *const __user *envp,
50 struct pt_regs *regs)
51{
52 long error;
53 struct filename *filename;
54
55 filename = getname(filenamei);
56 error = PTR_ERR(filename);
57 if (IS_ERR(filename))
58 goto out;
59 error = do_execve(filename->name, argv, envp, regs);
60 putname(filename);
61out:
62 return error;
63}
64
65int kernel_execve(const char *filename,
66 const char *const argv[],
67 const char *const envp[])
68{
69 struct pt_regs regs;
70 int ret;
71
72 memset(&regs, 0, sizeof(struct pt_regs));
73 ret = do_execve(filename,
74 (const char __user *const __user *)argv,
75 (const char __user *const __user *)envp, &regs);
76 if (ret < 0)
77 goto out;
78
79 /*
80 * Save argc to the register structure for userspace.
81 */
82 regs.regs[0] = ret;
83
84 /*
85 * We were successful. We won't be returning to our caller, but
86 * instead to user space by manipulating the kernel stack.
87 */
88 asm( "add x0, %0, %1\n\t"
89 "mov x1, %2\n\t"
90 "mov x2, %3\n\t"
91 "bl memmove\n\t" /* copy regs to top of stack */
92 "mov x27, #0\n\t" /* not a syscall */
93 "mov x28, %0\n\t" /* thread structure */
94 "mov sp, x0\n\t" /* reposition stack pointer */
95 "b ret_to_user"
96 :
97 : "r" (current_thread_info()),
98 "Ir" (THREAD_START_SP - sizeof(regs)),
99 "r" (&regs),
100 "Ir" (sizeof(regs))
101 : "x0", "x1", "x2", "x27", "x28", "x30", "memory");
102
103 out:
104 return ret;
105}
106EXPORT_SYMBOL(kernel_execve);
107
108asmlinkage long sys_mmap(unsigned long addr, unsigned long len, 29asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
109 unsigned long prot, unsigned long flags, 30 unsigned long prot, unsigned long flags,
110 unsigned long fd, off_t off) 31 unsigned long fd, off_t off)
@@ -118,8 +39,6 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
118/* 39/*
119 * Wrappers to pass the pt_regs argument. 40 * Wrappers to pass the pt_regs argument.
120 */ 41 */
121#define sys_execve sys_execve_wrapper
122#define sys_clone sys_clone_wrapper
123#define sys_rt_sigreturn sys_rt_sigreturn_wrapper 42#define sys_rt_sigreturn sys_rt_sigreturn_wrapper
124#define sys_sigaltstack sys_sigaltstack_wrapper 43#define sys_sigaltstack sys_sigaltstack_wrapper
125 44
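
All of the arch-private clone/execve plumbing above, including the hand-rolled kernel_execve() that switched stacks by hand, is dropped in favour of the kernel's generic implementations; only sys_mmap and the two remaining pt_regs wrappers stay behind. One detail worth keeping in mind is the contract the deleted sys_clone enforced: AArch64 requires a 16-byte aligned stack, so a misaligned newsp earned -EINVAL. Callers still have to honour that alignment. A hedged userspace sketch using glibc's clone(2), with an illustrative stack size:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    static int child_fn(void *arg)
    {
        (void)arg;
        return 0;
    }

    int main(void)
    {
        size_t size = 64 * 1024;       /* illustrative stack size */
        char *stack = malloc(size);
        if (!stack)
            return 1;

        /* Stacks grow down: pass the top, rounded to 16 bytes. */
        uintptr_t top = ((uintptr_t)stack + size) & ~(uintptr_t)15;

        int pid = clone(child_fn, (void *)top, SIGCHLD, NULL);
        if (pid < 0) {
            perror("clone");
            return 1;
        }
        waitpid(pid, NULL, 0);
        free(stack);
        return 0;
    }
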
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index 54c4aec47a08..7ef59e9245ef 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -26,25 +26,6 @@
26/* 26/*
27 * System call wrappers for the AArch32 compatibility layer. 27 * System call wrappers for the AArch32 compatibility layer.
28 */ 28 */
29compat_sys_fork_wrapper:
30 mov x0, sp
31 b compat_sys_fork
32ENDPROC(compat_sys_fork_wrapper)
33
34compat_sys_vfork_wrapper:
35 mov x0, sp
36 b compat_sys_vfork
37ENDPROC(compat_sys_vfork_wrapper)
38
39compat_sys_execve_wrapper:
40 mov x3, sp
41 b compat_sys_execve
42ENDPROC(compat_sys_execve_wrapper)
43
44compat_sys_clone_wrapper:
45 mov x5, sp
46 b compat_sys_clone
47ENDPROC(compat_sys_clone_wrapper)
48 29
49compat_sys_sigreturn_wrapper: 30compat_sys_sigreturn_wrapper:
50 mov x0, sp 31 mov x0, sp
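
The four deleted wrappers all existed for one reason: their C handlers took a struct pt_regs * argument, and at syscall entry sp points at the saved register frame, so "mov x0, sp; b handler" is a tail call that passes the frame as the handler's first argument. With the generic fork/vfork/clone/execve the handlers no longer take regs, leaving only the sigreturn wrappers. A toy C model of the forwarding pattern (all names illustrative, not kernel API):

    #include <stdio.h>

    struct frame { long regs[4]; };    /* stand-in for struct pt_regs */

    /* A handler that genuinely needs the register frame,
     * like compat_sys_sigreturn(). */
    static long handler(struct frame *f)
    {
        return f->regs[0];
    }

    /* The asm wrapper is "mov x0, sp; b handler": forward the implicit
     * frame pointer as the explicit first argument, then tail-call. */
    static long wrapper(struct frame *current_frame)
    {
        return handler(current_frame);
    }

    int main(void)
    {
        struct frame f = { { 42, 0, 0, 0 } };
        printf("%ld\n", wrapper(&f));
        return 0;
    }
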
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 906e3bd270b0..f7b05edf8ce3 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -28,45 +28,6 @@
28#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
29#include <asm/unistd32.h> 29#include <asm/unistd32.h>
30 30
31asmlinkage int compat_sys_fork(struct pt_regs *regs)
32{
33 return do_fork(SIGCHLD, regs->compat_sp, regs, 0, NULL, NULL);
34}
35
36asmlinkage int compat_sys_clone(unsigned long clone_flags, unsigned long newsp,
37 int __user *parent_tidptr, int tls_val,
38 int __user *child_tidptr, struct pt_regs *regs)
39{
40 if (!newsp)
41 newsp = regs->compat_sp;
42
43 return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
44}
45
46asmlinkage int compat_sys_vfork(struct pt_regs *regs)
47{
48 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->compat_sp,
49 regs, 0, NULL, NULL);
50}
51
52asmlinkage int compat_sys_execve(const char __user *filenamei,
53 compat_uptr_t argv, compat_uptr_t envp,
54 struct pt_regs *regs)
55{
56 int error;
57 struct filename *filename;
58
59 filename = getname(filenamei);
60 error = PTR_ERR(filename);
61 if (IS_ERR(filename))
62 goto out;
63 error = compat_do_execve(filename->name, compat_ptr(argv),
64 compat_ptr(envp), regs);
65 putname(filename);
66out:
67 return error;
68}
69
70asmlinkage int compat_sys_sched_rr_get_interval(compat_pid_t pid, 31asmlinkage int compat_sys_sched_rr_get_interval(compat_pid_t pid,
71 struct compat_timespec __user *interval) 32 struct compat_timespec __user *interval)
72{ 33{
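
Likewise the C-side compat implementations go away. The deleted code documents two things the generic syscalls now take care of: the classic flag recipes (fork is clone(SIGCHLD), vfork is clone(CLONE_VFORK | CLONE_VM | SIGCHLD)) and the pointer widening that compat_sys_execve performed with compat_ptr(), since a 32-bit task hands the kernel 32-bit user pointers. A standalone model of that widening, with names local to this example:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t compat_uptr_t;    /* 32-bit user pointer, as in the kernel */

    static void *compat_ptr_model(compat_uptr_t p)
    {
        return (void *)(uintptr_t)p;   /* zero-extend to a native pointer */
    }

    int main(void)
    {
        compat_uptr_t p = 0x1000u;     /* hypothetical 32-bit user address */
        printf("%p\n", compat_ptr_model(p));
        return 0;
    }
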
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ba457943a16b..c958cb84d75f 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -239,7 +239,7 @@ void update_vsyscall(struct timekeeper *tk)
239 if (!use_syscall) { 239 if (!use_syscall) {
240 vdso_data->cs_cycle_last = tk->clock->cycle_last; 240 vdso_data->cs_cycle_last = tk->clock->cycle_last;
241 vdso_data->xtime_clock_sec = tk->xtime_sec; 241 vdso_data->xtime_clock_sec = tk->xtime_sec;
242 vdso_data->xtime_clock_nsec = tk->xtime_nsec >> tk->shift; 242 vdso_data->xtime_clock_nsec = tk->xtime_nsec;
243 vdso_data->cs_mult = tk->mult; 243 vdso_data->cs_mult = tk->mult;
244 vdso_data->cs_shift = tk->shift; 244 vdso_data->cs_shift = tk->shift;
245 vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; 245 vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
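
This is the vdso.c side of the deferred-shift change: the kernel now exports tk->xtime_nsec as-is, which in the timekeeper is already scaled by 2^shift, instead of pre-shifting it down. The vDSO therefore does all of its arithmetic in the shifted domain and shifts once at the very end, which avoids losing the sub-nanosecond bits of the interpolated part. A C model of the computation the new assembly performs (a sketch; names are local to this example):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    struct ts_model { uint64_t tv_sec; uint64_t tv_nsec; };

    /* xtime_snsec models the kernel's tk->xtime_nsec: nanoseconds << shift. */
    static struct ts_model get_tspec_model(uint64_t xtime_sec,
                                           uint64_t xtime_snsec,
                                           uint64_t cycle_delta,
                                           uint32_t mult, uint32_t shift)
    {
        struct ts_model ts;
        uint64_t mask = (1ULL << 56) - 1;             /* 56 usable counter bits */
        uint64_t snsec = xtime_snsec + (cycle_delta & mask) * mult;
        uint64_t snsec_per_sec = NSEC_PER_SEC << shift;

        ts.tv_sec  = xtime_sec + snsec / snsec_per_sec;
        snsec     %= snsec_per_sec;
        ts.tv_nsec = snsec >> shift;                  /* single shift, at the end */
        return ts;
    }

    int main(void)
    {
        struct ts_model ts = get_tspec_model(1000, 500ULL << 8, 1 << 20, 4, 8);
        printf("%llu.%09llu\n", (unsigned long long)ts.tv_sec,
               (unsigned long long)ts.tv_nsec);
        return 0;
    }
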
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index dcb8c203a3b2..8bf658d974f9 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -62,18 +62,19 @@ ENTRY(__kernel_gettimeofday)
62 /* If tv is NULL, skip to the timezone code. */ 62 /* If tv is NULL, skip to the timezone code. */
63 cbz x0, 2f 63 cbz x0, 2f
64 bl __do_get_tspec 64 bl __do_get_tspec
65 seqcnt_check w13, 1b 65 seqcnt_check w9, 1b
66 66
67 /* Convert ns to us. */ 67 /* Convert ns to us. */
68 mov x11, #1000 68 mov x13, #1000
69 udiv x10, x10, x11 69 lsl x13, x13, x12
70 stp x9, x10, [x0, #TVAL_TV_SEC] 70 udiv x11, x11, x13
71 stp x10, x11, [x0, #TVAL_TV_SEC]
712: 722:
72 /* If tz is NULL, return 0. */ 73 /* If tz is NULL, return 0. */
73 cbz x1, 3f 74 cbz x1, 3f
74 ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST] 75 ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
75 seqcnt_read w13 76 seqcnt_read w9
76 seqcnt_check w13, 1b 77 seqcnt_check w9, 1b
77 stp w4, w5, [x1, #TZ_MINWEST] 78 stp w4, w5, [x1, #TZ_MINWEST]
783: 793:
79 mov x0, xzr 80 mov x0, xzr
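
Because __do_get_tspec now hands back nanoseconds still scaled by 2^shift (in x11, with the shift itself in w12), gettimeofday can convert to microseconds with a single divide by a correspondingly shifted constant, since (ns << s) / (1000 << s) == ns / 1000. That is all the new "lsl x13, x13, x12" is doing. Equivalent C, as a sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t usecs_from_shifted_ns(uint64_t shifted_ns, uint32_t shift)
    {
        /* (ns << shift) / (1000 << shift) == ns / 1000 */
        return shifted_ns / (1000ULL << shift);
    }

    int main(void)
    {
        uint32_t shift = 4;
        uint64_t ns = 1500;            /* 1500ns -> 1us */
        printf("%llu\n",
               (unsigned long long)usecs_from_shifted_ns(ns << shift, shift));
        return 0;
    }
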
@@ -102,17 +103,17 @@ ENTRY(__kernel_clock_gettime)
102 cbnz use_syscall, 7f 103 cbnz use_syscall, 7f
103 104
104 bl __do_get_tspec 105 bl __do_get_tspec
105 seqcnt_check w13, 1b 106 seqcnt_check w9, 1b
106 107
107 cmp w0, #CLOCK_MONOTONIC 108 cmp w0, #CLOCK_MONOTONIC
108 b.ne 6f 109 b.ne 6f
109 110
110 /* Get wtm timespec. */ 111 /* Get wtm timespec. */
111 ldp x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC] 112 ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
112 113
113 /* Check the sequence counter. */ 114 /* Check the sequence counter. */
114 seqcnt_read w13 115 seqcnt_read w9
115 seqcnt_check w13, 1b 116 seqcnt_check w9, 1b
116 b 4f 117 b 4f
1172: 1182:
118 cmp w0, #CLOCK_REALTIME_COARSE 119 cmp w0, #CLOCK_REALTIME_COARSE
@@ -122,37 +123,40 @@ ENTRY(__kernel_clock_gettime)
122 /* Get coarse timespec. */ 123 /* Get coarse timespec. */
123 adr vdso_data, _vdso_data 124 adr vdso_data, _vdso_data
1243: seqcnt_acquire 1253: seqcnt_acquire
125 ldp x9, x10, [vdso_data, #VDSO_XTIME_CRS_SEC] 126 ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
126
127 cmp w0, #CLOCK_MONOTONIC_COARSE
128 b.ne 6f
129 127
130 /* Get wtm timespec. */ 128 /* Get wtm timespec. */
131 ldp x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC] 129 ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
132 130
133 /* Check the sequence counter. */ 131 /* Check the sequence counter. */
134 seqcnt_read w13 132 seqcnt_read w9
135 seqcnt_check w13, 3b 133 seqcnt_check w9, 3b
134
135 cmp w0, #CLOCK_MONOTONIC_COARSE
136 b.ne 6f
1364: 1374:
137 /* Add on wtm timespec. */ 138 /* Add on wtm timespec. */
138 add x9, x9, x14 139 add x10, x10, x13
139 add x10, x10, x15 140 lsl x14, x14, x12
141 add x11, x11, x14
140 142
141 /* Normalise the new timespec. */ 143 /* Normalise the new timespec. */
142 mov x14, #NSEC_PER_SEC_LO16 144 mov x15, #NSEC_PER_SEC_LO16
143 movk x14, #NSEC_PER_SEC_HI16, lsl #16 145 movk x15, #NSEC_PER_SEC_HI16, lsl #16
144 cmp x10, x14 146 lsl x15, x15, x12
147 cmp x11, x15
145 b.lt 5f 148 b.lt 5f
146 sub x10, x10, x14 149 sub x11, x11, x15
147 add x9, x9, #1 150 add x10, x10, #1
1485: 1515:
149 cmp x10, #0 152 cmp x11, #0
150 b.ge 6f 153 b.ge 6f
151 add x10, x10, x14 154 add x11, x11, x15
152 sub x9, x9, #1 155 sub x10, x10, #1
153 156
1546: /* Store to the user timespec. */ 1576: /* Store to the user timespec. */
155 stp x9, x10, [x1, #TSPEC_TV_SEC] 158 lsr x11, x11, x12
159 stp x10, x11, [x1, #TSPEC_TV_SEC]
156 mov x0, xzr 160 mov x0, xzr
157 ret x2 161 ret x2
1587: 1627:
@@ -203,39 +207,39 @@ ENDPROC(__kernel_clock_getres)
203 * Expects vdso_data to be initialised. 207 * Expects vdso_data to be initialised.
204 * Clobbers the temporary registers (x9 - x15). 208 * Clobbers the temporary registers (x9 - x15).
205 * Returns: 209 * Returns:
206 * - (x9, x10) = (ts->tv_sec, ts->tv_nsec) 210 * - w9 = vDSO sequence counter
207 * - (x11, x12) = (xtime->tv_sec, xtime->tv_nsec) 211 * - (x10, x11) = (ts->tv_sec, shifted ts->tv_nsec)
208 * - w13 = vDSO sequence counter 212 * - w12 = cs_shift
209 */ 213 */
210ENTRY(__do_get_tspec) 214ENTRY(__do_get_tspec)
211 .cfi_startproc 215 .cfi_startproc
212 216
213 /* Read from the vDSO data page. */ 217 /* Read from the vDSO data page. */
214 ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] 218 ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
215 ldp x11, x12, [vdso_data, #VDSO_XTIME_CLK_SEC] 219 ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
216 ldp w14, w15, [vdso_data, #VDSO_CS_MULT] 220 ldp w11, w12, [vdso_data, #VDSO_CS_MULT]
217 seqcnt_read w13 221 seqcnt_read w9
218 222
219 /* Read the physical counter. */ 223 /* Read the virtual counter. */
220 isb 224 isb
221 mrs x9, cntpct_el0 225 mrs x15, cntvct_el0
222 226
223 /* Calculate cycle delta and convert to ns. */ 227 /* Calculate cycle delta and convert to ns. */
224 sub x10, x9, x10 228 sub x10, x15, x10
225 /* We can only guarantee 56 bits of precision. */ 229 /* We can only guarantee 56 bits of precision. */
226 movn x9, #0xff0, lsl #48 230 movn x15, #0xff00, lsl #48
227 and x10, x9, x10 231 and x10, x15, x10
228 mul x10, x10, x14 232 mul x10, x10, x11
229 lsr x10, x10, x15
230 233
231 /* Use the kernel time to calculate the new timespec. */ 234 /* Use the kernel time to calculate the new timespec. */
232 add x10, x12, x10 235 mov x11, #NSEC_PER_SEC_LO16
233 mov x14, #NSEC_PER_SEC_LO16 236 movk x11, #NSEC_PER_SEC_HI16, lsl #16
234 movk x14, #NSEC_PER_SEC_HI16, lsl #16 237 lsl x11, x11, x12
235 udiv x15, x10, x14 238 add x15, x10, x14
236 add x9, x15, x11 239 udiv x14, x15, x11
237 mul x14, x14, x15 240 add x10, x13, x14
238 sub x10, x10, x14 241 mul x13, x14, x11
242 sub x11, x15, x13
239 243
240 ret 244 ret
241 .cfi_endproc 245 .cfi_endproc
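
Three things change in __do_get_tspec itself: it reads the virtual counter (cntvct_el0) rather than the physical one, it returns shifted nanoseconds per the new comment block, and, almost in passing, it fixes the 56-bit precision mask. movn with #0xff00 shifted by 48 builds ~0xff00000000000000, i.e. the bottom 56 bits set; the old #0xff0 built a mask with a hole in the top byte. A quick standalone check of the two constants:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t new_mask = ~(0xff00ULL << 48);    /* 0x00ffffffffffffff */
        uint64_t old_mask = ~(0x0ff0ULL << 48);    /* 0xf00fffffffffffff: wrong */

        printf("new: 0x%016" PRIx64 "\nold: 0x%016" PRIx64 "\n",
               new_mask, old_mask);
        return 0;
    }
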
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 1909a69983ca..afadae6682ed 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -36,6 +36,8 @@
36#include <asm/pgtable.h> 36#include <asm/pgtable.h>
37#include <asm/tlbflush.h> 37#include <asm/tlbflush.h>
38 38
39static const char *fault_name(unsigned int esr);
40
39/* 41/*
40 * Dump out the page tables associated with 'addr' in mm 'mm'. 42 * Dump out the page tables associated with 'addr' in mm 'mm'.
41 */ 43 */
@@ -112,8 +114,9 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
112 struct siginfo si; 114 struct siginfo si;
113 115
114 if (show_unhandled_signals) { 116 if (show_unhandled_signals) {
115 pr_info("%s[%d]: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n", 117 pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
116 tsk->comm, task_pid_nr(tsk), sig, addr, esr); 118 tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
119 addr, esr);
117 show_pte(tsk->mm, addr); 120 show_pte(tsk->mm, addr);
118 show_regs(regs); 121 show_regs(regs);
119 } 122 }
@@ -450,6 +453,12 @@ static struct fault_info {
450 { do_bad, SIGBUS, 0, "unknown 63" }, 453 { do_bad, SIGBUS, 0, "unknown 63" },
451}; 454};
452 455
456static const char *fault_name(unsigned int esr)
457{
458 const struct fault_info *inf = fault_info + (esr & 63);
459 return inf->name;
460}
461
453/* 462/*
454 * Dispatch a data abort to the relevant handler. 463 * Dispatch a data abort to the relevant handler.
455 */ 464 */
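
The fault.c change is cosmetic but handy when reading logs: the low six bits of the ESR are the fault status code, and they index the same 64-entry fault_info table the dispatcher already uses, so the unhandled-fault message can name the fault instead of printing bare numbers. A toy standalone model of the lookup (table deliberately truncated, entries illustrative):

    #include <stdio.h>

    static const char *const fault_names[64] = {
        [0]  = "address size fault",           /* illustrative entries only */
        [4]  = "level 0 translation fault",
        [63] = "unknown 63",
    };

    static const char *fault_name_model(unsigned int esr)
    {
        const char *name = fault_names[esr & 63];
        return name ? name : "unknown";        /* real table has all 64 slots */
    }

    int main(void)
    {
        printf("%s\n", fault_name_model(0x92000004));  /* hypothetical ESR */
        return 0;
    }
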
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index c144adb1682f..88611c3a421a 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -27,10 +27,6 @@
27 27
28#include "mm.h" 28#include "mm.h"
29 29
30void flush_cache_mm(struct mm_struct *mm)
31{
32}
33
34void flush_cache_range(struct vm_area_struct *vma, unsigned long start, 30void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
35 unsigned long end) 31 unsigned long end)
36{ 32{
@@ -38,11 +34,6 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
38 __flush_icache_all(); 34 __flush_icache_all();
39} 35}
40 36
41void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
42 unsigned long pfn)
43{
44}
45
46static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, 37static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
47 unsigned long uaddr, void *kaddr, 38 unsigned long uaddr, void *kaddr,
48 unsigned long len) 39 unsigned long len)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 4cd28931dba9..800aac306a08 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -79,8 +79,8 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
79 79
80#ifdef CONFIG_ZONE_DMA32 80#ifdef CONFIG_ZONE_DMA32
81 /* 4GB maximum for 32-bit only capable devices */ 81 /* 4GB maximum for 32-bit only capable devices */
82 max_dma32 = min(max, MAX_DMA32_PFN); 82 max_dma32 = max(min, min(max, MAX_DMA32_PFN));
83 zone_size[ZONE_DMA32] = max(min, max_dma32) - min; 83 zone_size[ZONE_DMA32] = max_dma32 - min;
84#endif 84#endif
85 zone_size[ZONE_NORMAL] = max - max_dma32; 85 zone_size[ZONE_NORMAL] = max - max_dma32;
86 86
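
The init.c fix matters on systems with no memory below 4GB. There, min (the first present PFN) exceeds MAX_DMA32_PFN, and the old code still set max_dma32 to the 4GB boundary, so ZONE_NORMAL's size, max - max_dma32, counted pages that do not exist; clamping max_dma32 into [min, max] makes ZONE_NORMAL exactly span the present memory. A worked check with hypothetical PFNs:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        /* Hypothetical PFNs for a machine whose RAM all sits above 4GB. */
        uint64_t min = 0x880000, max = 0x900000, max_dma32_pfn = 0x100000;

        uint64_t old_dma32 = MIN(max, max_dma32_pfn);
        uint64_t new_dma32 = MAX(min, MIN(max, max_dma32_pfn));

        printf("old ZONE_NORMAL pages: 0x%llx\n",        /* inflated */
               (unsigned long long)(max - old_dma32));
        printf("new ZONE_NORMAL pages: 0x%llx\n",        /* exactly max - min */
               (unsigned long long)(max - new_dma32));
        return 0;
    }
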