author:    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 15:31:53 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 15:31:53 -0400
commit:    5167d09ffad5b16b574d35ce3047ed34caf1e837
tree:      fc45dd9cbd578f5010e7b8208ecdfc6534547989 /arch/arm64/kernel
parent:    8533ce72718871fb528d853391746f36243273af
parent:    ea1719672f59eeb85829073b567495c4f472ac9f
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "Once again, Catalin's off on holiday and I'm looking after the arm64
  tree. Please can you pull the following arm64 updates for 3.17?

  Note that this branch also includes the new GICv3 driver (merged via
  a stable tag from Jason's irqchip tree), since there is a fix for
  older binutils on top.

  Changes include:
   - context tracking support (NO_HZ_FULL) which narrowly missed 3.16
   - vDSO layout rework following Andy's work on x86
   - TEXT_OFFSET fuzzing for bootloader testing
   - /proc/cpuinfo tidy-up
   - preliminary work to support 48-bit virtual addresses, but this is
     currently disabled until KVM has been ported to use it (the
     patches do, however, bring some nice clean-up)
   - boot-time CPU sanity checks (especially useful on heterogenous
     systems)
   - support for syscall auditing
   - support for CC_STACKPROTECTOR
   - defconfig updates"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (55 commits)
  arm64: add newline to I-cache policy string
  Revert "arm64: dmi: Add SMBIOS/DMI support"
  arm64: fpsimd: fix a typo in fpsimd_save_partial_state ENDPROC
  arm64: don't call break hooks for BRK exceptions from EL0
  arm64: defconfig: enable devtmpfs mount option
  arm64: vdso: fix build error when switching from LE to BE
  arm64: defconfig: add virtio support for running as a kvm guest
  arm64: gicv3: Allow GICv3 compilation with older binutils
  arm64: fix soft lockup due to large tlb flush range
  arm64/crypto: fix makefile rule for aes-glue-%.o
  arm64: Do not invoke audit_syscall_* functions if !CONFIG_AUDIT_SYSCALL
  arm64: Fix barriers used for page table modifications
  arm64: Add support for 48-bit VA space with 64KB page configuration
  arm64: asm/pgtable.h pmd/pud definitions clean-up
  arm64: Determine the vmalloc/vmemmap space at build time based on VA_BITS
  arm64: Clean up the initial page table creation in head.S
  arm64: Remove asm/pgtable-*level-types.h files
  arm64: Remove asm/pgtable-*level-hwdef.h files
  arm64: Convert bool ARM64_x_LEVELS to int ARM64_PGTABLE_LEVELS
  arm64: mm: Implement 4 levels of translation tables
  ...
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/Makefile          |   3
-rw-r--r--  arch/arm64/kernel/cpu_ops.c         |   2
-rw-r--r--  arch/arm64/kernel/cpuinfo.c         | 192
-rw-r--r--  arch/arm64/kernel/debug-monitors.c  |  22
-rw-r--r--  arch/arm64/kernel/entry-fpsimd.S    |   2
-rw-r--r--  arch/arm64/kernel/entry.S           |  56
-rw-r--r--  arch/arm64/kernel/head.S            | 121
-rw-r--r--  arch/arm64/kernel/hyp-stub.S        |   1
-rw-r--r--  arch/arm64/kernel/image.h           |  62
-rw-r--r--  arch/arm64/kernel/kuser32.S         |   2
-rw-r--r--  arch/arm64/kernel/process.c         |   6
-rw-r--r--  arch/arm64/kernel/psci.c            |   8
-rw-r--r--  arch/arm64/kernel/ptrace.c          |  11
-rw-r--r--  arch/arm64/kernel/setup.c           |  47
-rw-r--r--  arch/arm64/kernel/signal32.c        |   2
-rw-r--r--  arch/arm64/kernel/smp.c             |   6
-rw-r--r--  arch/arm64/kernel/suspend.c         |   2
-rw-r--r--  arch/arm64/kernel/sys_compat.c      |   2
-rw-r--r--  arch/arm64/kernel/topology.c        |  47
-rw-r--r--  arch/arm64/kernel/traps.c           |  13
-rw-r--r--  arch/arm64/kernel/vdso.c            |  94
-rw-r--r--  arch/arm64/kernel/vdso/Makefile     |   6
-rw-r--r--  arch/arm64/kernel/vdso/vdso.lds.S   |   4
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S     |  16
24 files changed, 562 insertions(+), 165 deletions(-)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index cdaedad3afe5..27c72ef4fd7a 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -15,7 +15,8 @@ CFLAGS_REMOVE_return_address.o = -pg
 arm64-obj-y		:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
-			   hyp-stub.o psci.o cpu_ops.o insn.o return_address.o
+			   hyp-stub.o psci.o cpu_ops.o insn.o return_address.o	\
+			   cpuinfo.o
 
 arm64-obj-$(CONFIG_COMPAT)	+= sys32.o kuser32.o signal32.o		\
 					   sys_compat.o
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index d62d12fb36c8..cce952440c64 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -30,8 +30,8 @@ const struct cpu_operations *cpu_ops[NR_CPUS];
 static const struct cpu_operations *supported_cpu_ops[] __initconst = {
 #ifdef CONFIG_SMP
 	&smp_spin_table_ops,
-	&cpu_psci_ops,
 #endif
+	&cpu_psci_ops,
 	NULL,
 };
 
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
new file mode 100644
index 000000000000..f798f66634af
--- /dev/null
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -0,0 +1,192 @@
+/*
+ * Record and handle CPU attributes.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <asm/arch_timer.h>
+#include <asm/cachetype.h>
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+/*
+ * In case the boot CPU is hotpluggable, we record its initial state and
+ * current state separately. Certain system registers may contain different
+ * values depending on configuration at or after reset.
+ */
+DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+static struct cpuinfo_arm64 boot_cpu_data;
+
+static char *icache_policy_str[] = {
+	[ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
+	[ICACHE_POLICY_AIVIVT] = "AIVIVT",
+	[ICACHE_POLICY_VIPT] = "VIPT",
+	[ICACHE_POLICY_PIPT] = "PIPT",
+};
+
+unsigned long __icache_flags;
+
+static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
+{
+	unsigned int cpu = smp_processor_id();
+	u32 l1ip = CTR_L1IP(info->reg_ctr);
+
+	if (l1ip != ICACHE_POLICY_PIPT)
+		set_bit(ICACHEF_ALIASING, &__icache_flags);
+	if (l1ip == ICACHE_POLICY_AIVIVT)
+		set_bit(ICACHEF_AIVIVT, &__icache_flags);
+
+	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
+}
+
+static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
+{
+	if ((boot & mask) == (cur & mask))
+		return 0;
+
+	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016lx, CPU%d: %#016lx\n",
+		name, (unsigned long)boot, cpu, (unsigned long)cur);
+
+	return 1;
+}
+
+#define CHECK_MASK(field, mask, boot, cur, cpu) \
+	check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu)
+
+#define CHECK(field, boot, cur, cpu) \
+	CHECK_MASK(field, ~0ULL, boot, cur, cpu)
+
+/*
+ * Verify that CPUs don't have unexpected differences that will cause problems.
+ */
+static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
+{
+	unsigned int cpu = smp_processor_id();
+	struct cpuinfo_arm64 *boot = &boot_cpu_data;
+	unsigned int diff = 0;
+
+	/*
+	 * The kernel can handle differing I-cache policies, but otherwise
+	 * caches should look identical. Userspace JITs will make use of
+	 * *minLine.
+	 */
+	diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu);
+
+	/*
+	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
+	 * could result in too much or too little memory being zeroed if a
+	 * process is preempted and migrated between CPUs.
+	 */
+	diff |= CHECK(dczid, boot, cur, cpu);
+
+	/* If different, timekeeping will be broken (especially with KVM) */
+	diff |= CHECK(cntfrq, boot, cur, cpu);
+
+	/*
+	 * Even in big.LITTLE, processors should be identical instruction-set
+	 * wise.
+	 */
+	diff |= CHECK(id_aa64isar0, boot, cur, cpu);
+	diff |= CHECK(id_aa64isar1, boot, cur, cpu);
+
+	/*
+	 * Differing PARange support is fine as long as all peripherals and
+	 * memory are mapped within the minimum PARange of all CPUs.
+	 * Linux should not care about secure memory.
+	 * ID_AA64MMFR1 is currently RES0.
+	 */
+	diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu);
+	diff |= CHECK(id_aa64mmfr1, boot, cur, cpu);
+
+	/*
+	 * EL3 is not our concern.
+	 * ID_AA64PFR1 is currently RES0.
+	 */
+	diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu);
+	diff |= CHECK(id_aa64pfr1, boot, cur, cpu);
+
+	/*
+	 * If we have AArch32, we care about 32-bit features for compat. These
+	 * registers should be RES0 otherwise.
+	 */
+	diff |= CHECK(id_isar0, boot, cur, cpu);
+	diff |= CHECK(id_isar1, boot, cur, cpu);
+	diff |= CHECK(id_isar2, boot, cur, cpu);
+	diff |= CHECK(id_isar3, boot, cur, cpu);
+	diff |= CHECK(id_isar4, boot, cur, cpu);
+	diff |= CHECK(id_isar5, boot, cur, cpu);
+	diff |= CHECK(id_mmfr0, boot, cur, cpu);
+	diff |= CHECK(id_mmfr1, boot, cur, cpu);
+	diff |= CHECK(id_mmfr2, boot, cur, cpu);
+	diff |= CHECK(id_mmfr3, boot, cur, cpu);
+	diff |= CHECK(id_pfr0, boot, cur, cpu);
+	diff |= CHECK(id_pfr1, boot, cur, cpu);
+
+	/*
+	 * Mismatched CPU features are a recipe for disaster. Don't even
+	 * pretend to support them.
+	 */
+	WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC,
+			"Unsupported CPU feature variation.");
+}
+
+static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
+{
+	info->reg_cntfrq = arch_timer_get_cntfrq();
+	info->reg_ctr = read_cpuid_cachetype();
+	info->reg_dczid = read_cpuid(DCZID_EL0);
+	info->reg_midr = read_cpuid_id();
+
+	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
+	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
+	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+
+	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+	info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+	info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+	info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+	info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+	info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+	info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+	info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+
+	cpuinfo_detect_icache_policy(info);
+}
+
+void cpuinfo_store_cpu(void)
+{
+	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
+	__cpuinfo_store_cpu(info);
+	cpuinfo_sanity_check(info);
+}
+
+void __init cpuinfo_store_boot_cpu(void)
+{
+	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
+	__cpuinfo_store_cpu(info);
+
+	boot_cpu_data = *info;
+}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index a7fb874b595e..fe5b94078d82 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -315,20 +315,20 @@ static int brk_handler(unsigned long addr, unsigned int esr,
 {
 	siginfo_t info;
 
-	if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
-		return 0;
+	if (user_mode(regs)) {
+		info = (siginfo_t) {
+			.si_signo = SIGTRAP,
+			.si_errno = 0,
+			.si_code  = TRAP_BRKPT,
+			.si_addr  = (void __user *)instruction_pointer(regs),
+		};
 
-	if (!user_mode(regs))
+		force_sig_info(SIGTRAP, &info, current);
+	} else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
+		pr_warning("Unexpected kernel BRK exception at EL1\n");
 		return -EFAULT;
+	}
 
-	info = (siginfo_t) {
-		.si_signo = SIGTRAP,
-		.si_errno = 0,
-		.si_code  = TRAP_BRKPT,
-		.si_addr  = (void __user *)instruction_pointer(regs),
-	};
-
-	force_sig_info(SIGTRAP, &info, current);
 	return 0;
 }
 
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index d358ccacfc00..c44a82f146b1 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -52,7 +52,7 @@ ENDPROC(fpsimd_load_state)
 ENTRY(fpsimd_save_partial_state)
 	fpsimd_save_partial x0, 1, 8, 9
 	ret
-ENDPROC(fpsimd_load_partial_state)
+ENDPROC(fpsimd_save_partial_state)
 
 /*
  * Load the bottom n FP registers.
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9ce04ba6bcb0..f0b5e5120a87 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -27,7 +27,32 @@
 #include <asm/esr.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
-#include <asm/unistd32.h>
+
+/*
+ * Context tracking subsystem. Used to instrument transitions
+ * between user and kernel mode.
+ */
+	.macro ct_user_exit, syscall = 0
+#ifdef CONFIG_CONTEXT_TRACKING
+	bl	context_tracking_user_exit
+	.if \syscall == 1
+	/*
+	 * Save/restore needed during syscalls. Restore syscall arguments from
+	 * the values already saved on stack during kernel_entry.
+	 */
+	ldp	x0, x1, [sp]
+	ldp	x2, x3, [sp, #S_X2]
+	ldp	x4, x5, [sp, #S_X4]
+	ldp	x6, x7, [sp, #S_X6]
+	.endif
+#endif
+	.endm
+
+	.macro ct_user_enter
+#ifdef CONFIG_CONTEXT_TRACKING
+	bl	context_tracking_user_enter
+#endif
+	.endm
 
 /*
  * Bad Abort numbers
@@ -91,6 +116,7 @@
 	.macro	kernel_exit, el, ret = 0
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
+	ct_user_enter
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	.endif
 	.if	\ret
@@ -353,7 +379,6 @@ el0_sync:
 	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
 	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
 	b.eq	el0_svc
-	adr	lr, ret_to_user
 	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
 	b.eq	el0_da
 	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
@@ -382,7 +407,6 @@ el0_sync_compat:
 	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
 	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
 	b.eq	el0_svc_compat
-	adr	lr, ret_to_user
 	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
 	b.eq	el0_da
 	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
@@ -425,48 +449,59 @@ el0_da:
 	/*
 	 * Data abort handling
 	 */
-	mrs	x0, far_el1
-	bic	x0, x0, #(0xff << 56)
+	mrs	x26, far_el1
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
+	ct_user_exit
+	bic	x0, x26, #(0xff << 56)
 	mov	x1, x25
 	mov	x2, sp
+	adr	lr, ret_to_user
 	b	do_mem_abort
 el0_ia:
 	/*
 	 * Instruction abort handling
 	 */
-	mrs	x0, far_el1
+	mrs	x26, far_el1
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
+	ct_user_exit
+	mov	x0, x26
 	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
 	mov	x2, sp
+	adr	lr, ret_to_user
 	b	do_mem_abort
 el0_fpsimd_acc:
 	/*
 	 * Floating Point or Advanced SIMD access
 	 */
 	enable_dbg
+	ct_user_exit
 	mov	x0, x25
 	mov	x1, sp
+	adr	lr, ret_to_user
 	b	do_fpsimd_acc
 el0_fpsimd_exc:
 	/*
 	 * Floating Point or Advanced SIMD exception
 	 */
 	enable_dbg
+	ct_user_exit
 	mov	x0, x25
 	mov	x1, sp
+	adr	lr, ret_to_user
 	b	do_fpsimd_exc
 el0_sp_pc:
 	/*
 	 * Stack or PC alignment exception handling
 	 */
-	mrs	x0, far_el1
+	mrs	x26, far_el1
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
+	mov	x0, x26
 	mov	x1, x25
 	mov	x2, sp
+	adr	lr, ret_to_user
 	b	do_sp_pc_abort
 el0_undef:
 	/*
@@ -474,7 +509,9 @@ el0_undef:
 	 */
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
+	ct_user_exit
 	mov	x0, sp
+	adr	lr, ret_to_user
 	b	do_undefinstr
 el0_dbg:
 	/*
@@ -486,12 +523,15 @@ el0_dbg:
 	mov	x2, sp
 	bl	do_debug_exception
 	enable_dbg
+	ct_user_exit
 	b	ret_to_user
 el0_inv:
 	enable_dbg
+	ct_user_exit
 	mov	x0, sp
 	mov	x1, #BAD_SYNC
 	mrs	x2, esr_el1
+	adr	lr, ret_to_user
 	b	bad_mode
 ENDPROC(el0_sync)
 
@@ -504,6 +544,7 @@ el0_irq_naked:
 	bl	trace_hardirqs_off
 #endif
 
+	ct_user_exit
 	irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -608,6 +649,7 @@ el0_svc:
 el0_svc_naked:					// compat entry point
 	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
 	enable_dbg_and_irq
+	ct_user_exit 1
 
 	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
 	tst	x16, #_TIF_SYSCALL_WORK
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a2c1195abb7f..144f10567f82 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -22,6 +22,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
@@ -35,37 +36,31 @@
 #include <asm/page.h>
 #include <asm/virt.h>
 
-/*
- * swapper_pg_dir is the virtual address of the initial page table. We place
- * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
- * 2 pages and is placed below swapper_pg_dir.
- */
 #define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
 
-#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
-#error KERNEL_RAM_VADDR must start at 0xXXX80000
+#if (TEXT_OFFSET & 0xf) != 0
+#error TEXT_OFFSET must be at least 16B aligned
+#elif (PAGE_OFFSET & 0xfffff) != 0
+#error PAGE_OFFSET must be at least 2MB aligned
+#elif TEXT_OFFSET > 0xfffff
+#error TEXT_OFFSET must be less than 2MB
 #endif
 
-#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)
-
-	.globl	swapper_pg_dir
-	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
-
-	.globl	idmap_pg_dir
-	.equ	idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
-
-	.macro	pgtbl, ttb0, ttb1, phys
-	add	\ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
-	sub	\ttb0, \ttb1, #IDMAP_DIR_SIZE
+	.macro	pgtbl, ttb0, ttb1, virt_to_phys
+	ldr	\ttb1, =swapper_pg_dir
+	ldr	\ttb0, =idmap_pg_dir
+	add	\ttb1, \ttb1, \virt_to_phys
+	add	\ttb0, \ttb0, \virt_to_phys
 	.endm
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define BLOCK_SHIFT	PAGE_SHIFT
 #define BLOCK_SIZE	PAGE_SIZE
+#define TABLE_SHIFT	PMD_SHIFT
 #else
 #define BLOCK_SHIFT	SECTION_SHIFT
 #define BLOCK_SIZE	SECTION_SIZE
+#define TABLE_SHIFT	PUD_SHIFT
 #endif
 
 #define KERNEL_START	KERNEL_RAM_VADDR
@@ -120,9 +115,9 @@ efi_head:
 	b	stext				// branch to kernel start, magic
 	.long	0				// reserved
 #endif
-	.quad	TEXT_OFFSET			// Image load offset from start of RAM
-	.quad	0				// reserved
-	.quad	0				// reserved
+	.quad	_kernel_offset_le		// Image load offset from start of RAM, little-endian
+	.quad	_kernel_size_le			// Effective size of kernel image, little-endian
+	.quad	_kernel_flags_le		// Informative flags, little-endian
 	.quad	0				// reserved
 	.quad	0				// reserved
 	.quad	0				// reserved
@@ -295,6 +290,23 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
 	msr	cnthctl_el2, x0
 	msr	cntvoff_el2, xzr		// Clear virtual offset
 
+#ifdef CONFIG_ARM_GIC_V3
+	/* GICv3 system register access */
+	mrs	x0, id_aa64pfr0_el1
+	ubfx	x0, x0, #24, #4
+	cmp	x0, #1
+	b.ne	3f
+
+	mrs_s	x0, ICC_SRE_EL2
+	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
+	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
+	msr_s	ICC_SRE_EL2, x0
+	isb					// Make sure SRE is now set
+	msr_s	ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults
+
+3:
+#endif
+
 	/* Populate ID registers. */
 	mrs	x0, midr_el1
 	mrs	x1, mpidr_el1
@@ -413,7 +425,7 @@ ENTRY(secondary_startup)
 	mov	x23, x0				// x23=current cpu_table
 	cbz	x23, __error_p			// invalid processor (x23=0)?
 
-	pgtbl	x25, x26, x24			// x25=TTBR0, x26=TTBR1
+	pgtbl	x25, x26, x28			// x25=TTBR0, x26=TTBR1
 	ldr	x12, [x23, #CPU_INFO_SETUP]
 	add	x12, x12, x28			// __virt_to_phys
 	blr	x12				// initialise processor
@@ -455,8 +467,13 @@ ENDPROC(__enable_mmu)
  * x27 = *virtual* address to jump to upon completion
  *
  * other registers depend on the function called upon completion
+ *
+ * We align the entire function to the smallest power of two larger than it to
+ * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
+ * close to the end of a 512MB or 1GB block we might require an additional
+ * table to map the entire function.
  */
-	.align	6
+	.align	4
 __turn_mmu_on:
 	msr	sctlr_el1, x0
 	isb
@@ -479,17 +496,38 @@ ENDPROC(__calc_phys_offset)
 	.quad	PAGE_OFFSET
 
 /*
- * Macro to populate the PGD for the corresponding block entry in the next
- * level (tbl) for the given virtual address.
+ * Macro to create a table entry to the next page.
+ *
+ * tbl:	page table address
+ * virt:	virtual address
+ * shift:	#imm page table shift
+ * ptrs:	#imm pointers per table page
+ *
+ * Preserves:	virt
+ * Corrupts:	tmp1, tmp2
+ * Returns:	tbl -> next level table page address
+ */
+	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
+	lsr	\tmp1, \virt, #\shift
+	and	\tmp1, \tmp1, #\ptrs - 1	// table index
+	add	\tmp2, \tbl, #PAGE_SIZE
+	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
+	str	\tmp2, [\tbl, \tmp1, lsl #3]
+	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
+	.endm
+
+/*
+ * Macro to populate the PGD (and possibily PUD) for the corresponding
+ * block entry in the next level (tbl) for the given virtual address.
  *
- * Preserves:	pgd, tbl, virt
+ * Preserves:	tbl, next, virt
  * Corrupts:	tmp1, tmp2
  */
-	.macro	create_pgd_entry, pgd, tbl, virt, tmp1, tmp2
-	lsr	\tmp1, \virt, #PGDIR_SHIFT
-	and	\tmp1, \tmp1, #PTRS_PER_PGD - 1	// PGD index
-	orr	\tmp2, \tbl, #3			// PGD entry table type
-	str	\tmp2, [\pgd, \tmp1, lsl #3]
+	.macro	create_pgd_entry, tbl, virt, tmp1, tmp2
+	create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
+#if SWAPPER_PGTABLE_LEVELS == 3
+	create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
+#endif
 	.endm
 
 /*
@@ -522,7 +560,7 @@ ENDPROC(__calc_phys_offset)
  * - pgd entry for fixed mappings (TTBR1)
  */
 __create_page_tables:
-	pgtbl	x25, x26, x24			// idmap_pg_dir and swapper_pg_dir addresses
+	pgtbl	x25, x26, x28			// idmap_pg_dir and swapper_pg_dir addresses
 	mov	x27, lr
 
 	/*
@@ -550,10 +588,10 @@ __create_page_tables:
 	/*
 	 * Create the identity mapping.
 	 */
-	add	x0, x25, #PAGE_SIZE		// section table address
+	mov	x0, x25				// idmap_pg_dir
 	ldr	x3, =KERNEL_START
 	add	x3, x3, x28			// __pa(KERNEL_START)
-	create_pgd_entry x25, x0, x3, x5, x6
+	create_pgd_entry x0, x3, x5, x6
 	ldr	x6, =KERNEL_END
 	mov	x5, x3				// __pa(KERNEL_START)
 	add	x6, x6, x28			// __pa(KERNEL_END)
@@ -562,9 +600,9 @@ __create_page_tables:
 	/*
 	 * Map the kernel image (starting with PHYS_OFFSET).
 	 */
-	add	x0, x26, #PAGE_SIZE		// section table address
+	mov	x0, x26				// swapper_pg_dir
 	mov	x5, #PAGE_OFFSET
-	create_pgd_entry x26, x0, x5, x3, x6
+	create_pgd_entry x0, x5, x3, x6
 	ldr	x6, =KERNEL_END
 	mov	x3, x24				// phys offset
 	create_block_map x0, x7, x3, x5, x6
@@ -586,13 +624,6 @@ __create_page_tables:
 	create_block_map x0, x7, x3, x5, x6
 1:
 	/*
-	 * Create the pgd entry for the fixed mappings.
-	 */
-	ldr	x5, =FIXADDR_TOP		// Fixed mapping virtual address
-	add	x0, x26, #2 * PAGE_SIZE		// section table address
-	create_pgd_entry x26, x0, x5, x6, x7
-
-	/*
 	 * Since the page tables have been populated with non-cacheable
 	 * accesses (MMU disabled), invalidate the idmap and swapper page
 	 * tables again to remove any speculatively loaded cache lines.
@@ -611,7 +642,7 @@ ENDPROC(__create_page_tables)
 __switch_data:
 	.quad	__mmap_switched
 	.quad	__bss_start			// x6
-	.quad	_end				// x7
+	.quad	__bss_stop			// x7
 	.quad	processor_id			// x4
 	.quad	__fdt_pointer			// x5
 	.quad	memstart_addr			// x6
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 0959611d9ff1..a272f335c289 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -19,6 +19,7 @@
 
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
new file mode 100644
index 000000000000..8fae0756e175
--- /dev/null
+++ b/arch/arm64/kernel/image.h
@@ -0,0 +1,62 @@
+/*
+ * Linker script macros to generate Image header fields.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_IMAGE_H
+#define __ASM_IMAGE_H
+
+#ifndef LINKER_SCRIPT
+#error This file should only be included in vmlinux.lds.S
+#endif
+
+/*
+ * There aren't any ELF relocations we can use to endian-swap values known only
+ * at link time (e.g. the subtraction of two symbol addresses), so we must get
+ * the linker to endian-swap certain values before emitting them.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define DATA_LE64(data)					\
+	((((data) & 0x00000000000000ff) << 56) |	\
+	 (((data) & 0x000000000000ff00) << 40) |	\
+	 (((data) & 0x0000000000ff0000) << 24) |	\
+	 (((data) & 0x00000000ff000000) << 8)  |	\
+	 (((data) & 0x000000ff00000000) >> 8)  |	\
+	 (((data) & 0x0000ff0000000000) >> 24) |	\
+	 (((data) & 0x00ff000000000000) >> 40) |	\
+	 (((data) & 0xff00000000000000) >> 56))
+#else
+#define DATA_LE64(data) ((data) & 0xffffffffffffffff)
+#endif
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __HEAD_FLAG_BE	1
+#else
+#define __HEAD_FLAG_BE	0
+#endif
+
+#define __HEAD_FLAGS	(__HEAD_FLAG_BE << 0)
+
+/*
+ * These will output as part of the Image header, which should be little-endian
+ * regardless of the endianness of the kernel. While constant values could be
+ * endian swapped in head.S, all are done here for consistency.
+ */
+#define HEAD_SYMBOLS						\
+	_kernel_size_le		= DATA_LE64(_end - _text);	\
+	_kernel_offset_le	= DATA_LE64(TEXT_OFFSET);	\
+	_kernel_flags_le	= DATA_LE64(__HEAD_FLAGS);
+
+#endif /* __ASM_IMAGE_H */
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 7787208e8cc6..997e6b27ff6a 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -28,7 +28,7 @@
  * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
  */
 
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
 	.align	5
 	.globl	__kuser_helper_start
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 43b7c34f92cb..1309d64aa926 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -51,6 +51,12 @@
 #include <asm/processor.h>
 #include <asm/stacktrace.h>
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 static void setup_restart(void)
 {
 	/*
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 9e9798f91172..553954771a67 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -235,7 +235,7 @@ static void psci_sys_poweroff(void)
  * PSCI Function IDs for v0.2+ are well defined so use
  * standard values.
  */
-static int psci_0_2_init(struct device_node *np)
+static int __init psci_0_2_init(struct device_node *np)
 {
 	int err, ver;
 
@@ -296,7 +296,7 @@ out_put_node:
 /*
  * PSCI < v0.2 get PSCI Function IDs via DT.
  */
-static int psci_0_1_init(struct device_node *np)
+static int __init psci_0_1_init(struct device_node *np)
 {
 	u32 id;
 	int err;
@@ -434,9 +434,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
 	return 0;
 }
 #endif
+#endif
 
 const struct cpu_operations cpu_psci_ops = {
 	.name		= "psci",
+#ifdef CONFIG_SMP
 	.cpu_init	= cpu_psci_cpu_init,
 	.cpu_prepare	= cpu_psci_cpu_prepare,
 	.cpu_boot	= cpu_psci_cpu_boot,
@@ -445,6 +447,6 @@ const struct cpu_operations cpu_psci_ops = {
 	.cpu_die	= cpu_psci_cpu_die,
 	.cpu_kill	= cpu_psci_cpu_kill,
 #endif
+#endif
 };
 
-#endif
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 9fde010c945f..0310811bd77d 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -19,6 +19,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/audit.h>
 #include <linux/compat.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -39,6 +40,7 @@
 #include <asm/compat.h>
 #include <asm/debug-monitors.h>
 #include <asm/pgtable.h>
+#include <asm/syscall.h>
 #include <asm/traps.h>
 #include <asm/system_misc.h>
 
@@ -1113,11 +1115,20 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_enter(regs, regs->syscallno);
 
+#ifdef CONFIG_AUDITSYSCALL
+	audit_syscall_entry(syscall_get_arch(), regs->syscallno,
+		regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);
+#endif
+
 	return regs->syscallno;
 }
 
 asmlinkage void syscall_trace_exit(struct pt_regs *regs)
 {
+#ifdef CONFIG_AUDITSYSCALL
+	audit_syscall_exit(regs);
+#endif
+
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_exit(regs, regs_return_value(regs));
 
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 46d1125571f6..f6f0ccf35ae6 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -45,6 +45,7 @@
 #include <linux/efi.h>
 
 #include <asm/fixmap.h>
+#include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
 #include <asm/cputable.h>
@@ -77,7 +78,6 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 static const char *cpu_name;
-static const char *machine_name;
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -219,6 +219,8 @@ static void __init setup_processor(void)
 	sprintf(init_utsname()->machine, ELF_PLATFORM);
 	elf_hwcap = 0;
 
+	cpuinfo_store_boot_cpu();
+
 	/*
 	 * Check for sane CTR_EL0.CWG value.
 	 */
@@ -307,8 +309,6 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
 		while (true)
 			cpu_relax();
 	}
-
-	machine_name = of_flat_dt_get_machine_name();
 }
 
 /*
@@ -417,14 +417,12 @@ static int __init arm64_device_init(void)
 }
 arch_initcall_sync(arm64_device_init);
 
-static DEFINE_PER_CPU(struct cpu, cpu_data);
-
 static int __init topology_init(void)
 {
 	int i;
 
 	for_each_possible_cpu(i) {
-		struct cpu *cpu = &per_cpu(cpu_data, i);
+		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
 		cpu->hotpluggable = 1;
 		register_cpu(cpu, i);
 	}
@@ -449,10 +447,21 @@ static int c_show(struct seq_file *m, void *v)
 {
 	int i;
 
-	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
-		   cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
+	/*
+	 * Dump out the common processor features in a single line. Userspace
+	 * should read the hwcaps with getauxval(AT_HWCAP) rather than
+	 * attempting to parse this.
+	 */
+	seq_puts(m, "features\t:");
+	for (i = 0; hwcap_str[i]; i++)
+		if (elf_hwcap & (1 << i))
+			seq_printf(m, " %s", hwcap_str[i]);
+	seq_puts(m, "\n\n");
 
 	for_each_online_cpu(i) {
+		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
+		u32 midr = cpuinfo->reg_midr;
+
 		/*
 		 * glibc reads /proc/cpuinfo to determine the number of
 		 * online processors, looking for lines beginning with
@@ -461,25 +470,13 @@ static int c_show(struct seq_file *m, void *v)
 #ifdef CONFIG_SMP
 		seq_printf(m, "processor\t: %d\n", i);
 #endif
+		seq_printf(m, "implementer\t: 0x%02x\n",
+			   MIDR_IMPLEMENTOR(midr));
+		seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr));
+		seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr));
+		seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr));
 	}
 
-	/* dump out the processor features */
-	seq_puts(m, "Features\t: ");
-
-	for (i = 0; hwcap_str[i]; i++)
-		if (elf_hwcap & (1 << i))
-			seq_printf(m, "%s ", hwcap_str[i]);
-
-	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
-	seq_printf(m, "CPU architecture: AArch64\n");
-	seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
-	seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
-	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
-
-	seq_puts(m, "\n");
-
-	seq_printf(m, "Hardware\t: %s\n", machine_name);
-
 	return 0;
 }
 
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 3491c638f172..c5ee208321c3 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -27,7 +27,7 @@
 #include <asm/fpsimd.h>
 #include <asm/signal32.h>
 #include <asm/uaccess.h>
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
 struct compat_sigcontext {
 	/* We always set these two fields to 0 */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 40f38f46c8e0..3e2f5ebbf63e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,6 +39,7 @@
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
+#include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/mmu_context.h>
@@ -155,6 +156,11 @@ asmlinkage void secondary_start_kernel(void)
 		cpu_ops[cpu]->cpu_postboot();
 
 	/*
+	 * Log the CPU info before it is marked online and might get read.
+	 */
+	cpuinfo_store_cpu();
+
+	/*
 	 * Enable GIC and timers.
 	 */
 	notify_cpu_starting(cpu);
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 1fa9ce4afd8f..55a99b9a97e0 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -119,7 +119,7 @@ int cpu_suspend(unsigned long arg)
 extern struct sleep_save_sp sleep_save_sp;
 extern phys_addr_t sleep_idmap_phys;
 
-static int cpu_suspend_init(void)
+static int __init cpu_suspend_init(void)
 {
 	void *ctx_ptr;
 
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 26e9c4eeaba8..de2b0226e06d 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -26,7 +26,7 @@
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
 static inline void
 do_compat_cache_op(unsigned long start, unsigned long end, int flags)
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 43514f905916..b6ee26b0939a 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/sched.h>
 
+#include <asm/cputype.h>
 #include <asm/topology.h>
 
 static int __init get_cpu_for_node(struct device_node *node)
@@ -188,13 +189,9 @@ static int __init parse_dt_topology(void)
 	 * Check that all cores are in the topology; the SMP code will
 	 * only mark cores described in the DT as possible.
 	 */
-	for_each_possible_cpu(cpu) {
-		if (cpu_topology[cpu].cluster_id == -1) {
-			pr_err("CPU%d: No topology information specified\n",
-			       cpu);
+	for_each_possible_cpu(cpu)
+		if (cpu_topology[cpu].cluster_id == -1)
 			ret = -EINVAL;
-		}
-	}
 
 out_map:
 	of_node_put(map);
@@ -219,14 +216,6 @@ static void update_siblings_masks(unsigned int cpuid)
 	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
 	int cpu;
 
-	if (cpuid_topo->cluster_id == -1) {
-		/*
-		 * DT does not contain topology information for this cpu.
-		 */
-		pr_debug("CPU%u: No topology information configured\n", cpuid);
-		return;
-	}
-
 	/* update core and thread sibling masks */
 	for_each_possible_cpu(cpu) {
 		cpu_topo = &cpu_topology[cpu];
@@ -249,6 +238,36 @@ static void update_siblings_masks(unsigned int cpuid)
 
 void store_cpu_topology(unsigned int cpuid)
 {
+	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+	u64 mpidr;
+
+	if (cpuid_topo->cluster_id != -1)
+		goto topology_populated;
+
+	mpidr = read_cpuid_mpidr();
+
+	/* Uniprocessor systems can rely on default topology values */
+	if (mpidr & MPIDR_UP_BITMASK)
+		return;
+
+	/* Create cpu topology mapping based on MPIDR. */
+	if (mpidr & MPIDR_MT_BITMASK) {
+		/* Multiprocessor system : Multi-threads per core */
+		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+	} else {
+		/* Multiprocessor system : Single-thread per core */
+		cpuid_topo->thread_id  = -1;
+		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	}
+
+	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
+		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
+		 cpuid_topo->thread_id, mpidr);
+
+topology_populated:
 	update_siblings_masks(cpuid);
 }
 
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c43cfa9b8304..02cd3f023e9a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -156,7 +156,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		frame.pc = thread_saved_pc(tsk);
 	}
 
-	printk("Call trace:\n");
+	pr_emerg("Call trace:\n");
 	while (1) {
 		unsigned long where = frame.pc;
 		int ret;
@@ -331,17 +331,22 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 
 void __pte_error(const char *file, int line, unsigned long val)
 {
-	printk("%s:%d: bad pte %016lx.\n", file, line, val);
+	pr_crit("%s:%d: bad pte %016lx.\n", file, line, val);
 }
 
 void __pmd_error(const char *file, int line, unsigned long val)
 {
-	printk("%s:%d: bad pmd %016lx.\n", file, line, val);
+	pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val);
+}
+
+void __pud_error(const char *file, int line, unsigned long val)
+{
+	pr_crit("%s:%d: bad pud %016lx.\n", file, line, val);
 }
 
 void __pgd_error(const char *file, int line, unsigned long val)
 {
-	printk("%s:%d: bad pgd %016lx.\n", file, line, val);
+	pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
 }
 
 void __init trap_init(void)
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 50384fec56c4..24f2e8c62479 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -88,22 +88,29 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = AARCH32_VECTORS_BASE;
-	int ret;
+	static struct vm_special_mapping spec = {
+		.name	= "[vectors]",
+		.pages	= vectors_page,
+
+	};
+	void *ret;
 
 	down_write(&mm->mmap_sem);
 	current->mm->context.vdso = (void *)addr;
 
 	/* Map vectors page at the high address. */
-	ret = install_special_mapping(mm, addr, PAGE_SIZE,
-				      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
-				      vectors_page);
+	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
+				       VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
+				       &spec);
 
 	up_write(&mm->mmap_sem);
 
-	return ret;
+	return PTR_ERR_OR_ZERO(ret);
 }
 #endif /* CONFIG_COMPAT */
 
+static struct vm_special_mapping vdso_spec[2];
+
 static int __init vdso_init(void)
 {
 	int i;
@@ -114,8 +121,8 @@ static int __init vdso_init(void)
 	}
 
 	vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-	pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
-		vdso_pages + 1, vdso_pages, 1L, &vdso_start);
+	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+		vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
 
 	/* Allocate the vDSO pagelist, plus a page for the data. */
 	vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -123,12 +130,23 @@ static int __init vdso_init(void)
 	if (vdso_pagelist == NULL)
 		return -ENOMEM;
 
+	/* Grab the vDSO data page. */
+	vdso_pagelist[0] = virt_to_page(vdso_data);
+
 	/* Grab the vDSO code pages. */
 	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
+		vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
 
-	/* Grab the vDSO data page. */
-	vdso_pagelist[i] = virt_to_page(vdso_data);
+	/* Populate the special mapping structures */
+	vdso_spec[0] = (struct vm_special_mapping) {
+		.name	= "[vvar]",
+		.pages	= vdso_pagelist,
+	};
+
+	vdso_spec[1] = (struct vm_special_mapping) {
+		.name	= "[vdso]",
+		.pages	= &vdso_pagelist[1],
+	};
 
 	return 0;
 }
@@ -138,52 +156,42 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 			       int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long vdso_base, vdso_mapping_len;
-	int ret;
+	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+	void *ret;
 
+	vdso_text_len = vdso_pages << PAGE_SHIFT;
 	/* Be sure to map the data page */
-	vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
+	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
 
 	down_write(&mm->mmap_sem);
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
-		ret = vdso_base;
+		ret = ERR_PTR(vdso_base);
 		goto up_fail;
 	}
-	mm->context.vdso = (void *)vdso_base;
-
-	ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
-				      VM_READ|VM_EXEC|
-				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				      vdso_pagelist);
-	if (ret) {
-		mm->context.vdso = NULL;
+	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+				       VM_READ|VM_MAYREAD,
+				       &vdso_spec[0]);
+	if (IS_ERR(ret))
 		goto up_fail;
-	}
 
-up_fail:
-	up_write(&mm->mmap_sem);
+	vdso_base += PAGE_SIZE;
+	mm->context.vdso = (void *)vdso_base;
+	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       &vdso_spec[1]);
+	if (IS_ERR(ret))
+		goto up_fail;
 
-	return ret;
-}
 
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-	/*
-	 * We can re-use the vdso pointer in mm_context_t for identifying
-	 * the vectors page for compat applications. The vDSO will always
-	 * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
-	 * it conflicting with the vectors base.
-	 */
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
-#ifdef CONFIG_COMPAT
-		if (vma->vm_start == AARCH32_VECTORS_BASE)
-			return "[vectors]";
-#endif
-		return "[vdso]";
-	}
+	up_write(&mm->mmap_sem);
+	return 0;
 
-	return NULL;
+up_fail:
+	mm->context.vdso = NULL;
+	up_write(&mm->mmap_sem);
+	return PTR_ERR(ret);
 }
 
 /*
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 6d20b7d162d8..ff3bddea482d 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -43,13 +43,13 @@ $(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
 	$(call if_changed,vdsosym)
 
 # Assembly rules for the .S files
-$(obj-vdso): %.o: %.S
+$(obj-vdso): %.o: %.S FORCE
 	$(call if_changed_dep,vdsoas)
 
 # Actual build commands
 quiet_cmd_vdsold = VDSOL $@
       cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
 quiet_cmd_vdsoas = VDSOA $@
       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
 
 # Install commands for the unstripped file
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index 8154b8d1c826..beca249bc2f3 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -28,6 +28,7 @@ OUTPUT_ARCH(aarch64)
 
 SECTIONS
 {
+	PROVIDE(_vdso_data = . - PAGE_SIZE);
 	. = VDSO_LBASE + SIZEOF_HEADERS;
 
 	.hash		: { *(.hash) }			:text
@@ -57,9 +58,6 @@ SECTIONS
 	_end = .;
 	PROVIDE(end = .);
 
-	. = ALIGN(PAGE_SIZE);
-	PROVIDE(_vdso_data = .);
-
 	/DISCARD/	: {
 		*(.note.GNU-stack)
 		*(.data .data.* .gnu.linkonce.d.* .sdata*)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index f1e6d5c032e1..97f0c0429dfa 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -9,6 +9,8 @@
 #include <asm/memory.h>
 #include <asm/page.h>
 
+#include "image.h"
+
 #define ARM_EXIT_KEEP(x)
 #define ARM_EXIT_DISCARD(x)	x
 
@@ -104,9 +106,18 @@ SECTIONS
 	_edata = .;
 
 	BSS_SECTION(0, 0, 0)
+
+	. = ALIGN(PAGE_SIZE);
+	idmap_pg_dir = .;
+	. += IDMAP_DIR_SIZE;
+	swapper_pg_dir = .;
+	. += SWAPPER_DIR_SIZE;
+
 	_end = .;
 
 	STABS_DEBUG
+
+	HEAD_SYMBOLS
 }
 
 /*
@@ -114,3 +125,8 @@ SECTIONS
  */
 ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end),
        "HYP init code too big")
+
+/*
+ * If padding is applied before .head.text, virt<->phys conversions will fail.
+ */
+ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")