Diffstat (limited to 'arch/arm64')
 arch/arm64/crypto/sha2-ce-glue.c       |  1 -
 arch/arm64/include/asm/hw_breakpoint.h |  1 -
 arch/arm64/include/asm/processor.h     |  2 +-
 arch/arm64/include/asm/ptrace.h        |  2 +-
 arch/arm64/kernel/fpsimd.c             |  1 +
 arch/arm64/kernel/head.S               |  4 ----
 arch/arm64/kernel/irq.c                | 12 ++++--------
 arch/arm64/kernel/perf_regs.c          |  6 ++++++
 arch/arm64/kernel/process.c            | 18 ++++++++++++++++++
 arch/arm64/kernel/ptrace.c             | 13 +++++++++----
 arch/arm64/kernel/setup.c              | 40 ++++++++++++++++++++------------------
 arch/arm64/kernel/sys_compat.c         |  6 ++++++
 arch/arm64/kvm/handle_exit.c           |  2 ++
 arch/arm64/kvm/hyp-init.S              |  4 ++++
 14 files changed, 74 insertions(+), 38 deletions(-)
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index c294e67d3925..ae67e88c28b9 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -150,7 +150,6 @@ static void sha2_finup(struct shash_desc *desc, const u8 *data,
         kernel_neon_begin_partial(28);
         sha2_ce_transform(blocks, data, sctx->state, NULL, len);
         kernel_neon_end();
-        data += blocks * SHA256_BLOCK_SIZE;
 }
 
 static int sha224_finup(struct shash_desc *desc, const u8 *data,
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index d064047612b1..52b484b6aa1a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg,
  */
 #define ARM_MAX_BRP             16
 #define ARM_MAX_WRP             16
-#define ARM_MAX_HBP_SLOTS       (ARM_MAX_BRP + ARM_MAX_WRP)
 
 /* Virtual debug register bases. */
 #define AARCH64_DBG_REG_BVR     0
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 3df21feeabdd..286b1bec547c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -139,7 +139,7 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev,
         ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 
 #define KSTK_EIP(tsk)   ((unsigned long)task_pt_regs(tsk)->pc)
-#define KSTK_ESP(tsk)   ((unsigned long)task_pt_regs(tsk)->sp)
+#define KSTK_ESP(tsk)   user_stack_pointer(task_pt_regs(tsk))
 
 /*
  * Prefetching support
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 501000fadb6f..41ed9e13795e 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -137,7 +137,7 @@ struct pt_regs {
         (!((regs)->pstate & PSR_F_BIT))
 
 #define user_stack_pointer(regs) \
-        (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)
+        (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
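
The ptrace.h fix above moves the closing parenthesis so the whole conditional expression is wrapped, not just the condition. That matters now that KSTK_ESP() in processor.h expands through user_stack_pointer(): an unparenthesised ternary lets surrounding operators bind into the macro expansion. A minimal user-space sketch of the hazard (the macro names are illustrative stand-ins, not the kernel's):

#include <stdio.h>

/* Unparenthesised ternary, as in the old user_stack_pointer() */
#define USP_OLD(compat, sp, csp)        (!(compat)) ? (sp) : (csp)
/* Whole expression wrapped, as in the fixed version */
#define USP_NEW(compat, sp, csp)        (!(compat) ? (sp) : (csp))

int main(void)
{
        unsigned long sp = 0x1000, csp = 0x2000;

        /* "2 * (!(0)) ? ..." parses as "(2 * 1) ? 0x1000 : 0x2000",
         * silently discarding the multiplication. */
        printf("old: %#lx\n", 2 * USP_OLD(0, sp, csp));  /* 0x1000 */
        printf("new: %#lx\n", 2 * USP_NEW(0, sp, csp));  /* 0x2000 */
        return 0;
}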
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index ad8aebb1cdef..3dca15634e69 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -270,6 +270,7 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
         case CPU_PM_ENTER:
                 if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
                         fpsimd_save_state(&current->thread.fpsimd_state);
+                this_cpu_write(fpsimd_last_state, NULL);
                 break;
         case CPU_PM_EXIT:
                 if (current->mm)
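
For context on the fpsimd.c change: arm64 restores FPSIMD state lazily, with a per-CPU fpsimd_last_state pointer recording whose state currently lives in the registers so a matching reload can be skipped. The register file does not survive power-down, so CPU_PM_ENTER must invalidate the pointer. A self-contained single-CPU model of that invariant (a sketch, not the kernel's actual code paths):

#include <stdio.h>

struct fpsimd_state { unsigned long vregs[64]; };
struct task { struct fpsimd_state fpsimd; };

/* Per-CPU in the real kernel; one CPU is enough for the sketch. */
static struct fpsimd_state *fpsimd_last_state;

static void load_regs(struct fpsimd_state *st)
{
        printf("reloading FPSIMD registers from %p\n", (void *)st);
}

static void restore_current(struct task *t)
{
        /* Lazy restore: skip the reload if this CPU already holds
         * the task's state in its registers. */
        if (fpsimd_last_state == &t->fpsimd)
                return;
        load_regs(&t->fpsimd);
        fpsimd_last_state = &t->fpsimd;
}

static void cpu_pm_enter(void)
{
        /* Registers are lost across power-down: invalidate the
         * tracking pointer, or the next restore_current() would
         * wrongly take the lazy shortcut on stale registers. */
        fpsimd_last_state = NULL;
}

int main(void)
{
        struct task t = { { { 0 } } };

        restore_current(&t);    /* reloads */
        restore_current(&t);    /* lazy shortcut, no reload */
        cpu_pm_enter();         /* power-down path */
        restore_current(&t);    /* must reload again */
        return 0;
}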
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index bed028364a93..873069056229 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -373,10 +373,6 @@ ENTRY(__boot_cpu_mode)
         .long   0
         .popsection
 
-        .align  3
-2:      .quad   .
-        .quad   PAGE_OFFSET
-
 #ifdef CONFIG_SMP
         .align  3
 1:      .quad   .
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 0f08dfd69ebc..dfa6e3e74fdd 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
         if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
                 return false;
 
-        if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+        if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+                affinity = cpu_online_mask;
                 ret = true;
+        }
 
-        /*
-         * when using forced irq_set_affinity we must ensure that the cpu
-         * being offlined is not present in the affinity mask, it may be
-         * selected as the target CPU otherwise
-         */
-        affinity = cpu_online_mask;
         c = irq_data_get_irq_chip(d);
         if (!c->irq_set_affinity)
                 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-        else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+        else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
                 cpumask_copy(d->affinity, affinity);
 
         return ret;
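
The irq.c rework has two effects: the affinity mask is widened to cpu_online_mask only when no online CPU remains in it (instead of unconditionally, which discarded a still-valid user-requested affinity), and irq_set_affinity() is called non-forced, since the mask passed down now only ever contains online CPUs. A toy model of the resulting decision flow (8-bit masks stand in for struct cpumask; helper names are illustrative):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned char cpumask_t;

static const cpumask_t cpu_online_mask = 0x0f;  /* CPUs 0-3 online */

/* Condensed decision flow of migrate_one_irq() after this patch. */
static bool migrate_one_irq_sketch(cpumask_t *affinity)
{
        bool broke_affinity = false;

        if (!(*affinity & cpu_online_mask)) {
                /* No online CPU left in the requested mask: only now
                 * widen it, and report that affinity was broken. */
                *affinity = cpu_online_mask;
                broke_affinity = true;
        }

        /* ...then irq_set_affinity(d, affinity, false): non-forced,
         * because the mask is guaranteed to hold online CPUs. */
        return broke_affinity;
}

int main(void)
{
        cpumask_t kept = 0x03, orphaned = 0x30;  /* CPUs {0,1} vs {4,5} */

        printf("kept:     broke=%d\n", migrate_one_irq_sketch(&kept));
        printf("orphaned: broke=%d\n", migrate_one_irq_sketch(&orphaned));
        return 0;
}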
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 422ebd63b619..6762ad705587 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -24,6 +24,12 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
                 return regs->compat_lr;
         }
 
+        if ((u32)idx == PERF_REG_ARM64_SP)
+                return regs->sp;
+
+        if ((u32)idx == PERF_REG_ARM64_PC)
+                return regs->pc;
+
         return regs->regs[idx];
 }
 
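
PERF_REG_ARM64_SP and PERF_REG_ARM64_PC index past the end of regs->regs[], so the old fall-through only worked because sp and pc happen to follow the array in struct pt_regs; the new checks read the named fields explicitly. For reference, user space requests these registers through the sample_regs_user bitmask. A hedged setup sketch, assuming an arm64 host with the uapi headers installed (not a complete profiler):

#include <linux/perf_event.h>
#include <asm/perf_regs.h>
#include <string.h>

/* Ask for user-mode SP and PC with every sample. */
static void init_attr(struct perf_event_attr *attr)
{
        memset(attr, 0, sizeof(*attr));
        attr->size = sizeof(*attr);
        attr->type = PERF_TYPE_HARDWARE;
        attr->config = PERF_COUNT_HW_CPU_CYCLES;
        attr->sample_type = PERF_SAMPLE_REGS_USER;
        attr->sample_regs_user = (1ULL << PERF_REG_ARM64_SP) |
                                 (1ULL << PERF_REG_ARM64_PC);
}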
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 1309d64aa926..29d48690f2ac 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -230,9 +230,27 @@ void exit_thread(void)
 {
 }
 
+static void tls_thread_flush(void)
+{
+        asm ("msr tpidr_el0, xzr");
+
+        if (is_compat_task()) {
+                current->thread.tp_value = 0;
+
+                /*
+                 * We need to ensure ordering between the shadow state and the
+                 * hardware state, so that we don't corrupt the hardware state
+                 * with a stale shadow state during context switch.
+                 */
+                barrier();
+                asm ("msr tpidrro_el0, xzr");
+        }
+}
+
 void flush_thread(void)
 {
         fpsimd_flush_thread();
+        tls_thread_flush();
         flush_ptrace_hw_breakpoint(current);
 }
 
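
The barrier() in tls_thread_flush() orders the shadow-state store before the hardware write. If the compiler reordered them, a context switch landing between the two could restore the stale shadow value back into tpidrro_el0 after exec, since the compat switch path reloads the register from thread.tp_value. A single-CPU user-space model of that window (purely illustrative; it simulates the reordered, unprotected sequence):

#include <stdio.h>

static unsigned long shadow_tp;         /* models current->thread.tp_value */
static unsigned long hw_tpidrro;        /* models tpidrro_el0 */

static void switch_in(void)
{
        hw_tpidrro = shadow_tp;         /* restore hardware from shadow */
}

static void tls_flush_reordered(void)
{
        hw_tpidrro = 0;         /* compiler hoisted the register write... */
        switch_in();            /* ...and preemption hits in between */
        shadow_tp = 0;          /* too late: hardware is stale again */
}

int main(void)
{
        shadow_tp = hw_tpidrro = 0xdead;        /* old image's TLS pointer */
        tls_flush_reordered();
        printf("tpidrro_el0 after flush: %#lx\n", hw_tpidrro); /* 0xdead */
        return 0;
}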
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 70526cfda056..fe63ac5e9bf5 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -87,7 +87,8 @@ static void ptrace_hbptriggered(struct perf_event *bp,
                         break;
                 }
         }
-        for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
+
+        for (i = 0; i < ARM_MAX_WRP; ++i) {
                 if (current->thread.debug.hbp_watch[i] == bp) {
                         info.si_errno = -((i << 1) + 1);
                         break;
@@ -662,8 +663,10 @@ static int compat_gpr_get(struct task_struct *target,
                         kbuf += sizeof(reg);
                 } else {
                         ret = copy_to_user(ubuf, &reg, sizeof(reg));
-                        if (ret)
+                        if (ret) {
+                                ret = -EFAULT;
                                 break;
+                        }
 
                         ubuf += sizeof(reg);
                 }
@@ -701,8 +704,10 @@ static int compat_gpr_set(struct task_struct *target,
                         kbuf += sizeof(reg);
                 } else {
                         ret = copy_from_user(&reg, ubuf, sizeof(reg));
-                        if (ret)
-                                return ret;
+                        if (ret) {
+                                ret = -EFAULT;
+                                break;
+                        }
 
                         ubuf += sizeof(reg);
                 }
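
Both ptrace.c hunks above fix the same bug class: copy_to_user()/copy_from_user() return the number of bytes left uncopied, not an errno, so propagating the raw return value hands a positive byte count to the caller. The standard kernel idiom is to map any non-zero remainder to -EFAULT:

/* Non-zero means "bytes not copied", never an error code,
 * so convert it explicitly before returning. */
if (copy_to_user(ubuf, &reg, sizeof(reg)))
        return -EFAULT;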
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f6f0ccf35ae6..edb146d01857 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -78,6 +78,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 static const char *cpu_name;
+static const char *machine_name;
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -309,6 +310,8 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
                 while (true)
                         cpu_relax();
         }
+
+        machine_name = of_flat_dt_get_machine_name();
 }
 
 /*
@@ -447,21 +450,10 @@ static int c_show(struct seq_file *m, void *v)
 {
         int i;
 
-        /*
-         * Dump out the common processor features in a single line. Userspace
-         * should read the hwcaps with getauxval(AT_HWCAP) rather than
-         * attempting to parse this.
-         */
-        seq_puts(m, "features\t:");
-        for (i = 0; hwcap_str[i]; i++)
-                if (elf_hwcap & (1 << i))
-                        seq_printf(m, " %s", hwcap_str[i]);
-        seq_puts(m, "\n\n");
+        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
+                   cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
 
         for_each_online_cpu(i) {
-                struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
-                u32 midr = cpuinfo->reg_midr;
-
                 /*
                  * glibc reads /proc/cpuinfo to determine the number of
                  * online processors, looking for lines beginning with
@@ -470,13 +462,25 @@ static int c_show(struct seq_file *m, void *v)
 #ifdef CONFIG_SMP
                 seq_printf(m, "processor\t: %d\n", i);
 #endif
-                seq_printf(m, "implementer\t: 0x%02x\n",
-                           MIDR_IMPLEMENTOR(midr));
-                seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr));
-                seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr));
-                seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr));
         }
 
+        /* dump out the processor features */
+        seq_puts(m, "Features\t: ");
+
+        for (i = 0; hwcap_str[i]; i++)
+                if (elf_hwcap & (1 << i))
+                        seq_printf(m, "%s ", hwcap_str[i]);
+
+        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
+        seq_printf(m, "CPU architecture: AArch64\n");
+        seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
+        seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
+        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
+
+        seq_puts(m, "\n");
+
+        seq_printf(m, "Hardware\t: %s\n", machine_name);
+
         return 0;
 }
 
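
The setup.c rework restores the traditional /proc/cpuinfo shape that existing parsers expect: a "Processor" header, one "processor : N" line per online CPU (which glibc counts), a single Features/MIDR block decoded from read_cpuid_id(), and a "Hardware" line taken from the flattened device tree. Illustrative output for a hypothetical dual-core Cortex-A53 system (all values are examples, not taken from the patch):

Processor       : AArch64 Processor rev 0 (aarch64)
processor       : 0
processor       : 1
Features        : fp asimd evtstrm 
CPU implementer : 0x41
CPU architecture: AArch64
CPU variant     : 0x0
CPU part        : 0xd03
CPU revision    : 0

Hardware        : <device-tree model string>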
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index de2b0226e06d..dc47e53e9e28 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
 
         case __ARM_NR_compat_set_tls:
                 current->thread.tp_value = regs->regs[0];
+
+                /*
+                 * Protect against register corruption from context switch.
+                 * See comment in tls_thread_flush.
+                 */
+                barrier();
                 asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
                 return 0;
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index e28be510380c..34b8bd0711e9 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -66,6 +66,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
         else
                 kvm_vcpu_block(vcpu);
 
+        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
         return 1;
 }
 
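
A trapped WFI/WFE leaves the guest PC pointing at the trapping instruction, so without the added kvm_skip_instr() the vcpu would re-execute the same WFI on every re-entry and never make progress. The skip width comes from the ESR IL bit: 4 bytes for a 32-bit encoding, 2 bytes for a 16-bit AArch32 Thumb one. A minimal model of the skip semantics (a sketch, not KVM's implementation):

#include <stdbool.h>
#include <stdio.h>

/* Advance a guest PC past the trapped instruction. */
static void skip_instr(unsigned long *pc, bool il_is32bit)
{
        *pc += il_is32bit ? 4 : 2;
}

int main(void)
{
        unsigned long pc = 0x80000040;  /* guest PC at the WFI */

        skip_instr(&pc, true);
        printf("resume at %#lx\n", pc); /* 0x80000044 */
        return 0;
}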
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index d968796f4b2d..c3191168a994 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -80,6 +80,10 @@ __do_hyp_init:
         msr     mair_el2, x4
         isb
 
+        /* Invalidate the stale TLBs from Bootloader */
+        tlbi    alle2
+        dsb     sy
+
         mrs     x4, sctlr_el2
         and     x4, x4, #SCTLR_EL2_EE   // preserve endianness of EL2
         ldr     x5, =SCTLR_EL2_FLAGS