path: root/arch/arm/kernel
author    Linus Torvalds <torvalds@linux-foundation.org>    2015-04-15 00:03:26 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-04-15 00:03:26 -0400
commit    bb0fd7ab0986105765d11baa82e619c618a235aa (patch)
tree      6a0585ece827e1025aa48819959d02155a871be9 /arch/arm/kernel
parent    bdfa54dfd9eea001274dbcd622657a904fe43b81 (diff)
parent    4b2f8838479eb2abe042e094f7d2cced6d5ea772 (diff)
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
 "Included in this update are both some long term fixes and some new
  features.

  Fixes:

   - An integer overflow in the calculation of ELF_ET_DYN_BASE.

   - Avoiding OOMs for high-order IOMMU allocations.

   - SMP requires the data cache to be enabled for synchronisation
     primitives to work, so prevent the CPU_DCACHE_DISABLE option being
     visible on SMP builds.

   - A bug going back 10+ years in the noMMU ARM94* CPU support code,
     where it corrupts registers.  Found by folk getting Linux running
     on their cameras.

   - Versatile Express needs an errata workaround enabled for CPU
     hot-unplug to work.

  Features:

   - Clean up module linker by handling out of range relocations
     separately from relocation cases we don't handle.

   - Fix a long term bug in the pci_mmap_page_range() code, which we
     hope won't impact userspace (we hope there's no users of the
     existing broken interface.)

   - Don't map DMA coherent allocations when we don't have a MMU.

   - Drop experimental status for SMP_ON_UP.

   - Warn when DT doesn't specify ePAPR mandatory cache properties.

   - Add documentation concerning how we find the start of physical
     memory for AUTO_ZRELADDR kernels, detailing why we have chosen the
     mask and the implications of changing it.

   - Updates from Ard Biesheuvel to address some issues with large
     kernels (such as allyesconfig) failing to link.

   - Allow hibernation to work on modern (ARMv7) CPUs - this appears to
     have never worked in the past on these CPUs.

   - Enable IRQ_SHOW_LEVEL, which changes the /proc/interrupts output
     format (hopefully without userspace breaking... let's hope that if
     it causes someone a problem, they tell us.)

   - Fix tegra-ahb DT offsets.

   - Rework ARM errata 643719 code (and ARMv7 flush_cache_louis()/
     flush_dcache_all()) code to be more efficient, and enable this
     errata workaround by default for ARMv7+SMP CPUs.  This complements
     the Versatile Express fix above.

   - Rework ARMv7 context code for errata 430973, so that only Cortex A8
     CPUs are impacted by the branch target buffer flush when this
     errata is enabled.  Also update the help text to indicate that all
     r1p* A8 CPUs are impacted.

   - Switch ARM to the generic show_mem() implementation, it conveys all
     the information which we were already reporting.

   - Prevent slow timer sources being used for udelay() - timers running
     at less than 1MHz are not useful for this, and can cause udelay()
     to return immediately, without any wait.  Using such a slow timer
     is silly.

   - VDSO support for 32-bit ARM, mainly for gettimeofday() using the
     ARM architected timer.

   - Perf support for Scorpion performance monitoring units"

vdso semantic conflict fixed up as per linux-next.

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (52 commits)
  ARM: update errata 430973 documentation to cover Cortex A8 r1p*
  ARM: ensure delay timer has sufficient accuracy for delays
  ARM: switch to use the generic show_mem() implementation
  ARM: proc-v7: avoid errata 430973 workaround for non-Cortex A8 CPUs
  ARM: enable ARM errata 643719 workaround by default
  ARM: cache-v7: optimise test for Cortex A9 r0pX devices
  ARM: cache-v7: optimise branches in v7_flush_cache_louis
  ARM: cache-v7: consolidate initialisation of cache level index
  ARM: cache-v7: shift CLIDR to extract appropriate field before masking
  ARM: cache-v7: use movw/movt instructions
  ARM: allow 16-bit instructions in ALT_UP()
  ARM: proc-arm94*.S: fix setup function
  ARM: vexpress: fix CPU hotplug with CT9x4 tile.
  ARM: 8276/1: Make CPU_DCACHE_DISABLE depend on !SMP
  ARM: 8335/1: Documentation: DT bindings: Tegra AHB: document the legacy base address
  ARM: 8334/1: amba: tegra-ahb: detect and correct bogus base address
  ARM: 8333/1: amba: tegra-ahb: fix register offsets in the macros
  ARM: 8339/1: Enable CONFIG_GENERIC_IRQ_SHOW_LEVEL
  ARM: 8338/1: kexec: Relax SMP validation to improve DT compatibility
  ARM: 8337/1: mm: Do not invoke OOM for higher order IOMMU DMA allocations
  ...
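Illustration of the VDSO feature mentioned above: with CONFIG_VDSO enabled and a usable ARM architected timer, a libc that knows about the ARM vdso can service gettimeofday()/clock_gettime() without a syscall. The snippet below is ordinary userspace POSIX C, a rough sketch only; nothing in it is specific to this series beyond the behaviour it enables, and whether the fast path is taken depends on the libc and hardware.

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec start, now;
	long calls = 0;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		/* On a vdso-aware libc this resolves via __vdso_clock_gettime
		 * rather than a syscall, so the loop measures the fast path. */
		clock_gettime(CLOCK_MONOTONIC, &now);
		calls++;
	} while (now.tv_sec - start.tv_sec < 1);

	printf("%ld clock_gettime() calls in ~1s\n", calls);
	return 0;
}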
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile            |    5
-rw-r--r--  arch/arm/kernel/asm-offsets.c       |    5
-rw-r--r--  arch/arm/kernel/bios32.c            |   10
-rw-r--r--  arch/arm/kernel/entry-armv.S        |    2
-rw-r--r--  arch/arm/kernel/head.S              |   14
-rw-r--r--  arch/arm/kernel/hibernate.c         |    6
-rw-r--r--  arch/arm/kernel/machine_kexec.c     |    3
-rw-r--r--  arch/arm/kernel/module.c            |   38
-rw-r--r--  arch/arm/kernel/perf_event.c        |   21
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c    |   71
-rw-r--r--  arch/arm/kernel/perf_event_v7.c     |  525
-rw-r--r--  arch/arm/kernel/process.c           |  159
-rw-r--r--  arch/arm/kernel/psci-call.S         |   31
-rw-r--r--  arch/arm/kernel/psci.c              |   39
-rw-r--r--  arch/arm/kernel/reboot.c            |  155
-rw-r--r--  arch/arm/kernel/reboot.h            |    7
-rw-r--r--  arch/arm/kernel/return_address.c    |    4
-rw-r--r--  arch/arm/kernel/setup.c             |   44
-rw-r--r--  arch/arm/kernel/sleep.S             |   15
-rw-r--r--  arch/arm/kernel/smp.c               |    5
-rw-r--r--  arch/arm/kernel/swp_emulate.c       |    2
-rw-r--r--  arch/arm/kernel/vdso.c              |  337
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S       |    7
23 files changed, 1185 insertions, 320 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 902397dd1000..ba5f83226011 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -16,7 +16,7 @@ CFLAGS_REMOVE_return_address.o = -pg
16# Object file lists. 16# Object file lists.
17 17
18obj-y := elf.o entry-common.o irq.o opcodes.o \ 18obj-y := elf.o entry-common.o irq.o opcodes.o \
19 process.o ptrace.o return_address.o \ 19 process.o ptrace.o reboot.o return_address.o \
20 setup.o signal.o sigreturn_codes.o \ 20 setup.o signal.o sigreturn_codes.o \
21 stacktrace.o sys_arm.o time.o traps.o 21 stacktrace.o sys_arm.o time.o traps.o
22 22
@@ -75,6 +75,7 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o
75CFLAGS_pj4-cp0.o := -marm 75CFLAGS_pj4-cp0.o := -marm
76AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt 76AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
77obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o 77obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
78obj-$(CONFIG_VDSO) += vdso.o
78 79
79ifneq ($(CONFIG_ARCH_EBSA110),y) 80ifneq ($(CONFIG_ARCH_EBSA110),y)
80 obj-y += io.o 81 obj-y += io.o
@@ -86,7 +87,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
86 87
87obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o 88obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
88ifeq ($(CONFIG_ARM_PSCI),y) 89ifeq ($(CONFIG_ARM_PSCI),y)
89obj-y += psci.o 90obj-y += psci.o psci-call.o
90obj-$(CONFIG_SMP) += psci_smp.o 91obj-$(CONFIG_SMP) += psci_smp.o
91endif 92endif
92 93
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 488eaac56028..61bb5a65eb37 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -25,6 +25,7 @@
25#include <asm/memory.h> 25#include <asm/memory.h>
26#include <asm/procinfo.h> 26#include <asm/procinfo.h>
27#include <asm/suspend.h> 27#include <asm/suspend.h>
28#include <asm/vdso_datapage.h>
28#include <asm/hardware/cache-l2x0.h> 29#include <asm/hardware/cache-l2x0.h>
29#include <linux/kbuild.h> 30#include <linux/kbuild.h>
30 31
@@ -206,5 +207,9 @@ int main(void)
206 DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); 207 DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
207 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); 208 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
208#endif 209#endif
210 BLANK();
211#ifdef CONFIG_VDSO
212 DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store));
213#endif
209 return 0; 214 return 0;
210} 215}
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index ab19b7c03423..fcbbbb1b9e95 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -618,21 +618,15 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
618int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 618int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
619 enum pci_mmap_state mmap_state, int write_combine) 619 enum pci_mmap_state mmap_state, int write_combine)
620{ 620{
621 struct pci_sys_data *root = dev->sysdata; 621 if (mmap_state == pci_mmap_io)
622 unsigned long phys;
623
624 if (mmap_state == pci_mmap_io) {
625 return -EINVAL; 622 return -EINVAL;
626 } else {
627 phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);
628 }
629 623
630 /* 624 /*
631 * Mark this as IO 625 * Mark this as IO
632 */ 626 */
633 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 627 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
634 628
635 if (remap_pfn_range(vma, vma->vm_start, phys, 629 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
636 vma->vm_end - vma->vm_start, 630 vma->vm_end - vma->vm_start,
637 vma->vm_page_prot)) 631 vma->vm_page_prot))
638 return -EAGAIN; 632 return -EAGAIN;
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 672b21942fff..570306c49406 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -545,7 +545,7 @@ ENDPROC(__und_usr)
545/* 545/*
546 * The out of line fixup for the ldrt instructions above. 546 * The out of line fixup for the ldrt instructions above.
547 */ 547 */
548 .pushsection .fixup, "ax" 548 .pushsection .text.fixup, "ax"
549 .align 2 549 .align 2
5504: str r4, [sp, #S_PC] @ retry current instruction 5504: str r4, [sp, #S_PC] @ retry current instruction
551 ret r9 551 ret r9
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 01963273c07a..3637973a9708 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -138,9 +138,9 @@ ENTRY(stext)
138 @ mmu has been enabled 138 @ mmu has been enabled
139 adr lr, BSYM(1f) @ return (PIC) address 139 adr lr, BSYM(1f) @ return (PIC) address
140 mov r8, r4 @ set TTBR1 to swapper_pg_dir 140 mov r8, r4 @ set TTBR1 to swapper_pg_dir
141 ARM( add pc, r10, #PROCINFO_INITFUNC ) 141 ldr r12, [r10, #PROCINFO_INITFUNC]
142 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 142 add r12, r12, r10
143 THUMB( ret r12 ) 143 ret r12
1441: b __enable_mmu 1441: b __enable_mmu
145ENDPROC(stext) 145ENDPROC(stext)
146 .ltorg 146 .ltorg
@@ -386,10 +386,10 @@ ENTRY(secondary_startup)
386 ldr r8, [r7, lr] @ get secondary_data.swapper_pg_dir 386 ldr r8, [r7, lr] @ get secondary_data.swapper_pg_dir
387 adr lr, BSYM(__enable_mmu) @ return address 387 adr lr, BSYM(__enable_mmu) @ return address
388 mov r13, r12 @ __secondary_switched address 388 mov r13, r12 @ __secondary_switched address
389 ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor 389 ldr r12, [r10, #PROCINFO_INITFUNC]
390 @ (return control reg) 390 add r12, r12, r10 @ initialise processor
391 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 391 @ (return control reg)
392 THUMB( ret r12 ) 392 ret r12
393ENDPROC(secondary_startup) 393ENDPROC(secondary_startup)
394ENDPROC(secondary_startup_arm) 394ENDPROC(secondary_startup_arm)
395 395
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
index c4cc50e58c13..a71501ff6f18 100644
--- a/arch/arm/kernel/hibernate.c
+++ b/arch/arm/kernel/hibernate.c
@@ -22,6 +22,7 @@
22#include <asm/suspend.h> 22#include <asm/suspend.h>
23#include <asm/memory.h> 23#include <asm/memory.h>
24#include <asm/sections.h> 24#include <asm/sections.h>
25#include "reboot.h"
25 26
26int pfn_is_nosave(unsigned long pfn) 27int pfn_is_nosave(unsigned long pfn)
27{ 28{
@@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
61 62
62 ret = swsusp_save(); 63 ret = swsusp_save();
63 if (ret == 0) 64 if (ret == 0)
64 soft_restart(virt_to_phys(cpu_resume)); 65 _soft_restart(virt_to_phys(cpu_resume), false);
65 return ret; 66 return ret;
66} 67}
67 68
@@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
86 for (pbe = restore_pblist; pbe; pbe = pbe->next) 87 for (pbe = restore_pblist; pbe; pbe = pbe->next)
87 copy_page(pbe->orig_address, pbe->address); 88 copy_page(pbe->orig_address, pbe->address);
88 89
89 soft_restart(virt_to_phys(cpu_resume)); 90 _soft_restart(virt_to_phys(cpu_resume), false);
90} 91}
91 92
92static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata; 93static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
@@ -99,7 +100,6 @@ static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
99 */ 100 */
100int swsusp_arch_resume(void) 101int swsusp_arch_resume(void)
101{ 102{
102 extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
103 call_with_stack(arch_restore_image, 0, 103 call_with_stack(arch_restore_image, 0,
104 resume_stack + ARRAY_SIZE(resume_stack)); 104 resume_stack + ARRAY_SIZE(resume_stack));
105 return 0; 105 return 0;
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index de2b085ad753..8bf3b7c09888 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -46,7 +46,8 @@ int machine_kexec_prepare(struct kimage *image)
46 * and implements CPU hotplug for the current HW. If not, we won't be 46 * and implements CPU hotplug for the current HW. If not, we won't be
47 * able to kexec reliably, so fail the prepare operation. 47 * able to kexec reliably, so fail the prepare operation.
48 */ 48 */
49 if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug()) 49 if (num_possible_cpus() > 1 && platform_can_secondary_boot() &&
50 !platform_can_cpu_hotplug())
50 return -EINVAL; 51 return -EINVAL;
51 52
52 /* 53 /*
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 2e11961f65ae..af791f4a6205 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -98,14 +98,19 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
98 case R_ARM_PC24: 98 case R_ARM_PC24:
99 case R_ARM_CALL: 99 case R_ARM_CALL:
100 case R_ARM_JUMP24: 100 case R_ARM_JUMP24:
101 if (sym->st_value & 3) {
102 pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (ARM -> Thumb)\n",
103 module->name, relindex, i, symname);
104 return -ENOEXEC;
105 }
106
101 offset = __mem_to_opcode_arm(*(u32 *)loc); 107 offset = __mem_to_opcode_arm(*(u32 *)loc);
102 offset = (offset & 0x00ffffff) << 2; 108 offset = (offset & 0x00ffffff) << 2;
103 if (offset & 0x02000000) 109 if (offset & 0x02000000)
104 offset -= 0x04000000; 110 offset -= 0x04000000;
105 111
106 offset += sym->st_value - loc; 112 offset += sym->st_value - loc;
107 if (offset & 3 || 113 if (offset <= (s32)0xfe000000 ||
108 offset <= (s32)0xfe000000 ||
109 offset >= (s32)0x02000000) { 114 offset >= (s32)0x02000000) {
110 pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", 115 pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
111 module->name, relindex, i, symname, 116 module->name, relindex, i, symname,
@@ -155,6 +160,22 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
155#ifdef CONFIG_THUMB2_KERNEL 160#ifdef CONFIG_THUMB2_KERNEL
156 case R_ARM_THM_CALL: 161 case R_ARM_THM_CALL:
157 case R_ARM_THM_JUMP24: 162 case R_ARM_THM_JUMP24:
163 /*
164 * For function symbols, only Thumb addresses are
165 * allowed (no interworking).
166 *
167 * For non-function symbols, the destination
168 * has no specific ARM/Thumb disposition, so
169 * the branch is resolved under the assumption
170 * that interworking is not required.
171 */
172 if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
173 !(sym->st_value & 1)) {
174 pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (Thumb -> ARM)\n",
175 module->name, relindex, i, symname);
176 return -ENOEXEC;
177 }
178
158 upper = __mem_to_opcode_thumb16(*(u16 *)loc); 179 upper = __mem_to_opcode_thumb16(*(u16 *)loc);
159 lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2)); 180 lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
160 181
@@ -182,18 +203,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
182 offset -= 0x02000000; 203 offset -= 0x02000000;
183 offset += sym->st_value - loc; 204 offset += sym->st_value - loc;
184 205
185 /* 206 if (offset <= (s32)0xff000000 ||
186 * For function symbols, only Thumb addresses are
187 * allowed (no interworking).
188 *
189 * For non-function symbols, the destination
190 * has no specific ARM/Thumb disposition, so
191 * the branch is resolved under the assumption
192 * that interworking is not required.
193 */
194 if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
195 !(offset & 1)) ||
196 offset <= (s32)0xff000000 ||
197 offset >= (s32)0x01000000) { 207 offset >= (s32)0x01000000) {
198 pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", 208 pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
199 module->name, relindex, i, symname, 209 module->name, relindex, i, symname,
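The module.c hunks above split the interworking test (sym->st_value & 3, or the STT_FUNC Thumb check) out of the range comparison, so an unsupported ARM/Thumb interworking call now gets its own diagnostic instead of a misleading "out of range" error, and the remaining check only rejects branches beyond the instruction's reach. For reference, the standalone sketch below mirrors the 24-bit, word-aligned branch-offset arithmetic the hunk relies on; the helper names are mine, not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Decode the signed byte offset of an ARM B/BL opcode, the same way
 * apply_relocate() does above. */
static int32_t arm_branch_offset(uint32_t opcode)
{
	int32_t offset = (opcode & 0x00ffffff) << 2;	/* imm24, in bytes */

	if (offset & 0x02000000)			/* sign-extend from bit 25 */
		offset -= 0x04000000;
	return offset;
}

static bool arm_branch_in_range(int32_t offset)
{
	/* B/BL reach +/-32MB: the same bounds the patch checks against. */
	return offset > (int32_t)0xfe000000 && offset < (int32_t)0x02000000;
}

int main(void)
{
	uint32_t bl_self = 0xebfffffe;	/* "bl ." encodes an offset of -8 */
	int32_t offset = arm_branch_offset(bl_self);

	printf("offset=%d in_range=%d\n", offset, arm_branch_in_range(offset));
	return 0;
}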
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 557e128e4df0..4a86a0133ac3 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -259,20 +259,29 @@ out:
259} 259}
260 260
261static int 261static int
262validate_event(struct pmu_hw_events *hw_events, 262validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
263 struct perf_event *event) 263 struct perf_event *event)
264{ 264{
265 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 265 struct arm_pmu *armpmu;
266 266
267 if (is_software_event(event)) 267 if (is_software_event(event))
268 return 1; 268 return 1;
269 269
270 /*
271 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
272 * core perf code won't check that the pmu->ctx == leader->ctx
273 * until after pmu->event_init(event).
274 */
275 if (event->pmu != pmu)
276 return 0;
277
270 if (event->state < PERF_EVENT_STATE_OFF) 278 if (event->state < PERF_EVENT_STATE_OFF)
271 return 1; 279 return 1;
272 280
273 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) 281 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
274 return 1; 282 return 1;
275 283
284 armpmu = to_arm_pmu(event->pmu);
276 return armpmu->get_event_idx(hw_events, event) >= 0; 285 return armpmu->get_event_idx(hw_events, event) >= 0;
277} 286}
278 287
@@ -288,15 +297,15 @@ validate_group(struct perf_event *event)
288 */ 297 */
289 memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask)); 298 memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
290 299
291 if (!validate_event(&fake_pmu, leader)) 300 if (!validate_event(event->pmu, &fake_pmu, leader))
292 return -EINVAL; 301 return -EINVAL;
293 302
294 list_for_each_entry(sibling, &leader->sibling_list, group_entry) { 303 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
295 if (!validate_event(&fake_pmu, sibling)) 304 if (!validate_event(event->pmu, &fake_pmu, sibling))
296 return -EINVAL; 305 return -EINVAL;
297 } 306 }
298 307
299 if (!validate_event(&fake_pmu, event)) 308 if (!validate_event(event->pmu, &fake_pmu, event))
300 return -EINVAL; 309 return -EINVAL;
301 310
302 return 0; 311 return 0;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 61b53c46edfa..91c7ba182dcd 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -92,11 +92,16 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
92 free_percpu_irq(irq, &hw_events->percpu_pmu); 92 free_percpu_irq(irq, &hw_events->percpu_pmu);
93 } else { 93 } else {
94 for (i = 0; i < irqs; ++i) { 94 for (i = 0; i < irqs; ++i) {
95 if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs)) 95 int cpu = i;
96
97 if (cpu_pmu->irq_affinity)
98 cpu = cpu_pmu->irq_affinity[i];
99
100 if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
96 continue; 101 continue;
97 irq = platform_get_irq(pmu_device, i); 102 irq = platform_get_irq(pmu_device, i);
98 if (irq >= 0) 103 if (irq >= 0)
99 free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, i)); 104 free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
100 } 105 }
101 } 106 }
102} 107}
@@ -128,32 +133,37 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
128 on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1); 133 on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
129 } else { 134 } else {
130 for (i = 0; i < irqs; ++i) { 135 for (i = 0; i < irqs; ++i) {
136 int cpu = i;
137
131 err = 0; 138 err = 0;
132 irq = platform_get_irq(pmu_device, i); 139 irq = platform_get_irq(pmu_device, i);
133 if (irq < 0) 140 if (irq < 0)
134 continue; 141 continue;
135 142
143 if (cpu_pmu->irq_affinity)
144 cpu = cpu_pmu->irq_affinity[i];
145
136 /* 146 /*
137 * If we have a single PMU interrupt that we can't shift, 147 * If we have a single PMU interrupt that we can't shift,
138 * assume that we're running on a uniprocessor machine and 148 * assume that we're running on a uniprocessor machine and
139 * continue. Otherwise, continue without this interrupt. 149 * continue. Otherwise, continue without this interrupt.
140 */ 150 */
141 if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { 151 if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
142 pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", 152 pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
143 irq, i); 153 irq, cpu);
144 continue; 154 continue;
145 } 155 }
146 156
147 err = request_irq(irq, handler, 157 err = request_irq(irq, handler,
148 IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", 158 IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
149 per_cpu_ptr(&hw_events->percpu_pmu, i)); 159 per_cpu_ptr(&hw_events->percpu_pmu, cpu));
150 if (err) { 160 if (err) {
151 pr_err("unable to request IRQ%d for ARM PMU counters\n", 161 pr_err("unable to request IRQ%d for ARM PMU counters\n",
152 irq); 162 irq);
153 return err; 163 return err;
154 } 164 }
155 165
156 cpumask_set_cpu(i, &cpu_pmu->active_irqs); 166 cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
157 } 167 }
158 } 168 }
159 169
@@ -243,6 +253,8 @@ static const struct of_device_id cpu_pmu_of_device_ids[] = {
243 {.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init}, 253 {.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
244 {.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init}, 254 {.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
245 {.compatible = "qcom,krait-pmu", .data = krait_pmu_init}, 255 {.compatible = "qcom,krait-pmu", .data = krait_pmu_init},
256 {.compatible = "qcom,scorpion-pmu", .data = scorpion_pmu_init},
257 {.compatible = "qcom,scorpion-mp-pmu", .data = scorpion_mp_pmu_init},
246 {}, 258 {},
247}; 259};
248 260
@@ -289,6 +301,48 @@ static int probe_current_pmu(struct arm_pmu *pmu)
289 return ret; 301 return ret;
290} 302}
291 303
304static int of_pmu_irq_cfg(struct platform_device *pdev)
305{
306 int i;
307 int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
308
309 if (!irqs)
310 return -ENOMEM;
311
312 for (i = 0; i < pdev->num_resources; ++i) {
313 struct device_node *dn;
314 int cpu;
315
316 dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
317 i);
318 if (!dn) {
319 pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
320 of_node_full_name(dn), i);
321 break;
322 }
323
324 for_each_possible_cpu(cpu)
325 if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
326 break;
327
328 of_node_put(dn);
329 if (cpu >= nr_cpu_ids) {
330 pr_warn("Failed to find logical CPU for %s\n",
331 dn->name);
332 break;
333 }
334
335 irqs[i] = cpu;
336 }
337
338 if (i == pdev->num_resources)
339 cpu_pmu->irq_affinity = irqs;
340 else
341 kfree(irqs);
342
343 return 0;
344}
345
292static int cpu_pmu_device_probe(struct platform_device *pdev) 346static int cpu_pmu_device_probe(struct platform_device *pdev)
293{ 347{
294 const struct of_device_id *of_id; 348 const struct of_device_id *of_id;
@@ -313,7 +367,10 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
313 367
314 if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { 368 if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
315 init_fn = of_id->data; 369 init_fn = of_id->data;
316 ret = init_fn(pmu); 370
371 ret = of_pmu_irq_cfg(pdev);
372 if (!ret)
373 ret = init_fn(pmu);
317 } else { 374 } else {
318 ret = probe_current_pmu(pmu); 375 ret = probe_current_pmu(pmu);
319 } 376 }
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8993770c47de..f4207a4dcb01 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -140,6 +140,23 @@ enum krait_perf_types {
140 KRAIT_PERFCTR_L1_DTLB_ACCESS = 0x12210, 140 KRAIT_PERFCTR_L1_DTLB_ACCESS = 0x12210,
141}; 141};
142 142
143/* ARMv7 Scorpion specific event types */
144enum scorpion_perf_types {
145 SCORPION_LPM0_GROUP0 = 0x4c,
146 SCORPION_LPM1_GROUP0 = 0x50,
147 SCORPION_LPM2_GROUP0 = 0x54,
148 SCORPION_L2LPM_GROUP0 = 0x58,
149 SCORPION_VLPM_GROUP0 = 0x5c,
150
151 SCORPION_ICACHE_ACCESS = 0x10053,
152 SCORPION_ICACHE_MISS = 0x10052,
153
154 SCORPION_DTLB_ACCESS = 0x12013,
155 SCORPION_DTLB_MISS = 0x12012,
156
157 SCORPION_ITLB_MISS = 0x12021,
158};
159
143/* 160/*
144 * Cortex-A8 HW events mapping 161 * Cortex-A8 HW events mapping
145 * 162 *
@@ -482,6 +499,49 @@ static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
482}; 499};
483 500
484/* 501/*
502 * Scorpion HW events mapping
503 */
504static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
505 PERF_MAP_ALL_UNSUPPORTED,
506 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
507 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
508 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
509 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
510 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
511};
512
513static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
514 [PERF_COUNT_HW_CACHE_OP_MAX]
515 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
516 PERF_CACHE_MAP_ALL_UNSUPPORTED,
517 /*
518 * The performance counters don't differentiate between read and write
519 * accesses/misses so this isn't strictly correct, but it's the best we
520 * can do. Writes and reads get combined.
521 */
522 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
523 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
524 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
525 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
526 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
527 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
528 /*
529 * Only ITLB misses and DTLB refills are supported. If users want the
530 * DTLB refills misses a raw counter must be used.
531 */
532 [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
533 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
534 [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
535 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
536 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
537 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
538 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
539 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
540 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
541 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
542};
543
544/*
485 * Perf Events' indices 545 * Perf Events' indices
486 */ 546 */
487#define ARMV7_IDX_CYCLE_COUNTER 0 547#define ARMV7_IDX_CYCLE_COUNTER 0
@@ -976,6 +1036,12 @@ static int krait_map_event_no_branch(struct perf_event *event)
976 &krait_perf_cache_map, 0xFFFFF); 1036 &krait_perf_cache_map, 0xFFFFF);
977} 1037}
978 1038
1039static int scorpion_map_event(struct perf_event *event)
1040{
1041 return armpmu_map_event(event, &scorpion_perf_map,
1042 &scorpion_perf_cache_map, 0xFFFFF);
1043}
1044
979static void armv7pmu_init(struct arm_pmu *cpu_pmu) 1045static void armv7pmu_init(struct arm_pmu *cpu_pmu)
980{ 1046{
981 cpu_pmu->handle_irq = armv7pmu_handle_irq; 1047 cpu_pmu->handle_irq = armv7pmu_handle_irq;
@@ -1103,6 +1169,12 @@ static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
1103#define KRAIT_EVENT_MASK (KRAIT_EVENT | VENUM_EVENT) 1169#define KRAIT_EVENT_MASK (KRAIT_EVENT | VENUM_EVENT)
1104#define PMRESRn_EN BIT(31) 1170#define PMRESRn_EN BIT(31)
1105 1171
1172#define EVENT_REGION(event) (((event) >> 12) & 0xf) /* R */
1173#define EVENT_GROUP(event) ((event) & 0xf) /* G */
1174#define EVENT_CODE(event) (((event) >> 4) & 0xff) /* CC */
1175#define EVENT_VENUM(event) (!!(event & VENUM_EVENT)) /* N=2 */
1176#define EVENT_CPU(event) (!!(event & KRAIT_EVENT)) /* N=1 */
1177
1106static u32 krait_read_pmresrn(int n) 1178static u32 krait_read_pmresrn(int n)
1107{ 1179{
1108 u32 val; 1180 u32 val;
@@ -1141,19 +1213,19 @@ static void krait_write_pmresrn(int n, u32 val)
1141 } 1213 }
1142} 1214}
1143 1215
1144static u32 krait_read_vpmresr0(void) 1216static u32 venum_read_pmresr(void)
1145{ 1217{
1146 u32 val; 1218 u32 val;
1147 asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val)); 1219 asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1148 return val; 1220 return val;
1149} 1221}
1150 1222
1151static void krait_write_vpmresr0(u32 val) 1223static void venum_write_pmresr(u32 val)
1152{ 1224{
1153 asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val)); 1225 asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1154} 1226}
1155 1227
1156static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val) 1228static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
1157{ 1229{
1158 u32 venum_new_val; 1230 u32 venum_new_val;
1159 u32 fp_new_val; 1231 u32 fp_new_val;
@@ -1170,7 +1242,7 @@ static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
1170 fmxr(FPEXC, fp_new_val); 1242 fmxr(FPEXC, fp_new_val);
1171} 1243}
1172 1244
1173static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val) 1245static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
1174{ 1246{
1175 BUG_ON(preemptible()); 1247 BUG_ON(preemptible());
1176 /* Restore FPEXC */ 1248 /* Restore FPEXC */
@@ -1193,16 +1265,11 @@ static void krait_evt_setup(int idx, u32 config_base)
1193 u32 val; 1265 u32 val;
1194 u32 mask; 1266 u32 mask;
1195 u32 vval, fval; 1267 u32 vval, fval;
1196 unsigned int region; 1268 unsigned int region = EVENT_REGION(config_base);
1197 unsigned int group; 1269 unsigned int group = EVENT_GROUP(config_base);
1198 unsigned int code; 1270 unsigned int code = EVENT_CODE(config_base);
1199 unsigned int group_shift; 1271 unsigned int group_shift;
1200 bool venum_event; 1272 bool venum_event = EVENT_VENUM(config_base);
1201
1202 venum_event = !!(config_base & VENUM_EVENT);
1203 region = (config_base >> 12) & 0xf;
1204 code = (config_base >> 4) & 0xff;
1205 group = (config_base >> 0) & 0xf;
1206 1273
1207 group_shift = group * 8; 1274 group_shift = group * 8;
1208 mask = 0xff << group_shift; 1275 mask = 0xff << group_shift;
@@ -1217,16 +1284,14 @@ static void krait_evt_setup(int idx, u32 config_base)
1217 val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1); 1284 val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1218 armv7_pmnc_write_evtsel(idx, val); 1285 armv7_pmnc_write_evtsel(idx, val);
1219 1286
1220 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1221
1222 if (venum_event) { 1287 if (venum_event) {
1223 krait_pre_vpmresr0(&vval, &fval); 1288 venum_pre_pmresr(&vval, &fval);
1224 val = krait_read_vpmresr0(); 1289 val = venum_read_pmresr();
1225 val &= ~mask; 1290 val &= ~mask;
1226 val |= code << group_shift; 1291 val |= code << group_shift;
1227 val |= PMRESRn_EN; 1292 val |= PMRESRn_EN;
1228 krait_write_vpmresr0(val); 1293 venum_write_pmresr(val);
1229 krait_post_vpmresr0(vval, fval); 1294 venum_post_pmresr(vval, fval);
1230 } else { 1295 } else {
1231 val = krait_read_pmresrn(region); 1296 val = krait_read_pmresrn(region);
1232 val &= ~mask; 1297 val &= ~mask;
@@ -1236,7 +1301,7 @@ static void krait_evt_setup(int idx, u32 config_base)
1236 } 1301 }
1237} 1302}
1238 1303
1239static u32 krait_clear_pmresrn_group(u32 val, int group) 1304static u32 clear_pmresrn_group(u32 val, int group)
1240{ 1305{
1241 u32 mask; 1306 u32 mask;
1242 int group_shift; 1307 int group_shift;
@@ -1256,23 +1321,19 @@ static void krait_clearpmu(u32 config_base)
1256{ 1321{
1257 u32 val; 1322 u32 val;
1258 u32 vval, fval; 1323 u32 vval, fval;
1259 unsigned int region; 1324 unsigned int region = EVENT_REGION(config_base);
1260 unsigned int group; 1325 unsigned int group = EVENT_GROUP(config_base);
1261 bool venum_event; 1326 bool venum_event = EVENT_VENUM(config_base);
1262
1263 venum_event = !!(config_base & VENUM_EVENT);
1264 region = (config_base >> 12) & 0xf;
1265 group = (config_base >> 0) & 0xf;
1266 1327
1267 if (venum_event) { 1328 if (venum_event) {
1268 krait_pre_vpmresr0(&vval, &fval); 1329 venum_pre_pmresr(&vval, &fval);
1269 val = krait_read_vpmresr0(); 1330 val = venum_read_pmresr();
1270 val = krait_clear_pmresrn_group(val, group); 1331 val = clear_pmresrn_group(val, group);
1271 krait_write_vpmresr0(val); 1332 venum_write_pmresr(val);
1272 krait_post_vpmresr0(vval, fval); 1333 venum_post_pmresr(vval, fval);
1273 } else { 1334 } else {
1274 val = krait_read_pmresrn(region); 1335 val = krait_read_pmresrn(region);
1275 val = krait_clear_pmresrn_group(val, group); 1336 val = clear_pmresrn_group(val, group);
1276 krait_write_pmresrn(region, val); 1337 krait_write_pmresrn(region, val);
1277 } 1338 }
1278} 1339}
@@ -1342,6 +1403,8 @@ static void krait_pmu_enable_event(struct perf_event *event)
1342static void krait_pmu_reset(void *info) 1403static void krait_pmu_reset(void *info)
1343{ 1404{
1344 u32 vval, fval; 1405 u32 vval, fval;
1406 struct arm_pmu *cpu_pmu = info;
1407 u32 idx, nb_cnt = cpu_pmu->num_events;
1345 1408
1346 armv7pmu_reset(info); 1409 armv7pmu_reset(info);
1347 1410
@@ -1350,9 +1413,16 @@ static void krait_pmu_reset(void *info)
1350 krait_write_pmresrn(1, 0); 1413 krait_write_pmresrn(1, 0);
1351 krait_write_pmresrn(2, 0); 1414 krait_write_pmresrn(2, 0);
1352 1415
1353 krait_pre_vpmresr0(&vval, &fval); 1416 venum_pre_pmresr(&vval, &fval);
1354 krait_write_vpmresr0(0); 1417 venum_write_pmresr(0);
1355 krait_post_vpmresr0(vval, fval); 1418 venum_post_pmresr(vval, fval);
1419
1420 /* Reset PMxEVNCTCR to sane default */
1421 for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1422 armv7_pmnc_select_counter(idx);
1423 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1424 }
1425
1356} 1426}
1357 1427
1358static int krait_event_to_bit(struct perf_event *event, unsigned int region, 1428static int krait_event_to_bit(struct perf_event *event, unsigned int region,
@@ -1386,26 +1456,18 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1386{ 1456{
1387 int idx; 1457 int idx;
1388 int bit = -1; 1458 int bit = -1;
1389 unsigned int prefix;
1390 unsigned int region;
1391 unsigned int code;
1392 unsigned int group;
1393 bool krait_event;
1394 struct hw_perf_event *hwc = &event->hw; 1459 struct hw_perf_event *hwc = &event->hw;
1460 unsigned int region = EVENT_REGION(hwc->config_base);
1461 unsigned int code = EVENT_CODE(hwc->config_base);
1462 unsigned int group = EVENT_GROUP(hwc->config_base);
1463 bool venum_event = EVENT_VENUM(hwc->config_base);
1464 bool krait_event = EVENT_CPU(hwc->config_base);
1395 1465
1396 region = (hwc->config_base >> 12) & 0xf; 1466 if (venum_event || krait_event) {
1397 code = (hwc->config_base >> 4) & 0xff;
1398 group = (hwc->config_base >> 0) & 0xf;
1399 krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
1400
1401 if (krait_event) {
1402 /* Ignore invalid events */ 1467 /* Ignore invalid events */
1403 if (group > 3 || region > 2) 1468 if (group > 3 || region > 2)
1404 return -EINVAL; 1469 return -EINVAL;
1405 prefix = hwc->config_base & KRAIT_EVENT_MASK; 1470 if (venum_event && (code & 0xe0))
1406 if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
1407 return -EINVAL;
1408 if (prefix == VENUM_EVENT && (code & 0xe0))
1409 return -EINVAL; 1471 return -EINVAL;
1410 1472
1411 bit = krait_event_to_bit(event, region, group); 1473 bit = krait_event_to_bit(event, region, group);
@@ -1425,15 +1487,12 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1425{ 1487{
1426 int bit; 1488 int bit;
1427 struct hw_perf_event *hwc = &event->hw; 1489 struct hw_perf_event *hwc = &event->hw;
1428 unsigned int region; 1490 unsigned int region = EVENT_REGION(hwc->config_base);
1429 unsigned int group; 1491 unsigned int group = EVENT_GROUP(hwc->config_base);
1430 bool krait_event; 1492 bool venum_event = EVENT_VENUM(hwc->config_base);
1493 bool krait_event = EVENT_CPU(hwc->config_base);
1431 1494
1432 region = (hwc->config_base >> 12) & 0xf; 1495 if (venum_event || krait_event) {
1433 group = (hwc->config_base >> 0) & 0xf;
1434 krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
1435
1436 if (krait_event) {
1437 bit = krait_event_to_bit(event, region, group); 1496 bit = krait_event_to_bit(event, region, group);
1438 clear_bit(bit, cpuc->used_mask); 1497 clear_bit(bit, cpuc->used_mask);
1439 } 1498 }
@@ -1458,6 +1517,344 @@ static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1458 cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx; 1517 cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1459 return 0; 1518 return 0;
1460} 1519}
1520
1521/*
1522 * Scorpion Local Performance Monitor Register (LPMn)
1523 *
1524 * 31 30 24 16 8 0
1525 * +--------------------------------+
1526 * LPM0 | EN | CC | CC | CC | CC | N = 1, R = 0
1527 * +--------------------------------+
1528 * LPM1 | EN | CC | CC | CC | CC | N = 1, R = 1
1529 * +--------------------------------+
1530 * LPM2 | EN | CC | CC | CC | CC | N = 1, R = 2
1531 * +--------------------------------+
1532 * L2LPM | EN | CC | CC | CC | CC | N = 1, R = 3
1533 * +--------------------------------+
1534 * VLPM | EN | CC | CC | CC | CC | N = 2, R = ?
1535 * +--------------------------------+
1536 * EN | G=3 | G=2 | G=1 | G=0
1537 *
1538 *
1539 * Event Encoding:
1540 *
1541 * hwc->config_base = 0xNRCCG
1542 *
1543 * N = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1544 * R = region register
1545 * CC = class of events the group G is choosing from
1546 * G = group or particular event
1547 *
1548 * Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1549 *
1550 * A region (R) corresponds to a piece of the CPU (execution unit, instruction
1551 * unit, etc.) while the event code (CC) corresponds to a particular class of
1552 * events (interrupts for example). An event code is broken down into
1553 * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1554 * example).
1555 */
1556
1557static u32 scorpion_read_pmresrn(int n)
1558{
1559 u32 val;
1560
1561 switch (n) {
1562 case 0:
1563 asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
1564 break;
1565 case 1:
1566 asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
1567 break;
1568 case 2:
1569 asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
1570 break;
1571 case 3:
1572 asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
1573 break;
1574 default:
1575 BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1576 }
1577
1578 return val;
1579}
1580
1581static void scorpion_write_pmresrn(int n, u32 val)
1582{
1583 switch (n) {
1584 case 0:
1585 asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
1586 break;
1587 case 1:
1588 asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
1589 break;
1590 case 2:
1591 asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
1592 break;
1593 case 3:
1594 asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
1595 break;
1596 default:
1597 BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1598 }
1599}
1600
1601static u32 scorpion_get_pmresrn_event(unsigned int region)
1602{
1603 static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1604 SCORPION_LPM1_GROUP0,
1605 SCORPION_LPM2_GROUP0,
1606 SCORPION_L2LPM_GROUP0 };
1607 return pmresrn_table[region];
1608}
1609
1610static void scorpion_evt_setup(int idx, u32 config_base)
1611{
1612 u32 val;
1613 u32 mask;
1614 u32 vval, fval;
1615 unsigned int region = EVENT_REGION(config_base);
1616 unsigned int group = EVENT_GROUP(config_base);
1617 unsigned int code = EVENT_CODE(config_base);
1618 unsigned int group_shift;
1619 bool venum_event = EVENT_VENUM(config_base);
1620
1621 group_shift = group * 8;
1622 mask = 0xff << group_shift;
1623
1624 /* Configure evtsel for the region and group */
1625 if (venum_event)
1626 val = SCORPION_VLPM_GROUP0;
1627 else
1628 val = scorpion_get_pmresrn_event(region);
1629 val += group;
1630 /* Mix in mode-exclusion bits */
1631 val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1632 armv7_pmnc_write_evtsel(idx, val);
1633
1634 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1635
1636 if (venum_event) {
1637 venum_pre_pmresr(&vval, &fval);
1638 val = venum_read_pmresr();
1639 val &= ~mask;
1640 val |= code << group_shift;
1641 val |= PMRESRn_EN;
1642 venum_write_pmresr(val);
1643 venum_post_pmresr(vval, fval);
1644 } else {
1645 val = scorpion_read_pmresrn(region);
1646 val &= ~mask;
1647 val |= code << group_shift;
1648 val |= PMRESRn_EN;
1649 scorpion_write_pmresrn(region, val);
1650 }
1651}
1652
1653static void scorpion_clearpmu(u32 config_base)
1654{
1655 u32 val;
1656 u32 vval, fval;
1657 unsigned int region = EVENT_REGION(config_base);
1658 unsigned int group = EVENT_GROUP(config_base);
1659 bool venum_event = EVENT_VENUM(config_base);
1660
1661 if (venum_event) {
1662 venum_pre_pmresr(&vval, &fval);
1663 val = venum_read_pmresr();
1664 val = clear_pmresrn_group(val, group);
1665 venum_write_pmresr(val);
1666 venum_post_pmresr(vval, fval);
1667 } else {
1668 val = scorpion_read_pmresrn(region);
1669 val = clear_pmresrn_group(val, group);
1670 scorpion_write_pmresrn(region, val);
1671 }
1672}
1673
1674static void scorpion_pmu_disable_event(struct perf_event *event)
1675{
1676 unsigned long flags;
1677 struct hw_perf_event *hwc = &event->hw;
1678 int idx = hwc->idx;
1679 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1680 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1681
1682 /* Disable counter and interrupt */
1683 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1684
1685 /* Disable counter */
1686 armv7_pmnc_disable_counter(idx);
1687
1688 /*
1689 * Clear pmresr code (if destined for PMNx counters)
1690 */
1691 if (hwc->config_base & KRAIT_EVENT_MASK)
1692 scorpion_clearpmu(hwc->config_base);
1693
1694 /* Disable interrupt for this counter */
1695 armv7_pmnc_disable_intens(idx);
1696
1697 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1698}
1699
1700static void scorpion_pmu_enable_event(struct perf_event *event)
1701{
1702 unsigned long flags;
1703 struct hw_perf_event *hwc = &event->hw;
1704 int idx = hwc->idx;
1705 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1706 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1707
1708 /*
1709 * Enable counter and interrupt, and set the counter to count
1710 * the event that we're interested in.
1711 */
1712 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1713
1714 /* Disable counter */
1715 armv7_pmnc_disable_counter(idx);
1716
1717 /*
1718 * Set event (if destined for PMNx counters)
1719 * We don't set the event for the cycle counter because we
1720 * don't have the ability to perform event filtering.
1721 */
1722 if (hwc->config_base & KRAIT_EVENT_MASK)
1723 scorpion_evt_setup(idx, hwc->config_base);
1724 else if (idx != ARMV7_IDX_CYCLE_COUNTER)
1725 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1726
1727 /* Enable interrupt for this counter */
1728 armv7_pmnc_enable_intens(idx);
1729
1730 /* Enable counter */
1731 armv7_pmnc_enable_counter(idx);
1732
1733 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1734}
1735
1736static void scorpion_pmu_reset(void *info)
1737{
1738 u32 vval, fval;
1739 struct arm_pmu *cpu_pmu = info;
1740 u32 idx, nb_cnt = cpu_pmu->num_events;
1741
1742 armv7pmu_reset(info);
1743
1744 /* Clear all pmresrs */
1745 scorpion_write_pmresrn(0, 0);
1746 scorpion_write_pmresrn(1, 0);
1747 scorpion_write_pmresrn(2, 0);
1748 scorpion_write_pmresrn(3, 0);
1749
1750 venum_pre_pmresr(&vval, &fval);
1751 venum_write_pmresr(0);
1752 venum_post_pmresr(vval, fval);
1753
1754 /* Reset PMxEVNCTCR to sane default */
1755 for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1756 armv7_pmnc_select_counter(idx);
1757 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1758 }
1759}
1760
1761static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1762 unsigned int group)
1763{
1764 int bit;
1765 struct hw_perf_event *hwc = &event->hw;
1766 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1767
1768 if (hwc->config_base & VENUM_EVENT)
1769 bit = SCORPION_VLPM_GROUP0;
1770 else
1771 bit = scorpion_get_pmresrn_event(region);
1772 bit -= scorpion_get_pmresrn_event(0);
1773 bit += group;
1774 /*
1775 * Lower bits are reserved for use by the counters (see
1776 * armv7pmu_get_event_idx() for more info)
1777 */
1778 bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1779
1780 return bit;
1781}
1782
1783/*
1784 * We check for column exclusion constraints here.
1785 * Two events cant use the same group within a pmresr register.
1786 */
1787static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1788 struct perf_event *event)
1789{
1790 int idx;
1791 int bit = -1;
1792 struct hw_perf_event *hwc = &event->hw;
1793 unsigned int region = EVENT_REGION(hwc->config_base);
1794 unsigned int group = EVENT_GROUP(hwc->config_base);
1795 bool venum_event = EVENT_VENUM(hwc->config_base);
1796 bool scorpion_event = EVENT_CPU(hwc->config_base);
1797
1798 if (venum_event || scorpion_event) {
1799 /* Ignore invalid events */
1800 if (group > 3 || region > 3)
1801 return -EINVAL;
1802
1803 bit = scorpion_event_to_bit(event, region, group);
1804 if (test_and_set_bit(bit, cpuc->used_mask))
1805 return -EAGAIN;
1806 }
1807
1808 idx = armv7pmu_get_event_idx(cpuc, event);
1809 if (idx < 0 && bit >= 0)
1810 clear_bit(bit, cpuc->used_mask);
1811
1812 return idx;
1813}
1814
1815static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1816 struct perf_event *event)
1817{
1818 int bit;
1819 struct hw_perf_event *hwc = &event->hw;
1820 unsigned int region = EVENT_REGION(hwc->config_base);
1821 unsigned int group = EVENT_GROUP(hwc->config_base);
1822 bool venum_event = EVENT_VENUM(hwc->config_base);
1823 bool scorpion_event = EVENT_CPU(hwc->config_base);
1824
1825 if (venum_event || scorpion_event) {
1826 bit = scorpion_event_to_bit(event, region, group);
1827 clear_bit(bit, cpuc->used_mask);
1828 }
1829}
1830
1831static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1832{
1833 armv7pmu_init(cpu_pmu);
1834 cpu_pmu->name = "armv7_scorpion";
1835 cpu_pmu->map_event = scorpion_map_event;
1836 cpu_pmu->num_events = armv7_read_num_pmnc_events();
1837 cpu_pmu->reset = scorpion_pmu_reset;
1838 cpu_pmu->enable = scorpion_pmu_enable_event;
1839 cpu_pmu->disable = scorpion_pmu_disable_event;
1840 cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
1841 cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1842 return 0;
1843}
1844
1845static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1846{
1847 armv7pmu_init(cpu_pmu);
1848 cpu_pmu->name = "armv7_scorpion_mp";
1849 cpu_pmu->map_event = scorpion_map_event;
1850 cpu_pmu->num_events = armv7_read_num_pmnc_events();
1851 cpu_pmu->reset = scorpion_pmu_reset;
1852 cpu_pmu->enable = scorpion_pmu_enable_event;
1853 cpu_pmu->disable = scorpion_pmu_disable_event;
1854 cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
1855 cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1856 return 0;
1857}
1461#else 1858#else
1462static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) 1859static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1463{ 1860{
@@ -1498,4 +1895,14 @@ static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
1498{ 1895{
1499 return -ENODEV; 1896 return -ENODEV;
1500} 1897}
1898
1899static inline int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1900{
1901 return -ENODEV;
1902}
1903
1904static inline int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1905{
1906 return -ENODEV;
1907}
1501#endif /* CONFIG_CPU_V7 */ 1908#endif /* CONFIG_CPU_V7 */
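The Scorpion/Krait PMU code above encodes events as hwc->config_base = 0xNRCCG and decodes them with the EVENT_* macros added in this file. As a worked example of the 0x12021 event cited in the register-layout comment, the standalone snippet below repeats those macros; the KRAIT_EVENT/VENUM_EVENT prefix values are assumed to be (1 << 16) and (2 << 16), matching the N=1/N=2 prefix digit the comment describes, since their definitions are outside the hunk shown.

#include <stdio.h>

/* Assumed prefix bits: N = 1 (CPU) -> bit 16, N = 2 (Venum VFP) -> bit 17,
 * per the 0xNRCCG layout in the comment block above. */
#define KRAIT_EVENT	(1 << 16)
#define VENUM_EVENT	(2 << 16)

#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R  */
#define EVENT_GROUP(event)	((event) & 0xf)			/* G  */
#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
#define EVENT_VENUM(event)	(!!((event) & VENUM_EVENT))	/* N=2 */
#define EVENT_CPU(event)	(!!((event) & KRAIT_EVENT))	/* N=1 */

int main(void)
{
	unsigned int config = 0x12021;	/* SCORPION_ITLB_MISS from the table above */

	printf("cpu=%d venum=%d region=%u code=0x%x group=%u\n",
	       EVENT_CPU(config), EVENT_VENUM(config),
	       EVENT_REGION(config), EVENT_CODE(config),
	       EVENT_GROUP(config));
	/* Prints: cpu=1 venum=0 region=2 code=0x2 group=1, i.e. a Scorpion
	 * CPU event in LPM2, group 1, event code 2, matching the example
	 * given in the comment. */
	return 0;
}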
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index fdfa3a78ec8c..f192a2a41719 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -17,12 +17,9 @@
17#include <linux/stddef.h> 17#include <linux/stddef.h>
18#include <linux/unistd.h> 18#include <linux/unistd.h>
19#include <linux/user.h> 19#include <linux/user.h>
20#include <linux/delay.h>
21#include <linux/reboot.h>
22#include <linux/interrupt.h> 20#include <linux/interrupt.h>
23#include <linux/kallsyms.h> 21#include <linux/kallsyms.h>
24#include <linux/init.h> 22#include <linux/init.h>
25#include <linux/cpu.h>
26#include <linux/elfcore.h> 23#include <linux/elfcore.h>
27#include <linux/pm.h> 24#include <linux/pm.h>
28#include <linux/tick.h> 25#include <linux/tick.h>
@@ -31,16 +28,14 @@
31#include <linux/random.h> 28#include <linux/random.h>
32#include <linux/hw_breakpoint.h> 29#include <linux/hw_breakpoint.h>
33#include <linux/leds.h> 30#include <linux/leds.h>
34#include <linux/reboot.h>
35 31
36#include <asm/cacheflush.h>
37#include <asm/idmap.h>
38#include <asm/processor.h> 32#include <asm/processor.h>
39#include <asm/thread_notify.h> 33#include <asm/thread_notify.h>
40#include <asm/stacktrace.h> 34#include <asm/stacktrace.h>
41#include <asm/system_misc.h> 35#include <asm/system_misc.h>
42#include <asm/mach/time.h> 36#include <asm/mach/time.h>
43#include <asm/tls.h> 37#include <asm/tls.h>
38#include <asm/vdso.h>
44 39
45#ifdef CONFIG_CC_STACKPROTECTOR 40#ifdef CONFIG_CC_STACKPROTECTOR
46#include <linux/stackprotector.h> 41#include <linux/stackprotector.h>
@@ -59,69 +54,6 @@ static const char *isa_modes[] __maybe_unused = {
59 "ARM" , "Thumb" , "Jazelle", "ThumbEE" 54 "ARM" , "Thumb" , "Jazelle", "ThumbEE"
60}; 55};
61 56
62extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
63typedef void (*phys_reset_t)(unsigned long);
64
65/*
66 * A temporary stack to use for CPU reset. This is static so that we
67 * don't clobber it with the identity mapping. When running with this
68 * stack, any references to the current task *will not work* so you
69 * should really do as little as possible before jumping to your reset
70 * code.
71 */
72static u64 soft_restart_stack[16];
73
74static void __soft_restart(void *addr)
75{
76 phys_reset_t phys_reset;
77
78 /* Take out a flat memory mapping. */
79 setup_mm_for_reboot();
80
81 /* Clean and invalidate caches */
82 flush_cache_all();
83
84 /* Turn off caching */
85 cpu_proc_fin();
86
87 /* Push out any further dirty data, and ensure cache is empty */
88 flush_cache_all();
89
90 /* Switch to the identity mapping. */
91 phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
92 phys_reset((unsigned long)addr);
93
94 /* Should never get here. */
95 BUG();
96}
97
98void soft_restart(unsigned long addr)
99{
100 u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
101
102 /* Disable interrupts first */
103 raw_local_irq_disable();
104 local_fiq_disable();
105
106 /* Disable the L2 if we're the last man standing. */
107 if (num_online_cpus() == 1)
108 outer_disable();
109
110 /* Change to the new stack and continue with the reset. */
111 call_with_stack(__soft_restart, (void *)addr, (void *)stack);
112
113 /* Should never get here. */
114 BUG();
115}
116
117/*
118 * Function pointers to optional machine specific functions
119 */
120void (*pm_power_off)(void);
121EXPORT_SYMBOL(pm_power_off);
122
123void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
124
125/* 57/*
126 * This is our default idle handler. 58 * This is our default idle handler.
127 */ 59 */
@@ -166,79 +98,6 @@ void arch_cpu_idle_dead(void)
166} 98}
167#endif 99#endif
168 100
169/*
170 * Called by kexec, immediately prior to machine_kexec().
171 *
172 * This must completely disable all secondary CPUs; simply causing those CPUs
173 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
174 * kexec'd kernel to use any and all RAM as it sees fit, without having to
175 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
176 * functionality embodied in disable_nonboot_cpus() to achieve this.
177 */
178void machine_shutdown(void)
179{
180 disable_nonboot_cpus();
181}
182
183/*
184 * Halting simply requires that the secondary CPUs stop performing any
185 * activity (executing tasks, handling interrupts). smp_send_stop()
186 * achieves this.
187 */
188void machine_halt(void)
189{
190 local_irq_disable();
191 smp_send_stop();
192
193 local_irq_disable();
194 while (1);
195}
196
197/*
198 * Power-off simply requires that the secondary CPUs stop performing any
199 * activity (executing tasks, handling interrupts). smp_send_stop()
200 * achieves this. When the system power is turned off, it will take all CPUs
201 * with it.
202 */
203void machine_power_off(void)
204{
205 local_irq_disable();
206 smp_send_stop();
207
208 if (pm_power_off)
209 pm_power_off();
210}
211
212/*
213 * Restart requires that the secondary CPUs stop performing any activity
214 * while the primary CPU resets the system. Systems with a single CPU can
215 * use soft_restart() as their machine descriptor's .restart hook, since that
216 * will cause the only available CPU to reset. Systems with multiple CPUs must
217 * provide a HW restart implementation, to ensure that all CPUs reset at once.
218 * This is required so that any code running after reset on the primary CPU
219 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
220 * executing pre-reset code, and using RAM that the primary CPU's code wishes
221 * to use. Implementing such co-ordination would be essentially impossible.
222 */
223void machine_restart(char *cmd)
224{
225 local_irq_disable();
226 smp_send_stop();
227
228 if (arm_pm_restart)
229 arm_pm_restart(reboot_mode, cmd);
230 else
231 do_kernel_restart(cmd);
232
233 /* Give a grace period for failure to restart of 1s */
234 mdelay(1000);
235
236 /* Whoops - the platform was unable to reboot. Tell the user! */
237 printk("Reboot failed -- System halted\n");
238 local_irq_disable();
239 while (1);
240}
241
242void __show_regs(struct pt_regs *regs) 101void __show_regs(struct pt_regs *regs)
243{ 102{
244 unsigned long flags; 103 unsigned long flags;
@@ -475,7 +334,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
475} 334}
476 335
477/* If possible, provide a placement hint at a random offset from the 336/* If possible, provide a placement hint at a random offset from the
478 * stack for the signal page. 337 * stack for the sigpage and vdso pages.
479 */ 338 */
480static unsigned long sigpage_addr(const struct mm_struct *mm, 339static unsigned long sigpage_addr(const struct mm_struct *mm,
481 unsigned int npages) 340 unsigned int npages)
@@ -519,6 +378,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
519{ 378{
520 struct mm_struct *mm = current->mm; 379 struct mm_struct *mm = current->mm;
521 struct vm_area_struct *vma; 380 struct vm_area_struct *vma;
381 unsigned long npages;
522 unsigned long addr; 382 unsigned long addr;
523 unsigned long hint; 383 unsigned long hint;
524 int ret = 0; 384 int ret = 0;
@@ -528,9 +388,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
528 if (!signal_page) 388 if (!signal_page)
529 return -ENOMEM; 389 return -ENOMEM;
530 390
391 npages = 1; /* for sigpage */
392 npages += vdso_total_pages;
393
531 down_write(&mm->mmap_sem); 394 down_write(&mm->mmap_sem);
532 hint = sigpage_addr(mm, 1); 395 hint = sigpage_addr(mm, npages);
533 addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0); 396 addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
534 if (IS_ERR_VALUE(addr)) { 397 if (IS_ERR_VALUE(addr)) {
535 ret = addr; 398 ret = addr;
536 goto up_fail; 399 goto up_fail;
@@ -547,6 +410,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
547 410
548 mm->context.sigpage = addr; 411 mm->context.sigpage = addr;
549 412
413 /* Unlike the sigpage, failure to install the vdso is unlikely
414 * to be fatal to the process, so no error check needed
415 * here.
416 */
417 arm_install_vdso(mm, addr + PAGE_SIZE);
418
550 up_fail: 419 up_fail:
551 up_write(&mm->mmap_sem); 420 up_write(&mm->mmap_sem);
552 return ret; 421 return ret;
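For orientation, the page accounting above combines with arm_install_vdso() (added in arch/arm/kernel/vdso.c further down) to give the following layout. This is an illustrative sketch rather than part of the patch, and it assumes vdso_init() succeeded, so vdso_total_pages counts one [vvar] data page plus the VDSO text pages:

/*
 *   addr                   sigpage          1 page
 *   addr + PAGE_SIZE       [vvar] data      1 page (shared with the kernel)
 *   addr + 2 * PAGE_SIZE   [vdso] text      vdso_total_pages - 1 pages
 *
 * which is why get_unmapped_area() is asked for
 * npages << PAGE_SHIFT = (1 + vdso_total_pages) << PAGE_SHIFT bytes.
 */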
diff --git a/arch/arm/kernel/psci-call.S b/arch/arm/kernel/psci-call.S
new file mode 100644
index 000000000000..a78e9e1e206d
--- /dev/null
+++ b/arch/arm/kernel/psci-call.S
@@ -0,0 +1,31 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2015 ARM Limited
12 *
13 * Author: Mark Rutland <mark.rutland@arm.com>
14 */
15
16#include <linux/linkage.h>
17
18#include <asm/opcodes-sec.h>
19#include <asm/opcodes-virt.h>
20
21/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
22ENTRY(__invoke_psci_fn_hvc)
23 __HVC(0)
24 bx lr
25ENDPROC(__invoke_psci_fn_hvc)
26
27/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
28ENTRY(__invoke_psci_fn_smc)
29 __SMC(0)
30 bx lr
31ENDPROC(__invoke_psci_fn_smc)
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
index f73891b6b730..f90fdf4ce7c7 100644
--- a/arch/arm/kernel/psci.c
+++ b/arch/arm/kernel/psci.c
@@ -23,8 +23,6 @@
23 23
24#include <asm/compiler.h> 24#include <asm/compiler.h>
25#include <asm/errno.h> 25#include <asm/errno.h>
26#include <asm/opcodes-sec.h>
27#include <asm/opcodes-virt.h>
28#include <asm/psci.h> 26#include <asm/psci.h>
29#include <asm/system_misc.h> 27#include <asm/system_misc.h>
30 28
@@ -33,6 +31,9 @@ struct psci_operations psci_ops;
33static int (*invoke_psci_fn)(u32, u32, u32, u32); 31static int (*invoke_psci_fn)(u32, u32, u32, u32);
34typedef int (*psci_initcall_t)(const struct device_node *); 32typedef int (*psci_initcall_t)(const struct device_node *);
35 33
34asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
35asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);
36
36enum psci_function { 37enum psci_function {
37 PSCI_FN_CPU_SUSPEND, 38 PSCI_FN_CPU_SUSPEND,
38 PSCI_FN_CPU_ON, 39 PSCI_FN_CPU_ON,
@@ -71,40 +72,6 @@ static u32 psci_power_state_pack(struct psci_power_state state)
71 & PSCI_0_2_POWER_STATE_AFFL_MASK); 72 & PSCI_0_2_POWER_STATE_AFFL_MASK);
72} 73}
73 74
74/*
75 * The following two functions are invoked via the invoke_psci_fn pointer
76 * and will not be inlined, allowing us to piggyback on the AAPCS.
77 */
78static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
79 u32 arg2)
80{
81 asm volatile(
82 __asmeq("%0", "r0")
83 __asmeq("%1", "r1")
84 __asmeq("%2", "r2")
85 __asmeq("%3", "r3")
86 __HVC(0)
87 : "+r" (function_id)
88 : "r" (arg0), "r" (arg1), "r" (arg2));
89
90 return function_id;
91}
92
93static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
94 u32 arg2)
95{
96 asm volatile(
97 __asmeq("%0", "r0")
98 __asmeq("%1", "r1")
99 __asmeq("%2", "r2")
100 __asmeq("%3", "r3")
101 __SMC(0)
102 : "+r" (function_id)
103 : "r" (arg0), "r" (arg1), "r" (arg2));
104
105 return function_id;
106}
107
108static int psci_get_version(void) 75static int psci_get_version(void)
109{ 76{
110 int err; 77 int err;
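The C wrappers removed above relied on noinline plus the AAPCS to land function_id and the arguments in r0-r3; the new naked stubs in psci-call.S lean on exactly the same convention, so a bare SMC/HVC followed by bx lr is enough. As a rough sketch of how they are reached: at probe time invoke_psci_fn is pointed at __invoke_psci_fn_smc or __invoke_psci_fn_hvc according to the DT "method" property, and later calls simply go through the pointer. The probe code and the psci_function_id[] table are elsewhere in psci.c and not shown in this hunk, so the helper below is illustrative only:

static int example_psci_cpu_on(u32 cpuid, u32 entry_point)
{
	/* The u32 arguments arrive in r0-r3 per the AAPCS and the
	 * firmware's result comes back in r0 as the return value. */
	return invoke_psci_fn(psci_function_id[PSCI_FN_CPU_ON],
			      cpuid, entry_point, 0);
}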
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
new file mode 100644
index 000000000000..1a4d232796be
--- /dev/null
+++ b/arch/arm/kernel/reboot.c
@@ -0,0 +1,155 @@
1/*
2 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
3 * Original Copyright (C) 1995 Linus Torvalds
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9#include <linux/cpu.h>
10#include <linux/delay.h>
11#include <linux/reboot.h>
12
13#include <asm/cacheflush.h>
14#include <asm/idmap.h>
15
16#include "reboot.h"
17
18typedef void (*phys_reset_t)(unsigned long);
19
20/*
21 * Function pointers to optional machine specific functions
22 */
23void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
24void (*pm_power_off)(void);
25EXPORT_SYMBOL(pm_power_off);
26
27/*
28 * A temporary stack to use for CPU reset. This is static so that we
29 * don't clobber it with the identity mapping. When running with this
30 * stack, any references to the current task *will not work* so you
31 * should really do as little as possible before jumping to your reset
32 * code.
33 */
34static u64 soft_restart_stack[16];
35
36static void __soft_restart(void *addr)
37{
38 phys_reset_t phys_reset;
39
40 /* Take out a flat memory mapping. */
41 setup_mm_for_reboot();
42
43 /* Clean and invalidate caches */
44 flush_cache_all();
45
46 /* Turn off caching */
47 cpu_proc_fin();
48
49 /* Push out any further dirty data, and ensure cache is empty */
50 flush_cache_all();
51
52 /* Switch to the identity mapping. */
53 phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
54 phys_reset((unsigned long)addr);
55
56 /* Should never get here. */
57 BUG();
58}
59
60void _soft_restart(unsigned long addr, bool disable_l2)
61{
62 u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
63
64 /* Disable interrupts first */
65 raw_local_irq_disable();
66 local_fiq_disable();
67
68 /* Disable the L2 if we're the last man standing. */
69 if (disable_l2)
70 outer_disable();
71
72 /* Change to the new stack and continue with the reset. */
73 call_with_stack(__soft_restart, (void *)addr, (void *)stack);
74
75 /* Should never get here. */
76 BUG();
77}
78
79void soft_restart(unsigned long addr)
80{
81 _soft_restart(addr, num_online_cpus() == 1);
82}
83
84/*
85 * Called by kexec, immediately prior to machine_kexec().
86 *
87 * This must completely disable all secondary CPUs; simply causing those CPUs
88 * to execute e.g. a RAM-based spin loop is not sufficient. This allows the
89 * kexec'd kernel to use any and all RAM as it sees fit, without having to
90 * avoid any code or data used by any SW CPU spin loop. The CPU hotplug
91 * functionality embodied in disable_nonboot_cpus() is used to achieve this.
92 */
93void machine_shutdown(void)
94{
95 disable_nonboot_cpus();
96}
97
98/*
99 * Halting simply requires that the secondary CPUs stop performing any
100 * activity (executing tasks, handling interrupts). smp_send_stop()
101 * achieves this.
102 */
103void machine_halt(void)
104{
105 local_irq_disable();
106 smp_send_stop();
107
108 local_irq_disable();
109 while (1);
110}
111
112/*
113 * Power-off simply requires that the secondary CPUs stop performing any
114 * activity (executing tasks, handling interrupts). smp_send_stop()
115 * achieves this. When the system power is turned off, it will take all CPUs
116 * with it.
117 */
118void machine_power_off(void)
119{
120 local_irq_disable();
121 smp_send_stop();
122
123 if (pm_power_off)
124 pm_power_off();
125}
126
127/*
128 * Restart requires that the secondary CPUs stop performing any activity
129 * while the primary CPU resets the system. Systems with a single CPU can
130 * use soft_restart() as their machine descriptor's .restart hook, since that
131 * will cause the only available CPU to reset. Systems with multiple CPUs must
132 * provide a HW restart implementation, to ensure that all CPUs reset at once.
133 * This is required so that any code running after reset on the primary CPU
134 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
135 * executing pre-reset code, and using RAM that the primary CPU's code wishes
136 * to use. Implementing such co-ordination would be essentially impossible.
137 */
138void machine_restart(char *cmd)
139{
140 local_irq_disable();
141 smp_send_stop();
142
143 if (arm_pm_restart)
144 arm_pm_restart(reboot_mode, cmd);
145 else
146 do_kernel_restart(cmd);
147
148	/* Give a 1 second grace period for the restart to take effect */
149 mdelay(1000);
150
151 /* Whoops - the platform was unable to reboot. Tell the user! */
152 printk("Reboot failed -- System halted\n");
153 local_irq_disable();
154 while (1);
155}
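Tying the comment above machine_restart() to practice: the machine descriptor's .restart hook takes (reboot_mode, cmd) while soft_restart() takes a jump address, so a UP-only platform wraps it. The board name, wrapper and the address 0 below are purely illustrative, not taken from any in-tree platform:

#include <asm/mach/arch.h>	/* DT_MACHINE_START */
#include <asm/system_misc.h>	/* soft_restart()   */

static void example_board_restart(enum reboot_mode mode, const char *cmd)
{
	/* Jump to whatever physical address the boot ROM/loader
	 * re-enters at; 0 is only a placeholder in this sketch. */
	soft_restart(0);
}

DT_MACHINE_START(EXAMPLE, "example-up-board")
	/* ... other machine_desc fields ... */
	.restart	= example_board_restart,
MACHINE_END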
diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h
new file mode 100644
index 000000000000..bf7a0b1f076e
--- /dev/null
+++ b/arch/arm/kernel/reboot.h
@@ -0,0 +1,7 @@
1#ifndef REBOOT_H
2#define REBOOT_H
3
4extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
5extern void _soft_restart(unsigned long addr, bool disable_l2);
6
7#endif
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 24b4a04846eb..36ed35073289 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -56,8 +56,6 @@ void *return_address(unsigned int level)
56 return NULL; 56 return NULL;
57} 57}
58 58
59#else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */ 59#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
60
61#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */
62 60
63EXPORT_SYMBOL_GPL(return_address); 61EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1d60bebea4b8..6c777e908a24 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -372,30 +372,48 @@ void __init early_print(const char *str, ...)
372 372
373static void __init cpuid_init_hwcaps(void) 373static void __init cpuid_init_hwcaps(void)
374{ 374{
375 unsigned int divide_instrs, vmsa; 375 int block;
376 u32 isar5;
376 377
377 if (cpu_architecture() < CPU_ARCH_ARMv7) 378 if (cpu_architecture() < CPU_ARCH_ARMv7)
378 return; 379 return;
379 380
380 divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24; 381 block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
381 382 if (block >= 2)
382 switch (divide_instrs) {
383 case 2:
384 elf_hwcap |= HWCAP_IDIVA; 383 elf_hwcap |= HWCAP_IDIVA;
385 case 1: 384 if (block >= 1)
386 elf_hwcap |= HWCAP_IDIVT; 385 elf_hwcap |= HWCAP_IDIVT;
387 }
388 386
389 /* LPAE implies atomic ldrd/strd instructions */ 387 /* LPAE implies atomic ldrd/strd instructions */
390 vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0; 388 block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
391 if (vmsa >= 5) 389 if (block >= 5)
392 elf_hwcap |= HWCAP_LPAE; 390 elf_hwcap |= HWCAP_LPAE;
391
392 /* check for supported v8 Crypto instructions */
393 isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
394
395 block = cpuid_feature_extract_field(isar5, 4);
396 if (block >= 2)
397 elf_hwcap2 |= HWCAP2_PMULL;
398 if (block >= 1)
399 elf_hwcap2 |= HWCAP2_AES;
400
401 block = cpuid_feature_extract_field(isar5, 8);
402 if (block >= 1)
403 elf_hwcap2 |= HWCAP2_SHA1;
404
405 block = cpuid_feature_extract_field(isar5, 12);
406 if (block >= 1)
407 elf_hwcap2 |= HWCAP2_SHA2;
408
409 block = cpuid_feature_extract_field(isar5, 16);
410 if (block >= 1)
411 elf_hwcap2 |= HWCAP2_CRC32;
393} 412}
394 413
395static void __init elf_hwcap_fixup(void) 414static void __init elf_hwcap_fixup(void)
396{ 415{
397 unsigned id = read_cpuid_id(); 416 unsigned id = read_cpuid_id();
398 unsigned sync_prim;
399 417
400 /* 418 /*
401 * HWCAP_TLS is available only on 1136 r1p0 and later, 419 * HWCAP_TLS is available only on 1136 r1p0 and later,
@@ -416,9 +434,9 @@ static void __init elf_hwcap_fixup(void)
416 * avoid advertising SWP; it may not be atomic with 434 * avoid advertising SWP; it may not be atomic with
417 * multiprocessing cores. 435 * multiprocessing cores.
418 */ 436 */
419 sync_prim = ((read_cpuid_ext(CPUID_EXT_ISAR3) >> 8) & 0xf0) | 437 if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
420 ((read_cpuid_ext(CPUID_EXT_ISAR4) >> 20) & 0x0f); 438 (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
421 if (sync_prim >= 0x13) 439 cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
422 elf_hwcap &= ~HWCAP_SWP; 440 elf_hwcap &= ~HWCAP_SWP;
423} 441}
424 442
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index e1e60e5a7a27..7d37bfc50830 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -116,14 +116,7 @@ cpu_resume_after_mmu:
116 ldmfd sp!, {r4 - r11, pc} 116 ldmfd sp!, {r4 - r11, pc}
117ENDPROC(cpu_resume_after_mmu) 117ENDPROC(cpu_resume_after_mmu)
118 118
119/* 119 .text
120 * Note: Yes, part of the following code is located into the .data section.
121 * This is to allow sleep_save_sp to be accessed with a relative load
122 * while we can't rely on any MMU translation. We could have put
123 * sleep_save_sp in the .text section as well, but some setups might
124 * insist on it to be truly read-only.
125 */
126 .data
127 .align 120 .align
128ENTRY(cpu_resume) 121ENTRY(cpu_resume)
129ARM_BE8(setend be) @ ensure we are in BE mode 122ARM_BE8(setend be) @ ensure we are in BE mode
@@ -145,6 +138,8 @@ ARM_BE8(setend be) @ ensure we are in BE mode
145 compute_mpidr_hash r1, r4, r5, r6, r0, r3 138 compute_mpidr_hash r1, r4, r5, r6, r0, r3
1461: 1391:
147 adr r0, _sleep_save_sp 140 adr r0, _sleep_save_sp
141 ldr r2, [r0]
142 add r0, r0, r2
148 ldr r0, [r0, #SLEEP_SAVE_SP_PHYS] 143 ldr r0, [r0, #SLEEP_SAVE_SP_PHYS]
149 ldr r0, [r0, r1, lsl #2] 144 ldr r0, [r0, r1, lsl #2]
150 145
@@ -156,10 +151,12 @@ THUMB( bx r3 )
156ENDPROC(cpu_resume) 151ENDPROC(cpu_resume)
157 152
158 .align 2 153 .align 2
154_sleep_save_sp:
155 .long sleep_save_sp - .
159mpidr_hash_ptr: 156mpidr_hash_ptr:
160 .long mpidr_hash - . @ mpidr_hash struct offset 157 .long mpidr_hash - . @ mpidr_hash struct offset
161 158
159 .data
162 .type sleep_save_sp, #object 160 .type sleep_save_sp, #object
163ENTRY(sleep_save_sp) 161ENTRY(sleep_save_sp)
164_sleep_save_sp:
165 .space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp 162 .space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp
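The net effect of the sleep.S hunk: cpu_resume moves into .text, while sleep_save_sp stays writable in .data but is now reached through a position-independent offset (".long sleep_save_sp - ."), so no MMU translation is needed. Spelled out as pseudo-C next to the added instructions (illustrative only):

	/* adr r0, _sleep_save_sp */	r0 = &_sleep_save_sp;	/* PC-relative, works with the MMU off   */
	/* ldr r2, [r0]           */	r2 = *r0;		/* the stored "sleep_save_sp - ." offset */
	/* add r0, r0, r2         */	r0 = r0 + r2;		/* == &sleep_save_sp, wherever .data is  */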
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 86ef244c5a24..cca5b8758185 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,11 @@ void __init smp_init_cpus(void)
145 smp_ops.smp_init_cpus(); 145 smp_ops.smp_init_cpus();
146} 146}
147 147
148int platform_can_secondary_boot(void)
149{
150 return !!smp_ops.smp_boot_secondary;
151}
152
148int platform_can_cpu_hotplug(void) 153int platform_can_cpu_hotplug(void)
149{ 154{
150#ifdef CONFIG_HOTPLUG_CPU 155#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index afdd51e30bec..1361756782c7 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -42,7 +42,7 @@
42 " cmp %0, #0\n" \ 42 " cmp %0, #0\n" \
43 " movne %0, %4\n" \ 43 " movne %0, %4\n" \
44 "2:\n" \ 44 "2:\n" \
45 " .section .fixup,\"ax\"\n" \ 45 " .section .text.fixup,\"ax\"\n" \
46 " .align 2\n" \ 46 " .align 2\n" \
47 "3: mov %0, %5\n" \ 47 "3: mov %0, %5\n" \
48 " b 2b\n" \ 48 " b 2b\n" \
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
new file mode 100644
index 000000000000..efe17dd9b921
--- /dev/null
+++ b/arch/arm/kernel/vdso.c
@@ -0,0 +1,337 @@
1/*
2 * Adapted from arm64 version.
3 *
4 * Copyright (C) 2012 ARM Limited
5 * Copyright (C) 2015 Mentor Graphics Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/elf.h>
21#include <linux/err.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/of.h>
25#include <linux/printk.h>
26#include <linux/slab.h>
27#include <linux/timekeeper_internal.h>
28#include <linux/vmalloc.h>
29#include <asm/arch_timer.h>
30#include <asm/barrier.h>
31#include <asm/cacheflush.h>
32#include <asm/page.h>
33#include <asm/vdso.h>
34#include <asm/vdso_datapage.h>
35#include <clocksource/arm_arch_timer.h>
36
37#define MAX_SYMNAME 64
38
39static struct page **vdso_text_pagelist;
40
41/* Total number of pages needed for the data and text portions of the VDSO. */
42unsigned int vdso_total_pages __read_mostly;
43
44/*
45 * The VDSO data page.
46 */
47static union vdso_data_store vdso_data_store __page_aligned_data;
48static struct vdso_data *vdso_data = &vdso_data_store.data;
49
50static struct page *vdso_data_page;
51static struct vm_special_mapping vdso_data_mapping = {
52 .name = "[vvar]",
53 .pages = &vdso_data_page,
54};
55
56static struct vm_special_mapping vdso_text_mapping = {
57 .name = "[vdso]",
58};
59
60struct elfinfo {
61 Elf32_Ehdr *hdr; /* ptr to ELF */
62 Elf32_Sym *dynsym; /* ptr to .dynsym section */
63 unsigned long dynsymsize; /* size of .dynsym section */
64 char *dynstr; /* ptr to .dynstr section */
65};
66
67/* Cached result of boot-time check for whether the arch timer exists,
68 * and if so, whether the virtual counter is useable.
69 */
70static bool cntvct_ok __read_mostly;
71
72static bool __init cntvct_functional(void)
73{
74 struct device_node *np;
75 bool ret = false;
76
77 if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
78 goto out;
79
80 /* The arm_arch_timer core should export
81 * arch_timer_use_virtual or similar so we don't have to do
82 * this.
83 */
84 np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
85 if (!np)
86 goto out_put;
87
88 if (of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
89 goto out_put;
90
91 ret = true;
92
93out_put:
94 of_node_put(np);
95out:
96 return ret;
97}
98
99static void * __init find_section(Elf32_Ehdr *ehdr, const char *name,
100 unsigned long *size)
101{
102 Elf32_Shdr *sechdrs;
103 unsigned int i;
104 char *secnames;
105
106 /* Grab section headers and strings so we can tell who is who */
107 sechdrs = (void *)ehdr + ehdr->e_shoff;
108 secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
109
110 /* Find the section they want */
111 for (i = 1; i < ehdr->e_shnum; i++) {
112 if (strcmp(secnames + sechdrs[i].sh_name, name) == 0) {
113 if (size)
114 *size = sechdrs[i].sh_size;
115 return (void *)ehdr + sechdrs[i].sh_offset;
116 }
117 }
118
119 if (size)
120 *size = 0;
121 return NULL;
122}
123
124static Elf32_Sym * __init find_symbol(struct elfinfo *lib, const char *symname)
125{
126 unsigned int i;
127
128 for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
129 char name[MAX_SYMNAME], *c;
130
131 if (lib->dynsym[i].st_name == 0)
132 continue;
133 strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
134 MAX_SYMNAME);
135 c = strchr(name, '@');
136 if (c)
137 *c = 0;
138 if (strcmp(symname, name) == 0)
139 return &lib->dynsym[i];
140 }
141 return NULL;
142}
143
144static void __init vdso_nullpatch_one(struct elfinfo *lib, const char *symname)
145{
146 Elf32_Sym *sym;
147
148 sym = find_symbol(lib, symname);
149 if (!sym)
150 return;
151
152 sym->st_name = 0;
153}
154
155static void __init patch_vdso(void *ehdr)
156{
157 struct elfinfo einfo;
158
159 einfo = (struct elfinfo) {
160 .hdr = ehdr,
161 };
162
163 einfo.dynsym = find_section(einfo.hdr, ".dynsym", &einfo.dynsymsize);
164 einfo.dynstr = find_section(einfo.hdr, ".dynstr", NULL);
165
166 /* If the virtual counter is absent or non-functional we don't
167 * want programs to incur the slight additional overhead of
168 * dispatching through the VDSO only to fall back to syscalls.
169 */
170 if (!cntvct_ok) {
171 vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
172 vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
173 }
174}
175
176static int __init vdso_init(void)
177{
178 unsigned int text_pages;
179 int i;
180
181 if (memcmp(&vdso_start, "\177ELF", 4)) {
182 pr_err("VDSO is not a valid ELF object!\n");
183 return -ENOEXEC;
184 }
185
186 text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
187 pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
188
189 /* Allocate the VDSO text pagelist */
190 vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
191 GFP_KERNEL);
192 if (vdso_text_pagelist == NULL)
193 return -ENOMEM;
194
195 /* Grab the VDSO data page. */
196 vdso_data_page = virt_to_page(vdso_data);
197
198 /* Grab the VDSO text pages. */
199 for (i = 0; i < text_pages; i++) {
200 struct page *page;
201
202 page = virt_to_page(&vdso_start + i * PAGE_SIZE);
203 vdso_text_pagelist[i] = page;
204 }
205
206 vdso_text_mapping.pages = vdso_text_pagelist;
207
208 vdso_total_pages = 1; /* for the data/vvar page */
209 vdso_total_pages += text_pages;
210
211 cntvct_ok = cntvct_functional();
212
213 patch_vdso(&vdso_start);
214
215 return 0;
216}
217arch_initcall(vdso_init);
218
219static int install_vvar(struct mm_struct *mm, unsigned long addr)
220{
221 struct vm_area_struct *vma;
222
223 vma = _install_special_mapping(mm, addr, PAGE_SIZE,
224 VM_READ | VM_MAYREAD,
225 &vdso_data_mapping);
226
227 return IS_ERR(vma) ? PTR_ERR(vma) : 0;
228}
229
230/* assumes mmap_sem is write-locked */
231void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
232{
233 struct vm_area_struct *vma;
234 unsigned long len;
235
236 mm->context.vdso = 0;
237
238 if (vdso_text_pagelist == NULL)
239 return;
240
241 if (install_vvar(mm, addr))
242 return;
243
244 /* Account for vvar page. */
245 addr += PAGE_SIZE;
246 len = (vdso_total_pages - 1) << PAGE_SHIFT;
247
248 vma = _install_special_mapping(mm, addr, len,
249 VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
250 &vdso_text_mapping);
251
252 if (!IS_ERR(vma))
253 mm->context.vdso = addr;
254}
255
256static void vdso_write_begin(struct vdso_data *vdata)
257{
258 ++vdso_data->seq_count;
259 smp_wmb(); /* Pairs with smp_rmb in vdso_read_retry */
260}
261
262static void vdso_write_end(struct vdso_data *vdata)
263{
264 smp_wmb(); /* Pairs with smp_rmb in vdso_read_begin */
265 ++vdso_data->seq_count;
266}
267
268static bool tk_is_cntvct(const struct timekeeper *tk)
269{
270 if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
271 return false;
272
273 if (strcmp(tk->tkr_mono.clock->name, "arch_sys_counter") != 0)
274 return false;
275
276 return true;
277}
278
279/**
280 * update_vsyscall - update the vdso data page
281 *
282 * Increment the sequence counter, making it odd, indicating to
283 * userspace that an update is in progress. Update the fields used
284 * for coarse clocks and, if the architected system timer is in use,
285 * the fields used for high precision clocks. Increment the sequence
286 * counter again, making it even, indicating to userspace that the
287 * update is finished.
288 *
289 * Userspace is expected to sample seq_count before reading any other
290 * fields from the data page. If seq_count is odd, userspace is
291 * expected to wait until it becomes even. After copying data from
292 * the page, userspace must sample seq_count again; if it has changed
293 * from its previous value, userspace must retry the whole sequence.
294 *
295 * Calls to update_vsyscall are serialized by the timekeeping core.
296 */
297void update_vsyscall(struct timekeeper *tk)
298{
299 struct timespec xtime_coarse;
300 struct timespec64 *wtm = &tk->wall_to_monotonic;
301
302 if (!cntvct_ok) {
303 /* The entry points have been zeroed, so there is no
304 * point in updating the data page.
305 */
306 return;
307 }
308
309 vdso_write_begin(vdso_data);
310
311 xtime_coarse = __current_kernel_time();
312 vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
313 vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
314 vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
315 vdso_data->wtm_clock_sec = wtm->tv_sec;
316 vdso_data->wtm_clock_nsec = wtm->tv_nsec;
317
318 if (vdso_data->tk_is_cntvct) {
319 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
320 vdso_data->xtime_clock_sec = tk->xtime_sec;
321 vdso_data->xtime_clock_snsec = tk->tkr_mono.xtime_nsec;
322 vdso_data->cs_mult = tk->tkr_mono.mult;
323 vdso_data->cs_shift = tk->tkr_mono.shift;
324 vdso_data->cs_mask = tk->tkr_mono.mask;
325 }
326
327 vdso_write_end(vdso_data);
328
329 flush_dcache_page(virt_to_page(vdso_data));
330}
331
332void update_vsyscall_tz(void)
333{
334 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
335 vdso_data->tz_dsttime = sys_tz.tz_dsttime;
336 flush_dcache_page(virt_to_page(vdso_data));
337}
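The read side that the update_vsyscall() comment describes is implemented in the userspace VDSO sources rather than in this file; the following is only a sketch of the protocol, with function names mirroring the pairing comments in vdso_write_begin()/vdso_write_end() above and plain loads standing in for suitably annotated ones:

static u32 vdso_read_begin(const struct vdso_data *vdata)
{
	u32 seq;

	do {
		seq = vdata->seq_count;
	} while (seq & 1);	/* odd: an update is in progress, spin */

	smp_rmb();		/* pairs with smp_wmb in vdso_write_end */
	return seq;
}

static bool vdso_read_retry(const struct vdso_data *vdata, u32 start)
{
	smp_rmb();		/* pairs with smp_wmb in vdso_write_begin */
	return vdata->seq_count != start;	/* changed: retry the whole read */
}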
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b31aa73e8076..7a301be9ac67 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -74,7 +74,7 @@ SECTIONS
74 ARM_EXIT_DISCARD(EXIT_DATA) 74 ARM_EXIT_DISCARD(EXIT_DATA)
75 EXIT_CALL 75 EXIT_CALL
76#ifndef CONFIG_MMU 76#ifndef CONFIG_MMU
77 *(.fixup) 77 *(.text.fixup)
78 *(__ex_table) 78 *(__ex_table)
79#endif 79#endif
80#ifndef CONFIG_SMP_ON_UP 80#ifndef CONFIG_SMP_ON_UP
@@ -100,6 +100,7 @@ SECTIONS
100 100
101 .text : { /* Real text segment */ 101 .text : { /* Real text segment */
102 _stext = .; /* Text and read-only data */ 102 _stext = .; /* Text and read-only data */
103 IDMAP_TEXT
103 __exception_text_start = .; 104 __exception_text_start = .;
104 *(.exception.text) 105 *(.exception.text)
105 __exception_text_end = .; 106 __exception_text_end = .;
@@ -108,10 +109,6 @@ SECTIONS
108 SCHED_TEXT 109 SCHED_TEXT
109 LOCK_TEXT 110 LOCK_TEXT
110 KPROBES_TEXT 111 KPROBES_TEXT
111 IDMAP_TEXT
112#ifdef CONFIG_MMU
113 *(.fixup)
114#endif
115 *(.gnu.warning) 112 *(.gnu.warning)
116 *(.glue_7) 113 *(.glue_7)
117 *(.glue_7t) 114 *(.glue_7t)