aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/include/asm
diff options
context:
space:
mode:
author    Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 00:03:26 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 00:03:26 -0400
commit bb0fd7ab0986105765d11baa82e619c618a235aa (patch)
tree   6a0585ece827e1025aa48819959d02155a871be9 /arch/arm/include/asm
parent bdfa54dfd9eea001274dbcd622657a904fe43b81 (diff)
parent 4b2f8838479eb2abe042e094f7d2cced6d5ea772 (diff)
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King: "Included in this update are both some long term fixes and some new features. Fixes: - An integer overflow in the calculation of ELF_ET_DYN_BASE. - Avoiding OOMs for high-order IOMMU allocations - SMP requires the data cache to be enabled for synchronisation primitives to work, so prevent the CPU_DCACHE_DISABLE option being visible on SMP builds. - A bug going back 10+ years in the noMMU ARM94* CPU support code, where it corrupts registers. Found by folk getting Linux running on their cameras. - Versatile Express needs an errata workaround enabled for CPU hot-unplug to work. Features: - Clean up module linker by handling out of range relocations separately from relocation cases we don't handle. - Fix a long term bug in the pci_mmap_page_range() code, which we hope won't impact userspace (we hope there's no users of the existing broken interface.) - Don't map DMA coherent allocations when we don't have a MMU. - Drop experimental status for SMP_ON_UP. - Warn when DT doesn't specify ePAPR mandatory cache properties. - Add documentation concerning how we find the start of physical memory for AUTO_ZRELADDR kernels, detailing why we have chosen the mask and the implications of changing it. - Updates from Ard Biesheuvel to address some issues with large kernels (such as allyesconfig) failing to link. - Allow hibernation to work on modern (ARMv7) CPUs - this appears to have never worked in the past on these CPUs. - Enable IRQ_SHOW_LEVEL, which changes the /proc/interrupts output format (hopefully without userspace breaking... let's hope that if it causes someone a problem, they tell us.) - Fix tegra-ahb DT offsets. - Rework ARM errata 643719 code (and ARMv7 flush_cache_louis()/ flush_dcache_all()) code to be more efficient, and enable this errata workaround by default for ARMv7+SMP CPUs. This complements the Versatile Express fix above. 
- Rework ARMv7 context code for errata 430973, so that only Cortex A8 CPUs are impacted by the branch target buffer flush when this errata is enabled. Also update the help text to indicate that all r1p* A8 CPUs are impacted. - Switch ARM to the generic show_mem() implementation, it conveys all the information which we were already reporting. - Prevent slow timer sources being used for udelay() - timers running at less than 1MHz are not useful for this, and can cause udelay() to return immediately, without any wait. Using such a slow timer is silly. - VDSO support for 32-bit ARM, mainly for gettimeofday() using the ARM architected timer. - Perf support for Scorpion performance monitoring units" vdso semantic conflict fixed up as per linux-next. * 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (52 commits) ARM: update errata 430973 documentation to cover Cortex A8 r1p* ARM: ensure delay timer has sufficient accuracy for delays ARM: switch to use the generic show_mem() implementation ARM: proc-v7: avoid errata 430973 workaround for non-Cortex A8 CPUs ARM: enable ARM errata 643719 workaround by default ARM: cache-v7: optimise test for Cortex A9 r0pX devices ARM: cache-v7: optimise branches in v7_flush_cache_louis ARM: cache-v7: consolidate initialisation of cache level index ARM: cache-v7: shift CLIDR to extract appropriate field before masking ARM: cache-v7: use movw/movt instructions ARM: allow 16-bit instructions in ALT_UP() ARM: proc-arm94*.S: fix setup function ARM: vexpress: fix CPU hotplug with CT9x4 tile. 
ARM: 8276/1: Make CPU_DCACHE_DISABLE depend on !SMP ARM: 8335/1: Documentation: DT bindings: Tegra AHB: document the legacy base address ARM: 8334/1: amba: tegra-ahb: detect and correct bogus base address ARM: 8333/1: amba: tegra-ahb: fix register offsets in the macros ARM: 8339/1: Enable CONFIG_GENERIC_IRQ_SHOW_LEVEL ARM: 8338/1: kexec: Relax SMP validation to improve DT compatibility ARM: 8337/1: mm: Do not invoke OOM for higher order IOMMU DMA allocations ...
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/Kbuild              |  1
-rw-r--r--  arch/arm/include/asm/assembler.h         |  3
-rw-r--r--  arch/arm/include/asm/auxvec.h            |  1
-rw-r--r--  arch/arm/include/asm/cputype.h           | 16
-rw-r--r--  arch/arm/include/asm/elf.h               | 11
-rw-r--r--  arch/arm/include/asm/futex.h             |  2
-rw-r--r--  arch/arm/include/asm/mmu.h               |  3
-rw-r--r--  arch/arm/include/asm/pmu.h               |  1
-rw-r--r--  arch/arm/include/asm/smp_plat.h          |  1
-rw-r--r--  arch/arm/include/asm/uaccess.h           | 10
-rw-r--r--  arch/arm/include/asm/unified.h           |  8
-rw-r--r--  arch/arm/include/asm/vdso.h              | 32
-rw-r--r--  arch/arm/include/asm/vdso_datapage.h     | 60
-rw-r--r--  arch/arm/include/asm/word-at-a-time.h    |  2
14 files changed, 142 insertions, 9 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index fe74c0d1e485..eb0f43f3e3f1 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,6 +1,5 @@
1 1
2 2
3generic-y += auxvec.h
4generic-y += bitsperlong.h 3generic-y += bitsperlong.h
5generic-y += cputime.h 4generic-y += cputime.h
6generic-y += current.h 5generic-y += current.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index f67fd3afebdf..186270b3e194 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -237,6 +237,9 @@
237 .pushsection ".alt.smp.init", "a" ;\ 237 .pushsection ".alt.smp.init", "a" ;\
238 .long 9998b ;\ 238 .long 9998b ;\
2399997: instr ;\ 2399997: instr ;\
240 .if . - 9997b == 2 ;\
241 nop ;\
242 .endif ;\
240 .if . - 9997b != 4 ;\ 243 .if . - 9997b != 4 ;\
241 .error "ALT_UP() content must assemble to exactly 4 bytes";\ 244 .error "ALT_UP() content must assemble to exactly 4 bytes";\
242 .endif ;\ 245 .endif ;\
diff --git a/arch/arm/include/asm/auxvec.h b/arch/arm/include/asm/auxvec.h
new file mode 100644
index 000000000000..fbd388c46299
--- /dev/null
+++ b/arch/arm/include/asm/auxvec.h
@@ -0,0 +1 @@
#include <uapi/asm/auxvec.h>
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 819777d0e91f..85e374f873ac 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -253,4 +253,20 @@ static inline int cpu_is_pj4(void)
253#else 253#else
254#define cpu_is_pj4() 0 254#define cpu_is_pj4() 0
255#endif 255#endif
256
257static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
258 int field)
259{
260 int feature = (features >> field) & 15;
261
262 /* feature registers are signed values */
263 if (feature > 8)
264 feature -= 16;
265
266 return feature;
267}
268
269#define cpuid_feature_extract(reg, field) \
270 cpuid_feature_extract_field(read_cpuid_ext(reg), field)
271
256#endif 272#endif
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index c1ff8ab12914..d2315ffd8f12 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -1,7 +1,9 @@
1#ifndef __ASMARM_ELF_H 1#ifndef __ASMARM_ELF_H
2#define __ASMARM_ELF_H 2#define __ASMARM_ELF_H
3 3
4#include <asm/auxvec.h>
4#include <asm/hwcap.h> 5#include <asm/hwcap.h>
6#include <asm/vdso_datapage.h>
5 7
6/* 8/*
7 * ELF register definitions.. 9 * ELF register definitions..
@@ -115,7 +117,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
115 the loader. We need to make sure that it is out of the way of the program 117 the loader. We need to make sure that it is out of the way of the program
116 that it will "exec", and that there is sufficient room for the brk. */ 118 that it will "exec", and that there is sufficient room for the brk. */
117 119
118#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) 120#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
119 121
120/* When the program starts, a1 contains a pointer to a function to be 122/* When the program starts, a1 contains a pointer to a function to be
121 registered with atexit, as per the SVR4 ABI. A value of 0 means we 123 registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -126,6 +128,13 @@ extern void elf_set_personality(const struct elf32_hdr *);
126#define SET_PERSONALITY(ex) elf_set_personality(&(ex)) 128#define SET_PERSONALITY(ex) elf_set_personality(&(ex))
127 129
128#ifdef CONFIG_MMU 130#ifdef CONFIG_MMU
131#ifdef CONFIG_VDSO
132#define ARCH_DLINFO \
133do { \
134 NEW_AUX_ENT(AT_SYSINFO_EHDR, \
135 (elf_addr_t)current->mm->context.vdso); \
136} while (0)
137#endif
129#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 138#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
130struct linux_binprm; 139struct linux_binprm;
131int arch_setup_additional_pages(struct linux_binprm *, int); 140int arch_setup_additional_pages(struct linux_binprm *, int);
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 53e69dae796f..4e78065a16aa 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -13,7 +13,7 @@
13 " .align 3\n" \ 13 " .align 3\n" \
14 " .long 1b, 4f, 2b, 4f\n" \ 14 " .long 1b, 4f, 2b, 4f\n" \
15 " .popsection\n" \ 15 " .popsection\n" \
16 " .pushsection .fixup,\"ax\"\n" \ 16 " .pushsection .text.fixup,\"ax\"\n" \
17 " .align 2\n" \ 17 " .align 2\n" \
18 "4: mov %0, " err_reg "\n" \ 18 "4: mov %0, " err_reg "\n" \
19 " b 3b\n" \ 19 " b 3b\n" \
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 64fd15159b7d..a5b47421059d 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -11,6 +11,9 @@ typedef struct {
11#endif 11#endif
12 unsigned int vmalloc_seq; 12 unsigned int vmalloc_seq;
13 unsigned long sigpage; 13 unsigned long sigpage;
14#ifdef CONFIG_VDSO
15 unsigned long vdso;
16#endif
14} mm_context_t; 17} mm_context_t;
15 18
16#ifdef CONFIG_CPU_HAS_ASID 19#ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index b1596bd59129..675e4ab79f68 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -92,6 +92,7 @@ struct pmu_hw_events {
92struct arm_pmu { 92struct arm_pmu {
93 struct pmu pmu; 93 struct pmu pmu;
94 cpumask_t active_irqs; 94 cpumask_t active_irqs;
95 int *irq_affinity;
95 char *name; 96 char *name;
96 irqreturn_t (*handle_irq)(int irq_num, void *dev); 97 irqreturn_t (*handle_irq)(int irq_num, void *dev);
97 void (*enable)(struct perf_event *event); 98 void (*enable)(struct perf_event *event);
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 0ad7d490ee6f..993e5224d8f7 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -104,6 +104,7 @@ static inline u32 mpidr_hash_size(void)
104 return 1 << mpidr_hash.bits; 104 return 1 << mpidr_hash.bits;
105} 105}
106 106
107extern int platform_can_secondary_boot(void);
107extern int platform_can_cpu_hotplug(void); 108extern int platform_can_cpu_hotplug(void);
108 109
109#endif 110#endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index ce0786efd26c..74b17d09ef7a 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -315,7 +315,7 @@ do { \
315 __asm__ __volatile__( \ 315 __asm__ __volatile__( \
316 "1: " TUSER(ldrb) " %1,[%2],#0\n" \ 316 "1: " TUSER(ldrb) " %1,[%2],#0\n" \
317 "2:\n" \ 317 "2:\n" \
318 " .pushsection .fixup,\"ax\"\n" \ 318 " .pushsection .text.fixup,\"ax\"\n" \
319 " .align 2\n" \ 319 " .align 2\n" \
320 "3: mov %0, %3\n" \ 320 "3: mov %0, %3\n" \
321 " mov %1, #0\n" \ 321 " mov %1, #0\n" \
@@ -351,7 +351,7 @@ do { \
351 __asm__ __volatile__( \ 351 __asm__ __volatile__( \
352 "1: " TUSER(ldr) " %1,[%2],#0\n" \ 352 "1: " TUSER(ldr) " %1,[%2],#0\n" \
353 "2:\n" \ 353 "2:\n" \
354 " .pushsection .fixup,\"ax\"\n" \ 354 " .pushsection .text.fixup,\"ax\"\n" \
355 " .align 2\n" \ 355 " .align 2\n" \
356 "3: mov %0, %3\n" \ 356 "3: mov %0, %3\n" \
357 " mov %1, #0\n" \ 357 " mov %1, #0\n" \
@@ -397,7 +397,7 @@ do { \
397 __asm__ __volatile__( \ 397 __asm__ __volatile__( \
398 "1: " TUSER(strb) " %1,[%2],#0\n" \ 398 "1: " TUSER(strb) " %1,[%2],#0\n" \
399 "2:\n" \ 399 "2:\n" \
400 " .pushsection .fixup,\"ax\"\n" \ 400 " .pushsection .text.fixup,\"ax\"\n" \
401 " .align 2\n" \ 401 " .align 2\n" \
402 "3: mov %0, %3\n" \ 402 "3: mov %0, %3\n" \
403 " b 2b\n" \ 403 " b 2b\n" \
@@ -430,7 +430,7 @@ do { \
430 __asm__ __volatile__( \ 430 __asm__ __volatile__( \
431 "1: " TUSER(str) " %1,[%2],#0\n" \ 431 "1: " TUSER(str) " %1,[%2],#0\n" \
432 "2:\n" \ 432 "2:\n" \
433 " .pushsection .fixup,\"ax\"\n" \ 433 " .pushsection .text.fixup,\"ax\"\n" \
434 " .align 2\n" \ 434 " .align 2\n" \
435 "3: mov %0, %3\n" \ 435 "3: mov %0, %3\n" \
436 " b 2b\n" \ 436 " b 2b\n" \
@@ -458,7 +458,7 @@ do { \
458 THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \ 458 THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \
459 THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \ 459 THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \
460 "3:\n" \ 460 "3:\n" \
461 " .pushsection .fixup,\"ax\"\n" \ 461 " .pushsection .text.fixup,\"ax\"\n" \
462 " .align 2\n" \ 462 " .align 2\n" \
463 "4: mov %0, %3\n" \ 463 "4: mov %0, %3\n" \
464 " b 3b\n" \ 464 " b 3b\n" \
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index b88beaba6b4a..200f9a7cd623 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -24,6 +24,14 @@
24 .syntax unified 24 .syntax unified
25#endif 25#endif
26 26
27#ifdef CONFIG_CPU_V7M
28#define AR_CLASS(x...)
29#define M_CLASS(x...) x
30#else
31#define AR_CLASS(x...) x
32#define M_CLASS(x...)
33#endif
34
27#ifdef CONFIG_THUMB2_KERNEL 35#ifdef CONFIG_THUMB2_KERNEL
28 36
29#if __GNUC__ < 4 37#if __GNUC__ < 4
diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
new file mode 100644
index 000000000000..d0295f1dd1a3
--- /dev/null
+++ b/arch/arm/include/asm/vdso.h
@@ -0,0 +1,32 @@
1#ifndef __ASM_VDSO_H
2#define __ASM_VDSO_H
3
4#ifdef __KERNEL__
5
6#ifndef __ASSEMBLY__
7
8struct mm_struct;
9
10#ifdef CONFIG_VDSO
11
12void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
13
14extern char vdso_start, vdso_end;
15
16extern unsigned int vdso_total_pages;
17
18#else /* CONFIG_VDSO */
19
20static inline void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
21{
22}
23
24#define vdso_total_pages 0
25
26#endif /* CONFIG_VDSO */
27
28#endif /* __ASSEMBLY__ */
29
30#endif /* __KERNEL__ */
31
32#endif /* __ASM_VDSO_H */
diff --git a/arch/arm/include/asm/vdso_datapage.h b/arch/arm/include/asm/vdso_datapage.h
new file mode 100644
index 000000000000..9be259442fca
--- /dev/null
+++ b/arch/arm/include/asm/vdso_datapage.h
@@ -0,0 +1,60 @@
1/*
2 * Adapted from arm64 version.
3 *
4 * Copyright (C) 2012 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef __ASM_VDSO_DATAPAGE_H
19#define __ASM_VDSO_DATAPAGE_H
20
21#ifdef __KERNEL__
22
23#ifndef __ASSEMBLY__
24
25#include <asm/page.h>
26
27/* Try to be cache-friendly on systems that don't implement the
28 * generic timer: fit the unconditionally updated fields in the first
29 * 32 bytes.
30 */
31struct vdso_data {
32 u32 seq_count; /* sequence count - odd during updates */
33 u16 tk_is_cntvct; /* fall back to syscall if false */
34 u16 cs_shift; /* clocksource shift */
35 u32 xtime_coarse_sec; /* coarse time */
36 u32 xtime_coarse_nsec;
37
38 u32 wtm_clock_sec; /* wall to monotonic offset */
39 u32 wtm_clock_nsec;
40 u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */
41 u32 cs_mult; /* clocksource multiplier */
42
43 u64 cs_cycle_last; /* last cycle value */
44 u64 cs_mask; /* clocksource mask */
45
46 u64 xtime_clock_snsec; /* CLOCK_REALTIME sub-ns base */
47 u32 tz_minuteswest; /* timezone info for gettimeofday(2) */
48 u32 tz_dsttime;
49};
50
51union vdso_data_store {
52 struct vdso_data data;
53 u8 page[PAGE_SIZE];
54};
55
56#endif /* !__ASSEMBLY__ */
57
58#endif /* __KERNEL__ */
59
60#endif /* __ASM_VDSO_DATAPAGE_H */
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
index a6d0a29861e7..5831dce4b51c 100644
--- a/arch/arm/include/asm/word-at-a-time.h
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -71,7 +71,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
71 asm( 71 asm(
72 "1: ldr %0, [%2]\n" 72 "1: ldr %0, [%2]\n"
73 "2:\n" 73 "2:\n"
74 " .pushsection .fixup,\"ax\"\n" 74 " .pushsection .text.fixup,\"ax\"\n"
75 " .align 2\n" 75 " .align 2\n"
76 "3: and %1, %2, #0x3\n" 76 "3: and %1, %2, #0x3\n"
77 " bic %2, %2, #0x3\n" 77 " bic %2, %2, #0x3\n"