author     Linus Torvalds <torvalds@linux-foundation.org>  2015-04-16 14:58:29 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-16 14:58:29 -0400
commit     714d8e7e27197dd39b2550e762a6a6fcf397a471
tree       bc989a2a0e14f21912943e56d0002a26a2b7793e /arch/arm64/include
parent     d19d5efd8c8840aa4f38a6dfbfe500d8cc27de46
parent     6d1966dfd6e0ad2f8aa4b664ae1a62e33abe1998
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
"Here are the core arm64 updates for 4.1.
Highlights include a significant rework to head.S (allowing us to boot
on machines with physical memory at a really high address), an AES
performance boost on Cortex-A57 and the ability to run a 32-bit
userspace with 64k pages (although this requires said userspace to be
built with a recent binutils).
The head.S rework spilt over into KVM, so there are some changes under
arch/arm/ which have been acked by Marc Zyngier (KVM co-maintainer).
In particular, the linker script changes caused us some issues in
-next, so there are a few merge commits where we had to apply fixes on
top of a stable branch.
Other changes include:
- AES performance boost for Cortex-A57
- AArch32 (compat) userspace with 64k pages
- Cortex-A53 erratum workaround for #845719
- defconfig updates (new platforms, PCI, ...)"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (39 commits)
arm64: fix midr range for Cortex-A57 erratum 832075
arm64: errata: add workaround for cortex-a53 erratum #845719
arm64: Use bool function return values of true/false not 1/0
arm64: defconfig: updates for 4.1
arm64: Extract feature parsing code from cpu_errata.c
arm64: alternative: Allow immediate branch as alternative instruction
arm64: insn: Add aarch64_insn_decode_immediate
ARM: kvm: round HYP section to page size instead of log2 upper bound
ARM: kvm: assert on HYP section boundaries not actual code size
arm64: head.S: ensure idmap_t0sz is visible
arm64: pmu: add support for interrupt-affinity property
dt: pmu: extend ARM PMU binding to allow for explicit interrupt affinity
arm64: head.S: ensure visibility of page tables
arm64: KVM: use ID map with increased VA range if required
arm64: mm: increase VA range of identity map
ARM: kvm: implement replacement for ld's LOG2CEIL()
arm64: proc: remove unused cpu_get_pgd macro
arm64: enforce x1|x2|x3 == 0 upon kernel entry as per boot protocol
arm64: remove __calc_phys_offset
arm64: merge __enable_mmu and __turn_mmu_on
...
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/assembler.h       48
-rw-r--r--  arch/arm64/include/asm/cpufeature.h      18
-rw-r--r--  arch/arm64/include/asm/cputable.h        30
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h      2
-rw-r--r--  arch/arm64/include/asm/fixmap.h           2
-rw-r--r--  arch/arm64/include/asm/insn.h             1
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h         33
-rw-r--r--  arch/arm64/include/asm/mmu_context.h     43
-rw-r--r--  arch/arm64/include/asm/page.h             6
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h    7
-rw-r--r--  arch/arm64/include/asm/pmu.h              1
-rw-r--r--  arch/arm64/include/asm/proc-fns.h         9
-rw-r--r--  arch/arm64/include/asm/processor.h        6
-rw-r--r--  arch/arm64/include/asm/smp_plat.h         2
-rw-r--r--  arch/arm64/include/asm/unistd32.h         2
15 files changed, 163 insertions, 47 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 750bac4e637e..144b64ad96c3 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -159,4 +159,52 @@ lr	.req	x30		// link register
 	orr	\rd, \lbits, \hbits, lsl #32
 	.endm
 
+/*
+ * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
+ * <symbol> is within the range +/- 4 GB of the PC.
+ */
+	/*
+	 * @dst: destination register (64 bit wide)
+	 * @sym: name of the symbol
+	 * @tmp: optional scratch register to be used if <dst> == sp, which
+	 *       is not allowed in an adrp instruction
+	 */
+	.macro	adr_l, dst, sym, tmp=
+	.ifb	\tmp
+	adrp	\dst, \sym
+	add	\dst, \dst, :lo12:\sym
+	.else
+	adrp	\tmp, \sym
+	add	\dst, \tmp, :lo12:\sym
+	.endif
+	.endm
+
+	/*
+	 * @dst: destination register (32 or 64 bit wide)
+	 * @sym: name of the symbol
+	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
+	 *       32-bit wide register, in which case it cannot be used to hold
+	 *       the address
+	 */
+	.macro	ldr_l, dst, sym, tmp=
+	.ifb	\tmp
+	adrp	\dst, \sym
+	ldr	\dst, [\dst, :lo12:\sym]
+	.else
+	adrp	\tmp, \sym
+	ldr	\dst, [\tmp, :lo12:\sym]
+	.endif
+	.endm
+
+	/*
+	 * @src: source register (32 or 64 bit wide)
+	 * @sym: name of the symbol
+	 * @tmp: mandatory 64-bit scratch register to calculate the address
+	 *       while <src> needs to be preserved.
+	 */
+	.macro	str_l, src, sym, tmp
+	adrp	\tmp, \sym
+	str	\src, [\tmp, :lo12:\sym]
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H */
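For orientation, a minimal usage sketch of the new pseudo-ops (the symbol name is hypothetical; each line expands to the adrp-based pair defined above):

	adr_l	x0, foo_symbol		// x0 = &foo_symbol (+/- 4 GB reach)
	ldr_l	x1, foo_symbol		// x1 = the value stored at foo_symbol
	str_l	x2, foo_symbol, x3	// store x2 to foo_symbol; x3 is clobbered

Unlike a plain adr (+/- 1 MB), the adrp/add pair reaches any symbol within 4 GB of the PC, which is what the high-memory head.S rework relies on.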
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index b6c16d5f622f..82cb9f98ba1a 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -23,11 +23,24 @@
 
 #define ARM64_WORKAROUND_CLEAN_CACHE		0
 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
+#define ARM64_WORKAROUND_845719			2
 
-#define ARM64_NCAPS				2
+#define ARM64_NCAPS				3
 
 #ifndef __ASSEMBLY__
 
+struct arm64_cpu_capabilities {
+	const char *desc;
+	u16 capability;
+	bool (*matches)(const struct arm64_cpu_capabilities *);
+	union {
+		struct {	/* To be used for erratum handling only */
+			u32 midr_model;
+			u32 midr_range_min, midr_range_max;
+		};
+	};
+};
+
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 
 static inline bool cpu_have_feature(unsigned int num)
@@ -51,7 +64,10 @@ static inline void cpus_set_cap(unsigned int num)
 	__set_bit(num, cpu_hwcaps);
 }
 
+void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+			    const char *info);
 void check_local_cpu_errata(void);
+void check_local_cpu_features(void);
 bool cpu_supports_mixed_endian_el0(void);
 bool system_supports_mixed_endian_el0(void);
 
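A sketch of how an erratum entry might fill in this structure (illustrative, not the in-tree table; the real table and its matches() helper live in arch/arm64/kernel/cpu_errata.c, and the helper name below is assumed):

	static const struct arm64_cpu_capabilities example_errata[] = {
		{
			.desc = "ARM erratum 845719",
			.capability = ARM64_WORKAROUND_845719,
			.matches = is_affected_midr_range,	/* assumed helper */
			.midr_model = 0x410fd030,		/* Cortex-A53 MIDR */
			.midr_range_min = 0x00,
			.midr_range_max = 0x04,			/* r0p0 .. r0p4 */
		},
		{ /* sentinel */ }
	};

check_cpu_capabilities() walks such a table, calls matches() on each entry, and sets the corresponding bit in cpu_hwcaps via cpus_set_cap().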
diff --git a/arch/arm64/include/asm/cputable.h b/arch/arm64/include/asm/cputable.h
deleted file mode 100644
index e3bd983d3661..000000000000
--- a/arch/arm64/include/asm/cputable.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * arch/arm64/include/asm/cputable.h
- *
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_CPUTABLE_H
-#define __ASM_CPUTABLE_H
-
-struct cpu_info {
-	unsigned int	cpu_id_val;
-	unsigned int	cpu_id_mask;
-	const char	*cpu_name;
-	unsigned long	(*cpu_setup)(void);
-};
-
-extern struct cpu_info *lookup_processor_type(unsigned int);
-
-#endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 6932bb57dba0..9437e3dc5833 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -97,7 +97,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
-		return 0;
+		return false;
 
 	return addr + size - 1 <= *dev->dma_mask;
 }
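A worked instance of the range check (values illustrative): with a 32-bit DMA mask, a 4 KB buffer ending exactly at the 4 GB boundary is still reachable, while one byte further would not be:

	u64 mask = (1ULL << 32) - 1;		/* DMA_BIT_MASK(32) */
	u64 addr = 0xfffff000, size = 0x1000;
	bool ok = addr + size - 1 <= mask;	/* 0xffffffff <= mask: true */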
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index defa0ff98250..926495686554 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -33,6 +33,7 @@
 enum fixed_addresses {
 	FIX_HOLE,
 	FIX_EARLYCON_MEM_BASE,
+	FIX_TEXT_POKE0,
 	__end_of_permanent_fixed_addresses,
 
 	/*
@@ -49,7 +50,6 @@ enum fixed_addresses {
 
 	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
-	FIX_TEXT_POKE0,
 	__end_of_fixed_addresses
 };
 
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index d2f49423c5dc..f81b328d9cf4 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -285,6 +285,7 @@ bool aarch64_insn_is_nop(u32 insn);
 int aarch64_insn_read(void *addr, u32 *insnp);
 int aarch64_insn_write(void *addr, u32 insn);
 enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
+u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn);
 u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 				  u32 insn, u64 imm);
 u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
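A hedged sketch of how the new decoder pairs with the existing encoder (the instruction word is illustrative: 0x91000000 is `add x0, x0, #0`, whose imm12 field occupies bits [21:10]):

	u32 insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12,
						 0x91000000, 0x123);
	u64 imm  = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_12, insn);
	/* imm == 0x123: for a given imm type, decode inverts encode */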
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 36250705dc4c..61505676d085 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -68,6 +68,8 @@
 #include <asm/pgalloc.h>
 #include <asm/cachetype.h>
 #include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
 
 #define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
 
@@ -269,5 +271,36 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
+static inline bool __kvm_cpu_uses_extended_idmap(void)
+{
+	return __cpu_uses_extended_idmap();
+}
+
+static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
+				       pgd_t *hyp_pgd,
+				       pgd_t *merged_hyp_pgd,
+				       unsigned long hyp_idmap_start)
+{
+	int idmap_idx;
+
+	/*
+	 * Use the first entry to access the HYP mappings. It is
+	 * guaranteed to be free, otherwise we wouldn't use an
+	 * extended idmap.
+	 */
+	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
+	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);
+
+	/*
+	 * Create another extended level entry that points to the boot HYP map,
+	 * which contains an ID mapping of the HYP init code. We essentially
+	 * merge the boot and runtime HYP maps by doing so, but they don't
+	 * overlap anyway, so this is fine.
+	 */
+	idmap_idx = hyp_idmap_start >> VA_BITS;
+	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
+	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
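To make the idmap_idx arithmetic concrete (numbers illustrative): each entry of the extended-idmap PGD covers 2^VA_BITS bytes, so HYP init code placed high in physical memory lands at a small, otherwise-unused index:

	/* With VA_BITS == 39, each top-level entry spans 512 GB. */
	unsigned long hyp_idmap_start = 0x10000000000UL;	/* 1 TB, example */
	int idmap_idx = hyp_idmap_start >> 39;			/* == 2 */
	/* entry 0: runtime HYP tables; entry 2: boot HYP/idmap tables */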
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 101a42bde728..8ec41e5f56f0 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void)
 	: "r" (ttbr));
 }
 
+/*
+ * TCR.T0SZ value to use when the ID map is active. Usually equals
+ * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
+ * physical memory, in which case it will be smaller.
+ */
+extern u64 idmap_t0sz;
+
+static inline bool __cpu_uses_extended_idmap(void)
+{
+	return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
+		unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
+}
+
+static inline void __cpu_set_tcr_t0sz(u64 t0sz)
+{
+	unsigned long tcr;
+
+	if (__cpu_uses_extended_idmap())
+		asm volatile (
+		"	mrs	%0, tcr_el1	;"
+		"	bfi	%0, %1, %2, %3	;"
+		"	msr	tcr_el1, %0	;"
+		"	isb"
+		: "=&r" (tcr)
+		: "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+}
+
+/*
+ * Set TCR.T0SZ to the value appropriate for activating the identity map.
+ */
+static inline void cpu_set_idmap_tcr_t0sz(void)
+{
+	__cpu_set_tcr_t0sz(idmap_t0sz);
+}
+
+/*
+ * Set TCR.T0SZ to its default value (based on VA_BITS)
+ */
+static inline void cpu_set_default_tcr_t0sz(void)
+{
+	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS));
+}
+
 static inline void switch_new_context(struct mm_struct *mm)
 {
 	unsigned long flags;
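The mrs/bfi/msr sequence above is just a read-modify-write of a single TCR field; expressed in plain C (illustrative only, since the real value has to go through the TCR_EL1 system register):

	static inline u64 tcr_set_t0sz(u64 tcr, u64 t0sz)
	{
		u64 mask = ((1UL << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET;

		/* what `bfi tcr, t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH` computes */
		return (tcr & ~mask) | ((t0sz << TCR_T0SZ_OFFSET) & mask);
	}

The trailing isb ensures the new translation-control value takes effect before any subsequent instruction relies on it.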
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 8fc8fa280e92..7d9c7e4a424b 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -33,7 +33,9 @@
  * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
  * map the kernel. With the 64K page configuration, swapper and idmap need to
  * map to pte level. The swapper also maps the FDT (see __create_page_tables
- * for more information).
+ * for more information). Note that the number of ID map translation levels
+ * could be increased on the fly if system RAM is out of reach for the default
+ * VA range, so 3 pages are reserved in all cases.
  */
 #ifdef CONFIG_ARM64_64K_PAGES
 #define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
@@ -42,7 +44,7 @@
 #endif
 
 #define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
-#define IDMAP_DIR_SIZE		(SWAPPER_DIR_SIZE)
+#define IDMAP_DIR_SIZE		(3 * PAGE_SIZE)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 80f3d241cff8..59bfae75dc98 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -143,7 +143,12 @@
 /*
  * TCR flags.
  */
-#define TCR_TxSZ(x)		(((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0))
+#define TCR_T0SZ_OFFSET		0
+#define TCR_T1SZ_OFFSET		16
+#define TCR_T0SZ(x)		((UL(64) - (x)) << TCR_T0SZ_OFFSET)
+#define TCR_T1SZ(x)		((UL(64) - (x)) << TCR_T1SZ_OFFSET)
+#define TCR_TxSZ(x)		(TCR_T0SZ(x) | TCR_T1SZ(x))
+#define TCR_TxSZ_WIDTH		6
 #define TCR_IRGN_NC		((UL(0) << 8) | (UL(0) << 24))
 #define TCR_IRGN_WBWA		((UL(1) << 8) | (UL(1) << 24))
 #define TCR_IRGN_WT		((UL(2) << 8) | (UL(2) << 24))
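A standalone userspace check of the encoding (mirrors the macros above; TnSZ is 64 minus the VA width, so 48-bit VAs give 16 and 39-bit give 25):

	#include <stdio.h>

	#define UL(x)		((unsigned long)(x))
	#define TCR_T0SZ(x)	((UL(64) - (x)) << 0)
	#define TCR_T1SZ(x)	((UL(64) - (x)) << 16)
	#define TCR_TxSZ(x)	(TCR_T0SZ(x) | TCR_T1SZ(x))

	int main(void)
	{
		printf("T0SZ(39) = %lu\n", TCR_T0SZ(39));	/* 25 */
		printf("TxSZ(48) = %#lx\n", TCR_TxSZ(48));	/* 0x100010 */
		return 0;
	}

Splitting TCR_TxSZ into its T0SZ/T1SZ halves is what lets __cpu_set_tcr_t0sz() widen only the TTBR0 (identity map) half of the address space.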
diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h
index e6f087806aaf..b7710a59672c 100644
--- a/arch/arm64/include/asm/pmu.h
+++ b/arch/arm64/include/asm/pmu.h
@@ -44,6 +44,7 @@ struct pmu_hw_events {
 struct arm_pmu {
 	struct pmu	pmu;
 	cpumask_t	active_irqs;
+	int		*irq_affinity;
 	const char	*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
 	void		(*enable)(struct hw_perf_event *evt, int idx);
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 941c375616e2..220633b791b8 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -45,15 +45,6 @@ do {							\
 	cpu_do_switch_mm(virt_to_phys(pgd),mm);		\
 } while (0)
 
-#define cpu_get_pgd()					\
-({							\
-	unsigned long pg;				\
-	asm("mrs	%0, ttbr0_el1\n"		\
-	: "=r" (pg));					\
-	pg &= ~0xffff000000003ffful;			\
-	(pgd_t *)phys_to_virt(pg);			\
-})
-
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 20e9591a60cf..d2c37a1df0eb 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -127,7 +127,11 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
-#define cpu_relax()			barrier()
+static inline void cpu_relax(void)
+{
+	asm volatile("yield" ::: "memory");
+}
+
 #define cpu_relax_lowlatency()		cpu_relax()
 
 /* Thread switching */
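The typical caller pattern that benefits from the yield hint (the flag here is a hypothetical shared variable): busy-wait loops invoke cpu_relax() on every iteration, and on SMT or virtualized cores the yield lets the sibling thread or host make progress while preserving the old compiler-barrier semantics:

	while (!READ_ONCE(flag))
		cpu_relax();	/* yield hint + "memory" clobber as barrier */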
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 59e282311b58..8dcd61e32176 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void)
 extern u64 __cpu_logical_map[NR_CPUS];
 #define cpu_logical_map(cpu)	__cpu_logical_map[cpu]
 
+void __init do_post_cpus_up_work(void);
+
 #endif /* __ASM_SMP_PLAT_H */
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 27224426e0bf..cef934a90f17 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -406,7 +406,7 @@ __SYSCALL(__NR_vfork, sys_vfork)
 #define __NR_ugetrlimit 191	/* SuS compliant getrlimit */
 __SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit)	/* SuS compliant getrlimit */
 #define __NR_mmap2 192
-__SYSCALL(__NR_mmap2, sys_mmap_pgoff)
+__SYSCALL(__NR_mmap2, compat_sys_mmap2_wrapper)
 #define __NR_truncate64 193
 __SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper)
 #define __NR_ftruncate64 194