aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm64
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-05-14 21:35:33 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-05-14 21:35:33 -0400
commitfc8c540b1d66fe5f44da916338423ec62946e8b5 (patch)
tree4a1e3f5abdbb023595587db6bb63eef0e749e2d6 /arch/arm64
parent3c25a75ee047c3393cc5b995c6c68c8cbdaf02fa (diff)
parent4801ba338acad2e69e905e0c537e8ba2682c4e65 (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 - fix potential memory leak in perf PMU probing
 - BPF sign extension fix for 64-bit immediates
 - fix build failure with unusual configuration
 - revert unused and broken branch patching from alternative code
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: perf: fix memory leak when probing PMU PPIs
  arm64: bpf: fix signedness bug in loading 64-bit immediate
  arm64: mm: Fix build error with CONFIG_SPARSEMEM_VMEMMAP disabled
  Revert "arm64: alternative: Allow immediate branch as alternative instruction"
Diffstat (limited to 'arch/arm64')
-rw-r--r--arch/arm64/kernel/alternative.c53
-rw-r--r--arch/arm64/kernel/perf_event.c8
-rw-r--r--arch/arm64/mm/dump.c2
-rw-r--r--arch/arm64/net/bpf_jit_comp.c2
4 files changed, 8 insertions, 57 deletions
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 21033bba9390..28f8365edc4c 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -24,7 +24,6 @@
24#include <asm/cacheflush.h> 24#include <asm/cacheflush.h>
25#include <asm/alternative.h> 25#include <asm/alternative.h>
26#include <asm/cpufeature.h> 26#include <asm/cpufeature.h>
27#include <asm/insn.h>
28#include <linux/stop_machine.h> 27#include <linux/stop_machine.h>
29 28
30extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; 29extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
@@ -34,48 +33,6 @@ struct alt_region {
34 struct alt_instr *end; 33 struct alt_instr *end;
35}; 34};
36 35
37/*
38 * Decode the imm field of a b/bl instruction, and return the byte
39 * offset as a signed value (so it can be used when computing a new
40 * branch target).
41 */
42static s32 get_branch_offset(u32 insn)
43{
44 s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
45
46 /* sign-extend the immediate before turning it into a byte offset */
47 return (imm << 6) >> 4;
48}
49
50static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr)
51{
52 u32 insn;
53
54 aarch64_insn_read(altinsnptr, &insn);
55
56 /* Stop the world on instructions we don't support... */
57 BUG_ON(aarch64_insn_is_cbz(insn));
58 BUG_ON(aarch64_insn_is_cbnz(insn));
59 BUG_ON(aarch64_insn_is_bcond(insn));
60 /* ... and there is probably more. */
61
62 if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
63 enum aarch64_insn_branch_type type;
64 unsigned long target;
65
66 if (aarch64_insn_is_b(insn))
67 type = AARCH64_INSN_BRANCH_NOLINK;
68 else
69 type = AARCH64_INSN_BRANCH_LINK;
70
71 target = (unsigned long)altinsnptr + get_branch_offset(insn);
72 insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr,
73 target, type);
74 }
75
76 return insn;
77}
78
79static int __apply_alternatives(void *alt_region) 36static int __apply_alternatives(void *alt_region)
80{ 37{
81 struct alt_instr *alt; 38 struct alt_instr *alt;
@@ -83,9 +40,6 @@ static int __apply_alternatives(void *alt_region)
83 u8 *origptr, *replptr; 40 u8 *origptr, *replptr;
84 41
85 for (alt = region->begin; alt < region->end; alt++) { 42 for (alt = region->begin; alt < region->end; alt++) {
86 u32 insn;
87 int i;
88
89 if (!cpus_have_cap(alt->cpufeature)) 43 if (!cpus_have_cap(alt->cpufeature))
90 continue; 44 continue;
91 45
@@ -95,12 +49,7 @@ static int __apply_alternatives(void *alt_region)
95 49
96 origptr = (u8 *)&alt->orig_offset + alt->orig_offset; 50 origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
97 replptr = (u8 *)&alt->alt_offset + alt->alt_offset; 51 replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
98 52 memcpy(origptr, replptr, alt->alt_len);
99 for (i = 0; i < alt->alt_len; i += sizeof(insn)) {
100 insn = get_alt_insn(origptr + i, replptr + i);
101 aarch64_insn_write(origptr + i, insn);
102 }
103
104 flush_icache_range((uintptr_t)origptr, 53 flush_icache_range((uintptr_t)origptr,
105 (uintptr_t)(origptr + alt->alt_len)); 54 (uintptr_t)(origptr + alt->alt_len));
106 } 55 }
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 23f25acf43a9..cce18c85d2e8 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1315,15 +1315,15 @@ static int armpmu_device_probe(struct platform_device *pdev)
1315 if (!cpu_pmu) 1315 if (!cpu_pmu)
1316 return -ENODEV; 1316 return -ENODEV;
1317 1317
1318 irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
1319 if (!irqs)
1320 return -ENOMEM;
1321
1322 /* Don't bother with PPIs; they're already affine */ 1318 /* Don't bother with PPIs; they're already affine */
1323 irq = platform_get_irq(pdev, 0); 1319 irq = platform_get_irq(pdev, 0);
1324 if (irq >= 0 && irq_is_percpu(irq)) 1320 if (irq >= 0 && irq_is_percpu(irq))
1325 return 0; 1321 return 0;
1326 1322
1323 irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
1324 if (!irqs)
1325 return -ENOMEM;
1326
1327 for (i = 0; i < pdev->num_resources; ++i) { 1327 for (i = 0; i < pdev->num_resources; ++i) {
1328 struct device_node *dn; 1328 struct device_node *dn;
1329 int cpu; 1329 int cpu;
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 74c256744b25..f3d6221cd5bd 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -328,10 +328,12 @@ static int ptdump_init(void)
328 for (j = 0; j < pg_level[i].num; j++) 328 for (j = 0; j < pg_level[i].num; j++)
329 pg_level[i].mask |= pg_level[i].bits[j].mask; 329 pg_level[i].mask |= pg_level[i].bits[j].mask;
330 330
331#ifdef CONFIG_SPARSEMEM_VMEMMAP
331 address_markers[VMEMMAP_START_NR].start_address = 332 address_markers[VMEMMAP_START_NR].start_address =
332 (unsigned long)virt_to_page(PAGE_OFFSET); 333 (unsigned long)virt_to_page(PAGE_OFFSET);
333 address_markers[VMEMMAP_END_NR].start_address = 334 address_markers[VMEMMAP_END_NR].start_address =
334 (unsigned long)virt_to_page(high_memory); 335 (unsigned long)virt_to_page(high_memory);
336#endif
335 337
336 pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, 338 pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
337 &ptdump_fops); 339 &ptdump_fops);
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index edba042b2325..dc6a4842683a 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -487,7 +487,7 @@ emit_cond_jmp:
487 return -EINVAL; 487 return -EINVAL;
488 } 488 }
489 489
490 imm64 = (u64)insn1.imm << 32 | imm; 490 imm64 = (u64)insn1.imm << 32 | (u32)imm;
491 emit_a64_mov_i64(dst, imm64, ctx); 491 emit_a64_mov_i64(dst, imm64, ctx);
492 492
493 return 1; 493 return 1;