author     Ard Biesheuvel <ard.biesheuvel@linaro.org>    2017-11-20 12:41:29 -0500
committer  Will Deacon <will.deacon@arm.com>             2017-12-01 07:30:21 -0500
commit     7e8b9c1d2e2f5f45db7d40b50d14f606097c25de (patch)
tree       bbce43dfb8ebcfc44fcfb837363205628a608592
parent     f81a348728ec5ac43f3bbcf81c97d52baba253f7 (diff)
arm64: module-plts: factor out PLT generation code for ftrace
To allow the ftrace trampoline code to reuse the PLT entry routines, factor
it out and move it into asm/module.h.

Cc: <stable@vger.kernel.org>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--   arch/arm64/include/asm/module.h   44
-rw-r--r--   arch/arm64/kernel/module-plts.c   38
2 files changed, 46 insertions(+), 36 deletions(-)
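As a rough illustration of the intent (my own sketch, not part of this patch): once get_plt_entry() and plt_entries_equal() live in asm/module.h, a caller such as the arm64 ftrace trampoline code could reuse them along these lines. The function and variable names below are hypothetical.

/*
 * Hypothetical sketch only -- not from this patch. Shows how code outside
 * module-plts.c could reuse the factored-out PLT helpers.
 */
#include <asm/module.h>

static struct plt_entry trampoline_plt;		/* hypothetical veneer slot */

static void update_trampoline_plt(unsigned long target)
{
	struct plt_entry entry = get_plt_entry((u64)target);

	/* Only rewrite the veneer if the branch target actually changed. */
	if (!plt_entries_equal(&trampoline_plt, &entry))
		trampoline_plt = entry;
}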
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 19bd97671bb8..11d4aaee82e1 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -45,4 +45,48 @@ extern u64 module_alloc_base;
 #define module_alloc_base	((u64)_etext - MODULES_VSIZE)
 #endif
 
+struct plt_entry {
+	/*
+	 * A program that conforms to the AArch64 Procedure Call Standard
+	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
+	 * IP1 (x17) may be inserted at any branch instruction that is
+	 * exposed to a relocation that supports long branches. Since that
+	 * is exactly what we are dealing with here, we are free to use x16
+	 * as a scratch register in the PLT veneers.
+	 */
+	__le32	mov0;	/* movn	x16, #0x....			*/
+	__le32	mov1;	/* movk	x16, #0x...., lsl #16		*/
+	__le32	mov2;	/* movk	x16, #0x...., lsl #32		*/
+	__le32	br;	/* br	x16				*/
+};
+
+static inline struct plt_entry get_plt_entry(u64 val)
+{
+	/*
+	 * MOVK/MOVN/MOVZ opcode:
+	 * +--------+------------+--------+-----------+-------------+---------+
+	 * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
+	 * +--------+------------+--------+-----------+-------------+---------+
+	 *
+	 * Rd  := 0x10 (x16)
+	 * hw  := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
+	 * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
+	 * sf  := 1 (64-bit variant)
+	 */
+	return (struct plt_entry){
+		cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
+		cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
+		cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
+		cpu_to_le32(0xd61f0200)
+	};
+}
+
+static inline bool plt_entries_equal(const struct plt_entry *a,
+				     const struct plt_entry *b)
+{
+	return a->mov0 == b->mov0 &&
+	       a->mov1 == b->mov1 &&
+	       a->mov2 == b->mov2;
+}
+
 #endif	/* __ASM_MODULE_H */
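Worked example (my own illustration, not part of the patch) of the encoding described in the MOVK/MOVN comment above: for a hypothetical branch target of 0xffff000008081000, get_plt_entry() yields movn x16, #0xefff; movk x16, #0x808, lsl #16; movk x16, #0x0, lsl #32; br x16, i.e. the words 0x929dfff0, 0xf2a10110, 0xf2c00010, 0xd61f0200. The standalone program below replicates the arithmetic in userspace so the words can be inspected (cpu_to_le32 is dropped; a little-endian host is assumed). Note that the sequence relies on MOVN leaving bits [63:48] all ones, which is the case for arm64 kernel and module addresses.

/*
 * Standalone illustration -- not part of the patch. Replicates the
 * get_plt_entry() bit manipulation in userspace for one example address.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0xffff000008081000ULL;	/* hypothetical branch target */
	uint32_t insn[4] = {
		0x92800010 | (uint32_t)(((~val      ) & 0xffff) << 5),	/* movn x16, #0xefff         */
		0xf2a00010 | (uint32_t)((( val >> 16) & 0xffff) << 5),	/* movk x16, #0x808, lsl #16 */
		0xf2c00010 | (uint32_t)((( val >> 32) & 0xffff) << 5),	/* movk x16, #0x0,   lsl #32 */
		0xd61f0200,						/* br   x16                  */
	};

	for (int i = 0; i < 4; i++)
		printf("0x%08x\n", insn[i]);	/* 929dfff0 f2a10110 f2c00010 d61f0200 */

	return 0;
}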
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index d05dbe658409..ebff6c155cac 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -11,21 +11,6 @@
 #include <linux/module.h>
 #include <linux/sort.h>
 
-struct plt_entry {
-	/*
-	 * A program that conforms to the AArch64 Procedure Call Standard
-	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
-	 * IP1 (x17) may be inserted at any branch instruction that is
-	 * exposed to a relocation that supports long branches. Since that
-	 * is exactly what we are dealing with here, we are free to use x16
-	 * as a scratch register in the PLT veneers.
-	 */
-	__le32	mov0;	/* movn	x16, #0x....			*/
-	__le32	mov1;	/* movk	x16, #0x...., lsl #16		*/
-	__le32	mov2;	/* movk	x16, #0x...., lsl #32		*/
-	__le32	br;	/* br	x16				*/
-};
-
 static bool in_init(const struct module *mod, void *loc)
 {
 	return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
@@ -40,33 +25,14 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
 	int i = pltsec->plt_num_entries;
 	u64 val = sym->st_value + rela->r_addend;
 
-	/*
-	 * MOVK/MOVN/MOVZ opcode:
-	 * +--------+------------+--------+-----------+-------------+---------+
-	 * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
-	 * +--------+------------+--------+-----------+-------------+---------+
-	 *
-	 * Rd  := 0x10 (x16)
-	 * hw  := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
-	 * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
-	 * sf  := 1 (64-bit variant)
-	 */
-	plt[i] = (struct plt_entry){
-		cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
-		cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
-		cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
-		cpu_to_le32(0xd61f0200)
-	};
+	plt[i] = get_plt_entry(val);
 
 	/*
 	 * Check if the entry we just created is a duplicate. Given that the
 	 * relocations are sorted, this will be the last entry we allocated.
 	 * (if one exists).
 	 */
-	if (i > 0 &&
-	    plt[i].mov0 == plt[i - 1].mov0 &&
-	    plt[i].mov1 == plt[i - 1].mov1 &&
-	    plt[i].mov2 == plt[i - 1].mov2)
+	if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
 		return (u64)&plt[i - 1];
 
 	pltsec->plt_num_entries++;
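For completeness, a small userspace sketch (my own, not kernel code) of the duplicate check that the simplified call site above now performs via plt_entries_equal(): with sorted relocations, two consecutive references to the same target generate identical entries, so comparing the new entry against the previously emitted one is enough to reuse it, and br is always "br x16" so it does not need to be compared.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct plt_entry { uint32_t mov0, mov1, mov2, br; };

/* Same shape as the kernel helper: br is constant, so compare only the movs. */
static bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b)
{
	return a->mov0 == b->mov0 && a->mov1 == b->mov1 && a->mov2 == b->mov2;
}

/* Emit into plt[], reusing the previous slot when the entry is a duplicate. */
static int emit_plt_entry(struct plt_entry *plt, int *num, struct plt_entry entry)
{
	int i = *num;

	plt[i] = entry;
	if (i > 0 && plt_entries_equal(&plt[i], &plt[i - 1]))
		return i - 1;

	(*num)++;
	return i;
}

int main(void)
{
	struct plt_entry plt[4];
	struct plt_entry same = { 0x929dfff0, 0xf2a10110, 0xf2c00010, 0xd61f0200 };
	int num = 0;

	int a = emit_plt_entry(plt, &num, same);
	int b = emit_plt_entry(plt, &num, same);	/* same target again */

	printf("first=%d second=%d entries=%d\n", a, b, num);	/* first=0 second=0 entries=1 */
	return 0;
}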