author		Will Deacon <will.deacon@arm.com>	2012-03-05 06:49:33 -0500
committer	Catalin Marinas <catalin.marinas@arm.com>	2012-09-17 08:42:19 -0400
commit		257cb251925f854da435cbf79b140984413871ac (patch)
tree		42da7bf9d7a81060d8d31bc67f12cb0399e69f6b /arch/arm64/kernel
parent		f27bb139c3876806a2c82e979d2dbbece44c66df (diff)
arm64: Loadable modules
This patch adds support for loadable modules. Loadable modules are loaded
64MB below the kernel image due to branch relocation restrictions (see
Documentation/arm64/memory.txt).

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--	arch/arm64/kernel/module.c	456
1 file changed, 456 insertions(+), 0 deletions(-)
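The 64MB figure follows from AArch64 branch range: B and BL encode a 26-bit signed word offset, giving a reach of +/-128MB from the branch instruction. A rough sketch of that arithmetic (illustrative only; these macro names are invented, not part of the patch):

	/* BL: 26-bit signed immediate, scaled by 4 bytes. */
	#define BL_IMM_BITS	26
	#define BL_RANGE	(1UL << (BL_IMM_BITS - 1 + 2))	/* 2^27 bytes = 128MB each way */

Keeping modules within 64MB of the kernel image leaves every module-to-kernel and kernel-to-module call comfortably inside that window.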
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
new file mode 100644
index 000000000000..ca0e3d55da99
--- /dev/null
+++ b/arch/arm64/kernel/module.c
@@ -0,0 +1,456 @@
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>

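/*
 * Allocate module memory from the dedicated [MODULES_VADDR, MODULES_END)
 * window, keeping every module within branch range of the kernel image.
 */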
void *module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
				    __builtin_return_address(0));
}

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

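/*
 * Compute the raw relocation value:
 *   RELOC_OP_ABS:	S + A			(absolute)
 *   RELOC_OP_PREL:	S + A - P		(place-relative)
 *   RELOC_OP_PAGE:	Page(S + A) - Page(P)	(4KB page-relative, for ADRP)
 */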
static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	/* Avoid shifting by the full type width, which is undefined for len == 64. */
	u64 imm_mask = (1ULL << (len - 1) << 1) - 1;
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		break;
	case 32:
		*(s32 *)place = sval;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the value is not representable in
	 * len bits (i.e. the bottom len bits are not sign-extended and
	 * the top bits are not all zero).
	 */
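	/*
	 * Worked example for len = 16: sval = 0x8000 does not fit in an
	 * s16; the shift above leaves sval = 1, so (u64)(sval + 1) == 2
	 * and we return -ERANGE. sval = 0x7fff or sval = -0x8000 leaves
	 * sval = 0 or -1 respectively, and the check passes.
	 */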
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

enum aarch64_imm_type {
	INSN_IMM_MOVNZ,
	INSN_IMM_MOVK,
	INSN_IMM_ADR,
	INSN_IMM_26,
	INSN_IMM_19,
	INSN_IMM_16,
	INSN_IMM_14,
	INSN_IMM_12,
	INSN_IMM_9,
};

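/*
 * Patch an immediate into the relevant field of a 32-bit instruction.
 * For each encoding, 'mask' gives the width of the immediate field and
 * 'shift' its position within the instruction word.
 */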
static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
{
	u32 immlo, immhi, lomask, himask, mask;
	int shift;

	switch (type) {
	case INSN_IMM_MOVNZ:
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if ((s64)imm >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field.
			 */
			imm = ~imm;
		}
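		/* Fall through: MOVN/MOVZ share MOVK's immediate field layout. */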
	case INSN_IMM_MOVK:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case INSN_IMM_ADR:
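		/*
		 * ADR/ADRP split the immediate: imm[1:0] is encoded at
		 * bits [30:29] and imm[20:2] at bits [23:5]. Build a
		 * combined field so the shift by 5 below lands both
		 * halves in the right place.
		 */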
		lomask = 0x3;
		himask = 0x7ffff;
		immlo = imm & lomask;
		imm >>= 2;
		immhi = imm & himask;
		imm = (immlo << 24) | (immhi);
		mask = (lomask << 24) | (himask);
		shift = 5;
		break;
	case INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	default:
		pr_err("encode_insn_immediate: unknown immediate encoding %d\n",
			type);
		return 0;
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}

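/*
 * Apply a MOVW-class relocation: patch the 16-bit slice of the value
 * starting at bit 'lsb' into the MOVZ/MOVN/MOVK instruction at 'place',
 * then check that the bits above the slice are still representable.
 */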
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
			   int lsb, enum aarch64_imm_type imm_type)
{
	u64 imm, limit = 0;
	s64 sval;
	u32 insn = *(u32 *)place;

	sval = do_reloc(op, place, val);
	sval >>= lsb;
	imm = sval & 0xffff;

	/* Update the instruction with the new encoding. */
	*(u32 *)place = encode_insn_immediate(imm_type, insn, imm);

	/* Shift out the immediate field. */
	sval >>= 16;

	/*
	 * For unsigned immediates, the overflow check is straightforward.
	 * For signed immediates, the sign bit is actually the bit past the
	 * most significant bit of the field.
	 * The INSN_IMM_16 immediate type is unsigned.
	 */
	if (imm_type != INSN_IMM_16) {
		sval++;
		limit++;
	}

	/* Check the upper bits depending on the sign of the immediate. */
	if ((u64)sval > limit)
		return -ERANGE;

	return 0;
}

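/*
 * Apply an immediate-class relocation: patch 'len' bits of the value,
 * starting at bit 'lsb', into the immediate field of the instruction
 * at 'place'.
 */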
static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
			  int lsb, int len, enum aarch64_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = *(u32 *)place;

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	*(u32 *)place = encode_insn_immediate(imm_type, insn, imm);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

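/*
 * Called by the generic module loader for each RELA section in the
 * module: resolve every relocation against the final symbol values and
 * patch the instructions and data in place.
 */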
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
					     INSN_IMM_ADR);
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     INSN_IMM_26);
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %llx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
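
For reference, here is a minimal, standalone sketch of how an R_AARCH64_CALL26 relocation resolves under the scheme above. It mirrors reloc_insn_imm with RELOC_OP_PREL, lsb = 2, len = 26, but shares no code with the patch; all addresses are made up for illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t P = 0x10000;		/* address being patched (place) */
		uint64_t S_plus_A = 0x10400;	/* symbol value + addend */
		int64_t off = (int64_t)(S_plus_A - P) >> 2;	/* word offset, lsb = 2 */
		uint32_t insn = 0x94000000;	/* BL with a zero immediate */

		/* INSN_IMM_26: 26-bit field at shift 0. */
		insn |= (uint32_t)(off & ((1u << 26) - 1));
		printf("patched insn: 0x%08x\n", (unsigned)insn);	/* prints 0x94000100 */
		return 0;
	}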