author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-08 05:34:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-08 05:34:24 -0400
commit     6325e940e7e0c690c6bdfaf5d54309e71845d3d9
tree       bd1d2c33ae9420e98d3feee1f924fdad3f22552f  /arch/arm64
parent     536fd93d432858eb6b7c1ad1dcfe051840ebef47
parent     0a6479b0ffad8dd236915e271faaf2cbb4cac287
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
- eBPF JIT compiler for arm64
- CPU suspend backend for PSCI (firmware interface) with standard idle
states defined in DT (generic idle driver to be merged via a
different tree)
- Support for CONFIG_DEBUG_SET_MODULE_RONX
- Support for unmapped cpu-release-addr (outside kernel linear mapping)
- set_arch_dma_coherent_ops() implemented and bus notifiers removed
- EFI_STUB improvements when base of DRAM is occupied
- Fixes for typos in KGDB macros
- Clean-up to (partially) allow kernel building with LLVM
- Other clean-ups (extern keyword, phys_addr_t usage)
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (51 commits)
arm64: Remove unneeded extern keyword
ARM64: make of_device_ids const
arm64: Use phys_addr_t type for physical address
aarch64: filter $x from kallsyms
arm64: Use DMA_ERROR_CODE to denote failed allocation
arm64: Fix typos in KGDB macros
arm64: insn: Add return statements after BUG_ON()
arm64: debug: don't re-enable debug exceptions on return from el1_dbg
Revert "arm64: dmi: Add SMBIOS/DMI support"
arm64: Implement set_arch_dma_coherent_ops() to replace bus notifiers
of: amba: use of_dma_configure for AMBA devices
arm64: dmi: Add SMBIOS/DMI support
arm64: Correct ftrace calls to aarch64_insn_gen_branch_imm()
arm64:mm: initialize max_mapnr using function set_max_mapnr
setup: Move unmask of async interrupts after possible earlycon setup
arm64: LLVMLinux: Fix inline arm64 assembly for use with clang
arm64: pageattr: Correctly adjust unaligned start addresses
net: bpf: arm64: fix module memory leak when JIT image build fails
arm64: add PSCI CPU_SUSPEND based cpu_suspend support
arm64: kernel: introduce cpu_init_idle CPU operation
...
Diffstat (limited to 'arch/arm64')
46 files changed, 2270 insertions, 178 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fd4e81a4e1ce..f0d3a2d85a5b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -35,6 +35,7 @@ config ARM64
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_BPF_JIT
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_DEBUG_BUGVERBOSE
@@ -252,11 +253,11 @@ config SCHED_SMT
 	  places. If unsure say N here.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-32)"
-	range 2 32
+	int "Maximum number of CPUs (2-64)"
+	range 2 64
 	depends on SMP
 	# These have to remain sorted largest to smallest
-	default "8"
+	default "64"
 
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 4ee8e90b7a45..0a12933e50ed 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -43,4 +43,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
 	  of TEXT_OFFSET and platforms must not require a specific
 	  value.
 
+config DEBUG_SET_MODULE_RONX
+	bool "Set loadable kernel module data as NX and text as RO"
+	depends on MODULES
+	help
+	  This option helps catch unintended modifications to loadable
+	  kernel module's text and read-only data. It also prevents execution
+	  of module data. Such protection may interfere with run-time code
+	  patching and dynamic kernel tracing - and they might also protect
+	  against certain classes of kernel exploits.
+	  If in doubt, say "N".
+
 endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 2df5e5daeebe..59c86b6b3052 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -47,6 +47,7 @@ endif
 export	TEXT_OFFSET GZFLAGS
 
 core-y		+= arch/arm64/kernel/ arch/arm64/mm/
+core-$(CONFIG_NET) += arch/arm64/net/
 core-$(CONFIG_KVM) += arch/arm64/kvm/
 core-$(CONFIG_XEN) += arch/arm64/xen/
 core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index f2defe1c380c..689b6379188c 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -148,4 +148,8 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
 
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
 #endif
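The four set_memory_* prototypes above are the arch hooks behind the new DEBUG_SET_MODULE_RONX option. A minimal sketch of how a caller could apply them to a freshly loaded module image (the function and its parameters are illustrative, not part of this diff):

	/* Sketch only: mark module text read-only and module data
	 * non-executable. mod_base must be page-aligned;
	 * text_pages/data_pages are page counts. */
	static void protect_module_image(unsigned long mod_base,
					 int text_pages, int data_pages)
	{
		set_memory_ro(mod_base, text_pages);
		set_memory_nx(mod_base + text_pages * PAGE_SIZE, data_pages);
	}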
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index 7a2e0762cb40..4c631a0a3609 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -39,6 +39,26 @@
 
 extern unsigned long __icache_flags;
 
+#define CCSIDR_EL1_LINESIZE_MASK	0x7
+#define CCSIDR_EL1_LINESIZE(x)		((x) & CCSIDR_EL1_LINESIZE_MASK)
+
+#define CCSIDR_EL1_NUMSETS_SHIFT	13
+#define CCSIDR_EL1_NUMSETS_MASK		(0x7fff << CCSIDR_EL1_NUMSETS_SHIFT)
+#define CCSIDR_EL1_NUMSETS(x) \
+	(((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT)
+
+extern u64 __attribute_const__ icache_get_ccsidr(void);
+
+static inline int icache_get_linesize(void)
+{
+	return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr());
+}
+
+static inline int icache_get_numsets(void)
+{
+	return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr());
+}
+
 /*
  * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
  * permitted in the I-cache.
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index d7b4b38a8e86..6f8e2ef9094a 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -28,6 +28,8 @@ struct device_node;
  *		enable-method property.
  * @cpu_init:	Reads any data necessary for a specific enable-method from the
  *		devicetree, for a given cpu node and proposed logical id.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
+ *		devicetree, for a given cpu node and proposed logical id.
  * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
  *		mechanism for doing so, tests whether it is possible to boot
  *		the given CPU.
@@ -47,6 +49,7 @@ struct device_node;
 struct cpu_operations {
 	const char	*name;
 	int		(*cpu_init)(struct device_node *, unsigned int);
+	int		(*cpu_init_idle)(struct device_node *, unsigned int);
 	int		(*cpu_prepare)(unsigned int);
 	int		(*cpu_boot)(unsigned int);
 	void		(*cpu_postboot)(void);
@@ -61,7 +64,7 @@ struct cpu_operations {
 };
 
 extern const struct cpu_operations *cpu_ops[NR_CPUS];
-extern int __init cpu_read_ops(struct device_node *dn, int cpu);
-extern void __init cpu_read_bootcpu_ops(void);
+int __init cpu_read_ops(struct device_node *dn, int cpu);
+void __init cpu_read_bootcpu_ops(void);
 
 #endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
new file mode 100644
index 000000000000..b52a9932e2b1
--- /dev/null
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_CPUIDLE_H
+#define __ASM_CPUIDLE_H
+
+#ifdef CONFIG_CPU_IDLE
+extern int cpu_init_idle(unsigned int cpu);
+#else
+static inline int cpu_init_idle(unsigned int cpu)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 7fb343779498..40ec68aa6870 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -48,11 +48,13 @@
 /*
  * #imm16 values used for BRK instruction generation
  * Allowed values for kgbd are 0x400 - 0x7ff
+ * 0x100: for triggering a fault on purpose (reserved)
  * 0x400: for dynamic BRK instruction
  * 0x401: for compile time BRK instruction
  */
-#define KGDB_DYN_DGB_BRK_IMM		0x400
-#define KDBG_COMPILED_DBG_BRK_IMM	0x401
+#define FAULT_BRK_IMM			0x100
+#define KGDB_DYN_DBG_BRK_IMM		0x400
+#define KGDB_COMPILED_DBG_BRK_IMM	0x401
 
 /*
  * BRK instruction encoding
@@ -61,24 +63,30 @@
 #define AARCH64_BREAK_MON	0xd4200000
 
 /*
+ * BRK instruction for provoking a fault on purpose
+ * Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
+ */
+#define AARCH64_BREAK_FAULT	(AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
+
+/*
  * Extract byte from BRK instruction
  */
-#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
+#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \
 	((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
 
 /*
  * Extract byte from BRK #imm16
  */
-#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
-	(((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
+#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \
+	(((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
 
-#define KGDB_DYN_DGB_BRK_BYTE(x) \
-	(KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
+#define KGDB_DYN_DBG_BRK_BYTE(x) \
+	(KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x))
 
-#define KGDB_DYN_BRK_INS_BYTE0	KGDB_DYN_DGB_BRK_BYTE(0)
-#define KGDB_DYN_BRK_INS_BYTE1	KGDB_DYN_DGB_BRK_BYTE(1)
-#define KGDB_DYN_BRK_INS_BYTE2	KGDB_DYN_DGB_BRK_BYTE(2)
-#define KGDB_DYN_BRK_INS_BYTE3	KGDB_DYN_DGB_BRK_BYTE(3)
+#define KGDB_DYN_BRK_INS_BYTE0	KGDB_DYN_DBG_BRK_BYTE(0)
+#define KGDB_DYN_BRK_INS_BYTE1	KGDB_DYN_DBG_BRK_BYTE(1)
+#define KGDB_DYN_BRK_INS_BYTE2	KGDB_DYN_DBG_BRK_BYTE(2)
+#define KGDB_DYN_BRK_INS_BYTE3	KGDB_DYN_DBG_BRK_BYTE(3)
 
 #define CACHE_FLUSH_IS_SAFE	1
 
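The new fault encoding can be checked by hand: BRK places its #imm16 operand in bits [20:5] of the instruction word, so:

	/* AARCH64_BREAK_FAULT = 0xd4200000 | (0x100 << 5)
	 *                     = 0xd4200000 | 0x2000
	 *                     = 0xd4202000, i.e. "brk #0x100" */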
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index dc82e52acdb3..adeae3f6f0fc 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -52,6 +52,13 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 	dev->archdata.dma_ops = ops;
 }
 
+static inline int set_arch_dma_coherent_ops(struct device *dev)
+{
+	set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+	return 0;
+}
+#define set_arch_dma_coherent_ops	set_arch_dma_coherent_ops
+
 #include <asm-generic/dma-mapping-common.h>
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
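Per the "of: amba: use of_dma_configure for AMBA devices" commit in the list above, the device-tree core rather than per-bus notifiers now installs these ops. A rough sketch of the caller side, assuming the generic of_dma_is_coherent() helper:

	/* Sketch: devices whose DT node carries "dma-coherent" get the
	 * coherent ops; everything else keeps the default swiotlb ops. */
	static void example_of_dma_configure(struct device *dev)
	{
		if (of_dma_is_coherent(dev->of_node))
			set_arch_dma_coherent_ops(dev);
	}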
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index dc1f73b13e74..56a9e63b6c33 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Huawei Ltd.
  * Author: Jiang Liu <liuj97@gmail.com>
  *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -64,12 +66,155 @@ enum aarch64_insn_imm_type {
 	AARCH64_INSN_IMM_14,
 	AARCH64_INSN_IMM_12,
 	AARCH64_INSN_IMM_9,
+	AARCH64_INSN_IMM_7,
+	AARCH64_INSN_IMM_6,
+	AARCH64_INSN_IMM_S,
+	AARCH64_INSN_IMM_R,
 	AARCH64_INSN_IMM_MAX
 };
 
+enum aarch64_insn_register_type {
+	AARCH64_INSN_REGTYPE_RT,
+	AARCH64_INSN_REGTYPE_RN,
+	AARCH64_INSN_REGTYPE_RT2,
+	AARCH64_INSN_REGTYPE_RM,
+	AARCH64_INSN_REGTYPE_RD,
+	AARCH64_INSN_REGTYPE_RA,
+};
+
+enum aarch64_insn_register {
+	AARCH64_INSN_REG_0  = 0,
+	AARCH64_INSN_REG_1  = 1,
+	AARCH64_INSN_REG_2  = 2,
+	AARCH64_INSN_REG_3  = 3,
+	AARCH64_INSN_REG_4  = 4,
+	AARCH64_INSN_REG_5  = 5,
+	AARCH64_INSN_REG_6  = 6,
+	AARCH64_INSN_REG_7  = 7,
+	AARCH64_INSN_REG_8  = 8,
+	AARCH64_INSN_REG_9  = 9,
+	AARCH64_INSN_REG_10 = 10,
+	AARCH64_INSN_REG_11 = 11,
+	AARCH64_INSN_REG_12 = 12,
+	AARCH64_INSN_REG_13 = 13,
+	AARCH64_INSN_REG_14 = 14,
+	AARCH64_INSN_REG_15 = 15,
+	AARCH64_INSN_REG_16 = 16,
+	AARCH64_INSN_REG_17 = 17,
+	AARCH64_INSN_REG_18 = 18,
+	AARCH64_INSN_REG_19 = 19,
+	AARCH64_INSN_REG_20 = 20,
+	AARCH64_INSN_REG_21 = 21,
+	AARCH64_INSN_REG_22 = 22,
+	AARCH64_INSN_REG_23 = 23,
+	AARCH64_INSN_REG_24 = 24,
+	AARCH64_INSN_REG_25 = 25,
+	AARCH64_INSN_REG_26 = 26,
+	AARCH64_INSN_REG_27 = 27,
+	AARCH64_INSN_REG_28 = 28,
+	AARCH64_INSN_REG_29 = 29,
+	AARCH64_INSN_REG_FP = 29, /* Frame pointer */
+	AARCH64_INSN_REG_30 = 30,
+	AARCH64_INSN_REG_LR = 30, /* Link register */
+	AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
+	AARCH64_INSN_REG_SP = 31  /* Stack pointer: as load/store base reg */
+};
+
+enum aarch64_insn_variant {
+	AARCH64_INSN_VARIANT_32BIT,
+	AARCH64_INSN_VARIANT_64BIT
+};
+
+enum aarch64_insn_condition {
+	AARCH64_INSN_COND_EQ = 0x0, /* == */
+	AARCH64_INSN_COND_NE = 0x1, /* != */
+	AARCH64_INSN_COND_CS = 0x2, /* unsigned >= */
+	AARCH64_INSN_COND_CC = 0x3, /* unsigned < */
+	AARCH64_INSN_COND_MI = 0x4, /* < 0 */
+	AARCH64_INSN_COND_PL = 0x5, /* >= 0 */
+	AARCH64_INSN_COND_VS = 0x6, /* overflow */
+	AARCH64_INSN_COND_VC = 0x7, /* no overflow */
+	AARCH64_INSN_COND_HI = 0x8, /* unsigned > */
+	AARCH64_INSN_COND_LS = 0x9, /* unsigned <= */
+	AARCH64_INSN_COND_GE = 0xa, /* signed >= */
+	AARCH64_INSN_COND_LT = 0xb, /* signed < */
+	AARCH64_INSN_COND_GT = 0xc, /* signed > */
+	AARCH64_INSN_COND_LE = 0xd, /* signed <= */
+	AARCH64_INSN_COND_AL = 0xe, /* always */
+};
+
 enum aarch64_insn_branch_type {
 	AARCH64_INSN_BRANCH_NOLINK,
 	AARCH64_INSN_BRANCH_LINK,
+	AARCH64_INSN_BRANCH_RETURN,
+	AARCH64_INSN_BRANCH_COMP_ZERO,
+	AARCH64_INSN_BRANCH_COMP_NONZERO,
+};
+
+enum aarch64_insn_size_type {
+	AARCH64_INSN_SIZE_8,
+	AARCH64_INSN_SIZE_16,
+	AARCH64_INSN_SIZE_32,
+	AARCH64_INSN_SIZE_64,
+};
+
+enum aarch64_insn_ldst_type {
+	AARCH64_INSN_LDST_LOAD_REG_OFFSET,
+	AARCH64_INSN_LDST_STORE_REG_OFFSET,
+	AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX,
+	AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
+	AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
+	AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
+};
+
+enum aarch64_insn_adsb_type {
+	AARCH64_INSN_ADSB_ADD,
+	AARCH64_INSN_ADSB_SUB,
+	AARCH64_INSN_ADSB_ADD_SETFLAGS,
+	AARCH64_INSN_ADSB_SUB_SETFLAGS
+};
+
+enum aarch64_insn_movewide_type {
+	AARCH64_INSN_MOVEWIDE_ZERO,
+	AARCH64_INSN_MOVEWIDE_KEEP,
+	AARCH64_INSN_MOVEWIDE_INVERSE
+};
+
+enum aarch64_insn_bitfield_type {
+	AARCH64_INSN_BITFIELD_MOVE,
+	AARCH64_INSN_BITFIELD_MOVE_UNSIGNED,
+	AARCH64_INSN_BITFIELD_MOVE_SIGNED
+};
+
+enum aarch64_insn_data1_type {
+	AARCH64_INSN_DATA1_REVERSE_16,
+	AARCH64_INSN_DATA1_REVERSE_32,
+	AARCH64_INSN_DATA1_REVERSE_64,
+};
+
+enum aarch64_insn_data2_type {
+	AARCH64_INSN_DATA2_UDIV,
+	AARCH64_INSN_DATA2_SDIV,
+	AARCH64_INSN_DATA2_LSLV,
+	AARCH64_INSN_DATA2_LSRV,
+	AARCH64_INSN_DATA2_ASRV,
+	AARCH64_INSN_DATA2_RORV,
+};
+
+enum aarch64_insn_data3_type {
+	AARCH64_INSN_DATA3_MADD,
+	AARCH64_INSN_DATA3_MSUB,
+};
+
+enum aarch64_insn_logic_type {
+	AARCH64_INSN_LOGIC_AND,
+	AARCH64_INSN_LOGIC_BIC,
+	AARCH64_INSN_LOGIC_ORR,
+	AARCH64_INSN_LOGIC_ORN,
+	AARCH64_INSN_LOGIC_EOR,
+	AARCH64_INSN_LOGIC_EON,
+	AARCH64_INSN_LOGIC_AND_SETFLAGS,
+	AARCH64_INSN_LOGIC_BIC_SETFLAGS
 };
 
 #define __AARCH64_INSN_FUNCS(abbr, mask, val)	\
@@ -78,13 +223,58 @@ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
 static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
 { return (val); }
 
+__AARCH64_INSN_FUNCS(str_reg,	0x3FE0EC00, 0x38206800)
+__AARCH64_INSN_FUNCS(ldr_reg,	0x3FE0EC00, 0x38606800)
+__AARCH64_INSN_FUNCS(stp_post,	0x7FC00000, 0x28800000)
+__AARCH64_INSN_FUNCS(ldp_post,	0x7FC00000, 0x28C00000)
+__AARCH64_INSN_FUNCS(stp_pre,	0x7FC00000, 0x29800000)
+__AARCH64_INSN_FUNCS(ldp_pre,	0x7FC00000, 0x29C00000)
+__AARCH64_INSN_FUNCS(add_imm,	0x7F000000, 0x11000000)
+__AARCH64_INSN_FUNCS(adds_imm,	0x7F000000, 0x31000000)
+__AARCH64_INSN_FUNCS(sub_imm,	0x7F000000, 0x51000000)
+__AARCH64_INSN_FUNCS(subs_imm,	0x7F000000, 0x71000000)
+__AARCH64_INSN_FUNCS(movn,	0x7F800000, 0x12800000)
+__AARCH64_INSN_FUNCS(sbfm,	0x7F800000, 0x13000000)
+__AARCH64_INSN_FUNCS(bfm,	0x7F800000, 0x33000000)
+__AARCH64_INSN_FUNCS(movz,	0x7F800000, 0x52800000)
+__AARCH64_INSN_FUNCS(ubfm,	0x7F800000, 0x53000000)
+__AARCH64_INSN_FUNCS(movk,	0x7F800000, 0x72800000)
+__AARCH64_INSN_FUNCS(add,	0x7F200000, 0x0B000000)
+__AARCH64_INSN_FUNCS(adds,	0x7F200000, 0x2B000000)
+__AARCH64_INSN_FUNCS(sub,	0x7F200000, 0x4B000000)
+__AARCH64_INSN_FUNCS(subs,	0x7F200000, 0x6B000000)
+__AARCH64_INSN_FUNCS(madd,	0x7FE08000, 0x1B000000)
+__AARCH64_INSN_FUNCS(msub,	0x7FE08000, 0x1B008000)
+__AARCH64_INSN_FUNCS(udiv,	0x7FE0FC00, 0x1AC00800)
+__AARCH64_INSN_FUNCS(sdiv,	0x7FE0FC00, 0x1AC00C00)
+__AARCH64_INSN_FUNCS(lslv,	0x7FE0FC00, 0x1AC02000)
+__AARCH64_INSN_FUNCS(lsrv,	0x7FE0FC00, 0x1AC02400)
+__AARCH64_INSN_FUNCS(asrv,	0x7FE0FC00, 0x1AC02800)
+__AARCH64_INSN_FUNCS(rorv,	0x7FE0FC00, 0x1AC02C00)
+__AARCH64_INSN_FUNCS(rev16,	0x7FFFFC00, 0x5AC00400)
+__AARCH64_INSN_FUNCS(rev32,	0x7FFFFC00, 0x5AC00800)
+__AARCH64_INSN_FUNCS(rev64,	0x7FFFFC00, 0x5AC00C00)
+__AARCH64_INSN_FUNCS(and,	0x7F200000, 0x0A000000)
+__AARCH64_INSN_FUNCS(bic,	0x7F200000, 0x0A200000)
+__AARCH64_INSN_FUNCS(orr,	0x7F200000, 0x2A000000)
+__AARCH64_INSN_FUNCS(orn,	0x7F200000, 0x2A200000)
+__AARCH64_INSN_FUNCS(eor,	0x7F200000, 0x4A000000)
+__AARCH64_INSN_FUNCS(eon,	0x7F200000, 0x4A200000)
+__AARCH64_INSN_FUNCS(ands,	0x7F200000, 0x6A000000)
+__AARCH64_INSN_FUNCS(bics,	0x7F200000, 0x6A200000)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
+__AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
+__AARCH64_INSN_FUNCS(cbnz,	0xFE000000, 0x35000000)
+__AARCH64_INSN_FUNCS(bcond,	0xFF000010, 0x54000000)
 __AARCH64_INSN_FUNCS(svc,	0xFFE0001F, 0xD4000001)
 __AARCH64_INSN_FUNCS(hvc,	0xFFE0001F, 0xD4000002)
 __AARCH64_INSN_FUNCS(smc,	0xFFE0001F, 0xD4000003)
 __AARCH64_INSN_FUNCS(brk,	0xFFE0001F, 0xD4200000)
 __AARCH64_INSN_FUNCS(hint,	0xFFFFF01F, 0xD503201F)
+__AARCH64_INSN_FUNCS(br,	0xFFFFFC1F, 0xD61F0000)
+__AARCH64_INSN_FUNCS(blr,	0xFFFFFC1F, 0xD63F0000)
+__AARCH64_INSN_FUNCS(ret,	0xFFFFFC1F, 0xD65F0000)
 
 #undef __AARCH64_INSN_FUNCS
 
@@ -97,8 +287,67 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 				  u32 insn, u64 imm);
 u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
 				enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
+				     enum aarch64_insn_register reg,
+				     enum aarch64_insn_variant variant,
+				     enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
+				     enum aarch64_insn_condition cond);
 u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
 u32 aarch64_insn_gen_nop(void);
+u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
+				enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
+				    enum aarch64_insn_register base,
+				    enum aarch64_insn_register offset,
+				    enum aarch64_insn_size_type size,
+				    enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
+				     enum aarch64_insn_register reg2,
+				     enum aarch64_insn_register base,
+				     int offset,
+				     enum aarch64_insn_variant variant,
+				     enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
+				 enum aarch64_insn_register src,
+				 int imm, enum aarch64_insn_variant variant,
+				 enum aarch64_insn_adsb_type type);
+u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
+			      enum aarch64_insn_register src,
+			      int immr, int imms,
+			      enum aarch64_insn_variant variant,
+			      enum aarch64_insn_bitfield_type type);
+u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
+			      int imm, int shift,
+			      enum aarch64_insn_variant variant,
+			      enum aarch64_insn_movewide_type type);
+u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
+					 enum aarch64_insn_register src,
+					 enum aarch64_insn_register reg,
+					 int shift,
+					 enum aarch64_insn_variant variant,
+					 enum aarch64_insn_adsb_type type);
+u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
+			   enum aarch64_insn_register src,
+			   enum aarch64_insn_variant variant,
+			   enum aarch64_insn_data1_type type);
+u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
+			   enum aarch64_insn_register src,
+			   enum aarch64_insn_register reg,
+			   enum aarch64_insn_variant variant,
+			   enum aarch64_insn_data2_type type);
+u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
+			   enum aarch64_insn_register src,
+			   enum aarch64_insn_register reg1,
+			   enum aarch64_insn_register reg2,
+			   enum aarch64_insn_variant variant,
+			   enum aarch64_insn_data3_type type);
+u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
+					 enum aarch64_insn_register src,
+					 enum aarch64_insn_register reg,
+					 int shift,
+					 enum aarch64_insn_variant variant,
+					 enum aarch64_insn_logic_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
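As a quick illustration of the expanded codegen API that the eBPF JIT builds on (register and immediate values invented for the example):

	/* mov x0, #42 -- movz with a zero shift, 64-bit variant */
	u32 mov = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, 42, 0,
					    AARCH64_INSN_VARIANT_64BIT,
					    AARCH64_INSN_MOVEWIDE_ZERO);
	/* ret -- return through the link register */
	u32 ret = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_LR,
					      AARCH64_INSN_BRANCH_RETURN);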
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index e0ecdcf6632d..f771e8bcad4a 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -243,7 +243,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
  * (PHYS_OFFSET and PHYS_MASK taken into account).
  */
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-extern int valid_phys_addr_range(unsigned long addr, size_t size);
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
 
 extern int devmem_is_allowed(unsigned long pfn);
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
index 3c8aafc1082f..f69f69c8120c 100644
--- a/arch/arm64/include/asm/kgdb.h
+++ b/arch/arm64/include/asm/kgdb.h
@@ -29,7 +29,7 @@
 
 static inline void arch_kgdb_breakpoint(void)
 {
-	asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
+	asm ("brk %0" : : "I" (KGDB_COMPILED_DBG_BRK_IMM));
 }
 
 extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 453a179469a3..5279e5733386 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -26,13 +26,13 @@ static inline void set_my_cpu_offset(unsigned long off)
 static inline unsigned long __my_cpu_offset(void)
 {
 	unsigned long off;
-	register unsigned long *sp asm ("sp");
 
 	/*
 	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
-	asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp));
+	asm("mrs %0, tpidr_el1" : "=r" (off) :
+		"Q" (*(const unsigned long *)current_stack_pointer));
 
 	return off;
 }
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index ffe1ba0506d1..d58e40cde88e 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -149,46 +149,51 @@ extern struct page *empty_zero_page;
 #define pte_valid_not_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 
-static inline pte_t pte_wrprotect(pte_t pte)
+static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
 {
-	pte_val(pte) &= ~PTE_WRITE;
+	pte_val(pte) &= ~pgprot_val(prot);
 	return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
 {
-	pte_val(pte) |= PTE_WRITE;
+	pte_val(pte) |= pgprot_val(prot);
 	return pte;
 }
 
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	return clear_pte_bit(pte, __pgprot(PTE_WRITE));
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	return set_pte_bit(pte, __pgprot(PTE_WRITE));
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	pte_val(pte) &= ~PTE_DIRTY;
-	return pte;
+	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	pte_val(pte) |= PTE_DIRTY;
-	return pte;
+	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-	pte_val(pte) &= ~PTE_AF;
-	return pte;
+	return clear_pte_bit(pte, __pgprot(PTE_AF));
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
-	pte_val(pte) |= PTE_AF;
-	return pte;
+	return set_pte_bit(pte, __pgprot(PTE_AF));
 }
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-	pte_val(pte) |= PTE_SPECIAL;
-	return pte;
+	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
 }
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
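With the two helpers in place, any further pte flag accessor reduces to a one-liner; a hypothetical example in the same style (the helper name is illustrative, not in this tree):

	static inline pte_t pte_mknoexec(pte_t pte)
	{
		return set_pte_bit(pte, __pgprot(PTE_UXN));
	}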
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 0c657bb54597..9a8fd84f8fb2 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -32,6 +32,8 @@ extern void cpu_cache_off(void);
 extern void cpu_do_idle(void);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
 extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+void cpu_soft_restart(phys_addr_t cpu_reset,
+		unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index e9c149c042e0..456d67c1f0fa 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -21,6 +21,7 @@ struct sleep_save_sp {
 	phys_addr_t save_ptr_stash_phys;
 };
 
+extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
 extern void cpu_resume(void);
 extern int cpu_suspend(unsigned long);
 
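cpu_suspend() is the entry point the PSCI CPU_SUSPEND work above plugs into; a rough sketch of an idle driver's enter hook using it (the driver shape is assumed, only cpu_suspend() comes from this diff):

	/* Sketch: hand the idle-state index to the arch back-end, which
	 * saves context and calls into PSCI firmware. */
	static int example_enter_idle(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv, int idx)
	{
		int ret = cpu_suspend(idx);

		return ret ? -1 : idx;	/* non-zero means entry failed */
	}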
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 45108d802f5e..459bf8e53208 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -69,14 +69,19 @@ struct thread_info {
 #define init_stack		(init_thread_union.stack)
 
 /*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+/*
  * how to get the thread information struct from C
  */
 static inline struct thread_info *current_thread_info(void) __attribute_const__;
 
 static inline struct thread_info *current_thread_info(void)
 {
-	register unsigned long sp asm ("sp");
-	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+	return (struct thread_info *)
+		(current_stack_pointer & ~(THREAD_SIZE - 1));
 }
 
 #define thread_saved_pc(tsk)	\
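This is part of the LLVM build clean-up noted in the summary: a single file-scope named-register variable replaces the per-function declarations that clang historically rejected. Any code including this header can now read the stack pointer directly; a hypothetical consumer:

	/* Sketch: estimate current stack usage (the threshold is made up). */
	static inline bool stack_nearly_full(void)
	{
		unsigned long used = THREAD_SIZE -
			(current_stack_pointer & (THREAD_SIZE - 1));

		return used > THREAD_SIZE * 3 / 4;
	}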
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index df7ef8768fc2..6e9538c2d28a 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -26,6 +26,7 @@ arm64-obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o
 arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND)	+= sleep.o suspend.o
+arm64-obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 arm64-obj-$(CONFIG_JUMP_LABEL)		+= jump_label.o
 arm64-obj-$(CONFIG_KGDB)		+= kgdb.o
 arm64-obj-$(CONFIG_EFI)			+= efi.o efi-stub.o efi-entry.o
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
new file mode 100644
index 000000000000..19d17f51db37
--- /dev/null
+++ b/arch/arm64/kernel/cpuidle.c
@@ -0,0 +1,31 @@
+/*
+ * ARM64 CPU idle arch support
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/cpuidle.h>
+#include <asm/cpu_ops.h>
+
+int cpu_init_idle(unsigned int cpu)
+{
+	int ret = -EOPNOTSUPP;
+	struct device_node *cpu_node = of_cpu_device_node_get(cpu);
+
+	if (!cpu_node)
+		return -ENODEV;
+
+	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
+		ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);
+
+	of_node_put(cpu_node);
+	return ret;
+}
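The generic idle driver mentioned at the top of this pull (to be merged via a different tree) is expected to call this hook once per CPU during probe; roughly (the driver glue is assumed, only cpu_init_idle() comes from this diff):

	/* Sketch: bail out if any CPU's enable-method has no idle back-end. */
	static int example_idle_probe(void)
	{
		int cpu, ret;

		for_each_possible_cpu(cpu) {
			ret = cpu_init_idle(cpu);
			if (ret)
				return ret;	/* e.g. -EOPNOTSUPP without PSCI */
		}
		return 0;
	}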
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 177169623026..504fdaa8367e 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -20,8 +20,10 @@
 #include <asm/cputype.h>
 
 #include <linux/bitops.h>
+#include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/preempt.h>
 #include <linux/printk.h>
 #include <linux/smp.h>
 
@@ -47,8 +49,18 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
 	unsigned int cpu = smp_processor_id();
 	u32 l1ip = CTR_L1IP(info->reg_ctr);
 
-	if (l1ip != ICACHE_POLICY_PIPT)
-		set_bit(ICACHEF_ALIASING, &__icache_flags);
+	if (l1ip != ICACHE_POLICY_PIPT) {
+		/*
+		 * VIPT caches are non-aliasing if the VA always equals the PA
+		 * in all bit positions that are covered by the index. This is
+		 * the case if the size of a way (# of sets * line size) does
+		 * not exceed PAGE_SIZE.
+		 */
+		u32 waysize = icache_get_numsets() * icache_get_linesize();
+
+		if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE)
+			set_bit(ICACHEF_ALIASING, &__icache_flags);
+	}
 	if (l1ip == ICACHE_POLICY_AIVIVT)
 		set_bit(ICACHEF_AIVIVT, &__icache_flags);
 
@@ -190,3 +202,15 @@ void __init cpuinfo_store_boot_cpu(void)
 
 	boot_cpu_data = *info;
 }
+
+u64 __attribute_const__ icache_get_ccsidr(void)
+{
+	u64 ccsidr;
+
+	WARN_ON(preemptible());
+
+	/* Select L1 I-cache and read its size ID register */
+	asm("msr csselr_el1, %1; isb; mrs %0, ccsidr_el1"
+	    : "=r"(ccsidr) : "r"(1L));
+	return ccsidr;
+}
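To make the way-size test concrete (cache geometry invented for illustration): a 32 KB, 2-way VIPT I-cache with 64-byte lines has 32768 / (2 * 64) = 256 sets, so

	u32 waysize = 256 * 64;		/* 16384 bytes = 16 KB           */
	bool aliasing = waysize > 4096;	/* 16 KB > 4 KB PAGE_SIZE: true  */

and ICACHEF_ALIASING must be set; an 8 KB 2-way cache (64 sets, 4 KB per way) would instead be treated as non-aliasing.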
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index 1317fef8dde9..d27dd982ff26 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -28,20 +28,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
 	kernel_size = _edata - _text;
 	if (*image_addr != (dram_base + TEXT_OFFSET)) {
 		kernel_memsize = kernel_size + (_end - _edata);
-		status = efi_relocate_kernel(sys_table, image_addr,
-					     kernel_size, kernel_memsize,
-					     dram_base + TEXT_OFFSET,
-					     PAGE_SIZE);
+		status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
+				       SZ_2M, reserve_addr);
 		if (status != EFI_SUCCESS) {
 			pr_efi_err(sys_table, "Failed to relocate kernel\n");
 			return status;
 		}
-		if (*image_addr != (dram_base + TEXT_OFFSET)) {
-			pr_efi_err(sys_table, "Failed to alloc kernel memory\n");
-			efi_free(sys_table, kernel_memsize, *image_addr);
-			return EFI_LOAD_ERROR;
-		}
-		*image_size = kernel_memsize;
+		memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr,
+		       kernel_size);
+		*image_addr = *reserve_addr + TEXT_OFFSET;
+		*reserve_size = kernel_memsize + TEXT_OFFSET;
 	}
 
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f0b5e5120a87..726b910fe6ec 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -324,7 +324,6 @@ el1_dbg:
 	mrs	x0, far_el1
 	mov	x2, sp				// struct pt_regs
 	bl	do_debug_exception
-	enable_dbg
 	kernel_exit 1
 el1_inv:
 	// TODO: add support for undefined instructions in kernel mode
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 7924d73b6476..cf8556ae09d0 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -58,7 +58,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	u32 new;
 
 	pc = (unsigned long)&ftrace_call;
-	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
+	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
+					  AARCH64_INSN_BRANCH_LINK);
 
 	return ftrace_modify_code(pc, 0, new, false);
 }
@@ -72,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	u32 old, new;
 
 	old = aarch64_insn_gen_nop();
-	new = aarch64_insn_gen_branch_imm(pc, addr, true);
+	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
 
 	return ftrace_modify_code(pc, old, new, true);
 }
@@ -86,7 +87,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 	unsigned long pc = rec->ip;
 	u32 old, new;
 
-	old = aarch64_insn_gen_branch_imm(pc, addr, true);
+	old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
 	new = aarch64_insn_gen_nop();
 
 	return ftrace_modify_code(pc, old, new, true);
@@ -154,7 +155,8 @@ static int ftrace_modify_graph_caller(bool enable)
 	u32 branch, nop;
 
 	branch = aarch64_insn_gen_branch_imm(pc,
-					     (unsigned long)ftrace_graph_caller, false);
+					     (unsigned long)ftrace_graph_caller,
+					     AARCH64_INSN_BRANCH_LINK);
 	nop = aarch64_insn_gen_nop();
 
 	if (enable)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 873069056229..0a6e4f924df8 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -151,7 +151,7 @@ optional_header:
 	.short	0x20b				// PE32+ format
 	.byte	0x02				// MajorLinkerVersion
 	.byte	0x14				// MinorLinkerVersion
-	.long	_edata - stext			// SizeOfCode
+	.long	_end - stext			// SizeOfCode
 	.long	0				// SizeOfInitializedData
 	.long	0				// SizeOfUninitializedData
 	.long	efi_stub_entry - efi_head	// AddressOfEntryPoint
@@ -169,7 +169,7 @@ extra_header_fields:
 	.short	0				// MinorSubsystemVersion
 	.long	0				// Win32VersionValue
 
-	.long	_edata - efi_head		// SizeOfImage
+	.long	_end - efi_head			// SizeOfImage
 
 	// Everything before the kernel image is considered part of the header
 	.long	stext - efi_head		// SizeOfHeaders
@@ -216,7 +216,7 @@ section_table:
 	.byte	0
 	.byte	0
 	.byte	0				// end of 0 padding of section name
-	.long	_edata - stext			// VirtualSize
+	.long	_end - stext			// VirtualSize
 	.long	stext - efi_head		// VirtualAddress
 	.long	_edata - stext			// SizeOfRawData
 	.long	stext - efi_head		// PointerToRawData
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 92f36835486b..e007714ded04 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Huawei Ltd.
  * Author: Jiang Liu <liuj97@gmail.com>
  *
+ * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
@@ -20,9 +22,14 @@
 #include <linux/smp.h>
 #include <linux/stop_machine.h>
 #include <linux/uaccess.h>
+
 #include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
 #include <asm/insn.h>
 
+#define AARCH64_INSN_SF_BIT	BIT(31)
+#define AARCH64_INSN_N_BIT	BIT(22)
+
 static int aarch64_insn_encoding_class[] = {
 	AARCH64_INSN_CLS_UNKNOWN,
 	AARCH64_INSN_CLS_UNKNOWN,
@@ -251,6 +258,19 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 		mask = BIT(9) - 1;
 		shift = 12;
 		break;
+	case AARCH64_INSN_IMM_7:
+		mask = BIT(7) - 1;
+		shift = 15;
+		break;
+	case AARCH64_INSN_IMM_6:
+	case AARCH64_INSN_IMM_S:
+		mask = BIT(6) - 1;
+		shift = 10;
+		break;
+	case AARCH64_INSN_IMM_R:
+		mask = BIT(6) - 1;
+		shift = 16;
+		break;
 	default:
 		pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
 		       type);
@@ -264,10 +284,76 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 	return insn;
 }
 
-u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
-					  enum aarch64_insn_branch_type type)
+static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
+					u32 insn,
+					enum aarch64_insn_register reg)
+{
+	int shift;
+
+	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
+		pr_err("%s: unknown register encoding %d\n", __func__, reg);
+		return 0;
+	}
+
+	switch (type) {
+	case AARCH64_INSN_REGTYPE_RT:
+	case AARCH64_INSN_REGTYPE_RD:
+		shift = 0;
+		break;
+	case AARCH64_INSN_REGTYPE_RN:
+		shift = 5;
+		break;
+	case AARCH64_INSN_REGTYPE_RT2:
+	case AARCH64_INSN_REGTYPE_RA:
+		shift = 10;
+		break;
+	case AARCH64_INSN_REGTYPE_RM:
+		shift = 16;
+		break;
+	default:
+		pr_err("%s: unknown register type encoding %d\n", __func__,
+		       type);
+		return 0;
+	}
+
+	insn &= ~(GENMASK(4, 0) << shift);
+	insn |= reg << shift;
+
+	return insn;
+}
+
+static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
+					 u32 insn)
+{
+	u32 size;
+
+	switch (type) {
+	case AARCH64_INSN_SIZE_8:
+		size = 0;
+		break;
+	case AARCH64_INSN_SIZE_16:
+		size = 1;
+		break;
+	case AARCH64_INSN_SIZE_32:
+		size = 2;
+		break;
+	case AARCH64_INSN_SIZE_64:
+		size = 3;
+		break;
+	default:
+		pr_err("%s: unknown size encoding %d\n", __func__, type);
+		return 0;
+	}
+
+	insn &= ~GENMASK(31, 30);
+	insn |= size << 30;
+
+	return insn;
+}
+
+static inline long branch_imm_common(unsigned long pc, unsigned long addr,
+				     long range)
 {
-	u32 insn;
 	long offset;
 
 	/*
@@ -276,23 +362,97 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
 	 */
 	BUG_ON((pc & 0x3) || (addr & 0x3));
 
+	offset = ((long)addr - (long)pc);
+	BUG_ON(offset < -range || offset >= range);
+
+	return offset;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+					  enum aarch64_insn_branch_type type)
+{
+	u32 insn;
+	long offset;
+
 	/*
 	 * B/BL support [-128M, 128M) offset
 	 * ARM64 virtual address arrangement guarantees all kernel and module
 	 * texts are within +/-128M.
 	 */
-	offset = ((long)addr - (long)pc);
-	BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+	offset = branch_imm_common(pc, addr, SZ_128M);
 
-	if (type == AARCH64_INSN_BRANCH_LINK)
+	switch (type) {
+	case AARCH64_INSN_BRANCH_LINK:
 		insn = aarch64_insn_get_bl_value();
-	else
+		break;
+	case AARCH64_INSN_BRANCH_NOLINK:
 		insn = aarch64_insn_get_b_value();
+		break;
+	default:
+		BUG_ON(1);
+		return AARCH64_BREAK_FAULT;
+	}
 
 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
 					     offset >> 2);
 }
 
+u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
+				     enum aarch64_insn_register reg,
+				     enum aarch64_insn_variant variant,
+				     enum aarch64_insn_branch_type type)
+{
+	u32 insn;
+	long offset;
+
+	offset = branch_imm_common(pc, addr, SZ_1M);
+
+	switch (type) {
+	case AARCH64_INSN_BRANCH_COMP_ZERO:
+		insn = aarch64_insn_get_cbz_value();
+		break;
+	case AARCH64_INSN_BRANCH_COMP_NONZERO:
+		insn = aarch64_insn_get_cbnz_value();
+		break;
+	default:
+		BUG_ON(1);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= AARCH64_INSN_SF_BIT;
+		break;
+	default:
+		BUG_ON(1);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+					     offset >> 2);
+}
+
+u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
+				     enum aarch64_insn_condition cond)
+{
+	u32 insn;
+	long offset;
+
+	offset = branch_imm_common(pc, addr, SZ_1M);
+
+	insn = aarch64_insn_get_bcond_value();
+
+	BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
+	insn |= cond;
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+					     offset >> 2);
+}
+
 u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
 {
 	return aarch64_insn_get_hint_value() | op;
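As a sanity check on the new ±1 MB compare-and-branch range (addresses invented for the example):

	/* cbz x3, <pc + 0x100>: offset 0x100 is within [-SZ_1M, SZ_1M),
	 * imm19 = 0x100 >> 2 = 0x40, and the 64-bit variant sets bit 31
	 * (AARCH64_INSN_SF_BIT). */
	u32 insn = aarch64_insn_gen_comp_branch_imm(0xffffffc000080000UL,
						    0xffffffc000080100UL,
						    AARCH64_INSN_REG_3,
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_BRANCH_COMP_ZERO);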
@@ -302,3 +462,500 @@ u32 __kprobes aarch64_insn_gen_nop(void) | |||
302 | { | 462 | { |
303 | return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP); | 463 | return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP); |
304 | } | 464 | } |
465 | |||
466 | u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, | ||
467 | enum aarch64_insn_branch_type type) | ||
468 | { | ||
469 | u32 insn; | ||
470 | |||
471 | switch (type) { | ||
472 | case AARCH64_INSN_BRANCH_NOLINK: | ||
473 | insn = aarch64_insn_get_br_value(); | ||
474 | break; | ||
475 | case AARCH64_INSN_BRANCH_LINK: | ||
476 | insn = aarch64_insn_get_blr_value(); | ||
477 | break; | ||
478 | case AARCH64_INSN_BRANCH_RETURN: | ||
479 | insn = aarch64_insn_get_ret_value(); | ||
480 | break; | ||
481 | default: | ||
482 | BUG_ON(1); | ||
483 | return AARCH64_BREAK_FAULT; | ||
484 | } | ||
485 | |||
486 | return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg); | ||
487 | } | ||
488 | |||
489 | u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, | ||
490 | enum aarch64_insn_register base, | ||
491 | enum aarch64_insn_register offset, | ||
492 | enum aarch64_insn_size_type size, | ||
493 | enum aarch64_insn_ldst_type type) | ||
494 | { | ||
495 | u32 insn; | ||
496 | |||
497 | switch (type) { | ||
498 | case AARCH64_INSN_LDST_LOAD_REG_OFFSET: | ||
499 | insn = aarch64_insn_get_ldr_reg_value(); | ||
500 | break; | ||
501 | case AARCH64_INSN_LDST_STORE_REG_OFFSET: | ||
502 | insn = aarch64_insn_get_str_reg_value(); | ||
503 | break; | ||
504 | default: | ||
505 | BUG_ON(1); | ||
506 | return AARCH64_BREAK_FAULT; | ||
507 | } | ||
508 | |||
509 | insn = aarch64_insn_encode_ldst_size(size, insn); | ||
510 | |||
511 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); | ||
512 | |||
513 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, | ||
514 | base); | ||
515 | |||
516 | return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, | ||
517 | offset); | ||
518 | } | ||
519 | |||
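A sketch of the register-offset form (illustrative, not from the patch), encoding the 32-bit load "ldr w0, [x1, x2]":

	u32 ldr = aarch64_insn_gen_load_store_reg(AARCH64_INSN_REG_0,
						  AARCH64_INSN_REG_1,
						  AARCH64_INSN_REG_2,
						  AARCH64_INSN_SIZE_32,
						  AARCH64_INSN_LDST_LOAD_REG_OFFSET);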
520 | u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1, | ||
521 | enum aarch64_insn_register reg2, | ||
522 | enum aarch64_insn_register base, | ||
523 | int offset, | ||
524 | enum aarch64_insn_variant variant, | ||
525 | enum aarch64_insn_ldst_type type) | ||
526 | { | ||
527 | u32 insn; | ||
528 | int shift; | ||
529 | |||
530 | switch (type) { | ||
531 | case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX: | ||
532 | insn = aarch64_insn_get_ldp_pre_value(); | ||
533 | break; | ||
534 | case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX: | ||
535 | insn = aarch64_insn_get_stp_pre_value(); | ||
536 | break; | ||
537 | case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX: | ||
538 | insn = aarch64_insn_get_ldp_post_value(); | ||
539 | break; | ||
540 | case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX: | ||
541 | insn = aarch64_insn_get_stp_post_value(); | ||
542 | break; | ||
543 | default: | ||
544 | BUG_ON(1); | ||
545 | return AARCH64_BREAK_FAULT; | ||
546 | } | ||
547 | |||
548 | switch (variant) { | ||
549 | case AARCH64_INSN_VARIANT_32BIT: | ||
550 | /* offset must be a multiple of 4 in the range [-256, 252] */ | ||
551 | BUG_ON(offset & 0x3); | ||
552 | BUG_ON(offset < -256 || offset > 252); | ||
553 | shift = 2; | ||
554 | break; | ||
555 | case AARCH64_INSN_VARIANT_64BIT: | ||
556 | /* offset must be a multiple of 8 in the range [-512, 504] */ | ||
557 | BUG_ON(offset & 0x7); | ||
558 | BUG_ON(offset < -512 || offset > 504); | ||
559 | shift = 3; | ||
560 | insn |= AARCH64_INSN_SF_BIT; | ||
561 | break; | ||
562 | default: | ||
563 | BUG_ON(1); | ||
564 | return AARCH64_BREAK_FAULT; | ||
565 | } | ||
566 | |||
567 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, | ||
568 | reg1); | ||
569 | |||
570 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn, | ||
571 | reg2); | ||
572 | |||
573 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, | ||
574 | base); | ||
575 | |||
576 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn, | ||
577 | offset >> shift); | ||
578 | } | ||
579 | |||
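The 7-bit pair immediate is scaled by the access size, hence the alignment checks above. A sketch (illustrative only) of the stack push the BPF JIT later wraps as A64_PUSH(), i.e. "stp x29, x30, [sp, #-16]!":

	u32 push = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
						    AARCH64_INSN_REG_LR,
						    AARCH64_INSN_REG_SP, -16,
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);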
580 | u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, | ||
581 | enum aarch64_insn_register src, | ||
582 | int imm, enum aarch64_insn_variant variant, | ||
583 | enum aarch64_insn_adsb_type type) | ||
584 | { | ||
585 | u32 insn; | ||
586 | |||
587 | switch (type) { | ||
588 | case AARCH64_INSN_ADSB_ADD: | ||
589 | insn = aarch64_insn_get_add_imm_value(); | ||
590 | break; | ||
591 | case AARCH64_INSN_ADSB_SUB: | ||
592 | insn = aarch64_insn_get_sub_imm_value(); | ||
593 | break; | ||
594 | case AARCH64_INSN_ADSB_ADD_SETFLAGS: | ||
595 | insn = aarch64_insn_get_adds_imm_value(); | ||
596 | break; | ||
597 | case AARCH64_INSN_ADSB_SUB_SETFLAGS: | ||
598 | insn = aarch64_insn_get_subs_imm_value(); | ||
599 | break; | ||
600 | default: | ||
601 | BUG_ON(1); | ||
602 | return AARCH64_BREAK_FAULT; | ||
603 | } | ||
604 | |||
605 | switch (variant) { | ||
606 | case AARCH64_INSN_VARIANT_32BIT: | ||
607 | break; | ||
608 | case AARCH64_INSN_VARIANT_64BIT: | ||
609 | insn |= AARCH64_INSN_SF_BIT; | ||
610 | break; | ||
611 | default: | ||
612 | BUG_ON(1); | ||
613 | return AARCH64_BREAK_FAULT; | ||
614 | } | ||
615 | |||
616 | BUG_ON(imm & ~(SZ_4K - 1)); | ||
617 | |||
618 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); | ||
619 | |||
620 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); | ||
621 | |||
622 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm); | ||
623 | } | ||
624 | |||
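Only 12-bit unsigned immediates pass the BUG_ON above; larger constants have to be built with the move-wide helpers further down. A sketch (illustrative) of "add x0, x1, #16":

	u32 add = aarch64_insn_gen_add_sub_imm(AARCH64_INSN_REG_0,
					       AARCH64_INSN_REG_1, 16,
					       AARCH64_INSN_VARIANT_64BIT,
					       AARCH64_INSN_ADSB_ADD);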
625 | u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst, | ||
626 | enum aarch64_insn_register src, | ||
627 | int immr, int imms, | ||
628 | enum aarch64_insn_variant variant, | ||
629 | enum aarch64_insn_bitfield_type type) | ||
630 | { | ||
631 | u32 insn; | ||
632 | u32 mask; | ||
633 | |||
634 | switch (type) { | ||
635 | case AARCH64_INSN_BITFIELD_MOVE: | ||
636 | insn = aarch64_insn_get_bfm_value(); | ||
637 | break; | ||
638 | case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED: | ||
639 | insn = aarch64_insn_get_ubfm_value(); | ||
640 | break; | ||
641 | case AARCH64_INSN_BITFIELD_MOVE_SIGNED: | ||
642 | insn = aarch64_insn_get_sbfm_value(); | ||
643 | break; | ||
644 | default: | ||
645 | BUG_ON(1); | ||
646 | return AARCH64_BREAK_FAULT; | ||
647 | } | ||
648 | |||
649 | switch (variant) { | ||
650 | case AARCH64_INSN_VARIANT_32BIT: | ||
651 | mask = GENMASK(4, 0); | ||
652 | break; | ||
653 | case AARCH64_INSN_VARIANT_64BIT: | ||
654 | insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT; | ||
655 | mask = GENMASK(5, 0); | ||
656 | break; | ||
657 | default: | ||
658 | BUG_ON(1); | ||
659 | return AARCH64_BREAK_FAULT; | ||
660 | } | ||
661 | |||
662 | BUG_ON(immr & ~mask); | ||
663 | BUG_ON(imms & ~mask); | ||
664 | |||
665 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); | ||
666 | |||
667 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); | ||
668 | |||
669 | insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr); | ||
670 | |||
671 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms); | ||
672 | } | ||
673 | |||
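Shifts are a special case of bitfield moves: a logical right shift is UBFM with imms pinned to the register's top bit, which is exactly how bpf_jit.h's A64_LSR() uses this helper. A sketch (illustrative) of "lsr x0, x1, #4":

	u32 lsr = aarch64_insn_gen_bitfield(AARCH64_INSN_REG_0,
					    AARCH64_INSN_REG_1, 4, 63,
					    AARCH64_INSN_VARIANT_64BIT,
					    AARCH64_INSN_BITFIELD_MOVE_UNSIGNED);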
674 | u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst, | ||
675 | int imm, int shift, | ||
676 | enum aarch64_insn_variant variant, | ||
677 | enum aarch64_insn_movewide_type type) | ||
678 | { | ||
679 | u32 insn; | ||
680 | |||
681 | switch (type) { | ||
682 | case AARCH64_INSN_MOVEWIDE_ZERO: | ||
683 | insn = aarch64_insn_get_movz_value(); | ||
684 | break; | ||
685 | case AARCH64_INSN_MOVEWIDE_KEEP: | ||
686 | insn = aarch64_insn_get_movk_value(); | ||
687 | break; | ||
688 | case AARCH64_INSN_MOVEWIDE_INVERSE: | ||
689 | insn = aarch64_insn_get_movn_value(); | ||
690 | break; | ||
691 | default: | ||
692 | BUG_ON(1); | ||
693 | return AARCH64_BREAK_FAULT; | ||
694 | } | ||
695 | |||
696 | BUG_ON(imm & ~(SZ_64K - 1)); | ||
697 | |||
698 | switch (variant) { | ||
699 | case AARCH64_INSN_VARIANT_32BIT: | ||
700 | BUG_ON(shift != 0 && shift != 16); | ||
701 | break; | ||
702 | case AARCH64_INSN_VARIANT_64BIT: | ||
703 | insn |= AARCH64_INSN_SF_BIT; | ||
704 | BUG_ON(shift != 0 && shift != 16 && shift != 32 && | ||
705 | shift != 48); | ||
706 | break; | ||
707 | default: | ||
708 | BUG_ON(1); | ||
709 | return AARCH64_BREAK_FAULT; | ||
710 | } | ||
711 | |||
712 | insn |= (shift >> 4) << 21; | ||
713 | |||
714 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); | ||
715 | |||
716 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm); | ||
717 | } | ||
718 | |||
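aarch64_insn_gen_movewide() only takes 16 bits at a time, so a wider constant is materialized as one MOVZ plus up to three MOVKs. A sketch of such a helper (emit_mov_i64() and the emit() callback are hypothetical, not part of the patch):

	static void emit_mov_i64(enum aarch64_insn_register dst, u64 val,
				 void (*emit)(u32))
	{
		int shift;

		/* MOVZ clears the register and sets bits [15:0] */
		emit(aarch64_insn_gen_movewide(dst, val & 0xffff, 0,
					       AARCH64_INSN_VARIANT_64BIT,
					       AARCH64_INSN_MOVEWIDE_ZERO));
		for (shift = 16; shift < 64; shift += 16) {
			u16 part = val >> shift;

			/* MOVK patches in each non-zero 16-bit chunk */
			if (part)
				emit(aarch64_insn_gen_movewide(dst, part, shift,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_MOVEWIDE_KEEP));
		}
	}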
719 | u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst, | ||
720 | enum aarch64_insn_register src, | ||
721 | enum aarch64_insn_register reg, | ||
722 | int shift, | ||
723 | enum aarch64_insn_variant variant, | ||
724 | enum aarch64_insn_adsb_type type) | ||
725 | { | ||
726 | u32 insn; | ||
727 | |||
728 | switch (type) { | ||
729 | case AARCH64_INSN_ADSB_ADD: | ||
730 | insn = aarch64_insn_get_add_value(); | ||
731 | break; | ||
732 | case AARCH64_INSN_ADSB_SUB: | ||
733 | insn = aarch64_insn_get_sub_value(); | ||
734 | break; | ||
735 | case AARCH64_INSN_ADSB_ADD_SETFLAGS: | ||
736 | insn = aarch64_insn_get_adds_value(); | ||
737 | break; | ||
738 | case AARCH64_INSN_ADSB_SUB_SETFLAGS: | ||
739 | insn = aarch64_insn_get_subs_value(); | ||
740 | break; | ||
741 | default: | ||
742 | BUG_ON(1); | ||
743 | return AARCH64_BREAK_FAULT; | ||
744 | } | ||
745 | |||
746 | switch (variant) { | ||
747 | case AARCH64_INSN_VARIANT_32BIT: | ||
748 | BUG_ON(shift & ~(SZ_32 - 1)); | ||
749 | break; | ||
750 | case AARCH64_INSN_VARIANT_64BIT: | ||
751 | insn |= AARCH64_INSN_SF_BIT; | ||
752 | BUG_ON(shift & ~(SZ_64 - 1)); | ||
753 | break; | ||
754 | default: | ||
755 | BUG_ON(1); | ||
756 | return AARCH64_BREAK_FAULT; | ||
757 | } | ||
758 | |||
759 | |||
760 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); | ||
761 | |||
762 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); | ||
763 | |||
764 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); | ||
765 | |||
766 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); | ||
767 | } | ||
768 | |||
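With the zero register as destination this form doubles as a compare, which is how bpf_jit.h derives A64_CMP() from A64_SUBS(). A sketch (illustrative) of "cmp x1, x2":

	u32 cmp = aarch64_insn_gen_add_sub_shifted_reg(AARCH64_INSN_REG_ZR,
						       AARCH64_INSN_REG_1,
						       AARCH64_INSN_REG_2, 0,
						       AARCH64_INSN_VARIANT_64BIT,
						       AARCH64_INSN_ADSB_SUB_SETFLAGS);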
769 | u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst, | ||
770 | enum aarch64_insn_register src, | ||
771 | enum aarch64_insn_variant variant, | ||
772 | enum aarch64_insn_data1_type type) | ||
773 | { | ||
774 | u32 insn; | ||
775 | |||
776 | switch (type) { | ||
777 | case AARCH64_INSN_DATA1_REVERSE_16: | ||
778 | insn = aarch64_insn_get_rev16_value(); | ||
779 | break; | ||
780 | case AARCH64_INSN_DATA1_REVERSE_32: | ||
781 | insn = aarch64_insn_get_rev32_value(); | ||
782 | break; | ||
783 | case AARCH64_INSN_DATA1_REVERSE_64: | ||
784 | BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT); | ||
785 | insn = aarch64_insn_get_rev64_value(); | ||
786 | break; | ||
787 | default: | ||
788 | BUG_ON(1); | ||
789 | return AARCH64_BREAK_FAULT; | ||
790 | } | ||
791 | |||
792 | switch (variant) { | ||
793 | case AARCH64_INSN_VARIANT_32BIT: | ||
794 | break; | ||
795 | case AARCH64_INSN_VARIANT_64BIT: | ||
796 | insn |= AARCH64_INSN_SF_BIT; | ||
797 | break; | ||
798 | default: | ||
799 | BUG_ON(1); | ||
800 | return AARCH64_BREAK_FAULT; | ||
801 | } | ||
802 | |||
803 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); | ||
804 | |||
805 | return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); | ||
806 | } | ||
807 | |||
808 | u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst, | ||
809 | enum aarch64_insn_register src, | ||
810 | enum aarch64_insn_register reg, | ||
811 | enum aarch64_insn_variant variant, | ||
812 | enum aarch64_insn_data2_type type) | ||
813 | { | ||
814 | u32 insn; | ||
815 | |||
816 | switch (type) { | ||
817 | case AARCH64_INSN_DATA2_UDIV: | ||
818 | insn = aarch64_insn_get_udiv_value(); | ||
819 | break; | ||
820 | case AARCH64_INSN_DATA2_SDIV: | ||
821 | insn = aarch64_insn_get_sdiv_value(); | ||
822 | break; | ||
823 | case AARCH64_INSN_DATA2_LSLV: | ||
824 | insn = aarch64_insn_get_lslv_value(); | ||
825 | break; | ||
826 | case AARCH64_INSN_DATA2_LSRV: | ||
827 | insn = aarch64_insn_get_lsrv_value(); | ||
828 | break; | ||
829 | case AARCH64_INSN_DATA2_ASRV: | ||
830 | insn = aarch64_insn_get_asrv_value(); | ||
831 | break; | ||
832 | case AARCH64_INSN_DATA2_RORV: | ||
833 | insn = aarch64_insn_get_rorv_value(); | ||
834 | break; | ||
835 | default: | ||
836 | BUG_ON(1); | ||
837 | return AARCH64_BREAK_FAULT; | ||
838 | } | ||
839 | |||
840 | switch (variant) { | ||
841 | case AARCH64_INSN_VARIANT_32BIT: | ||
842 | break; | ||
843 | case AARCH64_INSN_VARIANT_64BIT: | ||
844 | insn |= AARCH64_INSN_SF_BIT; | ||
845 | break; | ||
846 | default: | ||
847 | BUG_ON(1); | ||
848 | return AARCH64_BREAK_FAULT; | ||
849 | } | ||
850 | |||
851 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); | ||
852 | |||
853 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); | ||
854 | |||
855 | return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); | ||
856 | } | ||
857 | |||
858 | u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst, | ||
859 | enum aarch64_insn_register src, | ||
860 | enum aarch64_insn_register reg1, | ||
861 | enum aarch64_insn_register reg2, | ||
862 | enum aarch64_insn_variant variant, | ||
863 | enum aarch64_insn_data3_type type) | ||
864 | { | ||
865 | u32 insn; | ||
866 | |||
867 | switch (type) { | ||
868 | case AARCH64_INSN_DATA3_MADD: | ||
869 | insn = aarch64_insn_get_madd_value(); | ||
870 | break; | ||
871 | case AARCH64_INSN_DATA3_MSUB: | ||
872 | insn = aarch64_insn_get_msub_value(); | ||
873 | break; | ||
874 | default: | ||
875 | BUG_ON(1); | ||
876 | return AARCH64_BREAK_FAULT; | ||
877 | } | ||
878 | |||
879 | switch (variant) { | ||
880 | case AARCH64_INSN_VARIANT_32BIT: | ||
881 | break; | ||
882 | case AARCH64_INSN_VARIANT_64BIT: | ||
883 | insn |= AARCH64_INSN_SF_BIT; | ||
884 | break; | ||
885 | default: | ||
886 | BUG_ON(1); | ||
887 | return AARCH64_BREAK_FAULT; | ||
888 | } | ||
889 | |||
890 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); | ||
891 | |||
892 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src); | ||
893 | |||
894 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, | ||
895 | reg1); | ||
896 | |||
897 | return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, | ||
898 | reg2); | ||
899 | } | ||
900 | |||
901 | u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, | ||
902 | enum aarch64_insn_register src, | ||
903 | enum aarch64_insn_register reg, | ||
904 | int shift, | ||
905 | enum aarch64_insn_variant variant, | ||
906 | enum aarch64_insn_logic_type type) | ||
907 | { | ||
908 | u32 insn; | ||
909 | |||
910 | switch (type) { | ||
911 | case AARCH64_INSN_LOGIC_AND: | ||
912 | insn = aarch64_insn_get_and_value(); | ||
913 | break; | ||
914 | case AARCH64_INSN_LOGIC_BIC: | ||
915 | insn = aarch64_insn_get_bic_value(); | ||
916 | break; | ||
917 | case AARCH64_INSN_LOGIC_ORR: | ||
918 | insn = aarch64_insn_get_orr_value(); | ||
919 | break; | ||
920 | case AARCH64_INSN_LOGIC_ORN: | ||
921 | insn = aarch64_insn_get_orn_value(); | ||
922 | break; | ||
923 | case AARCH64_INSN_LOGIC_EOR: | ||
924 | insn = aarch64_insn_get_eor_value(); | ||
925 | break; | ||
926 | case AARCH64_INSN_LOGIC_EON: | ||
927 | insn = aarch64_insn_get_eon_value(); | ||
928 | break; | ||
929 | case AARCH64_INSN_LOGIC_AND_SETFLAGS: | ||
930 | insn = aarch64_insn_get_ands_value(); | ||
931 | break; | ||
932 | case AARCH64_INSN_LOGIC_BIC_SETFLAGS: | ||
933 | insn = aarch64_insn_get_bics_value(); | ||
934 | break; | ||
935 | default: | ||
936 | BUG_ON(1); | ||
937 | return AARCH64_BREAK_FAULT; | ||
938 | } | ||
939 | |||
940 | switch (variant) { | ||
941 | case AARCH64_INSN_VARIANT_32BIT: | ||
942 | BUG_ON(shift & ~(SZ_32 - 1)); | ||
943 | break; | ||
944 | case AARCH64_INSN_VARIANT_64BIT: | ||
945 | insn |= AARCH64_INSN_SF_BIT; | ||
946 | BUG_ON(shift & ~(SZ_64 - 1)); | ||
947 | break; | ||
948 | default: | ||
949 | BUG_ON(1); | ||
950 | return AARCH64_BREAK_FAULT; | ||
951 | } | ||
952 | |||
953 | |||
954 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); | ||
955 | |||
956 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); | ||
957 | |||
958 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); | ||
959 | |||
960 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); | ||
961 | } | ||
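Closing out the new encoders: ORR with the zero register as first source gives the canonical register-register move alias. A sketch (illustrative, not part of the patch) of "mov x1, x2":

	u32 mov = aarch64_insn_gen_logical_shifted_reg(AARCH64_INSN_REG_1,
						       AARCH64_INSN_REG_ZR,
						       AARCH64_INSN_REG_2, 0,
						       AARCH64_INSN_VARIANT_64BIT,
						       AARCH64_INSN_LOGIC_ORR);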
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index 75c9cf1aafee..a0d10c55f307 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c | |||
@@ -235,13 +235,13 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) | |||
235 | 235 | ||
236 | static struct break_hook kgdb_brkpt_hook = { | 236 | static struct break_hook kgdb_brkpt_hook = { |
237 | .esr_mask = 0xffffffff, | 237 | .esr_mask = 0xffffffff, |
238 | .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM), | 238 | .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DBG_BRK_IMM), |
239 | .fn = kgdb_brk_fn | 239 | .fn = kgdb_brk_fn |
240 | }; | 240 | }; |
241 | 241 | ||
242 | static struct break_hook kgdb_compiled_brkpt_hook = { | 242 | static struct break_hook kgdb_compiled_brkpt_hook = { |
243 | .esr_mask = 0xffffffff, | 243 | .esr_mask = 0xffffffff, |
244 | .esr_val = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM), | 244 | .esr_val = DBG_ESR_VAL_BRK(KGDB_COMPILED_DBG_BRK_IMM), |
245 | .fn = kgdb_compiled_brk_fn | 245 | .fn = kgdb_compiled_brk_fn |
246 | }; | 246 | }; |
247 | 247 | ||
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index baf5afb7e6a0..aa29ecb4f800 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -1276,7 +1276,7 @@ arch_initcall(cpu_pmu_reset); | |||
1276 | /* | 1276 | /* |
1277 | * PMU platform driver and devicetree bindings. | 1277 | * PMU platform driver and devicetree bindings. |
1278 | */ | 1278 | */ |
1279 | static struct of_device_id armpmu_of_device_ids[] = { | 1279 | static const struct of_device_id armpmu_of_device_ids[] = { |
1280 | {.compatible = "arm,armv8-pmuv3"}, | 1280 | {.compatible = "arm,armv8-pmuv3"}, |
1281 | {}, | 1281 | {}, |
1282 | }; | 1282 | }; |
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 29d48690f2ac..89f41f7d27dd 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -57,36 +57,10 @@ unsigned long __stack_chk_guard __read_mostly; | |||
57 | EXPORT_SYMBOL(__stack_chk_guard); | 57 | EXPORT_SYMBOL(__stack_chk_guard); |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | static void setup_restart(void) | ||
61 | { | ||
62 | /* | ||
63 | * Tell the mm system that we are going to reboot - | ||
64 | * we may need it to insert some 1:1 mappings so that | ||
65 | * soft boot works. | ||
66 | */ | ||
67 | setup_mm_for_reboot(); | ||
68 | |||
69 | /* Clean and invalidate caches */ | ||
70 | flush_cache_all(); | ||
71 | |||
72 | /* Turn D-cache off */ | ||
73 | cpu_cache_off(); | ||
74 | |||
75 | /* Push out any further dirty data, and ensure cache is empty */ | ||
76 | flush_cache_all(); | ||
77 | } | ||
78 | |||
79 | void soft_restart(unsigned long addr) | 60 | void soft_restart(unsigned long addr) |
80 | { | 61 | { |
81 | typedef void (*phys_reset_t)(unsigned long); | 62 | setup_mm_for_reboot(); |
82 | phys_reset_t phys_reset; | 63 | cpu_soft_restart(virt_to_phys(cpu_reset), addr); |
83 | |||
84 | setup_restart(); | ||
85 | |||
86 | /* Switch to the identity mapping */ | ||
87 | phys_reset = (phys_reset_t)virt_to_phys(cpu_reset); | ||
88 | phys_reset(addr); | ||
89 | |||
90 | /* Should never get here */ | 64 | /* Should never get here */ |
91 | BUG(); | 65 | BUG(); |
92 | } | 66 | } |
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 553954771a67..866c1c821860 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/reboot.h> | 21 | #include <linux/reboot.h> |
22 | #include <linux/pm.h> | 22 | #include <linux/pm.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/slab.h> | ||
24 | #include <uapi/linux/psci.h> | 25 | #include <uapi/linux/psci.h> |
25 | 26 | ||
26 | #include <asm/compiler.h> | 27 | #include <asm/compiler.h> |
@@ -28,6 +29,7 @@ | |||
28 | #include <asm/errno.h> | 29 | #include <asm/errno.h> |
29 | #include <asm/psci.h> | 30 | #include <asm/psci.h> |
30 | #include <asm/smp_plat.h> | 31 | #include <asm/smp_plat.h> |
32 | #include <asm/suspend.h> | ||
31 | #include <asm/system_misc.h> | 33 | #include <asm/system_misc.h> |
32 | 34 | ||
33 | #define PSCI_POWER_STATE_TYPE_STANDBY 0 | 35 | #define PSCI_POWER_STATE_TYPE_STANDBY 0 |
@@ -65,6 +67,8 @@ enum psci_function { | |||
65 | PSCI_FN_MAX, | 67 | PSCI_FN_MAX, |
66 | }; | 68 | }; |
67 | 69 | ||
70 | static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state); | ||
71 | |||
68 | static u32 psci_function_id[PSCI_FN_MAX]; | 72 | static u32 psci_function_id[PSCI_FN_MAX]; |
69 | 73 | ||
70 | static int psci_to_linux_errno(int errno) | 74 | static int psci_to_linux_errno(int errno) |
@@ -93,6 +97,18 @@ static u32 psci_power_state_pack(struct psci_power_state state) | |||
93 | & PSCI_0_2_POWER_STATE_AFFL_MASK); | 97 | & PSCI_0_2_POWER_STATE_AFFL_MASK); |
94 | } | 98 | } |
95 | 99 | ||
100 | static void psci_power_state_unpack(u32 power_state, | ||
101 | struct psci_power_state *state) | ||
102 | { | ||
103 | state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >> | ||
104 | PSCI_0_2_POWER_STATE_ID_SHIFT; | ||
105 | state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >> | ||
106 | PSCI_0_2_POWER_STATE_TYPE_SHIFT; | ||
107 | state->affinity_level = | ||
108 | (power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >> | ||
109 | PSCI_0_2_POWER_STATE_AFFL_SHIFT; | ||
110 | } | ||
111 | |||
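psci_power_state_unpack() is the inverse of the existing pack helper, splitting the DT-provided suspend parameter into the PSCI 0.2 fields (assuming the usual layout: state ID in bits [15:0], type in bit 16, affinity level in bits [25:24]). A sketch with a hypothetical parameter value:

	struct psci_power_state state;

	/* 0x10000 has only the type bit set: a power-down state,
	 * state ID 0, affinity level 0. */
	psci_power_state_unpack(0x10000, &state);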
96 | /* | 112 | /* |
97 | * The following two functions are invoked via the invoke_psci_fn pointer | 113 | * The following two functions are invoked via the invoke_psci_fn pointer |
98 | * and will not be inlined, allowing us to piggyback on the AAPCS. | 114 | * and will not be inlined, allowing us to piggyback on the AAPCS. |
@@ -199,6 +215,63 @@ static int psci_migrate_info_type(void) | |||
199 | return err; | 215 | return err; |
200 | } | 216 | } |
201 | 217 | ||
218 | static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node, | ||
219 | unsigned int cpu) | ||
220 | { | ||
221 | int i, ret, count = 0; | ||
222 | struct psci_power_state *psci_states; | ||
223 | struct device_node *state_node; | ||
224 | |||
225 | /* | ||
226 | * If the PSCI cpu_suspend function hook has not been initialized, | ||
227 | * idle states must not be enabled, so bail out | ||
228 | */ | ||
229 | if (!psci_ops.cpu_suspend) | ||
230 | return -EOPNOTSUPP; | ||
231 | |||
232 | /* Count idle states */ | ||
233 | while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", | ||
234 | count))) { | ||
235 | count++; | ||
236 | of_node_put(state_node); | ||
237 | } | ||
238 | |||
239 | if (!count) | ||
240 | return -ENODEV; | ||
241 | |||
242 | psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); | ||
243 | if (!psci_states) | ||
244 | return -ENOMEM; | ||
245 | |||
246 | for (i = 0; i < count; i++) { | ||
247 | u32 psci_power_state; | ||
248 | |||
249 | state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); | ||
250 | |||
251 | ret = of_property_read_u32(state_node, | ||
252 | "arm,psci-suspend-param", | ||
253 | &psci_power_state); | ||
254 | if (ret) { | ||
255 | pr_warn(" * %s missing arm,psci-suspend-param property\n", | ||
256 | state_node->full_name); | ||
257 | of_node_put(state_node); | ||
258 | goto free_mem; | ||
259 | } | ||
260 | |||
261 | of_node_put(state_node); | ||
262 | pr_debug("psci-power-state %#x index %d\n", psci_power_state, | ||
263 | i); | ||
264 | psci_power_state_unpack(psci_power_state, &psci_states[i]); | ||
265 | } | ||
266 | /* Idle states parsed correctly, initialize per-cpu pointer */ | ||
267 | per_cpu(psci_power_state, cpu) = psci_states; | ||
268 | return 0; | ||
269 | |||
270 | free_mem: | ||
271 | kfree(psci_states); | ||
272 | return ret; | ||
273 | } | ||
274 | |||
202 | static int get_set_conduit_method(struct device_node *np) | 275 | static int get_set_conduit_method(struct device_node *np) |
203 | { | 276 | { |
204 | const char *method; | 277 | const char *method; |
@@ -436,8 +509,39 @@ static int cpu_psci_cpu_kill(unsigned int cpu) | |||
436 | #endif | 509 | #endif |
437 | #endif | 510 | #endif |
438 | 511 | ||
512 | static int psci_suspend_finisher(unsigned long index) | ||
513 | { | ||
514 | struct psci_power_state *state = __get_cpu_var(psci_power_state); | ||
515 | |||
516 | return psci_ops.cpu_suspend(state[index - 1], | ||
517 | virt_to_phys(cpu_resume)); | ||
518 | } | ||
519 | |||
520 | static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index) | ||
521 | { | ||
522 | int ret; | ||
523 | struct psci_power_state *state = __get_cpu_var(psci_power_state); | ||
524 | /* | ||
525 | * idle state index 0 corresponds to wfi; the cpu_suspend operations | ||
526 | * should never be called with it | ||
527 | */ | ||
528 | if (WARN_ON_ONCE(!index)) | ||
529 | return -EINVAL; | ||
530 | |||
531 | if (state->type == PSCI_POWER_STATE_TYPE_STANDBY) | ||
532 | ret = psci_ops.cpu_suspend(state[index - 1], 0); | ||
533 | else | ||
534 | ret = __cpu_suspend(index, psci_suspend_finisher); | ||
535 | |||
536 | return ret; | ||
537 | } | ||
538 | |||
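cpu_psci_cpu_suspend() distinguishes standby states, where the firmware call returns in place, from power-down states, which must go through __cpu_suspend() so the context saved above can be restored via cpu_resume. A hypothetical cpuidle enter hook sitting on top of it (arm64_enter_idle() is illustrative; the generic idle driver is merged via a different tree):

	static int arm64_enter_idle(int idx)
	{
		if (idx == 0) {
			cpu_do_idle();		/* index 0 is plain wfi */
			return 0;
		}
		return cpu_suspend(idx);	/* reaches cpu_psci_cpu_suspend() */
	}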
439 | const struct cpu_operations cpu_psci_ops = { | 539 | const struct cpu_operations cpu_psci_ops = { |
440 | .name = "psci", | 540 | .name = "psci", |
541 | #ifdef CONFIG_CPU_IDLE | ||
542 | .cpu_init_idle = cpu_psci_cpu_init_idle, | ||
543 | .cpu_suspend = cpu_psci_cpu_suspend, | ||
544 | #endif | ||
441 | #ifdef CONFIG_SMP | 545 | #ifdef CONFIG_SMP |
442 | .cpu_init = cpu_psci_cpu_init, | 546 | .cpu_init = cpu_psci_cpu_init, |
443 | .cpu_prepare = cpu_psci_cpu_prepare, | 547 | .cpu_prepare = cpu_psci_cpu_prepare, |
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c index 89102a6ffad5..6c4fd2810ecb 100644 --- a/arch/arm64/kernel/return_address.c +++ b/arch/arm64/kernel/return_address.c | |||
@@ -36,13 +36,12 @@ void *return_address(unsigned int level) | |||
36 | { | 36 | { |
37 | struct return_address_data data; | 37 | struct return_address_data data; |
38 | struct stackframe frame; | 38 | struct stackframe frame; |
39 | register unsigned long current_sp asm ("sp"); | ||
40 | 39 | ||
41 | data.level = level + 2; | 40 | data.level = level + 2; |
42 | data.addr = NULL; | 41 | data.addr = NULL; |
43 | 42 | ||
44 | frame.fp = (unsigned long)__builtin_frame_address(0); | 43 | frame.fp = (unsigned long)__builtin_frame_address(0); |
45 | frame.sp = current_sp; | 44 | frame.sp = current_stack_pointer; |
46 | frame.pc = (unsigned long)return_address; /* dummy */ | 45 | frame.pc = (unsigned long)return_address; /* dummy */ |
47 | 46 | ||
48 | walk_stackframe(&frame, save_return_addr, &data); | 47 | walk_stackframe(&frame, save_return_addr, &data); |
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index edb146d01857..2437196cc5d4 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -365,11 +365,6 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID }; | |||
365 | 365 | ||
366 | void __init setup_arch(char **cmdline_p) | 366 | void __init setup_arch(char **cmdline_p) |
367 | { | 367 | { |
368 | /* | ||
369 | * Unmask asynchronous aborts early to catch possible system errors. | ||
370 | */ | ||
371 | local_async_enable(); | ||
372 | |||
373 | setup_processor(); | 368 | setup_processor(); |
374 | 369 | ||
375 | setup_machine_fdt(__fdt_pointer); | 370 | setup_machine_fdt(__fdt_pointer); |
@@ -385,6 +380,12 @@ void __init setup_arch(char **cmdline_p) | |||
385 | 380 | ||
386 | parse_early_param(); | 381 | parse_early_param(); |
387 | 382 | ||
383 | /* | ||
384 | * Unmask asynchronous aborts after bringing up possible earlycon. | ||
385 | * (so that any resulting System Errors can actually be reported) | ||
386 | */ | ||
387 | local_async_enable(); | ||
388 | |||
388 | efi_init(); | 389 | efi_init(); |
389 | arm64_memblock_init(); | 390 | arm64_memblock_init(); |
390 | 391 | ||
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index b1925729c692..a564b440416a 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S | |||
@@ -49,28 +49,39 @@ | |||
49 | orr \dst, \dst, \mask // dst|=(aff3>>rs3) | 49 | orr \dst, \dst, \mask // dst|=(aff3>>rs3) |
50 | .endm | 50 | .endm |
51 | /* | 51 | /* |
52 | * Save CPU state for a suspend. This saves callee registers, and allocates | 52 | * Save CPU state for a suspend and execute the suspend finisher. |
53 | * space on the kernel stack to save the CPU specific registers + some | 53 | * On success it will return 0 through cpu_resume - i.e. through a CPU |
54 | * other data for resume. | 54 | * soft/hard reboot from the reset vector. |
55 | * On failure it returns the suspend finisher's return value, or forces | ||
56 | * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher | ||
57 | * is not allowed to return; if it does, this must be considered a failure). | ||
58 | * It saves callee registers, and allocates space on the kernel stack | ||
59 | * to save the CPU specific registers + some other data for resume. | ||
55 | * | 60 | * |
56 | * x0 = suspend finisher argument | 61 | * x0 = suspend finisher argument |
62 | * x1 = suspend finisher function pointer | ||
57 | */ | 63 | */ |
58 | ENTRY(__cpu_suspend) | 64 | ENTRY(__cpu_suspend_enter) |
59 | stp x29, lr, [sp, #-96]! | 65 | stp x29, lr, [sp, #-96]! |
60 | stp x19, x20, [sp,#16] | 66 | stp x19, x20, [sp,#16] |
61 | stp x21, x22, [sp,#32] | 67 | stp x21, x22, [sp,#32] |
62 | stp x23, x24, [sp,#48] | 68 | stp x23, x24, [sp,#48] |
63 | stp x25, x26, [sp,#64] | 69 | stp x25, x26, [sp,#64] |
64 | stp x27, x28, [sp,#80] | 70 | stp x27, x28, [sp,#80] |
71 | /* | ||
72 | * Stash suspend finisher and its argument in x20 and x19 | ||
73 | */ | ||
74 | mov x19, x0 | ||
75 | mov x20, x1 | ||
65 | mov x2, sp | 76 | mov x2, sp |
66 | sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx | 77 | sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx |
67 | mov x1, sp | 78 | mov x0, sp |
68 | /* | 79 | /* |
69 | * x1 now points to struct cpu_suspend_ctx allocated on the stack | 80 | * x0 now points to struct cpu_suspend_ctx allocated on the stack |
70 | */ | 81 | */ |
71 | str x2, [x1, #CPU_CTX_SP] | 82 | str x2, [x0, #CPU_CTX_SP] |
72 | ldr x2, =sleep_save_sp | 83 | ldr x1, =sleep_save_sp |
73 | ldr x2, [x2, #SLEEP_SAVE_SP_VIRT] | 84 | ldr x1, [x1, #SLEEP_SAVE_SP_VIRT] |
74 | #ifdef CONFIG_SMP | 85 | #ifdef CONFIG_SMP |
75 | mrs x7, mpidr_el1 | 86 | mrs x7, mpidr_el1 |
76 | ldr x9, =mpidr_hash | 87 | ldr x9, =mpidr_hash |
@@ -82,11 +93,21 @@ ENTRY(__cpu_suspend) | |||
82 | ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS] | 93 | ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS] |
83 | ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] | 94 | ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] |
84 | compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 | 95 | compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 |
85 | add x2, x2, x8, lsl #3 | 96 | add x1, x1, x8, lsl #3 |
86 | #endif | 97 | #endif |
87 | bl __cpu_suspend_finisher | 98 | bl __cpu_suspend_save |
99 | /* | ||
100 | * Grab suspend finisher in x20 and its argument in x19 | ||
101 | */ | ||
102 | mov x0, x19 | ||
103 | mov x1, x20 | ||
104 | /* | ||
105 | * We are ready for power down, fire off the suspend finisher | ||
106 | * in x1, with argument in x0 | ||
107 | */ | ||
108 | blr x1 | ||
88 | /* | 109 | /* |
89 | * Never gets here, unless suspend fails. | 110 | * Never gets here, unless suspend finisher fails. |
90 | * Successful cpu_suspend should return from cpu_resume, returning | 111 | * Successful cpu_suspend should return from cpu_resume, returning |
91 | * through this code path is considered an error | 112 | * through this code path is considered an error |
92 | * If the return value is set to 0 force x0 = -EOPNOTSUPP | 113 | * If the return value is set to 0 force x0 = -EOPNOTSUPP |
@@ -103,7 +124,7 @@ ENTRY(__cpu_suspend) | |||
103 | ldp x27, x28, [sp, #80] | 124 | ldp x27, x28, [sp, #80] |
104 | ldp x29, lr, [sp], #96 | 125 | ldp x29, lr, [sp], #96 |
105 | ret | 126 | ret |
106 | ENDPROC(__cpu_suspend) | 127 | ENDPROC(__cpu_suspend_enter) |
107 | .ltorg | 128 | .ltorg |
108 | 129 | ||
109 | /* | 130 | /* |
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index 0347d38eea29..4f93c67e63de 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/of.h> | 21 | #include <linux/of.h> |
22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
23 | #include <linux/types.h> | ||
23 | 24 | ||
24 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
25 | #include <asm/cpu_ops.h> | 26 | #include <asm/cpu_ops.h> |
@@ -65,12 +66,21 @@ static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu) | |||
65 | 66 | ||
66 | static int smp_spin_table_cpu_prepare(unsigned int cpu) | 67 | static int smp_spin_table_cpu_prepare(unsigned int cpu) |
67 | { | 68 | { |
68 | void **release_addr; | 69 | __le64 __iomem *release_addr; |
69 | 70 | ||
70 | if (!cpu_release_addr[cpu]) | 71 | if (!cpu_release_addr[cpu]) |
71 | return -ENODEV; | 72 | return -ENODEV; |
72 | 73 | ||
73 | release_addr = __va(cpu_release_addr[cpu]); | 74 | /* |
75 | * The cpu-release-addr may or may not be inside the linear mapping. | ||
76 | * As ioremap_cache will either give us a new mapping or reuse the | ||
77 | * existing linear mapping, we can use it to cover both cases. In | ||
78 | * either case the memory will be MT_NORMAL. | ||
79 | */ | ||
80 | release_addr = ioremap_cache(cpu_release_addr[cpu], | ||
81 | sizeof(*release_addr)); | ||
82 | if (!release_addr) | ||
83 | return -ENOMEM; | ||
74 | 84 | ||
75 | /* | 85 | /* |
76 | * We write the release address as LE regardless of the native | 86 | * We write the release address as LE regardless of the native |
@@ -79,15 +89,17 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu) | |||
79 | * boot-loader's endianness before jumping. This is mandated by | 89 | * boot-loader's endianness before jumping. This is mandated by |
80 | * the boot protocol. | 90 | * the boot protocol. |
81 | */ | 91 | */ |
82 | release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen)); | 92 | writeq_relaxed(__pa(secondary_holding_pen), release_addr); |
83 | 93 | __flush_dcache_area((__force void *)release_addr, | |
84 | __flush_dcache_area(release_addr, sizeof(release_addr[0])); | 94 | sizeof(*release_addr)); |
85 | 95 | ||
86 | /* | 96 | /* |
87 | * Send an event to wake up the secondary CPU. | 97 | * Send an event to wake up the secondary CPU. |
88 | */ | 98 | */ |
89 | sev(); | 99 | sev(); |
90 | 100 | ||
101 | iounmap(release_addr); | ||
102 | |||
91 | return 0; | 103 | return 0; |
92 | } | 104 | } |
93 | 105 | ||
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 55437ba1f5a4..407991bf79f5 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c | |||
@@ -111,10 +111,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
111 | frame.sp = thread_saved_sp(tsk); | 111 | frame.sp = thread_saved_sp(tsk); |
112 | frame.pc = thread_saved_pc(tsk); | 112 | frame.pc = thread_saved_pc(tsk); |
113 | } else { | 113 | } else { |
114 | register unsigned long current_sp asm("sp"); | ||
115 | data.no_sched_functions = 0; | 114 | data.no_sched_functions = 0; |
116 | frame.fp = (unsigned long)__builtin_frame_address(0); | 115 | frame.fp = (unsigned long)__builtin_frame_address(0); |
117 | frame.sp = current_sp; | 116 | frame.sp = current_stack_pointer; |
118 | frame.pc = (unsigned long)save_stack_trace_tsk; | 117 | frame.pc = (unsigned long)save_stack_trace_tsk; |
119 | } | 118 | } |
120 | 119 | ||
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 55a99b9a97e0..13ad4dbb1615 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c | |||
@@ -9,22 +9,19 @@ | |||
9 | #include <asm/suspend.h> | 9 | #include <asm/suspend.h> |
10 | #include <asm/tlbflush.h> | 10 | #include <asm/tlbflush.h> |
11 | 11 | ||
12 | extern int __cpu_suspend(unsigned long); | 12 | extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long)); |
13 | /* | 13 | /* |
14 | * This is called by __cpu_suspend() to save the state, and do whatever | 14 | * This is called by __cpu_suspend_enter() to save the state, and do whatever |
15 | * flushing is required to ensure that when the CPU goes to sleep we have | 15 | * flushing is required to ensure that when the CPU goes to sleep we have |
16 | * the necessary data available when the caches are not searched. | 16 | * the necessary data available when the caches are not searched. |
17 | * | 17 | * |
18 | * @arg: Argument to pass to suspend operations | 18 | * ptr: CPU context virtual address |
19 | * @ptr: CPU context virtual address | 19 | * save_ptr: address of the location where the context physical address |
20 | * @save_ptr: address of the location where the context physical address | 20 | * must be saved |
21 | * must be saved | ||
22 | */ | 21 | */ |
23 | int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr, | 22 | void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr, |
24 | phys_addr_t *save_ptr) | 23 | phys_addr_t *save_ptr) |
25 | { | 24 | { |
26 | int cpu = smp_processor_id(); | ||
27 | |||
28 | *save_ptr = virt_to_phys(ptr); | 25 | *save_ptr = virt_to_phys(ptr); |
29 | 26 | ||
30 | cpu_do_suspend(ptr); | 27 | cpu_do_suspend(ptr); |
@@ -35,8 +32,6 @@ int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr, | |||
35 | */ | 32 | */ |
36 | __flush_dcache_area(ptr, sizeof(*ptr)); | 33 | __flush_dcache_area(ptr, sizeof(*ptr)); |
37 | __flush_dcache_area(save_ptr, sizeof(*save_ptr)); | 34 | __flush_dcache_area(save_ptr, sizeof(*save_ptr)); |
38 | |||
39 | return cpu_ops[cpu]->cpu_suspend(arg); | ||
40 | } | 35 | } |
41 | 36 | ||
42 | /* | 37 | /* |
@@ -56,15 +51,15 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) | |||
56 | } | 51 | } |
57 | 52 | ||
58 | /** | 53 | /** |
59 | * cpu_suspend | 54 | * cpu_suspend() - function to enter a low-power state |
55 | * @arg: argument to pass to CPU suspend operations | ||
60 | * | 56 | * |
61 | * @arg: argument to pass to the finisher function | 57 | * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU |
58 | * operations back-end error code otherwise. | ||
62 | */ | 59 | */ |
63 | int cpu_suspend(unsigned long arg) | 60 | int cpu_suspend(unsigned long arg) |
64 | { | 61 | { |
65 | struct mm_struct *mm = current->active_mm; | 62 | int cpu = smp_processor_id(); |
66 | int ret, cpu = smp_processor_id(); | ||
67 | unsigned long flags; | ||
68 | 63 | ||
69 | /* | 64 | /* |
70 | * If cpu_ops have not been registered or suspend | 65 | * If cpu_ops have not been registered or suspend |
@@ -72,6 +67,21 @@ int cpu_suspend(unsigned long arg) | |||
72 | */ | 67 | */ |
73 | if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) | 68 | if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) |
74 | return -EOPNOTSUPP; | 69 | return -EOPNOTSUPP; |
70 | return cpu_ops[cpu]->cpu_suspend(arg); | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * __cpu_suspend | ||
75 | * | ||
76 | * arg: argument to pass to the finisher function | ||
77 | * fn: finisher function pointer | ||
78 | * | ||
79 | */ | ||
80 | int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | ||
81 | { | ||
82 | struct mm_struct *mm = current->active_mm; | ||
83 | int ret; | ||
84 | unsigned long flags; | ||
75 | 85 | ||
76 | /* | 86 | /* |
77 | * From this point debug exceptions are disabled to prevent | 87 | * From this point debug exceptions are disabled to prevent |
@@ -86,7 +96,7 @@ int cpu_suspend(unsigned long arg) | |||
86 | * page tables, so that the thread address space is properly | 96 | * page tables, so that the thread address space is properly |
87 | * set-up on function return. | 97 | * set-up on function return. |
88 | */ | 98 | */ |
89 | ret = __cpu_suspend(arg); | 99 | ret = __cpu_suspend_enter(arg, fn); |
90 | if (ret == 0) { | 100 | if (ret == 0) { |
91 | cpu_switch_mm(mm->pgd, mm); | 101 | cpu_switch_mm(mm->pgd, mm); |
92 | flush_tlb_all(); | 102 | flush_tlb_all(); |
@@ -95,7 +105,7 @@ int cpu_suspend(unsigned long arg) | |||
95 | * Restore per-cpu offset before any kernel | 105 | * Restore per-cpu offset before any kernel |
96 | * subsystem relying on it has a chance to run. | 106 | * subsystem relying on it has a chance to run. |
97 | */ | 107 | */ |
98 | set_my_cpu_offset(per_cpu_offset(cpu)); | 108 | set_my_cpu_offset(per_cpu_offset(smp_processor_id())); |
99 | 109 | ||
100 | /* | 110 | /* |
101 | * Restore HW breakpoint registers to sane values | 111 | * Restore HW breakpoint registers to sane values |
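The split leaves cpu_suspend() as a thin dispatch to the cpu_ops back-end, while __cpu_suspend() keeps the mm, TLB and per-cpu restore work. A sketch of the finisher contract (my_finisher() is hypothetical): the callback runs after the context has been saved and must not return on success; sleep.S forces a zero return to -EOPNOTSUPP.

	static int my_finisher(unsigned long arg)
	{
		/* program the power controller here; reaching the
		 * "return" below at all means the suspend failed */
		return -EBUSY;
	}

	/* ...somewhere in a cpu_ops back-end... */
	ret = __cpu_suspend(arg, my_finisher);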
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 02cd3f023e9a..de1b085e7963 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
@@ -132,7 +132,6 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) | |||
132 | static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) | 132 | static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) |
133 | { | 133 | { |
134 | struct stackframe frame; | 134 | struct stackframe frame; |
135 | const register unsigned long current_sp asm ("sp"); | ||
136 | 135 | ||
137 | pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); | 136 | pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); |
138 | 137 | ||
@@ -145,7 +144,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) | |||
145 | frame.pc = regs->pc; | 144 | frame.pc = regs->pc; |
146 | } else if (tsk == current) { | 145 | } else if (tsk == current) { |
147 | frame.fp = (unsigned long)__builtin_frame_address(0); | 146 | frame.fp = (unsigned long)__builtin_frame_address(0); |
148 | frame.sp = current_sp; | 147 | frame.sp = current_stack_pointer; |
149 | frame.pc = (unsigned long)dump_backtrace; | 148 | frame.pc = (unsigned long)dump_backtrace; |
150 | } else { | 149 | } else { |
151 | /* | 150 | /* |
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 3ecb56c624d3..c56179ed2c09 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | obj-y := dma-mapping.o extable.o fault.o init.o \ | 1 | obj-y := dma-mapping.o extable.o fault.o init.o \ |
2 | cache.o copypage.o flush.o \ | 2 | cache.o copypage.o flush.o \ |
3 | ioremap.o mmap.o pgd.o mmu.o \ | 3 | ioremap.o mmap.o pgd.o mmu.o \ |
4 | context.o proc.o | 4 | context.o proc.o pageattr.o |
5 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 5 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 4164c5ace9f8..2c71077cacfd 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
@@ -22,11 +22,8 @@ | |||
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/dma-mapping.h> | 23 | #include <linux/dma-mapping.h> |
24 | #include <linux/dma-contiguous.h> | 24 | #include <linux/dma-contiguous.h> |
25 | #include <linux/of.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/vmalloc.h> | 25 | #include <linux/vmalloc.h> |
28 | #include <linux/swiotlb.h> | 26 | #include <linux/swiotlb.h> |
29 | #include <linux/amba/bus.h> | ||
30 | 27 | ||
31 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
32 | 29 | ||
@@ -125,7 +122,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size, | |||
125 | no_map: | 122 | no_map: |
126 | __dma_free_coherent(dev, size, ptr, *dma_handle, attrs); | 123 | __dma_free_coherent(dev, size, ptr, *dma_handle, attrs); |
127 | no_mem: | 124 | no_mem: |
128 | *dma_handle = ~0; | 125 | *dma_handle = DMA_ERROR_CODE; |
129 | return NULL; | 126 | return NULL; |
130 | } | 127 | } |
131 | 128 | ||
@@ -308,40 +305,12 @@ struct dma_map_ops coherent_swiotlb_dma_ops = { | |||
308 | }; | 305 | }; |
309 | EXPORT_SYMBOL(coherent_swiotlb_dma_ops); | 306 | EXPORT_SYMBOL(coherent_swiotlb_dma_ops); |
310 | 307 | ||
311 | static int dma_bus_notifier(struct notifier_block *nb, | ||
312 | unsigned long event, void *_dev) | ||
313 | { | ||
314 | struct device *dev = _dev; | ||
315 | |||
316 | if (event != BUS_NOTIFY_ADD_DEVICE) | ||
317 | return NOTIFY_DONE; | ||
318 | |||
319 | if (of_property_read_bool(dev->of_node, "dma-coherent")) | ||
320 | set_dma_ops(dev, &coherent_swiotlb_dma_ops); | ||
321 | |||
322 | return NOTIFY_OK; | ||
323 | } | ||
324 | |||
325 | static struct notifier_block platform_bus_nb = { | ||
326 | .notifier_call = dma_bus_notifier, | ||
327 | }; | ||
328 | |||
329 | static struct notifier_block amba_bus_nb = { | ||
330 | .notifier_call = dma_bus_notifier, | ||
331 | }; | ||
332 | |||
333 | extern int swiotlb_late_init_with_default_size(size_t default_size); | 308 | extern int swiotlb_late_init_with_default_size(size_t default_size); |
334 | 309 | ||
335 | static int __init swiotlb_late_init(void) | 310 | static int __init swiotlb_late_init(void) |
336 | { | 311 | { |
337 | size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); | 312 | size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); |
338 | 313 | ||
339 | /* | ||
340 | * These must be registered before of_platform_populate(). | ||
341 | */ | ||
342 | bus_register_notifier(&platform_bus_type, &platform_bus_nb); | ||
343 | bus_register_notifier(&amba_bustype, &amba_bus_nb); | ||
344 | |||
345 | dma_ops = &noncoherent_swiotlb_dma_ops; | 314 | dma_ops = &noncoherent_swiotlb_dma_ops; |
346 | 315 | ||
347 | return swiotlb_late_init_with_default_size(swiotlb_size); | 316 | return swiotlb_late_init_with_default_size(swiotlb_size); |
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index adf5e07677d8..494297c698ca 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
@@ -255,7 +255,7 @@ static void __init free_unused_memmap(void) | |||
255 | */ | 255 | */ |
256 | void __init mem_init(void) | 256 | void __init mem_init(void) |
257 | { | 257 | { |
258 | max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; | 258 | set_max_mapnr(pfn_to_page(max_pfn) - mem_map); |
259 | 259 | ||
260 | #ifndef CONFIG_SPARSEMEM_VMEMMAP | 260 | #ifndef CONFIG_SPARSEMEM_VMEMMAP |
261 | free_unused_memmap(); | 261 | free_unused_memmap(); |
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 8ed6cb1a900f..1d73662f00ff 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c | |||
@@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); | |||
102 | * You really shouldn't be using read() or write() on /dev/mem. This might go | 102 | * You really shouldn't be using read() or write() on /dev/mem. This might go |
103 | * away in the future. | 103 | * away in the future. |
104 | */ | 104 | */ |
105 | int valid_phys_addr_range(unsigned long addr, size_t size) | 105 | int valid_phys_addr_range(phys_addr_t addr, size_t size) |
106 | { | 106 | { |
107 | if (addr < PHYS_OFFSET) | 107 | if (addr < PHYS_OFFSET) |
108 | return 0; | 108 | return 0; |
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index c55567283cde..6894ef3e6234 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -94,7 +94,7 @@ static int __init early_cachepolicy(char *p) | |||
94 | */ | 94 | */ |
95 | asm volatile( | 95 | asm volatile( |
96 | " mrs %0, mair_el1\n" | 96 | " mrs %0, mair_el1\n" |
97 | " bfi %0, %1, #%2, #8\n" | 97 | " bfi %0, %1, %2, #8\n" |
98 | " msr mair_el1, %0\n" | 98 | " msr mair_el1, %0\n" |
99 | " isb\n" | 99 | " isb\n" |
100 | : "=&r" (tmp) | 100 | : "=&r" (tmp) |
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c new file mode 100644 index 000000000000..bb0ea94c4ba1 --- /dev/null +++ b/arch/arm64/mm/pageattr.c | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014, The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/sched.h> | ||
17 | |||
18 | #include <asm/pgtable.h> | ||
19 | #include <asm/tlbflush.h> | ||
20 | |||
21 | struct page_change_data { | ||
22 | pgprot_t set_mask; | ||
23 | pgprot_t clear_mask; | ||
24 | }; | ||
25 | |||
26 | static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr, | ||
27 | void *data) | ||
28 | { | ||
29 | struct page_change_data *cdata = data; | ||
30 | pte_t pte = *ptep; | ||
31 | |||
32 | pte = clear_pte_bit(pte, cdata->clear_mask); | ||
33 | pte = set_pte_bit(pte, cdata->set_mask); | ||
34 | |||
35 | set_pte(ptep, pte); | ||
36 | return 0; | ||
37 | } | ||
38 | |||
39 | static int change_memory_common(unsigned long addr, int numpages, | ||
40 | pgprot_t set_mask, pgprot_t clear_mask) | ||
41 | { | ||
42 | unsigned long start = addr; | ||
43 | unsigned long size = PAGE_SIZE*numpages; | ||
44 | unsigned long end = start + size; | ||
45 | int ret; | ||
46 | struct page_change_data data; | ||
47 | |||
48 | if (!IS_ALIGNED(addr, PAGE_SIZE)) { | ||
49 | start &= PAGE_MASK; | ||
50 | end = start + size; | ||
51 | WARN_ON_ONCE(1); | ||
52 | } | ||
53 | |||
54 | if (!is_module_address(start) || !is_module_address(end - 1)) | ||
55 | return -EINVAL; | ||
56 | |||
57 | data.set_mask = set_mask; | ||
58 | data.clear_mask = clear_mask; | ||
59 | |||
60 | ret = apply_to_page_range(&init_mm, start, size, change_page_range, | ||
61 | &data); | ||
62 | |||
63 | flush_tlb_kernel_range(start, end); | ||
64 | return ret; | ||
65 | } | ||
66 | |||
67 | int set_memory_ro(unsigned long addr, int numpages) | ||
68 | { | ||
69 | return change_memory_common(addr, numpages, | ||
70 | __pgprot(PTE_RDONLY), | ||
71 | __pgprot(PTE_WRITE)); | ||
72 | } | ||
73 | EXPORT_SYMBOL_GPL(set_memory_ro); | ||
74 | |||
75 | int set_memory_rw(unsigned long addr, int numpages) | ||
76 | { | ||
77 | return change_memory_common(addr, numpages, | ||
78 | __pgprot(PTE_WRITE), | ||
79 | __pgprot(PTE_RDONLY)); | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(set_memory_rw); | ||
82 | |||
83 | int set_memory_nx(unsigned long addr, int numpages) | ||
84 | { | ||
85 | return change_memory_common(addr, numpages, | ||
86 | __pgprot(PTE_PXN), | ||
87 | __pgprot(0)); | ||
88 | } | ||
89 | EXPORT_SYMBOL_GPL(set_memory_nx); | ||
90 | |||
91 | int set_memory_x(unsigned long addr, int numpages) | ||
92 | { | ||
93 | return change_memory_common(addr, numpages, | ||
94 | __pgprot(0), | ||
95 | __pgprot(PTE_PXN)); | ||
96 | } | ||
97 | EXPORT_SYMBOL_GPL(set_memory_x); | ||
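These helpers back CONFIG_DEBUG_SET_MODULE_RONX and deliberately refuse addresses outside the module area (the is_module_address() check above). A hypothetical caller protecting a module's rodata pages (addr page-aligned):

	static int protect_module_rodata(unsigned long addr, int numpages)
	{
		int ret = set_memory_ro(addr, numpages);  /* clear PTE_WRITE */

		return ret ? ret : set_memory_nx(addr, numpages); /* set PTE_PXN */
	}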
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 7736779c9809..4e778b13291b 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S | |||
@@ -76,6 +76,21 @@ ENTRY(cpu_reset) | |||
76 | ret x0 | 76 | ret x0 |
77 | ENDPROC(cpu_reset) | 77 | ENDPROC(cpu_reset) |
78 | 78 | ||
79 | ENTRY(cpu_soft_restart) | ||
80 | /* Save address of cpu_reset() and reset address */ | ||
81 | mov x19, x0 | ||
82 | mov x20, x1 | ||
83 | |||
84 | /* Turn D-cache off */ | ||
85 | bl cpu_cache_off | ||
86 | |||
87 | /* Push out all dirty data, and ensure cache is empty */ | ||
88 | bl flush_cache_all | ||
89 | |||
90 | mov x0, x20 | ||
91 | ret x19 | ||
92 | ENDPROC(cpu_soft_restart) | ||
93 | |||
79 | /* | 94 | /* |
80 | * cpu_do_idle() | 95 | * cpu_do_idle() |
81 | * | 96 | * |
diff --git a/arch/arm64/net/Makefile b/arch/arm64/net/Makefile new file mode 100644 index 000000000000..da9763378284 --- /dev/null +++ b/arch/arm64/net/Makefile | |||
@@ -0,0 +1,4 @@ | |||
1 | # | ||
2 | # ARM64 networking code | ||
3 | # | ||
4 | obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o | ||
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h new file mode 100644 index 000000000000..2134f7e6c288 --- /dev/null +++ b/arch/arm64/net/bpf_jit.h | |||
@@ -0,0 +1,169 @@ | |||
1 | /* | ||
2 | * BPF JIT compiler for ARM64 | ||
3 | * | ||
4 | * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #ifndef _BPF_JIT_H | ||
19 | #define _BPF_JIT_H | ||
20 | |||
21 | #include <asm/insn.h> | ||
22 | |||
23 | /* 5-bit Register Operand */ | ||
24 | #define A64_R(x) AARCH64_INSN_REG_##x | ||
25 | #define A64_FP AARCH64_INSN_REG_FP | ||
26 | #define A64_LR AARCH64_INSN_REG_LR | ||
27 | #define A64_ZR AARCH64_INSN_REG_ZR | ||
28 | #define A64_SP AARCH64_INSN_REG_SP | ||
29 | |||
30 | #define A64_VARIANT(sf) \ | ||
31 | ((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT) | ||
32 | |||
33 | /* Compare & branch (immediate) */ | ||
34 | #define A64_COMP_BRANCH(sf, Rt, offset, type) \ | ||
35 | aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \ | ||
36 | AARCH64_INSN_BRANCH_COMP_##type) | ||
37 | #define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO) | ||
38 | |||
39 | /* Conditional branch (immediate) */ | ||
40 | #define A64_COND_BRANCH(cond, offset) \ | ||
41 | aarch64_insn_gen_cond_branch_imm(0, offset, cond) | ||
42 | #define A64_COND_EQ AARCH64_INSN_COND_EQ /* == */ | ||
43 | #define A64_COND_NE AARCH64_INSN_COND_NE /* != */ | ||
44 | #define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */ | ||
45 | #define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */ | ||
46 | #define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */ | ||
47 | #define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */ | ||
48 | #define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2) | ||
49 | |||
50 | /* Unconditional branch (immediate) */ | ||
51 | #define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \ | ||
52 | AARCH64_INSN_BRANCH_##type) | ||
53 | #define A64_B(imm26) A64_BRANCH((imm26) << 2, NOLINK) | ||
54 | #define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK) | ||
55 | |||
56 | /* Unconditional branch (register) */ | ||
57 | #define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK) | ||
58 | #define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN) | ||
59 | |||
60 | /* Load/store register (register offset) */ | ||
61 | #define A64_LS_REG(Rt, Rn, Rm, size, type) \ | ||
62 | aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \ | ||
63 | AARCH64_INSN_SIZE_##size, \ | ||
64 | AARCH64_INSN_LDST_##type##_REG_OFFSET) | ||
65 | #define A64_STRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, STORE) | ||
66 | #define A64_LDRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, LOAD) | ||
67 | #define A64_STRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, STORE) | ||
68 | #define A64_LDRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, LOAD) | ||
69 | #define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE) | ||
70 | #define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD) | ||
71 | #define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE) | ||
72 | #define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD) | ||
73 | |||
74 | /* Load/store register pair */ | ||
75 | #define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \ | ||
76 | aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \ | ||
77 | AARCH64_INSN_VARIANT_64BIT, \ | ||
78 | AARCH64_INSN_LDST_##ls##_PAIR_##type) | ||
79 | /* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */ | ||
80 | #define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX) | ||
81 | /* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */ | ||
82 | #define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX) | ||
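These two wrappers give the JIT its stack discipline: a pre-indexed store pair and a post-indexed load pair, always moving SP by 16. A usage sketch, illustrative only, with the names taken straight from this header:

    u32 insn;

    /* Equivalent to "stp x19, x20, [sp, #-16]!" */
    insn = A64_PUSH(A64_R(19), A64_R(20), A64_SP);
    /* Equivalent to "ldp x19, x20, [sp], #16" */
    insn = A64_POP(A64_R(19), A64_R(20), A64_SP);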
83 | |||
84 | /* Add/subtract (immediate) */ | ||
85 | #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \ | ||
86 | aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \ | ||
87 | A64_VARIANT(sf), AARCH64_INSN_ADSB_##type) | ||
88 | /* Rd = Rn OP imm12 */ | ||
89 | #define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD) | ||
90 | #define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB) | ||
91 | /* Rd = Rn */ | ||
92 | #define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0) | ||
93 | |||
94 | /* Bitfield move */ | ||
95 | #define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \ | ||
96 | aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \ | ||
97 | A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type) | ||
98 | /* Signed, with sign replication to left and zeros to right */ | ||
99 | #define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED) | ||
100 | /* Unsigned, with zeros to left and right */ | ||
101 | #define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED) | ||
102 | |||
103 | /* Rd = Rn << shift */ | ||
104 | #define A64_LSL(sf, Rd, Rn, shift) ({ \ | ||
105 | int sz = (sf) ? 64 : 32; \ | ||
106 | A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \ | ||
107 | }) | ||
108 | /* Rd = Rn >> shift */ | ||
109 | #define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31) | ||
110 | /* Rd = Rn >> shift; signed */ | ||
111 | #define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31) | ||
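The A64_LSL expansion above is the standard UBFM alias for a left shift: immr = (-shift) mod size and imms = size - 1 - shift. One worked instance, assuming 64-bit operands:

    /* A64_LSL(1, Rd, Rn, 3) becomes A64_UBFM(1, Rd, Rn, 61, 60):
     * (-3) % 64 == 61 and 64 - 1 - 3 == 60, i.e. "ubfm Rd, Rn, #61, #60",
     * which disassembles as "lsl Rd, Rn, #3". */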
112 | |||
113 | /* Move wide (immediate) */ | ||
114 | #define A64_MOVEW(sf, Rd, imm16, shift, type) \ | ||
115 | aarch64_insn_gen_movewide(Rd, imm16, shift, \ | ||
116 | A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type) | ||
117 | /* Rd = Zeros (for MOVZ); | ||
118 | * Rd |= imm16 << shift (where shift is one of {0, 16, 32, 48}); | ||
119 | * Rd = ~Rd (for MOVN). */ | ||
120 | #define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE) | ||
121 | #define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO) | ||
122 | #define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP) | ||
123 | |||
124 | /* Add/subtract (shifted register) */ | ||
125 | #define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \ | ||
126 | aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \ | ||
127 | A64_VARIANT(sf), AARCH64_INSN_ADSB_##type) | ||
128 | /* Rd = Rn OP Rm */ | ||
129 | #define A64_ADD(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD) | ||
130 | #define A64_SUB(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB) | ||
131 | #define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS) | ||
132 | /* Rd = -Rm */ | ||
133 | #define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm) | ||
134 | /* Rn - Rm; set condition flags */ | ||
135 | #define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm) | ||
136 | |||
137 | /* Data-processing (1 source) */ | ||
138 | #define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \ | ||
139 | A64_VARIANT(sf), AARCH64_INSN_DATA1_##type) | ||
140 | /* Rd = BSWAPx(Rn) */ | ||
141 | #define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16) | ||
142 | #define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32) | ||
143 | #define A64_REV64(Rd, Rn) A64_DATA1(1, Rd, Rn, REVERSE_64) | ||
144 | |||
145 | /* Data-processing (2 source) */ | ||
146 | /* Rd = Rn OP Rm */ | ||
147 | #define A64_UDIV(sf, Rd, Rn, Rm) aarch64_insn_gen_data2(Rd, Rn, Rm, \ | ||
148 | A64_VARIANT(sf), AARCH64_INSN_DATA2_UDIV) | ||
149 | |||
150 | /* Data-processing (3 source) */ | ||
151 | /* Rd = Ra + Rn * Rm */ | ||
152 | #define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \ | ||
153 | A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD) | ||
154 | /* Rd = Rn * Rm */ | ||
155 | #define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm) | ||
156 | |||
157 | /* Logical (shifted register) */ | ||
158 | #define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \ | ||
159 | aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \ | ||
160 | A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type) | ||
161 | /* Rd = Rn OP Rm */ | ||
162 | #define A64_AND(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND) | ||
163 | #define A64_ORR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR) | ||
164 | #define A64_EOR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR) | ||
165 | #define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS) | ||
166 | /* Rn & Rm; set condition flags */ | ||
167 | #define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm) | ||
168 | |||
169 | #endif /* _BPF_JIT_H */ | ||
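Several of the definitions above lean on A64 aliases rather than dedicated opcodes; two short examples of what the generators are asked to produce (a sketch, assuming the insn.h generators encode what their names say):

    /* Register move as ADD-immediate with #0: behaves as "mov x0, x1" */
    u32 mov = A64_MOV(1, A64_R(0), A64_R(1));

    /* Compare as SUBS writing the zero register: "subs xzr, x0, x1",
     * the canonical alias for "cmp x0, x1" */
    u32 cmp = A64_CMP(1, A64_R(0), A64_R(1));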
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
new file mode 100644
index 000000000000..7ae33545535b
--- /dev/null
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -0,0 +1,679 @@ | |||
1 | /* | ||
2 | * BPF JIT compiler for ARM64 | ||
3 | * | ||
4 | * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #define pr_fmt(fmt) "bpf_jit: " fmt | ||
20 | |||
21 | #include <linux/filter.h> | ||
22 | #include <linux/moduleloader.h> | ||
23 | #include <linux/printk.h> | ||
24 | #include <linux/skbuff.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <asm/byteorder.h> | ||
27 | #include <asm/cacheflush.h> | ||
28 | |||
29 | #include "bpf_jit.h" | ||
30 | |||
31 | int bpf_jit_enable __read_mostly; | ||
32 | |||
33 | #define TMP_REG_1 (MAX_BPF_REG + 0) | ||
34 | #define TMP_REG_2 (MAX_BPF_REG + 1) | ||
35 | |||
36 | /* Map BPF registers to A64 registers */ | ||
37 | static const int bpf2a64[] = { | ||
38 | /* return value from in-kernel function, and exit value from eBPF */ | ||
39 | [BPF_REG_0] = A64_R(7), | ||
40 | /* arguments from eBPF program to in-kernel function */ | ||
41 | [BPF_REG_1] = A64_R(0), | ||
42 | [BPF_REG_2] = A64_R(1), | ||
43 | [BPF_REG_3] = A64_R(2), | ||
44 | [BPF_REG_4] = A64_R(3), | ||
45 | [BPF_REG_5] = A64_R(4), | ||
46 | /* callee saved registers that in-kernel function will preserve */ | ||
47 | [BPF_REG_6] = A64_R(19), | ||
48 | [BPF_REG_7] = A64_R(20), | ||
49 | [BPF_REG_8] = A64_R(21), | ||
50 | [BPF_REG_9] = A64_R(22), | ||
51 | /* read-only frame pointer to access stack */ | ||
52 | [BPF_REG_FP] = A64_FP, | ||
53 | /* temporary register for internal BPF JIT */ | ||
54 | [TMP_REG_1] = A64_R(23), | ||
55 | [TMP_REG_2] = A64_R(24), | ||
56 | }; | ||
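The mapping exploits the AAPCS64 split: x0-x4 carry the five eBPF argument registers directly into in-kernel helpers, while x19-x24 are callee-saved, so BPF_REG_6-9 and the JIT temporaries survive helper calls. For example:

    /* For a helper call "r0 = f(r1, r2)" no argument shuffling is
     * needed: r1 and r2 already live in x0 and x1.  Only the result
     * moves, x0 -> x7 (BPF_REG_0), as the BPF_CALL case below does. */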
57 | |||
58 | struct jit_ctx { | ||
59 | const struct bpf_prog *prog; | ||
60 | int idx; | ||
61 | int tmp_used; | ||
62 | int body_offset; | ||
63 | int *offset; | ||
64 | u32 *image; | ||
65 | }; | ||
66 | |||
67 | static inline void emit(const u32 insn, struct jit_ctx *ctx) | ||
68 | { | ||
69 | if (ctx->image != NULL) | ||
70 | ctx->image[ctx->idx] = cpu_to_le32(insn); | ||
71 | |||
72 | ctx->idx++; | ||
73 | } | ||
74 | |||
75 | static inline void emit_a64_mov_i64(const int reg, const u64 val, | ||
76 | struct jit_ctx *ctx) | ||
77 | { | ||
78 | u64 tmp = val; | ||
79 | int shift = 0; | ||
80 | |||
81 | emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx); | ||
82 | tmp >>= 16; | ||
83 | shift += 16; | ||
84 | while (tmp) { | ||
85 | if (tmp & 0xffff) | ||
86 | emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx); | ||
87 | tmp >>= 16; | ||
88 | shift += 16; | ||
89 | } | ||
90 | } | ||
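The loop emits one MOVZ plus a MOVK per non-zero 16-bit chunk, so small constants stay cheap and a full 64-bit pointer costs at most four instructions. A concrete trace:

    /* emit_a64_mov_i64(reg, 0x12345678, ctx) emits two instructions:
     *   movz reg, #0x5678            bits 0-15
     *   movk reg, #0x1234, lsl #16   bits 16-31
     * (all-zero upper chunks are skipped by the while loop) */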
91 | |||
92 | static inline void emit_a64_mov_i(const int is64, const int reg, | ||
93 | const s32 val, struct jit_ctx *ctx) | ||
94 | { | ||
95 | u16 hi = val >> 16; | ||
96 | u16 lo = val & 0xffff; | ||
97 | |||
98 | if (hi & 0x8000) { | ||
99 | if (hi == 0xffff) { | ||
100 | emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx); | ||
101 | } else { | ||
102 | emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx); | ||
103 | emit(A64_MOVK(is64, reg, lo, 0), ctx); | ||
104 | } | ||
105 | } else { | ||
106 | emit(A64_MOVZ(is64, reg, lo, 0), ctx); | ||
107 | if (hi) | ||
108 | emit(A64_MOVK(is64, reg, hi, 16), ctx); | ||
109 | } | ||
110 | } | ||
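The sign test keeps small negative 32-bit immediates at a single instruction by going through MOVN. Two illustrative inputs:

    /* emit_a64_mov_i(0, reg, -5, ctx): hi == 0xffff, lo == 0xfffb,
     *   so one "movn wreg, #4" suffices (~4 == 0xfffffffb == -5).
     * emit_a64_mov_i(0, reg, -0x12345, ctx): hi == 0xfffe, so it takes
     *   "movn wreg, #1, lsl #16" followed by "movk wreg, #0xdcbb". */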
111 | |||
112 | static inline int bpf2a64_offset(int bpf_to, int bpf_from, | ||
113 | const struct jit_ctx *ctx) | ||
114 | { | ||
115 | int to = ctx->offset[bpf_to + 1]; | ||
116 | /* -1 to account for the Branch instruction */ | ||
117 | int from = ctx->offset[bpf_from + 1] - 1; | ||
118 | |||
119 | return to - from; | ||
120 | } | ||
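The + 1 and - 1 encode eBPF branch semantics: a jump offset is relative to the instruction after the branch, so the target of insn i with offset off is BPF insn i + off + 1, and the branch itself is assumed to be the last A64 instruction generated for insn i. A small trace under those assumptions:

    /* BPF insn 3 with off == 2 (skip insns 4 and 5, land on 6):
     *   to   = ctx->offset[3 + 2 + 1]    first A64 insn of BPF insn 6
     *   from = ctx->offset[3 + 1] - 1    index of the branch just emitted
     *   return to - from;                in A64 words, <<2 later for bytes */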
121 | |||
122 | static inline int epilogue_offset(const struct jit_ctx *ctx) | ||
123 | { | ||
124 | int to = ctx->offset[ctx->prog->len - 1]; | ||
125 | int from = ctx->idx - ctx->body_offset; | ||
126 | |||
127 | return to - from; | ||
128 | } | ||
129 | |||
130 | /* Stack must be a multiple of 16 bytes */ | ||
131 | #define STACK_ALIGN(sz) (((sz) + 15) & ~15) | ||
132 | |||
133 | static void build_prologue(struct jit_ctx *ctx) | ||
134 | { | ||
135 | const u8 r6 = bpf2a64[BPF_REG_6]; | ||
136 | const u8 r7 = bpf2a64[BPF_REG_7]; | ||
137 | const u8 r8 = bpf2a64[BPF_REG_8]; | ||
138 | const u8 r9 = bpf2a64[BPF_REG_9]; | ||
139 | const u8 fp = bpf2a64[BPF_REG_FP]; | ||
140 | const u8 ra = bpf2a64[BPF_REG_A]; | ||
141 | const u8 rx = bpf2a64[BPF_REG_X]; | ||
142 | const u8 tmp1 = bpf2a64[TMP_REG_1]; | ||
143 | const u8 tmp2 = bpf2a64[TMP_REG_2]; | ||
144 | int stack_size = MAX_BPF_STACK; | ||
145 | |||
146 | stack_size += 4; /* extra for skb_copy_bits buffer */ | ||
147 | stack_size = STACK_ALIGN(stack_size); | ||
148 | |||
149 | /* Save callee-saved registers */ | ||
150 | emit(A64_PUSH(r6, r7, A64_SP), ctx); | ||
151 | emit(A64_PUSH(r8, r9, A64_SP), ctx); | ||
152 | if (ctx->tmp_used) | ||
153 | emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx); | ||
154 | |||
155 | /* Set up BPF stack */ | ||
156 | emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); | ||
157 | |||
158 | /* Set up frame pointer */ | ||
159 | emit(A64_MOV(1, fp, A64_SP), ctx); | ||
160 | |||
161 | /* Clear registers A and X */ | ||
162 | emit_a64_mov_i64(ra, 0, ctx); | ||
163 | emit_a64_mov_i64(rx, 0, ctx); | ||
164 | } | ||
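With MAX_BPF_STACK at 512 bytes, the arithmetic above reserves 528 bytes below the saved register pairs (a sketch of the sizing only):

    /* stack_size = STACK_ALIGN(512 + 4) = (516 + 15) & ~15 = 528:
     * 512 for the BPF program's stack, 4 for the skb_copy_bits()
     * scratch word, rounded up to the 16-byte SP alignment that
     * AArch64 requires. */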
165 | |||
166 | static void build_epilogue(struct jit_ctx *ctx) | ||
167 | { | ||
168 | const u8 r0 = bpf2a64[BPF_REG_0]; | ||
169 | const u8 r6 = bpf2a64[BPF_REG_6]; | ||
170 | const u8 r7 = bpf2a64[BPF_REG_7]; | ||
171 | const u8 r8 = bpf2a64[BPF_REG_8]; | ||
172 | const u8 r9 = bpf2a64[BPF_REG_9]; | ||
173 | const u8 fp = bpf2a64[BPF_REG_FP]; | ||
174 | const u8 tmp1 = bpf2a64[TMP_REG_1]; | ||
175 | const u8 tmp2 = bpf2a64[TMP_REG_2]; | ||
176 | int stack_size = MAX_BPF_STACK; | ||
177 | |||
178 | stack_size += 4; /* extra for skb_copy_bits buffer */ | ||
179 | stack_size = STACK_ALIGN(stack_size); | ||
180 | |||
181 | /* We're done with BPF stack */ | ||
182 | emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); | ||
183 | |||
184 | /* Restore callee-saved registers */ | ||
185 | if (ctx->tmp_used) | ||
186 | emit(A64_POP(tmp1, tmp2, A64_SP), ctx); | ||
187 | emit(A64_POP(r8, r9, A64_SP), ctx); | ||
188 | emit(A64_POP(r6, r7, A64_SP), ctx); | ||
189 | |||
190 | /* Restore frame pointer */ | ||
191 | emit(A64_MOV(1, fp, A64_SP), ctx); | ||
192 | |||
193 | /* Set return value */ | ||
194 | emit(A64_MOV(1, A64_R(0), r0), ctx); | ||
195 | |||
196 | emit(A64_RET(A64_LR), ctx); | ||
197 | } | ||
198 | |||
199 | static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) | ||
200 | { | ||
201 | const u8 code = insn->code; | ||
202 | const u8 dst = bpf2a64[insn->dst_reg]; | ||
203 | const u8 src = bpf2a64[insn->src_reg]; | ||
204 | const u8 tmp = bpf2a64[TMP_REG_1]; | ||
205 | const u8 tmp2 = bpf2a64[TMP_REG_2]; | ||
206 | const s16 off = insn->off; | ||
207 | const s32 imm = insn->imm; | ||
208 | const int i = insn - ctx->prog->insnsi; | ||
209 | const bool is64 = BPF_CLASS(code) == BPF_ALU64; | ||
210 | u8 jmp_cond; | ||
211 | s32 jmp_offset; | ||
212 | |||
213 | switch (code) { | ||
214 | /* dst = src */ | ||
215 | case BPF_ALU | BPF_MOV | BPF_X: | ||
216 | case BPF_ALU64 | BPF_MOV | BPF_X: | ||
217 | emit(A64_MOV(is64, dst, src), ctx); | ||
218 | break; | ||
219 | /* dst = dst OP src */ | ||
220 | case BPF_ALU | BPF_ADD | BPF_X: | ||
221 | case BPF_ALU64 | BPF_ADD | BPF_X: | ||
222 | emit(A64_ADD(is64, dst, dst, src), ctx); | ||
223 | break; | ||
224 | case BPF_ALU | BPF_SUB | BPF_X: | ||
225 | case BPF_ALU64 | BPF_SUB | BPF_X: | ||
226 | emit(A64_SUB(is64, dst, dst, src), ctx); | ||
227 | break; | ||
228 | case BPF_ALU | BPF_AND | BPF_X: | ||
229 | case BPF_ALU64 | BPF_AND | BPF_X: | ||
230 | emit(A64_AND(is64, dst, dst, src), ctx); | ||
231 | break; | ||
232 | case BPF_ALU | BPF_OR | BPF_X: | ||
233 | case BPF_ALU64 | BPF_OR | BPF_X: | ||
234 | emit(A64_ORR(is64, dst, dst, src), ctx); | ||
235 | break; | ||
236 | case BPF_ALU | BPF_XOR | BPF_X: | ||
237 | case BPF_ALU64 | BPF_XOR | BPF_X: | ||
238 | emit(A64_EOR(is64, dst, dst, src), ctx); | ||
239 | break; | ||
240 | case BPF_ALU | BPF_MUL | BPF_X: | ||
241 | case BPF_ALU64 | BPF_MUL | BPF_X: | ||
242 | emit(A64_MUL(is64, dst, dst, src), ctx); | ||
243 | break; | ||
244 | case BPF_ALU | BPF_DIV | BPF_X: | ||
245 | case BPF_ALU64 | BPF_DIV | BPF_X: | ||
246 | emit(A64_UDIV(is64, dst, dst, src), ctx); | ||
247 | break; | ||
248 | case BPF_ALU | BPF_MOD | BPF_X: | ||
249 | case BPF_ALU64 | BPF_MOD | BPF_X: | ||
250 | ctx->tmp_used = 1; | ||
251 | emit(A64_UDIV(is64, tmp, dst, src), ctx); | ||
252 | emit(A64_MUL(is64, tmp, tmp, src), ctx); | ||
253 | emit(A64_SUB(is64, dst, dst, tmp), ctx); | ||
254 | break; | ||
255 | /* dst = -dst */ | ||
256 | case BPF_ALU | BPF_NEG: | ||
257 | case BPF_ALU64 | BPF_NEG: | ||
258 | emit(A64_NEG(is64, dst, dst), ctx); | ||
259 | break; | ||
260 | /* dst = BSWAP##imm(dst) */ | ||
261 | case BPF_ALU | BPF_END | BPF_FROM_LE: | ||
262 | case BPF_ALU | BPF_END | BPF_FROM_BE: | ||
263 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
264 | if (BPF_SRC(code) == BPF_FROM_BE) | ||
265 | break; | ||
266 | #else /* !CONFIG_CPU_BIG_ENDIAN */ | ||
267 | if (BPF_SRC(code) == BPF_FROM_LE) | ||
268 | break; | ||
269 | #endif | ||
270 | switch (imm) { | ||
271 | case 16: | ||
272 | emit(A64_REV16(is64, dst, dst), ctx); | ||
273 | break; | ||
274 | case 32: | ||
275 | emit(A64_REV32(is64, dst, dst), ctx); | ||
276 | break; | ||
277 | case 64: | ||
278 | emit(A64_REV64(dst, dst), ctx); | ||
279 | break; | ||
280 | } | ||
281 | break; | ||
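The #ifdef pair implements "swap only when source and CPU endianness differ": the conversion matching the CPU's own byte order is a no-op, the other one lowers to a reverse instruction. For instance:

    /* Little-endian kernel, insn "dst = be32 dst" (BPF_FROM_BE, imm 32):
     * the FROM_LE early break is not taken, so the JIT emits a 32-bit
     * byte reverse (A64_REV32) of dst.  "dst = le32 dst" emits nothing. */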
282 | /* dst = imm */ | ||
283 | case BPF_ALU | BPF_MOV | BPF_K: | ||
284 | case BPF_ALU64 | BPF_MOV | BPF_K: | ||
285 | emit_a64_mov_i(is64, dst, imm, ctx); | ||
286 | break; | ||
287 | /* dst = dst OP imm */ | ||
288 | case BPF_ALU | BPF_ADD | BPF_K: | ||
289 | case BPF_ALU64 | BPF_ADD | BPF_K: | ||
290 | ctx->tmp_used = 1; | ||
291 | emit_a64_mov_i(is64, tmp, imm, ctx); | ||
292 | emit(A64_ADD(is64, dst, dst, tmp), ctx); | ||
293 | break; | ||
294 | case BPF_ALU | BPF_SUB | BPF_K: | ||
295 | case BPF_ALU64 | BPF_SUB | BPF_K: | ||
296 | ctx->tmp_used = 1; | ||
297 | emit_a64_mov_i(is64, tmp, imm, ctx); | ||
298 | emit(A64_SUB(is64, dst, dst, tmp), ctx); | ||
299 | break; | ||
300 | case BPF_ALU | BPF_AND | BPF_K: | ||
301 | case BPF_ALU64 | BPF_AND | BPF_K: | ||
302 | ctx->tmp_used = 1; | ||
303 | emit_a64_mov_i(is64, tmp, imm, ctx); | ||
304 | emit(A64_AND(is64, dst, dst, tmp), ctx); | ||
305 | break; | ||
306 | case BPF_ALU | BPF_OR | BPF_K: | ||
307 | case BPF_ALU64 | BPF_OR | BPF_K: | ||
308 | ctx->tmp_used = 1; | ||
309 | emit_a64_mov_i(is64, tmp, imm, ctx); | ||
310 | emit(A64_ORR(is64, dst, dst, tmp), ctx); | ||
311 | break; | ||
312 | case BPF_ALU | BPF_XOR | BPF_K: | ||
313 | case BPF_ALU64 | BPF_XOR | BPF_K: | ||
314 | ctx->tmp_used = 1; | ||
315 | emit_a64_mov_i(is64, tmp, imm, ctx); | ||
316 | emit(A64_EOR(is64, dst, dst, tmp), ctx); | ||
317 | break; | ||
318 | case BPF_ALU | BPF_MUL | BPF_K: | ||
319 | case BPF_ALU64 | BPF_MUL | BPF_K: | ||
320 | ctx->tmp_used = 1; | ||
321 | emit_a64_mov_i(is64, tmp, imm, ctx); | ||
322 | emit(A64_MUL(is64, dst, dst, tmp), ctx); | ||
323 | break; | ||
324 | case BPF_ALU | BPF_DIV | BPF_K: | ||
325 | case BPF_ALU64 | BPF_DIV | BPF_K: | ||
326 | ctx->tmp_used = 1; | ||
327 | emit_a64_mov_i(is64, tmp, imm, ctx); | ||
328 | emit(A64_UDIV(is64, dst, dst, tmp), ctx); | ||
329 | break; | ||
330 | case BPF_ALU | BPF_MOD | BPF_K: | ||
331 | case BPF_ALU64 | BPF_MOD | BPF_K: | ||
332 | ctx->tmp_used = 1; | ||
333 | emit_a64_mov_i(is64, tmp2, imm, ctx); | ||
334 | emit(A64_UDIV(is64, tmp, dst, tmp2), ctx); | ||
335 | emit(A64_MUL(is64, tmp, tmp, tmp2), ctx); | ||
336 | emit(A64_SUB(is64, dst, dst, tmp), ctx); | ||
337 | break; | ||
338 | case BPF_ALU | BPF_LSH | BPF_K: | ||
339 | case BPF_ALU64 | BPF_LSH | BPF_K: | ||
340 | emit(A64_LSL(is64, dst, dst, imm), ctx); | ||
341 | break; | ||
342 | case BPF_ALU | BPF_RSH | BPF_K: | ||
343 | case BPF_ALU64 | BPF_RSH | BPF_K: | ||
344 | emit(A64_LSR(is64, dst, dst, imm), ctx); | ||
345 | break; | ||
346 | case BPF_ALU | BPF_ARSH | BPF_K: | ||
347 | case BPF_ALU64 | BPF_ARSH | BPF_K: | ||
348 | emit(A64_ASR(is64, dst, dst, imm), ctx); | ||
349 | break; | ||
350 | |||
351 | #define check_imm(bits, imm) do { \ | ||
352 | if ((((imm) > 0) && ((imm) >> (bits))) || \ | ||
353 | (((imm) < 0) && (~(imm) >> (bits)))) { \ | ||
354 | pr_info("[%2d] imm=%d(0x%x) out of range\n", \ | ||
355 | i, imm, imm); \ | ||
356 | return -EINVAL; \ | ||
357 | } \ | ||
358 | } while (0) | ||
359 | #define check_imm19(imm) check_imm(19, imm) | ||
360 | #define check_imm26(imm) check_imm(26, imm) | ||
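check_imm() is a sign-aware width test: a non-negative value must have no bits at or above position `bits`, and a negative one must be all sign bits there. Two probes, purely illustrative:

    /* check_imm(19,  0x40000): 0x40000 >> 19 == 0      -> in range
     * check_imm(19,  0x80000): 0x80000 >> 19 == 1      -> -EINVAL
     * check_imm(19, -0x80000): ~(-0x80000) >> 19 == 0  -> in range */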
361 | |||
362 | /* JUMP off */ | ||
363 | case BPF_JMP | BPF_JA: | ||
364 | jmp_offset = bpf2a64_offset(i + off, i, ctx); | ||
365 | check_imm26(jmp_offset); | ||
366 | emit(A64_B(jmp_offset), ctx); | ||
367 | break; | ||
368 | /* IF (dst COND src) JUMP off */ | ||
369 | case BPF_JMP | BPF_JEQ | BPF_X: | ||
370 | case BPF_JMP | BPF_JGT | BPF_X: | ||
371 | case BPF_JMP | BPF_JGE | BPF_X: | ||
372 | case BPF_JMP | BPF_JNE | BPF_X: | ||
373 | case BPF_JMP | BPF_JSGT | BPF_X: | ||
374 | case BPF_JMP | BPF_JSGE | BPF_X: | ||
375 | emit(A64_CMP(1, dst, src), ctx); | ||
376 | emit_cond_jmp: | ||
377 | jmp_offset = bpf2a64_offset(i + off, i, ctx); | ||
378 | check_imm19(jmp_offset); | ||
379 | switch (BPF_OP(code)) { | ||
380 | case BPF_JEQ: | ||
381 | jmp_cond = A64_COND_EQ; | ||
382 | break; | ||
383 | case BPF_JGT: | ||
384 | jmp_cond = A64_COND_HI; | ||
385 | break; | ||
386 | case BPF_JGE: | ||
387 | jmp_cond = A64_COND_CS; | ||
388 | break; | ||
389 | case BPF_JNE: | ||
390 | jmp_cond = A64_COND_NE; | ||
391 | break; | ||
392 | case BPF_JSGT: | ||
393 | jmp_cond = A64_COND_GT; | ||
394 | break; | ||
395 | case BPF_JSGE: | ||
396 | jmp_cond = A64_COND_GE; | ||
397 | break; | ||
398 | default: | ||
399 | return -EFAULT; | ||
400 | } | ||
401 | emit(A64_B_(jmp_cond, jmp_offset), ctx); | ||
402 | break; | ||
403 | case BPF_JMP | BPF_JSET | BPF_X: | ||
404 | emit(A64_TST(1, dst, src), ctx); | ||
405 | goto emit_cond_jmp; | ||
406 | /* IF (dst COND imm) JUMP off */ | ||
407 | case BPF_JMP | BPF_JEQ | BPF_K: | ||
408 | case BPF_JMP | BPF_JGT | BPF_K: | ||
409 | case BPF_JMP | BPF_JGE | BPF_K: | ||
410 | case BPF_JMP | BPF_JNE | BPF_K: | ||
411 | case BPF_JMP | BPF_JSGT | BPF_K: | ||
412 | case BPF_JMP | BPF_JSGE | BPF_K: | ||
413 | ctx->tmp_used = 1; | ||
414 | emit_a64_mov_i(1, tmp, imm, ctx); | ||
415 | emit(A64_CMP(1, dst, tmp), ctx); | ||
416 | goto emit_cond_jmp; | ||
417 | case BPF_JMP | BPF_JSET | BPF_K: | ||
418 | ctx->tmp_used = 1; | ||
419 | emit_a64_mov_i(1, tmp, imm, ctx); | ||
420 | emit(A64_TST(1, dst, tmp), ctx); | ||
421 | goto emit_cond_jmp; | ||
422 | /* function call */ | ||
423 | case BPF_JMP | BPF_CALL: | ||
424 | { | ||
425 | const u8 r0 = bpf2a64[BPF_REG_0]; | ||
426 | const u64 func = (u64)__bpf_call_base + imm; | ||
427 | |||
428 | ctx->tmp_used = 1; | ||
429 | emit_a64_mov_i64(tmp, func, ctx); | ||
430 | emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); | ||
431 | emit(A64_MOV(1, A64_FP, A64_SP), ctx); | ||
432 | emit(A64_BLR(tmp), ctx); | ||
433 | emit(A64_MOV(1, r0, A64_R(0)), ctx); | ||
434 | emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); | ||
435 | break; | ||
436 | } | ||
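The address recovery mirrors the eBPF interpreter's convention: callers encode the call's imm as helper - __bpf_call_base, so adding the base back yields an absolute address that fits the movz/movk loader. In outline:

    /* If imm was fixed up as (some_helper - __bpf_call_base), then
     * func == (u64)__bpf_call_base + imm == some_helper; it is loaded
     * into tmp (up to 4 movz/movk) and called via blr, with fp/lr
     * saved as a pair around the call. */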
437 | /* function return */ | ||
438 | case BPF_JMP | BPF_EXIT: | ||
439 | if (i == ctx->prog->len - 1) | ||
440 | break; | ||
441 | jmp_offset = epilogue_offset(ctx); | ||
442 | check_imm26(jmp_offset); | ||
443 | emit(A64_B(jmp_offset), ctx); | ||
444 | break; | ||
445 | |||
446 | /* LDX: dst = *(size *)(src + off) */ | ||
447 | case BPF_LDX | BPF_MEM | BPF_W: | ||
448 | case BPF_LDX | BPF_MEM | BPF_H: | ||
449 | case BPF_LDX | BPF_MEM | BPF_B: | ||
450 | case BPF_LDX | BPF_MEM | BPF_DW: | ||
451 | ctx->tmp_used = 1; | ||
452 | emit_a64_mov_i(1, tmp, off, ctx); | ||
453 | switch (BPF_SIZE(code)) { | ||
454 | case BPF_W: | ||
455 | emit(A64_LDR32(dst, src, tmp), ctx); | ||
456 | break; | ||
457 | case BPF_H: | ||
458 | emit(A64_LDRH(dst, src, tmp), ctx); | ||
459 | break; | ||
460 | case BPF_B: | ||
461 | emit(A64_LDRB(dst, src, tmp), ctx); | ||
462 | break; | ||
463 | case BPF_DW: | ||
464 | emit(A64_LDR64(dst, src, tmp), ctx); | ||
465 | break; | ||
466 | } | ||
467 | break; | ||
468 | |||
469 | /* ST: *(size *)(dst + off) = imm */ | ||
470 | case BPF_ST | BPF_MEM | BPF_W: | ||
471 | case BPF_ST | BPF_MEM | BPF_H: | ||
472 | case BPF_ST | BPF_MEM | BPF_B: | ||
473 | case BPF_ST | BPF_MEM | BPF_DW: | ||
474 | goto notyet; | ||
475 | |||
476 | /* STX: *(size *)(dst + off) = src */ | ||
477 | case BPF_STX | BPF_MEM | BPF_W: | ||
478 | case BPF_STX | BPF_MEM | BPF_H: | ||
479 | case BPF_STX | BPF_MEM | BPF_B: | ||
480 | case BPF_STX | BPF_MEM | BPF_DW: | ||
481 | ctx->tmp_used = 1; | ||
482 | emit_a64_mov_i(1, tmp, off, ctx); | ||
483 | switch (BPF_SIZE(code)) { | ||
484 | case BPF_W: | ||
485 | emit(A64_STR32(src, dst, tmp), ctx); | ||
486 | break; | ||
487 | case BPF_H: | ||
488 | emit(A64_STRH(src, dst, tmp), ctx); | ||
489 | break; | ||
490 | case BPF_B: | ||
491 | emit(A64_STRB(src, dst, tmp), ctx); | ||
492 | break; | ||
493 | case BPF_DW: | ||
494 | emit(A64_STR64(src, dst, tmp), ctx); | ||
495 | break; | ||
496 | } | ||
497 | break; | ||
498 | /* STX XADD: lock *(u32 *)(dst + off) += src */ | ||
499 | case BPF_STX | BPF_XADD | BPF_W: | ||
500 | /* STX XADD: lock *(u64 *)(dst + off) += src */ | ||
501 | case BPF_STX | BPF_XADD | BPF_DW: | ||
502 | goto notyet; | ||
503 | |||
504 | /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ | ||
505 | case BPF_LD | BPF_ABS | BPF_W: | ||
506 | case BPF_LD | BPF_ABS | BPF_H: | ||
507 | case BPF_LD | BPF_ABS | BPF_B: | ||
508 | /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */ | ||
509 | case BPF_LD | BPF_IND | BPF_W: | ||
510 | case BPF_LD | BPF_IND | BPF_H: | ||
511 | case BPF_LD | BPF_IND | BPF_B: | ||
512 | { | ||
513 | const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */ | ||
514 | const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */ | ||
515 | const u8 fp = bpf2a64[BPF_REG_FP]; | ||
516 | const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */ | ||
517 | const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */ | ||
518 | const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */ | ||
519 | const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */ | ||
520 | const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */ | ||
521 | int size; | ||
522 | |||
523 | emit(A64_MOV(1, r1, r6), ctx); | ||
524 | emit_a64_mov_i(0, r2, imm, ctx); | ||
525 | if (BPF_MODE(code) == BPF_IND) | ||
526 | emit(A64_ADD(0, r2, r2, src), ctx); | ||
527 | switch (BPF_SIZE(code)) { | ||
528 | case BPF_W: | ||
529 | size = 4; | ||
530 | break; | ||
531 | case BPF_H: | ||
532 | size = 2; | ||
533 | break; | ||
534 | case BPF_B: | ||
535 | size = 1; | ||
536 | break; | ||
537 | default: | ||
538 | return -EINVAL; | ||
539 | } | ||
540 | emit_a64_mov_i64(r3, size, ctx); | ||
541 | emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx); | ||
542 | emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); | ||
543 | emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); | ||
544 | emit(A64_MOV(1, A64_FP, A64_SP), ctx); | ||
545 | emit(A64_BLR(r5), ctx); | ||
546 | emit(A64_MOV(1, r0, A64_R(0)), ctx); | ||
547 | emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); | ||
548 | |||
549 | jmp_offset = epilogue_offset(ctx); | ||
550 | check_imm19(jmp_offset); | ||
551 | emit(A64_CBZ(1, r0, jmp_offset), ctx); | ||
552 | emit(A64_MOV(1, r5, r0), ctx); | ||
553 | switch (BPF_SIZE(code)) { | ||
554 | case BPF_W: | ||
555 | emit(A64_LDR32(r0, r5, A64_ZR), ctx); | ||
556 | #ifndef CONFIG_CPU_BIG_ENDIAN | ||
557 | emit(A64_REV32(0, r0, r0), ctx); | ||
558 | #endif | ||
559 | break; | ||
560 | case BPF_H: | ||
561 | emit(A64_LDRH(r0, r5, A64_ZR), ctx); | ||
562 | #ifndef CONFIG_CPU_BIG_ENDIAN | ||
563 | emit(A64_REV16(0, r0, r0), ctx); | ||
564 | #endif | ||
565 | break; | ||
566 | case BPF_B: | ||
567 | emit(A64_LDRB(r0, r5, A64_ZR), ctx); | ||
568 | break; | ||
569 | } | ||
570 | break; | ||
571 | } | ||
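This follows the classic-BPF packet-access contract: bpf_load_pointer() hands back a pointer either into the skb or into the on-stack scratch buffer at fp + MAX_BPF_STACK, or NULL when the offset is out of range, in which case the CBZ above exits the program with return value 0. Condensed, the generated code behaves like:

    /* ptr = bpf_load_pointer(skb, k, size, fp + 512);
     * if (ptr == NULL)
     *         return 0;    the cbz to the epilogue, with r0 == 0
     * r0 = *(uN *)ptr;     register-offset ldr with xzr (offset 0)
     * r0 = ntohN(r0);      rev16/rev32 on little-endian only */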
572 | notyet: | ||
573 | pr_info_once("*** NOT YET: opcode %02x ***\n", code); | ||
574 | return -EFAULT; | ||
575 | |||
576 | default: | ||
577 | pr_err_once("unknown opcode %02x\n", code); | ||
578 | return -EINVAL; | ||
579 | } | ||
580 | |||
581 | return 0; | ||
582 | } | ||
583 | |||
584 | static int build_body(struct jit_ctx *ctx) | ||
585 | { | ||
586 | const struct bpf_prog *prog = ctx->prog; | ||
587 | int i; | ||
588 | |||
589 | for (i = 0; i < prog->len; i++) { | ||
590 | const struct bpf_insn *insn = &prog->insnsi[i]; | ||
591 | int ret; | ||
592 | |||
593 | if (ctx->image == NULL) | ||
594 | ctx->offset[i] = ctx->idx; | ||
595 | |||
596 | ret = build_insn(insn, ctx); | ||
597 | if (ret) | ||
598 | return ret; | ||
599 | } | ||
600 | |||
601 | return 0; | ||
602 | } | ||
603 | |||
604 | static inline void bpf_flush_icache(void *start, void *end) | ||
605 | { | ||
606 | flush_icache_range((unsigned long)start, (unsigned long)end); | ||
607 | } | ||
608 | |||
609 | void bpf_jit_compile(struct bpf_prog *prog) | ||
610 | { | ||
611 | /* Nothing to do here; internal BPF is JITed via bpf_int_jit_compile(). */ | ||
612 | } | ||
613 | |||
614 | void bpf_int_jit_compile(struct bpf_prog *prog) | ||
615 | { | ||
616 | struct jit_ctx ctx; | ||
617 | int image_size; | ||
618 | |||
619 | if (!bpf_jit_enable) | ||
620 | return; | ||
621 | |||
622 | if (!prog || !prog->len) | ||
623 | return; | ||
624 | |||
625 | memset(&ctx, 0, sizeof(ctx)); | ||
626 | ctx.prog = prog; | ||
627 | |||
628 | ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL); | ||
629 | if (ctx.offset == NULL) | ||
630 | return; | ||
631 | |||
632 | /* 1. Initial fake pass to compute ctx->idx and size the image. */ | ||
633 | |||
634 | /* With ctx.image == NULL, build_body() only fills in ctx->offset. */ | ||
635 | if (build_body(&ctx)) | ||
636 | goto out; | ||
637 | |||
638 | build_prologue(&ctx); | ||
639 | |||
640 | build_epilogue(&ctx); | ||
641 | |||
642 | /* Now we know the actual image size. */ | ||
643 | image_size = sizeof(u32) * ctx.idx; | ||
644 | ctx.image = module_alloc(image_size); | ||
645 | if (unlikely(ctx.image == NULL)) | ||
646 | goto out; | ||
647 | |||
648 | /* 2. Now, the actual pass. */ | ||
649 | |||
650 | ctx.idx = 0; | ||
651 | build_prologue(&ctx); | ||
652 | |||
653 | ctx.body_offset = ctx.idx; | ||
654 | if (build_body(&ctx)) { | ||
655 | module_free(NULL, ctx.image); | ||
656 | goto out; | ||
657 | } | ||
658 | |||
659 | build_epilogue(&ctx); | ||
660 | |||
661 | /* And we're done. */ | ||
662 | if (bpf_jit_enable > 1) | ||
663 | bpf_jit_dump(prog->len, image_size, 2, ctx.image); | ||
664 | |||
665 | bpf_flush_icache(ctx.image, ctx.image + ctx.idx); | ||
666 | prog->bpf_func = (void *)ctx.image; | ||
667 | prog->jited = 1; | ||
668 | |||
669 | out: | ||
670 | kfree(ctx.offset); | ||
671 | } | ||
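The function above is the usual two-pass JIT shape: in pass 1 ctx.image is NULL, so emit() merely advances ctx.idx, which both sizes the image and fills ctx->offset for branch resolution; pass 2 replays the same generators into the real buffer. The invariant worth noting:

    /* Pass 1 (ctx.image == NULL): emit() only counts.
     *   ctx.offset[i]  first A64 index of BPF insn i
     *   image_size     sizeof(u32) * ctx.idx
     * Pass 2 (ctx.image != NULL): emit() stores cpu_to_le32(insn).
     * Both passes must take identical paths, or the offsets recorded
     * in pass 1 no longer match what pass 2 emits. */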
672 | |||
673 | void bpf_jit_free(struct bpf_prog *prog) | ||
674 | { | ||
675 | if (prog->jited) | ||
676 | module_free(NULL, prog->bpf_func); | ||
677 | |||
678 | kfree(prog); | ||
679 | } | ||