author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 21:03:54 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 21:03:54 -0500
commit		6b00f7efb5303418c231994c91fb8239f5ada260 (patch)
tree		1daba87ccda34e632ea39dedc5055391c7e94bdc /arch
parent		b3d6524ff7956c5a898d51a18eaecb62a60a2b84 (diff)
parent		d476d94f180af3f0fca77394651d4a98f4df1c54 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
"arm64 updates for 3.20:
- reimplementation of the virtual remapping of UEFI Runtime Services
in a way that is stable across kexec
- emulation of the "setend" instruction for 32-bit tasks (user
endianness switching trapped in the kernel, SCTLR_EL1.E0E bit set
accordingly)
- compat_sys_call_table implemented in C (moved from asm) and made a
constant array, together with sys_call_table
- export CPU cache information via /sys (like other architectures)
- DMA API implementation clean-up in preparation for IOMMU support
- macros clean-up for KVM
- dropped some unnecessary cache+tlb maintenance
- CONFIG_ARM64_CPU_SUSPEND clean-up
- defconfig update (CPU_IDLE)
The EFI changes going via the arm64 tree have been acked by Matt
Fleming. There is also a patch adding sys_*stat64 prototypes to
include/linux/syscalls.h, acked by Andrew Morton"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (47 commits)
arm64: compat: Remove incorrect comment in compat_siginfo
arm64: Fix section mismatch on alloc_init_p[mu]d()
arm64: Avoid breakage caused by .altmacro in fpsimd save/restore macros
arm64: mm: use *_sect to check for section maps
arm64: drop unnecessary cache+tlb maintenance
arm64:mm: free the useless initial page table
arm64: Enable CPU_IDLE in defconfig
arm64: kernel: remove ARM64_CPU_SUSPEND config option
arm64: make sys_call_table const
arm64: Remove asm/syscalls.h
arm64: Implement the compat_sys_call_table in C
syscalls: Declare sys_*stat64 prototypes if __ARCH_WANT_(COMPAT_)STAT64
compat: Declare compat_sys_sigpending and compat_sys_sigprocmask prototypes
arm64: uapi: expose our struct ucontext to the uapi headers
smp, ARM64: Kill SMP single function call interrupt
arm64: Emulate SETEND for AArch32 tasks
arm64: Consolidate hotplug notifier for instruction emulation
arm64: Track system support for mixed endian EL0
arm64: implement generic IOMMU configuration
arm64: Combine coherent and non-coherent swiotlb dma_ops
...
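Several of the items above are easiest to see from the consumer side. The "export CPU cache information via /sys" change hooks arm64 into the generic cacheinfo sysfs interface; the program below is an illustrative userspace reader, not part of the patch. Attribute names follow the generic cacheinfo ABI (Documentation/ABI/testing/sysfs-devices-system-cpu), and which attributes actually appear depends on the kernel and CPU.

/*
 * Illustrative reader for the generic cacheinfo sysfs interface.
 * Paths and attribute names follow the generic cacheinfo ABI; any
 * attribute a given system does not expose is simply skipped.
 */
#include <stdio.h>

int main(void)
{
        const char *attrs[] = { "level", "type", "size",
                                "coherency_line_size", "number_of_sets",
                                "ways_of_associativity" };
        char path[128], buf[64];

        for (unsigned int i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
                snprintf(path, sizeof(path),
                         "/sys/devices/system/cpu/cpu0/cache/index0/%s",
                         attrs[i]);
                FILE *f = fopen(path, "r");
                if (!f)
                        continue;       /* attribute not exposed here */
                if (fgets(buf, sizeof(buf), f))
                        printf("%s: %s", attrs[i], buf);
                fclose(f);
        }
        return 0;
}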
Diffstat (limited to 'arch')
62 files changed, 1334 insertions(+), 879 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1f9a20a3677..d3f7e4941231 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -540,6 +540,21 @@ config CP15_BARRIER_EMULATION
 
	  If unsure, say Y
 
+config SETEND_EMULATION
+	bool "Emulate SETEND instruction"
+	help
+	  The SETEND instruction alters the data-endianness of the
+	  AArch32 EL0, and is deprecated in ARMv8.
+
+	  Say Y here to enable software emulation of the instruction
+	  for AArch32 userspace code.
+
+	  Note: All the cpus on the system must have mixed endian support at EL0
+	  for this feature to be enabled. If a new CPU - which doesn't support mixed
+	  endian - is hotplugged in after this feature has been enabled, there could
+	  be unexpected results in the applications.
+
+	  If unsure, say Y
 endif
 
 endmenu
@@ -627,9 +642,6 @@ source "kernel/power/Kconfig"
 config ARCH_SUSPEND_POSSIBLE
	def_bool y
 
-config ARM64_CPU_SUSPEND
-	def_bool PM_SLEEP
-
 endmenu
 
 menu "CPU Power Management"
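The SETEND_EMULATION option above traps the deprecated AArch32 instruction and emulates it in the kernel. What the emulation boils down to is flipping the endianness bit in the trapped task's saved PSTATE; the fragment below is a hedged sketch of that idea, not the kernel's actual handler (COMPAT_PSR_E_BIT mirrors the AArch32 PSR E bit, bit 9).

/*
 * Sketch only: the trapped instruction's BE/LE operand decides whether
 * the saved PSTATE endianness bit is set or cleared before the task
 * resumes. This is not the kernel's actual trap handler.
 */
#include <stdio.h>

#define COMPAT_PSR_E_BIT        0x00000200UL

static void emulate_setend(unsigned long *pstate, int big_endian)
{
        if (big_endian)
                *pstate |= COMPAT_PSR_E_BIT;    /* data accesses big-endian */
        else
                *pstate &= ~COMPAT_PSR_E_BIT;   /* data accesses little-endian */
}

int main(void)
{
        unsigned long pstate = 0;

        emulate_setend(&pstate, 1);     /* SETEND BE */
        printf("pstate E bit: %lu\n", (pstate & COMPAT_PSR_E_BIT) >> 9);
        return 0;
}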
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 5fdd6dce8061..4a8741073c90 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -66,4 +66,27 @@ config DEBUG_SET_MODULE_RONX
	  against certain classes of kernel exploits.
	  If in doubt, say "N".
 
+config DEBUG_RODATA
+	bool "Make kernel text and rodata read-only"
+	help
+	  If this is set, kernel text and rodata will be made read-only. This
+	  is to help catch accidental or malicious attempts to change the
+	  kernel's executable code. Additionally splits rodata from kernel
+	  text so it can be made explicitly non-executable.
+
+	  If in doubt, say Y
+
+config DEBUG_ALIGN_RODATA
+	depends on DEBUG_RODATA && !ARM64_64K_PAGES
+	bool "Align linker sections up to SECTION_SIZE"
+	help
+	  If this option is enabled, sections that may potentially be marked as
+	  read only or non-executable will be aligned up to the section size of
+	  the kernel. This prevents sections from being split into pages and
+	  avoids a potential TLB penalty. The downside is an increase in
+	  alignment and potentially wasted space. Turn on this option if
+	  performance is more important than memory pressure.
+
+	  If in doubt, say N
+
 endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 066688863920..69ceedc982a5 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,8 +15,6 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 OBJCOPYFLAGS	:=-O binary -R .note -R .note.gnu.build-id -R .comment -S
 GZFLAGS		:=-9
 
-LIBGCC		:= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-
 KBUILD_DEFCONFIG := defconfig
 
 KBUILD_CFLAGS	+= -mgeneral-regs-only
@@ -50,7 +48,6 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/
 core-$(CONFIG_XEN) += arch/arm64/xen/
 core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
 libs-y		:= arch/arm64/lib/ $(libs-y)
-libs-y		+= $(LIBGCC)
 libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
 
 # Default target when executing plain make
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 5376d908eabe..66b6cacc3251 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -45,6 +45,8 @@ CONFIG_CMA=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM64_CPUIDLE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 7ae31a2cc6c0..67d309cc3b6b 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -152,4 +152,9 @@ int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
 #endif
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index 4c631a0a3609..da2fc9e3cedd 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -39,24 +39,41 @@
 
 extern unsigned long __icache_flags;
 
+/*
+ * NumSets, bits[27:13] - (Number of sets in cache) - 1
+ * Associativity, bits[12:3] - (Associativity of cache) - 1
+ * LineSize, bits[2:0] - (Log2(Number of words in cache line)) - 2
+ */
+#define CCSIDR_EL1_WRITE_THROUGH	BIT(31)
+#define CCSIDR_EL1_WRITE_BACK		BIT(30)
+#define CCSIDR_EL1_READ_ALLOCATE	BIT(29)
+#define CCSIDR_EL1_WRITE_ALLOCATE	BIT(28)
 #define CCSIDR_EL1_LINESIZE_MASK	0x7
 #define CCSIDR_EL1_LINESIZE(x)		((x) & CCSIDR_EL1_LINESIZE_MASK)
-
+#define CCSIDR_EL1_ASSOCIATIVITY_SHIFT	3
+#define CCSIDR_EL1_ASSOCIATIVITY_MASK	0x3ff
+#define CCSIDR_EL1_ASSOCIATIVITY(x)	\
+	(((x) >> CCSIDR_EL1_ASSOCIATIVITY_SHIFT) & CCSIDR_EL1_ASSOCIATIVITY_MASK)
 #define CCSIDR_EL1_NUMSETS_SHIFT	13
-#define CCSIDR_EL1_NUMSETS_MASK		(0x7fff << CCSIDR_EL1_NUMSETS_SHIFT)
+#define CCSIDR_EL1_NUMSETS_MASK		0x7fff
 #define CCSIDR_EL1_NUMSETS(x) \
-	(((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT)
+	(((x) >> CCSIDR_EL1_NUMSETS_SHIFT) & CCSIDR_EL1_NUMSETS_MASK)
+
+#define CACHE_LINESIZE(x)	(16 << CCSIDR_EL1_LINESIZE(x))
+#define CACHE_NUMSETS(x)	(CCSIDR_EL1_NUMSETS(x) + 1)
+#define CACHE_ASSOCIATIVITY(x)	(CCSIDR_EL1_ASSOCIATIVITY(x) + 1)
 
-extern u64 __attribute_const__ icache_get_ccsidr(void);
+extern u64 __attribute_const__ cache_get_ccsidr(u64 csselr);
 
+/* Helpers for Level 1 Instruction cache csselr = 1L */
 static inline int icache_get_linesize(void)
 {
-	return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr());
+	return CACHE_LINESIZE(cache_get_ccsidr(1L));
 }
 
 static inline int icache_get_numsets(void)
 {
-	return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr());
+	return CACHE_NUMSETS(cache_get_ccsidr(1L));
 }
 
 /*
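To see what the reworked CCSIDR_EL1 decode yields in practice, here is a self-contained check using the same macros on a made-up register value (a 4-way cache with 128 sets and 64-byte lines, i.e. 32 KiB). The sample value is invented for illustration; the macros are copied from the hunk above.

#include <stdio.h>
#include <stdint.h>

#define CCSIDR_EL1_LINESIZE_MASK        0x7
#define CCSIDR_EL1_LINESIZE(x)          ((x) & CCSIDR_EL1_LINESIZE_MASK)
#define CCSIDR_EL1_ASSOCIATIVITY_SHIFT  3
#define CCSIDR_EL1_ASSOCIATIVITY_MASK   0x3ff
#define CCSIDR_EL1_ASSOCIATIVITY(x) \
        (((x) >> CCSIDR_EL1_ASSOCIATIVITY_SHIFT) & CCSIDR_EL1_ASSOCIATIVITY_MASK)
#define CCSIDR_EL1_NUMSETS_SHIFT        13
#define CCSIDR_EL1_NUMSETS_MASK         0x7fff
#define CCSIDR_EL1_NUMSETS(x) \
        (((x) >> CCSIDR_EL1_NUMSETS_SHIFT) & CCSIDR_EL1_NUMSETS_MASK)

#define CACHE_LINESIZE(x)       (16 << CCSIDR_EL1_LINESIZE(x))
#define CACHE_NUMSETS(x)        (CCSIDR_EL1_NUMSETS(x) + 1)
#define CACHE_ASSOCIATIVITY(x)  (CCSIDR_EL1_ASSOCIATIVITY(x) + 1)

int main(void)
{
        /* hypothetical CCSIDR: (128 sets - 1), (4 ways - 1), 64-byte lines */
        uint64_t ccsidr = (127ULL << 13) | (3ULL << 3) | 2ULL;

        printf("line size:     %d bytes\n", CACHE_LINESIZE(ccsidr));
        printf("sets:          %d\n", (int)CACHE_NUMSETS(ccsidr));
        printf("associativity: %d\n", (int)CACHE_ASSOCIATIVITY(ccsidr));
        return 0;       /* prints 64 / 128 / 4 */
}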
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 3fb053fa6e98..7fbed6919b54 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -161,7 +161,6 @@ typedef struct compat_siginfo {
	int si_code;
 
	union {
-		/* The padding is the same size as AArch64. */
		int _pad[128/sizeof(int) - 3];
 
		/* kill() */
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index 6f8e2ef9094a..da301ee7395c 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -28,8 +28,6 @@ struct device_node;
 *		enable-method property.
 * @cpu_init:	Reads any data necessary for a specific enable-method from the
 *		devicetree, for a given cpu node and proposed logical id.
-* @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
-*		devicetree, for a given cpu node and proposed logical id.
 * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
 *		mechanism for doing so, tests whether it is possible to boot
 *		the given CPU.
@@ -42,6 +40,8 @@ struct device_node;
 * @cpu_die:	Makes a cpu leave the kernel. Must not fail. Called from the
 *		cpu being killed.
 * @cpu_kill:	Ensures a cpu has left the kernel. Called from another cpu.
+* @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
+*		devicetree, for a given cpu node and proposed logical id.
 * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
 *		to wrong parameters or error conditions. Called from the
 *		CPU being suspended. Must be called with IRQs disabled.
@@ -49,7 +49,6 @@ struct device_node;
 struct cpu_operations {
	const char	*name;
	int		(*cpu_init)(struct device_node *, unsigned int);
-	int		(*cpu_init_idle)(struct device_node *, unsigned int);
	int		(*cpu_prepare)(unsigned int);
	int		(*cpu_boot)(unsigned int);
	void		(*cpu_postboot)(void);
@@ -58,7 +57,8 @@ struct cpu_operations {
	void		(*cpu_die)(unsigned int cpu);
	int		(*cpu_kill)(unsigned int cpu);
 #endif
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_IDLE
+	int		(*cpu_init_idle)(struct device_node *, unsigned int);
	int		(*cpu_suspend)(unsigned long);
 #endif
 };
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 07547ccc1f2b..b6c16d5f622f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -52,6 +52,8 @@ static inline void cpus_set_cap(unsigned int num)
 }
 
 void check_local_cpu_errata(void);
+bool cpu_supports_mixed_endian_el0(void);
+bool system_supports_mixed_endian_el0(void);
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index b52a9932e2b1..0710654631e7 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -3,11 +3,17 @@
 
 #ifdef CONFIG_CPU_IDLE
 extern int cpu_init_idle(unsigned int cpu);
+extern int cpu_suspend(unsigned long arg);
 #else
 static inline int cpu_init_idle(unsigned int cpu)
 {
	return -EOPNOTSUPP;
 }
+
+static inline int cpu_suspend(unsigned long arg)
+{
+	return -EOPNOTSUPP;
+}
 #endif
 
 #endif
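The point of pairing the real declaration with a static-inline stub is that callers need no #ifdef of their own: they call cpu_suspend() unconditionally and handle the error. Below is a standalone imitation of that pattern, with CONFIG_CPU_IDLE modeled as an ordinary preprocessor switch (left undefined here so the stub branch compiles and runs).

#include <stdio.h>
#include <errno.h>

/* #define CONFIG_CPU_IDLE 1 -- intentionally left undefined in this demo */

#ifdef CONFIG_CPU_IDLE
extern int cpu_suspend(unsigned long arg);
#else
static inline int cpu_suspend(unsigned long arg)
{
        (void)arg;
        return -EOPNOTSUPP;     /* feature compiled out */
}
#endif

int main(void)
{
        int ret = cpu_suspend(0);

        if (ret)
                printf("cpu_suspend unavailable: %d\n", ret);
        return 0;
}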
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 8adb986a3086..a84ec605bed8 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -72,6 +72,18 @@
 
 #define APM_CPU_PART_POTENZA	0x000
 
+#define ID_AA64MMFR0_BIGENDEL0_SHIFT	16
+#define ID_AA64MMFR0_BIGENDEL0_MASK	(0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT)
+#define ID_AA64MMFR0_BIGENDEL0(mmfr0)	\
+	(((mmfr0) & ID_AA64MMFR0_BIGENDEL0_MASK) >> ID_AA64MMFR0_BIGENDEL0_SHIFT)
+#define ID_AA64MMFR0_BIGEND_SHIFT	8
+#define ID_AA64MMFR0_BIGEND_MASK	(0xf << ID_AA64MMFR0_BIGEND_SHIFT)
+#define ID_AA64MMFR0_BIGEND(mmfr0)	\
+	(((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)
+
+#define SCTLR_EL1_CP15BEN	(0x1 << 5)
+#define SCTLR_EL1_SED		(0x1 << 8)
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -104,6 +116,11 @@ static inline u32 __attribute_const__ read_cpuid_cachetype(void)
	return read_cpuid(CTR_EL0);
 }
 
+static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
+{
+	return (ID_AA64MMFR0_BIGEND(mmfr0) == 0x1) ||
+		(ID_AA64MMFR0_BIGENDEL0(mmfr0) == 0x1);
+}
 #endif /* __ASSEMBLY__ */
 
 #endif
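These ID_AA64MMFR0_EL1 accessors drive the mixed-endian detection that gates SETEND emulation. Here is a minimal standalone version of the same decode, run on an invented register value; field value 0x1 means the feature is present (BigEnd covers all exception levels, BigEndEL0 covers EL0 only). The masks are adapted to ULL constants so the program is self-contained.

#include <stdio.h>
#include <stdint.h>

#define ID_AA64MMFR0_BIGENDEL0_SHIFT    16
#define ID_AA64MMFR0_BIGENDEL0_MASK     (0xfULL << ID_AA64MMFR0_BIGENDEL0_SHIFT)
#define ID_AA64MMFR0_BIGENDEL0(mmfr0) \
        (((mmfr0) & ID_AA64MMFR0_BIGENDEL0_MASK) >> ID_AA64MMFR0_BIGENDEL0_SHIFT)
#define ID_AA64MMFR0_BIGEND_SHIFT       8
#define ID_AA64MMFR0_BIGEND_MASK        (0xfULL << ID_AA64MMFR0_BIGEND_SHIFT)
#define ID_AA64MMFR0_BIGEND(mmfr0) \
        (((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)

static int mixed_endian_el0(uint64_t mmfr0)
{
        return (ID_AA64MMFR0_BIGEND(mmfr0) == 0x1) ||
               (ID_AA64MMFR0_BIGENDEL0(mmfr0) == 0x1);
}

int main(void)
{
        /* fabricated register value: BigEnd = 1, BigEndEL0 = 0 */
        uint64_t mmfr0 = 0x1ULL << ID_AA64MMFR0_BIGEND_SHIFT;

        printf("mixed-endian EL0: %s\n", mixed_endian_el0(mmfr0) ? "yes" : "no");
        return 0;
}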
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 9ce3e680ae1c..6932bb57dba0 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -28,8 +28,6 @@
 
 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
-extern struct dma_map_ops coherent_swiotlb_dma_ops;
-extern struct dma_map_ops noncoherent_swiotlb_dma_ops;
 
 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
@@ -47,23 +45,18 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
	return __generic_dma_ops(dev);
 }
 
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
-	dev->archdata.dma_ops = ops;
-}
-
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				      struct iommu_ops *iommu, bool coherent)
 {
	dev->archdata.dma_coherent = coherent;
-	if (coherent)
-		set_dma_ops(dev, &coherent_swiotlb_dma_ops);
 }
 #define arch_setup_dma_ops	arch_setup_dma_ops
 
 /* do not use this function in a driver */
 static inline bool is_device_dma_coherent(struct device *dev)
 {
+	if (!dev)
+		return false;
	return dev->archdata.dma_coherent;
 }
 
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index a34fd3b12e2b..ef572206f1c3 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -6,29 +6,33 @@
 
 #ifdef CONFIG_EFI
 extern void efi_init(void);
-extern void efi_idmap_init(void);
 #else
 #define efi_init()
-#define efi_idmap_init()
 #endif
 
 #define efi_call_virt(f, ...)					\
 ({								\
-	efi_##f##_t *__f = efi.systab->runtime->f;		\
+	efi_##f##_t *__f;					\
	efi_status_t __s;					\
								\
	kernel_neon_begin();					\
+	efi_virtmap_load();					\
+	__f = efi.systab->runtime->f;				\
	__s = __f(__VA_ARGS__);					\
+	efi_virtmap_unload();					\
	kernel_neon_end();					\
	__s;							\
 })
 
 #define __efi_call_virt(f, ...)					\
 ({								\
-	efi_##f##_t *__f = efi.systab->runtime->f;		\
+	efi_##f##_t *__f;					\
								\
	kernel_neon_begin();					\
+	efi_virtmap_load();					\
+	__f = efi.systab->runtime->f;				\
	__f(__VA_ARGS__);					\
+	efi_virtmap_unload();					\
	kernel_neon_end();					\
 })
 
@@ -44,4 +48,22 @@ extern void efi_idmap_init(void);
 
 #define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
 
+#define EFI_ALLOC_ALIGN		SZ_64K
+
+/*
+ * On ARM systems, virtually remapped UEFI runtime services are set up in two
+ * distinct stages:
+ * - The stub retrieves the final version of the memory map from UEFI, populates
+ *   the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime
+ *   service to communicate the new mapping to the firmware (Note that the new
+ *   mapping is not live at this time)
+ * - During an early initcall(), the EFI system table is permanently remapped
+ *   and the virtual remapping of the UEFI Runtime Services regions is loaded
+ *   into a private set of page tables. If this all succeeds, the Runtime
+ *   Services are enabled and the EFI_RUNTIME_SERVICES bit set.
+ */
+
+void efi_virtmap_load(void);
+void efi_virtmap_unload(void);
+
 #endif /* _ASM_EFI_H */
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 72674f4c3871..62167090937d 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -18,40 +18,89 @@
 #ifndef __ASM_ESR_H
 #define __ASM_ESR_H
 
-#define ESR_EL1_WRITE		(1 << 6)
-#define ESR_EL1_CM		(1 << 8)
-#define ESR_EL1_IL		(1 << 25)
+#define ESR_ELx_EC_UNKNOWN	(0x00)
+#define ESR_ELx_EC_WFx		(0x01)
+/* Unallocated EC: 0x02 */
+#define ESR_ELx_EC_CP15_32	(0x03)
+#define ESR_ELx_EC_CP15_64	(0x04)
+#define ESR_ELx_EC_CP14_MR	(0x05)
+#define ESR_ELx_EC_CP14_LS	(0x06)
+#define ESR_ELx_EC_FP_ASIMD	(0x07)
+#define ESR_ELx_EC_CP10_ID	(0x08)
+/* Unallocated EC: 0x09 - 0x0B */
+#define ESR_ELx_EC_CP14_64	(0x0C)
+/* Unallocated EC: 0x0d */
+#define ESR_ELx_EC_ILL		(0x0E)
+/* Unallocated EC: 0x0F - 0x10 */
+#define ESR_ELx_EC_SVC32	(0x11)
+#define ESR_ELx_EC_HVC32	(0x12)
+#define ESR_ELx_EC_SMC32	(0x13)
+/* Unallocated EC: 0x14 */
+#define ESR_ELx_EC_SVC64	(0x15)
+#define ESR_ELx_EC_HVC64	(0x16)
+#define ESR_ELx_EC_SMC64	(0x17)
+#define ESR_ELx_EC_SYS64	(0x18)
+/* Unallocated EC: 0x19 - 0x1E */
+#define ESR_ELx_EC_IMP_DEF	(0x1f)
+#define ESR_ELx_EC_IABT_LOW	(0x20)
+#define ESR_ELx_EC_IABT_CUR	(0x21)
+#define ESR_ELx_EC_PC_ALIGN	(0x22)
+/* Unallocated EC: 0x23 */
+#define ESR_ELx_EC_DABT_LOW	(0x24)
+#define ESR_ELx_EC_DABT_CUR	(0x25)
+#define ESR_ELx_EC_SP_ALIGN	(0x26)
+/* Unallocated EC: 0x27 */
+#define ESR_ELx_EC_FP_EXC32	(0x28)
+/* Unallocated EC: 0x29 - 0x2B */
+#define ESR_ELx_EC_FP_EXC64	(0x2C)
+/* Unallocated EC: 0x2D - 0x2E */
+#define ESR_ELx_EC_SERROR	(0x2F)
+#define ESR_ELx_EC_BREAKPT_LOW	(0x30)
+#define ESR_ELx_EC_BREAKPT_CUR	(0x31)
+#define ESR_ELx_EC_SOFTSTP_LOW	(0x32)
+#define ESR_ELx_EC_SOFTSTP_CUR	(0x33)
+#define ESR_ELx_EC_WATCHPT_LOW	(0x34)
+#define ESR_ELx_EC_WATCHPT_CUR	(0x35)
+/* Unallocated EC: 0x36 - 0x37 */
+#define ESR_ELx_EC_BKPT32	(0x38)
+/* Unallocated EC: 0x39 */
+#define ESR_ELx_EC_VECTOR32	(0x3A)
+/* Unallocted EC: 0x3B */
+#define ESR_ELx_EC_BRK64	(0x3C)
+/* Unallocated EC: 0x3D - 0x3F */
+#define ESR_ELx_EC_MAX		(0x3F)
 
-#define ESR_EL1_EC_SHIFT	(26)
-#define ESR_EL1_EC_UNKNOWN	(0x00)
-#define ESR_EL1_EC_WFI		(0x01)
-#define ESR_EL1_EC_CP15_32	(0x03)
-#define ESR_EL1_EC_CP15_64	(0x04)
-#define ESR_EL1_EC_CP14_MR	(0x05)
-#define ESR_EL1_EC_CP14_LS	(0x06)
-#define ESR_EL1_EC_FP_ASIMD	(0x07)
-#define ESR_EL1_EC_CP10_ID	(0x08)
-#define ESR_EL1_EC_CP14_64	(0x0C)
-#define ESR_EL1_EC_ILL_ISS	(0x0E)
-#define ESR_EL1_EC_SVC32	(0x11)
-#define ESR_EL1_EC_SVC64	(0x15)
-#define ESR_EL1_EC_SYS64	(0x18)
-#define ESR_EL1_EC_IABT_EL0	(0x20)
-#define ESR_EL1_EC_IABT_EL1	(0x21)
-#define ESR_EL1_EC_PC_ALIGN	(0x22)
-#define ESR_EL1_EC_DABT_EL0	(0x24)
-#define ESR_EL1_EC_DABT_EL1	(0x25)
-#define ESR_EL1_EC_SP_ALIGN	(0x26)
-#define ESR_EL1_EC_FP_EXC32	(0x28)
-#define ESR_EL1_EC_FP_EXC64	(0x2C)
-#define ESR_EL1_EC_SERROR	(0x2F)
-#define ESR_EL1_EC_BREAKPT_EL0	(0x30)
-#define ESR_EL1_EC_BREAKPT_EL1	(0x31)
-#define ESR_EL1_EC_SOFTSTP_EL0	(0x32)
-#define ESR_EL1_EC_SOFTSTP_EL1	(0x33)
-#define ESR_EL1_EC_WATCHPT_EL0	(0x34)
-#define ESR_EL1_EC_WATCHPT_EL1	(0x35)
-#define ESR_EL1_EC_BKPT32	(0x38)
-#define ESR_EL1_EC_BRK64	(0x3C)
+#define ESR_ELx_EC_SHIFT	(26)
+#define ESR_ELx_EC_MASK		(UL(0x3F) << ESR_ELx_EC_SHIFT)
+
+#define ESR_ELx_IL		(UL(1) << 25)
+#define ESR_ELx_ISS_MASK	(ESR_ELx_IL - 1)
+#define ESR_ELx_ISV		(UL(1) << 24)
+#define ESR_ELx_SAS_SHIFT	(22)
+#define ESR_ELx_SAS		(UL(3) << ESR_ELx_SAS_SHIFT)
+#define ESR_ELx_SSE		(UL(1) << 21)
+#define ESR_ELx_SRT_SHIFT	(16)
+#define ESR_ELx_SRT_MASK	(UL(0x1F) << ESR_ELx_SRT_SHIFT)
+#define ESR_ELx_SF		(UL(1) << 15)
+#define ESR_ELx_AR		(UL(1) << 14)
+#define ESR_ELx_EA		(UL(1) << 9)
+#define ESR_ELx_CM		(UL(1) << 8)
+#define ESR_ELx_S1PTW		(UL(1) << 7)
+#define ESR_ELx_WNR		(UL(1) << 6)
+#define ESR_ELx_FSC		(0x3F)
+#define ESR_ELx_FSC_TYPE	(0x3C)
+#define ESR_ELx_FSC_EXTABT	(0x10)
+#define ESR_ELx_FSC_FAULT	(0x04)
+#define ESR_ELx_FSC_PERM	(0x0C)
+#define ESR_ELx_CV		(UL(1) << 24)
+#define ESR_ELx_COND_SHIFT	(20)
+#define ESR_ELx_COND_MASK	(UL(0xF) << ESR_ELx_COND_SHIFT)
+#define ESR_ELx_WFx_ISS_WFE	(UL(1) << 0)
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+
+const char *esr_get_class_string(u32 esr);
+#endif /* __ASSEMBLY */
 
 #endif /* __ASM_ESR_H */
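The unified ESR_ELx_* layout makes syndrome decoding identical across exception levels. Here is a minimal standalone decode of a fabricated syndrome value using the same field positions (EC set to a data abort from a lower EL, with the write bit set); the constants are copied from the hunk above with ULL suffixes so the program compiles on its own.

#include <stdio.h>
#include <stdint.h>

#define ESR_ELx_EC_SHIFT        26
#define ESR_ELx_EC_MASK         (0x3FULL << ESR_ELx_EC_SHIFT)
#define ESR_ELx_IL              (1ULL << 25)
#define ESR_ELx_ISS_MASK        (ESR_ELx_IL - 1)
#define ESR_ELx_WNR             (1ULL << 6)
#define ESR_ELx_EC_DABT_LOW     0x24

int main(void)
{
        /* fabricated syndrome: 32-bit instruction, write data abort */
        uint64_t esr = ((uint64_t)ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT) |
                       ESR_ELx_IL | ESR_ELx_WNR;
        unsigned int ec = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;

        printf("EC=0x%02x IL=%d ISS=0x%06llx write=%d\n",
               ec, !!(esr & ESR_ELx_IL),
               (unsigned long long)(esr & ESR_ELx_ISS_MASK),
               !!(esr & ESR_ELx_WNR));
        return 0;
}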
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 9ef6eca905ca..defa0ff98250 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -49,6 +49,7 @@ enum fixed_addresses {
 
	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+	FIX_TEXT_POKE0,
	__end_of_fixed_addresses
 };
 
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 007618b8188c..a2daf1293028 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -76,7 +76,6 @@
	fpsimd_restore_fpcr x\tmpnr, \state
 .endm
 
-.altmacro
 .macro fpsimd_save_partial state, numnr, tmpnr1, tmpnr2
	mrs	x\tmpnr1, fpsr
	str	w\numnr, [\state, #8]
@@ -86,11 +85,22 @@
	add	\state, \state, x\numnr, lsl #4
	sub	x\tmpnr1, x\tmpnr1, x\numnr, lsl #1
	br	x\tmpnr1
-	.irp	qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
-	.irp	qb, %(qa + 1)
-	stp	q\qa, q\qb, [\state, # -16 * \qa - 16]
-	.endr
-	.endr
+	stp	q30, q31, [\state, #-16 * 30 - 16]
+	stp	q28, q29, [\state, #-16 * 28 - 16]
+	stp	q26, q27, [\state, #-16 * 26 - 16]
+	stp	q24, q25, [\state, #-16 * 24 - 16]
+	stp	q22, q23, [\state, #-16 * 22 - 16]
+	stp	q20, q21, [\state, #-16 * 20 - 16]
+	stp	q18, q19, [\state, #-16 * 18 - 16]
+	stp	q16, q17, [\state, #-16 * 16 - 16]
+	stp	q14, q15, [\state, #-16 * 14 - 16]
+	stp	q12, q13, [\state, #-16 * 12 - 16]
+	stp	q10, q11, [\state, #-16 * 10 - 16]
+	stp	q8, q9, [\state, #-16 * 8 - 16]
+	stp	q6, q7, [\state, #-16 * 6 - 16]
+	stp	q4, q5, [\state, #-16 * 4 - 16]
+	stp	q2, q3, [\state, #-16 * 2 - 16]
+	stp	q0, q1, [\state, #-16 * 0 - 16]
 0:
 .endm
 
@@ -103,10 +113,21 @@
	add	\state, \state, x\tmpnr2, lsl #4
	sub	x\tmpnr1, x\tmpnr1, x\tmpnr2, lsl #1
	br	x\tmpnr1
-	.irp	qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
-	.irp	qb, %(qa + 1)
-	ldp	q\qa, q\qb, [\state, # -16 * \qa - 16]
-	.endr
-	.endr
+	ldp	q30, q31, [\state, #-16 * 30 - 16]
+	ldp	q28, q29, [\state, #-16 * 28 - 16]
+	ldp	q26, q27, [\state, #-16 * 26 - 16]
+	ldp	q24, q25, [\state, #-16 * 24 - 16]
+	ldp	q22, q23, [\state, #-16 * 22 - 16]
+	ldp	q20, q21, [\state, #-16 * 20 - 16]
+	ldp	q18, q19, [\state, #-16 * 18 - 16]
+	ldp	q16, q17, [\state, #-16 * 16 - 16]
+	ldp	q14, q15, [\state, #-16 * 14 - 16]
+	ldp	q12, q13, [\state, #-16 * 12 - 16]
+	ldp	q10, q11, [\state, #-16 * 10 - 16]
+	ldp	q8, q9, [\state, #-16 * 8 - 16]
+	ldp	q6, q7, [\state, #-16 * 6 - 16]
+	ldp	q4, q5, [\state, #-16 * 4 - 16]
+	ldp	q2, q3, [\state, #-16 * 2 - 16]
+	ldp	q0, q1, [\state, #-16 * 0 - 16]
 0:
 .endm
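The unrolled stp/ldp sequences are byte-for-byte equivalent to the removed .irp loops (the unroll avoids .altmacro, which broke other uses of the file): each pair q(qa), q(qa+1) is stored at offset -16*qa - 16, so the 32 Q registers occupy the 512 bytes below the pre-biased state pointer. A quick standalone check of that offset arithmetic:

#include <stdio.h>

int main(void)
{
        /* reproduce #-16 * qa - 16 for qa = 30, 28, ..., 0 */
        for (int qa = 30; qa >= 0; qa -= 2)
                printf("stp q%-2d, q%-2d -> [state, %d]\n",
                       qa, qa + 1, -16 * qa - 16);
        return 0;       /* offsets run from -496 up to -16, 32 bytes apart */
}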
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index e8a3268a891c..6aae421f4d73 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI	6
+#define NR_IPI	5
 
 typedef struct {
	unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 949c406d4df4..540f7c0aea82 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -26,6 +26,7 @@
 
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
+#include <asm/memory.h>
 #include <asm/pgtable.h>
 #include <asm/early_ioremap.h>
 #include <asm/alternative.h>
@@ -145,8 +146,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 * I/O port access primitives.
 */
 #define arch_has_dev_port()	(1)
-#define IO_SPACE_LIMIT		(SZ_32M - 1)
-#define PCI_IOBASE		((void __iomem *)(MODULES_VADDR - SZ_32M))
+#define IO_SPACE_LIMIT		(PCI_IO_SIZE - 1)
+#define PCI_IOBASE		((void __iomem *)PCI_IO_START)
 
 /*
 * String version of I/O memory access operations.
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 8afb863f5a9e..94674eb7e7bb 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -18,6 +18,7 @@
 #ifndef __ARM64_KVM_ARM_H__
 #define __ARM64_KVM_ARM_H__
 
+#include <asm/esr.h>
 #include <asm/memory.h>
 #include <asm/types.h>
 
@@ -184,77 +185,11 @@
 #define MDCR_EL2_TPMCR		(1 << 5)
 #define MDCR_EL2_HPMN_MASK	(0x1F)
 
-/* Exception Syndrome Register (ESR) bits */
-#define ESR_EL2_EC_SHIFT	(26)
-#define ESR_EL2_EC		(UL(0x3f) << ESR_EL2_EC_SHIFT)
-#define ESR_EL2_IL		(UL(1) << 25)
-#define ESR_EL2_ISS		(ESR_EL2_IL - 1)
-#define ESR_EL2_ISV_SHIFT	(24)
-#define ESR_EL2_ISV		(UL(1) << ESR_EL2_ISV_SHIFT)
-#define ESR_EL2_SAS_SHIFT	(22)
-#define ESR_EL2_SAS		(UL(3) << ESR_EL2_SAS_SHIFT)
-#define ESR_EL2_SSE		(1 << 21)
-#define ESR_EL2_SRT_SHIFT	(16)
-#define ESR_EL2_SRT_MASK	(0x1f << ESR_EL2_SRT_SHIFT)
-#define ESR_EL2_SF		(1 << 15)
-#define ESR_EL2_AR		(1 << 14)
-#define ESR_EL2_EA		(1 << 9)
-#define ESR_EL2_CM		(1 << 8)
-#define ESR_EL2_S1PTW		(1 << 7)
-#define ESR_EL2_WNR		(1 << 6)
-#define ESR_EL2_FSC		(0x3f)
-#define ESR_EL2_FSC_TYPE	(0x3c)
-
-#define ESR_EL2_CV_SHIFT	(24)
-#define ESR_EL2_CV		(UL(1) << ESR_EL2_CV_SHIFT)
-#define ESR_EL2_COND_SHIFT	(20)
-#define ESR_EL2_COND		(UL(0xf) << ESR_EL2_COND_SHIFT)
-
-
-#define FSC_FAULT	(0x04)
-#define FSC_PERM	(0x0c)
+/* For compatibility with fault code shared with 32-bit */
+#define FSC_FAULT	ESR_ELx_FSC_FAULT
+#define FSC_PERM	ESR_ELx_FSC_PERM
 
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
 #define HPFAR_MASK	(~UL(0xf))
 
-#define ESR_EL2_EC_UNKNOWN	(0x00)
-#define ESR_EL2_EC_WFI		(0x01)
-#define ESR_EL2_EC_CP15_32	(0x03)
-#define ESR_EL2_EC_CP15_64	(0x04)
-#define ESR_EL2_EC_CP14_MR	(0x05)
-#define ESR_EL2_EC_CP14_LS	(0x06)
-#define ESR_EL2_EC_FP_ASIMD	(0x07)
-#define ESR_EL2_EC_CP10_ID	(0x08)
-#define ESR_EL2_EC_CP14_64	(0x0C)
-#define ESR_EL2_EC_ILL_ISS	(0x0E)
-#define ESR_EL2_EC_SVC32	(0x11)
-#define ESR_EL2_EC_HVC32	(0x12)
-#define ESR_EL2_EC_SMC32	(0x13)
-#define ESR_EL2_EC_SVC64	(0x15)
-#define ESR_EL2_EC_HVC64	(0x16)
-#define ESR_EL2_EC_SMC64	(0x17)
-#define ESR_EL2_EC_SYS64	(0x18)
-#define ESR_EL2_EC_IABT		(0x20)
-#define ESR_EL2_EC_IABT_HYP	(0x21)
-#define ESR_EL2_EC_PC_ALIGN	(0x22)
-#define ESR_EL2_EC_DABT		(0x24)
-#define ESR_EL2_EC_DABT_HYP	(0x25)
-#define ESR_EL2_EC_SP_ALIGN	(0x26)
-#define ESR_EL2_EC_FP_EXC32	(0x28)
-#define ESR_EL2_EC_FP_EXC64	(0x2C)
-#define ESR_EL2_EC_SERROR	(0x2F)
-#define ESR_EL2_EC_BREAKPT	(0x30)
-#define ESR_EL2_EC_BREAKPT_HYP	(0x31)
-#define ESR_EL2_EC_SOFTSTP	(0x32)
-#define ESR_EL2_EC_SOFTSTP_HYP	(0x33)
-#define ESR_EL2_EC_WATCHPT	(0x34)
-#define ESR_EL2_EC_WATCHPT_HYP	(0x35)
-#define ESR_EL2_EC_BKPT32	(0x38)
-#define ESR_EL2_EC_VECTOR32	(0x3A)
-#define ESR_EL2_EC_BRK64	(0x3C)
-
-#define ESR_EL2_EC_xABT_xFSR_EXTABT	0x10
-
-#define ESR_EL2_EC_WFI_ISS_WFE	(1 << 0)
-
 #endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3cb4c856b10d..0163b5775ca5 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -23,8 +23,10 @@
 #define __ARM64_KVM_EMULATE_H__
 
 #include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
+
+#include <asm/esr.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>
 
@@ -140,63 +142,63 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 
 static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
 }
 
 static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
+	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
 }
 
 static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
 
 static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
 static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
 }
 
 static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
+	return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
+	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
 }
 
 static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
+	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
 }
 
 static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
 static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 6486b2bfd562..f800d45ea226 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -33,6 +33,12 @@
 #define UL(x) _AC(x, UL)
 
 /*
+ * Size of the PCI I/O space. This must remain a power of two so that
+ * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
+ */
+#define PCI_IO_SIZE	SZ_16M
+
+/*
 * PAGE_OFFSET - the virtual address of the start of the kernel image (top
 *		 (VA_BITS - 1))
 * VA_BITS - the maximum number of bits for virtual addresses.
@@ -45,7 +51,9 @@
 #define PAGE_OFFSET		(UL(0xffffffffffffffff) << (VA_BITS - 1))
 #define MODULES_END		(PAGE_OFFSET)
 #define MODULES_VADDR		(MODULES_END - SZ_64M)
-#define FIXADDR_TOP		(MODULES_VADDR - SZ_2M - PAGE_SIZE)
+#define PCI_IO_END		(MODULES_VADDR - SZ_2M)
+#define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
+#define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
 #define TASK_SIZE_64		(UL(1) << VA_BITS)
 
 #ifdef CONFIG_COMPAT
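Worked through for VA_BITS = 39, these constants carve a 16 MB PCI I/O window out of the hole below the module area. The program below just evaluates the macros with the usual SZ_* values; the addresses it prints are computed here, not quoted from the patch.

#include <stdio.h>
#include <stdint.h>

#define VA_BITS         39
#define SZ_2M           0x0000000000200000ULL
#define SZ_16M          0x0000000001000000ULL
#define SZ_64M          0x0000000004000000ULL

#define PCI_IO_SIZE     SZ_16M
#define PAGE_OFFSET     (0xffffffffffffffffULL << (VA_BITS - 1))
#define MODULES_END     (PAGE_OFFSET)
#define MODULES_VADDR   (MODULES_END - SZ_64M)
#define PCI_IO_END      (MODULES_VADDR - SZ_2M)
#define PCI_IO_START    (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP     (PCI_IO_START - SZ_2M)

int main(void)
{
        printf("MODULES_VADDR = 0x%016llx\n", (unsigned long long)MODULES_VADDR);
        printf("PCI_IO_END    = 0x%016llx\n", (unsigned long long)PCI_IO_END);
        printf("PCI_IO_START  = 0x%016llx\n", (unsigned long long)PCI_IO_START);
        printf("FIXADDR_TOP   = 0x%016llx\n", (unsigned long long)FIXADDR_TOP);
        return 0;
}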
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index c2f006c48bdb..3d311761e3c2 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -31,7 +31,8 @@ extern void paging_init(void);
 extern void setup_mm_for_reboot(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
-/* create an identity mapping for memory (or io if map_io is true) */
-extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
+extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+			       unsigned long virt, phys_addr_t size,
+			       pgprot_t prot);
 
 #endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 4c445057169d..cf1d9c86f20a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -263,6 +263,11 @@ static inline pmd_t pte_pmd(pte_t pte)
	return __pmd(pte_val(pte));
 }
 
+static inline pgprot_t mk_sect_prot(pgprot_t prot)
+{
+	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
+}
+
 /*
 * THP definitions.
 */
@@ -336,9 +341,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define pud_sect(pud)		(0)
+#define pud_table(pud)		(1)
 #else
 #define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
+#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
+				 PUD_TYPE_TABLE)
 #endif
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 41ed9e13795e..d6dd9fdbc3be 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -58,6 +58,13 @@
 #define COMPAT_PSR_Z_BIT	0x40000000
 #define COMPAT_PSR_N_BIT	0x80000000
 #define COMPAT_PSR_IT_MASK	0x0600fc00	/* If-Then execution state mask */
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define COMPAT_PSR_ENDSTATE	COMPAT_PSR_E_BIT
+#else
+#define COMPAT_PSR_ENDSTATE	0
+#endif
+
 /*
 * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
 * process is located in memory.
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 456d67c1f0fa..003802f58963 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -23,6 +23,4 @@ struct sleep_save_sp {
 
 extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
 extern void cpu_resume(void);
-extern int cpu_suspend(unsigned long);
-
 #endif
diff --git a/arch/arm64/include/asm/syscalls.h b/arch/arm64/include/asm/syscalls.h
deleted file mode 100644
index 48fe7c600e98..000000000000
--- a/arch/arm64/include/asm/syscalls.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_SYSCALLS_H
-#define __ASM_SYSCALLS_H
-
-#include <linux/linkage.h>
-#include <linux/compiler.h>
-#include <linux/signal.h>
-
-/*
- * System call wrappers implemented in kernel/entry.S.
- */
-asmlinkage long sys_rt_sigreturn_wrapper(void);
-
-#include <asm-generic/syscalls.h>
-
-#endif	/* __ASM_SYSCALLS_H */
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 23e9432ac112..3bc498c250dc 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -48,6 +48,9 @@
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
+
+#ifndef __COMPAT_SYSCALL_NR
 #include <uapi/asm/unistd.h>
+#endif
 
 #define NR_syscalls		(__NR_syscalls)
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index 942376d37d22..825b0fe51c2b 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -18,4 +18,5 @@ header-y += siginfo.h
 header-y += signal.h
 header-y += stat.h
 header-y += statfs.h
+header-y += ucontext.h
 header-y += unistd.h
diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/uapi/asm/ucontext.h
index 42e04c877428..791de8e89e35 100644
--- a/arch/arm64/include/asm/ucontext.h
+++ b/arch/arm64/include/uapi/asm/ucontext.h
@@ -13,8 +13,10 @@
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
-#ifndef __ASM_UCONTEXT_H
-#define __ASM_UCONTEXT_H
+#ifndef _UAPI__ASM_UCONTEXT_H
+#define _UAPI__ASM_UCONTEXT_H
+
+#include <linux/types.h>
 
 struct ucontext {
	unsigned long	uc_flags;
@@ -27,4 +29,4 @@ struct ucontext {
	struct sigcontext uc_mcontext;
 };
 
-#endif /* __ASM_UCONTEXT_H */
+#endif /* _UAPI__ASM_UCONTEXT_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index eaa77ed7766a..bef04afd6031 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -16,10 +16,10 @@ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
			entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
			sys.o stacktrace.o time.o traps.o io.o vdso.o		\
			hyp-stub.o psci.o cpu_ops.o insn.o return_address.o	\
-			cpuinfo.o cpu_errata.o alternative.o
+			cpuinfo.o cpu_errata.o alternative.o cacheinfo.o
 
 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o		\
-					   sys_compat.o				\
+					   sys_compat.o entry32.o		\
					   ../../arm/kernel/opcodes.o
 arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
@@ -27,7 +27,7 @@ arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
 arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o
 arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
-arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND)	+= sleep.o suspend.o
+arm64-obj-$(CONFIG_CPU_PM)		+= sleep.o suspend.o
 arm64-obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 arm64-obj-$(CONFIG_JUMP_LABEL)		+= jump_label.o
 arm64-obj-$(CONFIG_KGDB)		+= kgdb.o
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index c363671d7509..7922c2e710ca 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -19,6 +19,7 @@
 #include <asm/system_misc.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
+#include <asm/cpufeature.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace-events-emulation.h"
@@ -85,6 +86,57 @@ static void remove_emulation_hooks(struct insn_emulation_ops *ops)
	pr_notice("Removed %s emulation handler\n", ops->name);
 }
 
+static void enable_insn_hw_mode(void *data)
+{
+	struct insn_emulation *insn = (struct insn_emulation *)data;
+	if (insn->ops->set_hw_mode)
+		insn->ops->set_hw_mode(true);
+}
+
+static void disable_insn_hw_mode(void *data)
+{
+	struct insn_emulation *insn = (struct insn_emulation *)data;
+	if (insn->ops->set_hw_mode)
+		insn->ops->set_hw_mode(false);
+}
+
+/* Run set_hw_mode(mode) on all active CPUs */
+static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
+{
+	if (!insn->ops->set_hw_mode)
+		return -EINVAL;
+	if (enable)
+		on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
+	else
+		on_each_cpu(disable_insn_hw_mode, (void *)insn, true);
+	return 0;
+}
+
+/*
+ * Run set_hw_mode for all insns on a starting CPU.
+ * Returns:
+ *  0		- If all the hooks ran successfully.
+ * -EINVAL	- At least one hook is not supported by the CPU.
+ */
+static int run_all_insn_set_hw_mode(unsigned long cpu)
+{
+	int rc = 0;
+	unsigned long flags;
+	struct insn_emulation *insn;
+
+	raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+	list_for_each_entry(insn, &insn_emulation, node) {
+		bool enable = (insn->current_mode == INSN_HW);
+		if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
+			pr_warn("CPU[%ld] cannot support the emulation of %s",
+				cpu, insn->ops->name);
+			rc = -EINVAL;
+		}
+	}
+	raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+	return rc;
+}
+
 static int update_insn_emulation_mode(struct insn_emulation *insn,
				      enum insn_emulation_mode prev)
 {
@@ -97,10 +149,8 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
		remove_emulation_hooks(insn->ops);
		break;
	case INSN_HW:
-		if (insn->ops->set_hw_mode) {
-			insn->ops->set_hw_mode(false);
+		if (!run_all_cpu_set_hw_mode(insn, false))
			pr_notice("Disabled %s support\n", insn->ops->name);
-		}
		break;
	}
 
@@ -111,10 +161,9 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
		register_emulation_hooks(insn->ops);
		break;
	case INSN_HW:
-		if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(true))
+		ret = run_all_cpu_set_hw_mode(insn, true);
+		if (!ret)
			pr_notice("Enabled %s support\n", insn->ops->name);
-		else
-			ret = -EINVAL;
		break;
	}
 
@@ -133,6 +182,8 @@ static void register_insn_emulation(struct insn_emulation_ops *ops)
	switch (ops->status) {
	case INSN_DEPRECATED:
		insn->current_mode = INSN_EMULATE;
+		/* Disable the HW mode if it was turned on at early boot time */
+		run_all_cpu_set_hw_mode(insn, false);
		insn->max = INSN_HW;
		break;
	case INSN_OBSOLETE:
@@ -453,8 +504,6 @@ ret:
	return 0;
 }
 
-#define SCTLR_EL1_CP15BEN	(1 << 5)
-
 static inline void config_sctlr_el1(u32 clear, u32 set)
 {
	u32 val;
@@ -465,48 +514,13 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
	asm volatile("msr sctlr_el1, %0" : : "r" (val));
 }
 
-static void enable_cp15_ben(void *info)
-{
-	config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
-}
-
-static void disable_cp15_ben(void *info)
-{
-	config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
-}
-
-static int cpu_hotplug_notify(struct notifier_block *b,
-			      unsigned long action, void *hcpu)
-{
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		enable_cp15_ben(NULL);
-		return NOTIFY_DONE;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		disable_cp15_ben(NULL);
-		return NOTIFY_DONE;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_hotplug_notifier = {
-	.notifier_call = cpu_hotplug_notify,
-};
-
 static int cp15_barrier_set_hw_mode(bool enable)
500 | { | 518 | { |
501 | if (enable) { | 519 | if (enable) |
502 | register_cpu_notifier(&cpu_hotplug_notifier); | 520 | config_sctlr_el1(0, SCTLR_EL1_CP15BEN); |
503 | on_each_cpu(enable_cp15_ben, NULL, true); | 521 | else |
504 | } else { | 522 | config_sctlr_el1(SCTLR_EL1_CP15BEN, 0); |
505 | unregister_cpu_notifier(&cpu_hotplug_notifier); | 523 | return 0; |
506 | on_each_cpu(disable_cp15_ben, NULL, true); | ||
507 | } | ||
508 | |||
509 | return true; | ||
510 | } | 524 | } |
511 | 525 | ||
512 | static struct undef_hook cp15_barrier_hooks[] = { | 526 | static struct undef_hook cp15_barrier_hooks[] = { |
@@ -534,6 +548,93 @@ static struct insn_emulation_ops cp15_barrier_ops = { | |||
534 | .set_hw_mode = cp15_barrier_set_hw_mode, | 548 | .set_hw_mode = cp15_barrier_set_hw_mode, |
535 | }; | 549 | }; |
536 | 550 | ||
551 | static int setend_set_hw_mode(bool enable) | ||
552 | { | ||
553 | if (!cpu_supports_mixed_endian_el0()) | ||
554 | return -EINVAL; | ||
555 | |||
556 | if (enable) | ||
557 | config_sctlr_el1(SCTLR_EL1_SED, 0); | ||
558 | else | ||
559 | config_sctlr_el1(0, SCTLR_EL1_SED); | ||
560 | return 0; | ||
561 | } | ||
562 | |||
563 | static int compat_setend_handler(struct pt_regs *regs, u32 big_endian) | ||
564 | { | ||
565 | char *insn; | ||
566 | |||
567 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); | ||
568 | |||
569 | if (big_endian) { | ||
570 | insn = "setend be"; | ||
571 | regs->pstate |= COMPAT_PSR_E_BIT; | ||
572 | } else { | ||
573 | insn = "setend le"; | ||
574 | regs->pstate &= ~COMPAT_PSR_E_BIT; | ||
575 | } | ||
576 | |||
577 | trace_instruction_emulation(insn, regs->pc); | ||
578 | pr_warn_ratelimited("\"%s\" (%ld) uses deprecated setend instruction at 0x%llx\n", | ||
579 | current->comm, (unsigned long)current->pid, regs->pc); | ||
580 | |||
581 | return 0; | ||
582 | } | ||
583 | |||
584 | static int a32_setend_handler(struct pt_regs *regs, u32 instr) | ||
585 | { | ||
586 | int rc = compat_setend_handler(regs, (instr >> 9) & 1); | ||
587 | regs->pc += 4; | ||
588 | return rc; | ||
589 | } | ||
590 | |||
591 | static int t16_setend_handler(struct pt_regs *regs, u32 instr) | ||
592 | { | ||
593 | int rc = compat_setend_handler(regs, (instr >> 3) & 1); | ||
594 | regs->pc += 2; | ||
595 | return rc; | ||
596 | } | ||
597 | |||
598 | static struct undef_hook setend_hooks[] = { | ||
599 | { | ||
600 | .instr_mask = 0xfffffdff, | ||
601 | .instr_val = 0xf1010000, | ||
602 | .pstate_mask = COMPAT_PSR_MODE_MASK, | ||
603 | .pstate_val = COMPAT_PSR_MODE_USR, | ||
604 | .fn = a32_setend_handler, | ||
605 | }, | ||
606 | { | ||
607 | /* Thumb mode */ | ||
608 | .instr_mask = 0x0000fff7, | ||
609 | .instr_val = 0x0000b650, | ||
610 | .pstate_mask = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK), | ||
611 | .pstate_val = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR), | ||
612 | .fn = t16_setend_handler, | ||
613 | }, | ||
614 | {} | ||
615 | }; | ||
616 | |||
617 | static struct insn_emulation_ops setend_ops = { | ||
618 | .name = "setend", | ||
619 | .status = INSN_DEPRECATED, | ||
620 | .hooks = setend_hooks, | ||
621 | .set_hw_mode = setend_set_hw_mode, | ||
622 | }; | ||
623 | |||
624 | static int insn_cpu_hotplug_notify(struct notifier_block *b, | ||
625 | unsigned long action, void *hcpu) | ||
626 | { | ||
627 | int rc = 0; | ||
628 | if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING) | ||
629 | rc = run_all_insn_set_hw_mode((unsigned long)hcpu); | ||
630 | |||
631 | return notifier_from_errno(rc); | ||
632 | } | ||
633 | |||
634 | static struct notifier_block insn_cpu_hotplug_notifier = { | ||
635 | .notifier_call = insn_cpu_hotplug_notify, | ||
636 | }; | ||
637 | |||
537 | /* | 638 | /* |
538 | * Invoked as late_initcall, since not needed before init spawned. | 639 | * Invoked as late_initcall, since not needed before init spawned. |
539 | */ | 640 | */ |
@@ -545,6 +646,14 @@ static int __init armv8_deprecated_init(void) | |||
545 | if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION)) | 646 | if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION)) |
546 | register_insn_emulation(&cp15_barrier_ops); | 647 | register_insn_emulation(&cp15_barrier_ops); |
547 | 648 | ||
649 | if (IS_ENABLED(CONFIG_SETEND_EMULATION)) { | ||
650 | if (system_supports_mixed_endian_el0()) | ||
651 | register_insn_emulation(&setend_ops); | ||
652 | else | ||
653 | pr_info("setend instruction emulation is not supported on the system\n"); | ||
654 | } | ||
655 | |||
656 | register_cpu_notifier(&insn_cpu_hotplug_notifier); | ||
548 | register_insn_emulation_sysctl(ctl_abi); | 657 | register_insn_emulation_sysctl(ctl_abi); |
549 | 658 | ||
550 | return 0; | 659 | return 0; |
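To see how the per-CPU hw-mode plumbing added above is meant to be consumed, here is a minimal sketch of an emulation backend; SCTLR_EL1_EXAMPLE_BIT and every example_* name are placeholders, not part of this patch:

	/* Minimal sketch of a backend using the new set_hw_mode hook.
	 * SCTLR_EL1_EXAMPLE_BIT and the example_* names are hypothetical. */
	static int example_set_hw_mode(bool enable)
	{
		if (enable)
			config_sctlr_el1(0, SCTLR_EL1_EXAMPLE_BIT);
		else
			config_sctlr_el1(SCTLR_EL1_EXAMPLE_BIT, 0);
		return 0;	/* -EINVAL if this CPU cannot run the insn natively */
	}

	static struct insn_emulation_ops example_ops = {
		.name		= "example",
		.status		= INSN_DEPRECATED,
		.hooks		= example_hooks,	/* undef_hook array, as for setend */
		.set_hw_mode	= example_set_hw_mode,
	};

After register_insn_emulation(&example_ops), the mode becomes switchable through the abi sysctl, and the insn_cpu_hotplug_notifier introduced above reapplies the current mode, via run_all_insn_set_hw_mode(), on every CPU that comes online.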
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 9a9fce090d58..a2ae19403abb 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c | |||
@@ -152,7 +152,7 @@ int main(void) | |||
152 | DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); | 152 | DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); |
153 | DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); | 153 | DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); |
154 | #endif | 154 | #endif |
155 | #ifdef CONFIG_ARM64_CPU_SUSPEND | 155 | #ifdef CONFIG_CPU_PM |
156 | DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx)); | 156 | DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx)); |
157 | DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); | 157 | DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); |
158 | DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); | 158 | DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); |
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c new file mode 100644 index 000000000000..b8629d52fba9 --- /dev/null +++ b/arch/arm64/kernel/cacheinfo.c | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * ARM64 cacheinfo support | ||
3 | * | ||
4 | * Copyright (C) 2015 ARM Ltd. | ||
5 | * All Rights Reserved | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
12 | * kind, whether express or implied; without even the implied warranty | ||
13 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/bitops.h> | ||
21 | #include <linux/cacheinfo.h> | ||
22 | #include <linux/cpu.h> | ||
23 | #include <linux/compiler.h> | ||
24 | #include <linux/of.h> | ||
25 | |||
26 | #include <asm/cachetype.h> | ||
27 | #include <asm/processor.h> | ||
28 | |||
29 | #define MAX_CACHE_LEVEL 7 /* Max 7 levels supported */ | ||
30 | /* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */ | ||
31 | #define CLIDR_CTYPE_SHIFT(level) (3 * (level - 1)) | ||
32 | #define CLIDR_CTYPE_MASK(level) (7 << CLIDR_CTYPE_SHIFT(level)) | ||
33 | #define CLIDR_CTYPE(clidr, level) \ | ||
34 | (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level)) | ||
35 | |||
36 | static inline enum cache_type get_cache_type(int level) | ||
37 | { | ||
38 | u64 clidr; | ||
39 | |||
40 | if (level > MAX_CACHE_LEVEL) | ||
41 | return CACHE_TYPE_NOCACHE; | ||
42 | asm volatile ("mrs %x0, clidr_el1" : "=r" (clidr)); | ||
43 | return CLIDR_CTYPE(clidr, level); | ||
44 | } | ||
45 | |||
46 | /* | ||
47 | * Cache Size Selection Register (CSSELR) selects which Cache Size ID | ||
48 | * Register (CCSIDR) is accessible by specifying the required cache | ||
49 | * level and the cache type. We need to ensure that no one else changes | ||
50 | * CSSELR by calling this only in non-preemptible context | ||
51 | */ | ||
52 | u64 __attribute_const__ cache_get_ccsidr(u64 csselr) | ||
53 | { | ||
54 | u64 ccsidr; | ||
55 | |||
56 | WARN_ON(preemptible()); | ||
57 | |||
58 | /* Put value into CSSELR */ | ||
59 | asm volatile("msr csselr_el1, %x0" : : "r" (csselr)); | ||
60 | isb(); | ||
61 | /* Read result out of CCSIDR */ | ||
62 | asm volatile("mrs %x0, ccsidr_el1" : "=r" (ccsidr)); | ||
63 | |||
64 | return ccsidr; | ||
65 | } | ||
66 | |||
67 | static void ci_leaf_init(struct cacheinfo *this_leaf, | ||
68 | enum cache_type type, unsigned int level) | ||
69 | { | ||
70 | bool is_icache = type & CACHE_TYPE_INST; | ||
71 | u64 tmp = cache_get_ccsidr((level - 1) << 1 | is_icache); | ||
72 | |||
73 | this_leaf->level = level; | ||
74 | this_leaf->type = type; | ||
75 | this_leaf->coherency_line_size = CACHE_LINESIZE(tmp); | ||
76 | this_leaf->number_of_sets = CACHE_NUMSETS(tmp); | ||
77 | this_leaf->ways_of_associativity = CACHE_ASSOCIATIVITY(tmp); | ||
78 | this_leaf->size = this_leaf->number_of_sets * | ||
79 | this_leaf->coherency_line_size * this_leaf->ways_of_associativity; | ||
80 | this_leaf->attributes = | ||
81 | ((tmp & CCSIDR_EL1_WRITE_THROUGH) ? CACHE_WRITE_THROUGH : 0) | | ||
82 | ((tmp & CCSIDR_EL1_WRITE_BACK) ? CACHE_WRITE_BACK : 0) | | ||
83 | ((tmp & CCSIDR_EL1_READ_ALLOCATE) ? CACHE_READ_ALLOCATE : 0) | | ||
84 | ((tmp & CCSIDR_EL1_WRITE_ALLOCATE) ? CACHE_WRITE_ALLOCATE : 0); | ||
85 | } | ||
86 | |||
87 | static int __init_cache_level(unsigned int cpu) | ||
88 | { | ||
89 | unsigned int ctype, level, leaves; | ||
90 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); | ||
91 | |||
92 | for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) { | ||
93 | ctype = get_cache_type(level); | ||
94 | if (ctype == CACHE_TYPE_NOCACHE) { | ||
95 | level--; | ||
96 | break; | ||
97 | } | ||
98 | /* Separate instruction and data caches */ | ||
99 | leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1; | ||
100 | } | ||
101 | |||
102 | this_cpu_ci->num_levels = level; | ||
103 | this_cpu_ci->num_leaves = leaves; | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static int __populate_cache_leaves(unsigned int cpu) | ||
108 | { | ||
109 | unsigned int level, idx; | ||
110 | enum cache_type type; | ||
111 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); | ||
112 | struct cacheinfo *this_leaf = this_cpu_ci->info_list; | ||
113 | |||
114 | for (idx = 0, level = 1; level <= this_cpu_ci->num_levels && | ||
115 | idx < this_cpu_ci->num_leaves; idx++, level++) { | ||
116 | type = get_cache_type(level); | ||
117 | if (type == CACHE_TYPE_SEPARATE) { | ||
118 | ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level); | ||
119 | ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level); | ||
120 | } else { | ||
121 | ci_leaf_init(this_leaf++, type, level); | ||
122 | } | ||
123 | } | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level) | ||
128 | DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves) | ||
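The CSSELR/CCSIDR dance in ci_leaf_init() is compact; for reference, a sketch of the decode the CACHE_* macros are assumed to perform (field layout per the ARMv8 ARM, without the larger CCIDX format):

	/* csselr = (level - 1) << 1 | is_icache selects the cache, then
	 * CCSIDR_EL1 describes it:
	 *   LineSize      bits [2:0]   -> line bytes = 16 << LineSize
	 *   Associativity bits [12:3]  -> ways = field + 1
	 *   NumSets       bits [27:13] -> sets = field + 1
	 */
	u64 ccsidr = cache_get_ccsidr((level - 1) << 1 | is_icache);
	unsigned int line = 16U << (ccsidr & 0x7);
	unsigned int ways = ((ccsidr >> 3) & 0x3ff) + 1;
	unsigned int sets = ((ccsidr >> 13) & 0x7fff) + 1;
	unsigned int size = line * ways * sets;	/* what this_leaf->size stores */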
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index 19d17f51db37..5c0896647fd1 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c | |||
@@ -29,3 +29,23 @@ int cpu_init_idle(unsigned int cpu) | |||
29 | of_node_put(cpu_node); | 29 | of_node_put(cpu_node); |
30 | return ret; | 30 | return ret; |
31 | } | 31 | } |
32 | |||
33 | /** | ||
34 | * cpu_suspend() - function to enter a low-power idle state | ||
35 | * @arg: argument to pass to CPU suspend operations | ||
36 | * | ||
37 | * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU | ||
38 | * operations back-end error code otherwise. | ||
39 | */ | ||
40 | int cpu_suspend(unsigned long arg) | ||
41 | { | ||
42 | int cpu = smp_processor_id(); | ||
43 | |||
44 | /* | ||
45 | * If cpu_ops have not been registered or suspend | ||
46 | * has not been initialized, the cpu_suspend call fails early. | ||
47 | */ | ||
48 | if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) | ||
49 | return -EOPNOTSUPP; | ||
50 | return cpu_ops[cpu]->cpu_suspend(arg); | ||
51 | } | ||
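A sketch of how a cpuidle back-end is expected to consume the new cpu_suspend() (the driver and function names are illustrative, not from this patch):

	static int example_enter_idle(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv, int idx)
	{
		/* idx is forwarded to the cpu_ops back-end (e.g. PSCI),
		 * which decides what the state index means. */
		int ret = cpu_suspend(idx);

		return ret ? -1 : idx;
	}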
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 07d435cf2eea..929855691dae 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c | |||
@@ -35,6 +35,7 @@ | |||
35 | */ | 35 | */ |
36 | DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); | 36 | DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); |
37 | static struct cpuinfo_arm64 boot_cpu_data; | 37 | static struct cpuinfo_arm64 boot_cpu_data; |
38 | static bool mixed_endian_el0 = true; | ||
38 | 39 | ||
39 | static char *icache_policy_str[] = { | 40 | static char *icache_policy_str[] = { |
40 | [ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN", | 41 | [ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN", |
@@ -68,6 +69,26 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) | |||
68 | pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); | 69 | pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); |
69 | } | 70 | } |
70 | 71 | ||
72 | bool cpu_supports_mixed_endian_el0(void) | ||
73 | { | ||
74 | return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1)); | ||
75 | } | ||
76 | |||
77 | bool system_supports_mixed_endian_el0(void) | ||
78 | { | ||
79 | return mixed_endian_el0; | ||
80 | } | ||
81 | |||
82 | static void update_mixed_endian_el0_support(struct cpuinfo_arm64 *info) | ||
83 | { | ||
84 | mixed_endian_el0 &= id_aa64mmfr0_mixed_endian_el0(info->reg_id_aa64mmfr0); | ||
85 | } | ||
86 | |||
87 | static void update_cpu_features(struct cpuinfo_arm64 *info) | ||
88 | { | ||
89 | update_mixed_endian_el0_support(info); | ||
90 | } | ||
91 | |||
71 | static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu) | 92 | static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu) |
72 | { | 93 | { |
73 | if ((boot & mask) == (cur & mask)) | 94 | if ((boot & mask) == (cur & mask)) |
@@ -215,6 +236,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) | |||
215 | cpuinfo_detect_icache_policy(info); | 236 | cpuinfo_detect_icache_policy(info); |
216 | 237 | ||
217 | check_local_cpu_errata(); | 238 | check_local_cpu_errata(); |
239 | update_cpu_features(info); | ||
218 | } | 240 | } |
219 | 241 | ||
220 | void cpuinfo_store_cpu(void) | 242 | void cpuinfo_store_cpu(void) |
@@ -231,15 +253,3 @@ void __init cpuinfo_store_boot_cpu(void) | |||
231 | 253 | ||
232 | boot_cpu_data = *info; | 254 | boot_cpu_data = *info; |
233 | } | 255 | } |
234 | |||
235 | u64 __attribute_const__ icache_get_ccsidr(void) | ||
236 | { | ||
237 | u64 ccsidr; | ||
238 | |||
239 | WARN_ON(preemptible()); | ||
240 | |||
241 | /* Select L1 I-cache and read its size ID register */ | ||
242 | asm("msr csselr_el1, %1; isb; mrs %0, ccsidr_el1" | ||
243 | : "=r"(ccsidr) : "r"(1L)); | ||
244 | return ccsidr; | ||
245 | } | ||
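id_aa64mmfr0_mixed_endian_el0() itself is not in this hunk; a sketch consistent with the ID_AA64MMFR0_EL1 field layout in the ARMv8 ARM (BigEnd in bits [11:8], BigEndEL0 in bits [19:16]) would be:

	static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
	{
		return ((mmfr0 >> 8) & 0xf) == 0x1 ||	/* mixed-endian, all ELs */
		       ((mmfr0 >> 16) & 0xf) == 0x1;	/* mixed-endian, EL0 only */
	}

Because mixed_endian_el0 starts out true and is ANDed with each booted CPU's value, one CPU lacking the feature disqualifies the whole system; the insn_cpu_hotplug_notifier above then catches late-arriving CPUs that cannot honour an already-enabled hardware mode.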
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 2bb4347d0edf..b42c7b480e1e 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c | |||
@@ -11,27 +11,46 @@ | |||
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/atomic.h> | ||
14 | #include <linux/dmi.h> | 15 | #include <linux/dmi.h> |
15 | #include <linux/efi.h> | 16 | #include <linux/efi.h> |
16 | #include <linux/export.h> | 17 | #include <linux/export.h> |
17 | #include <linux/memblock.h> | 18 | #include <linux/memblock.h> |
19 | #include <linux/mm_types.h> | ||
18 | #include <linux/bootmem.h> | 20 | #include <linux/bootmem.h> |
19 | #include <linux/of.h> | 21 | #include <linux/of.h> |
20 | #include <linux/of_fdt.h> | 22 | #include <linux/of_fdt.h> |
23 | #include <linux/preempt.h> | ||
24 | #include <linux/rbtree.h> | ||
25 | #include <linux/rwsem.h> | ||
21 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
22 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/spinlock.h> | ||
23 | 29 | ||
24 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
25 | #include <asm/efi.h> | 31 | #include <asm/efi.h> |
26 | #include <asm/tlbflush.h> | 32 | #include <asm/tlbflush.h> |
27 | #include <asm/mmu_context.h> | 33 | #include <asm/mmu_context.h> |
34 | #include <asm/mmu.h> | ||
35 | #include <asm/pgtable.h> | ||
28 | 36 | ||
29 | struct efi_memory_map memmap; | 37 | struct efi_memory_map memmap; |
30 | 38 | ||
31 | static efi_runtime_services_t *runtime; | ||
32 | |||
33 | static u64 efi_system_table; | 39 | static u64 efi_system_table; |
34 | 40 | ||
41 | static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss; | ||
42 | |||
43 | static struct mm_struct efi_mm = { | ||
44 | .mm_rb = RB_ROOT, | ||
45 | .pgd = efi_pgd, | ||
46 | .mm_users = ATOMIC_INIT(2), | ||
47 | .mm_count = ATOMIC_INIT(1), | ||
48 | .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem), | ||
49 | .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock), | ||
50 | .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), | ||
51 | INIT_MM_CONTEXT(efi_mm) | ||
52 | }; | ||
53 | |||
35 | static int uefi_debug __initdata; | 54 | static int uefi_debug __initdata; |
36 | static int __init uefi_debug_setup(char *str) | 55 | static int __init uefi_debug_setup(char *str) |
37 | { | 56 | { |
@@ -48,30 +67,33 @@ static int __init is_normal_ram(efi_memory_desc_t *md) | |||
48 | return 0; | 67 | return 0; |
49 | } | 68 | } |
50 | 69 | ||
51 | static void __init efi_setup_idmap(void) | 70 | /* |
71 | * Translate an EFI virtual address into a physical address: this is necessary, | ||
72 | * as some data members of the EFI system table are virtually remapped after | ||
73 | * SetVirtualAddressMap() has been called. | ||
74 | */ | ||
75 | static phys_addr_t efi_to_phys(unsigned long addr) | ||
52 | { | 76 | { |
53 | struct memblock_region *r; | ||
54 | efi_memory_desc_t *md; | 77 | efi_memory_desc_t *md; |
55 | u64 paddr, npages, size; | ||
56 | 78 | ||
57 | for_each_memblock(memory, r) | ||
58 | create_id_mapping(r->base, r->size, 0); | ||
59 | |||
60 | /* map runtime io spaces */ | ||
61 | for_each_efi_memory_desc(&memmap, md) { | 79 | for_each_efi_memory_desc(&memmap, md) { |
62 | if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md)) | 80 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) |
63 | continue; | 81 | continue; |
64 | paddr = md->phys_addr; | 82 | if (md->virt_addr == 0) |
65 | npages = md->num_pages; | 83 | /* no virtual mapping has been installed by the stub */ |
66 | memrange_efi_to_native(&paddr, &npages); | 84 | break; |
67 | size = npages << PAGE_SHIFT; | 85 | if (md->virt_addr <= addr && |
68 | create_id_mapping(paddr, size, 1); | 86 | (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT)) |
87 | return md->phys_addr + addr - md->virt_addr; | ||
69 | } | 88 | } |
89 | return addr; | ||
70 | } | 90 | } |
71 | 91 | ||
72 | static int __init uefi_init(void) | 92 | static int __init uefi_init(void) |
73 | { | 93 | { |
74 | efi_char16_t *c16; | 94 | efi_char16_t *c16; |
95 | void *config_tables; | ||
96 | u64 table_size; | ||
75 | char vendor[100] = "unknown"; | 97 | char vendor[100] = "unknown"; |
76 | int i, retval; | 98 | int i, retval; |
77 | 99 | ||
@@ -99,7 +121,7 @@ static int __init uefi_init(void) | |||
99 | efi.systab->hdr.revision & 0xffff); | 121 | efi.systab->hdr.revision & 0xffff); |
100 | 122 | ||
101 | /* Show what we know for posterity */ | 123 | /* Show what we know for posterity */ |
102 | c16 = early_memremap(efi.systab->fw_vendor, | 124 | c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor), |
103 | sizeof(vendor)); | 125 | sizeof(vendor)); |
104 | if (c16) { | 126 | if (c16) { |
105 | for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) | 127 | for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) |
@@ -112,8 +134,14 @@ static int __init uefi_init(void) | |||
112 | efi.systab->hdr.revision >> 16, | 134 | efi.systab->hdr.revision >> 16, |
113 | efi.systab->hdr.revision & 0xffff, vendor); | 135 | efi.systab->hdr.revision & 0xffff, vendor); |
114 | 136 | ||
115 | retval = efi_config_init(NULL); | 137 | table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; |
138 | config_tables = early_memremap(efi_to_phys(efi.systab->tables), | ||
139 | table_size); | ||
116 | 140 | ||
141 | retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, | ||
142 | sizeof(efi_config_table_64_t), NULL); | ||
143 | |||
144 | early_memunmap(config_tables, table_size); | ||
117 | out: | 145 | out: |
118 | early_memunmap(efi.systab, sizeof(efi_system_table_t)); | 146 | early_memunmap(efi.systab, sizeof(efi_system_table_t)); |
119 | return retval; | 147 | return retval; |
@@ -163,9 +191,7 @@ static __init void reserve_regions(void) | |||
163 | if (is_normal_ram(md)) | 191 | if (is_normal_ram(md)) |
164 | early_init_dt_add_memory_arch(paddr, size); | 192 | early_init_dt_add_memory_arch(paddr, size); |
165 | 193 | ||
166 | if (is_reserve_region(md) || | 194 | if (is_reserve_region(md)) { |
167 | md->type == EFI_BOOT_SERVICES_CODE || | ||
168 | md->type == EFI_BOOT_SERVICES_DATA) { | ||
169 | memblock_reserve(paddr, size); | 195 | memblock_reserve(paddr, size); |
170 | if (uefi_debug) | 196 | if (uefi_debug) |
171 | pr_cont("*"); | 197 | pr_cont("*"); |
@@ -178,123 +204,6 @@ static __init void reserve_regions(void) | |||
178 | set_bit(EFI_MEMMAP, &efi.flags); | 204 | set_bit(EFI_MEMMAP, &efi.flags); |
179 | } | 205 | } |
180 | 206 | ||
181 | |||
182 | static u64 __init free_one_region(u64 start, u64 end) | ||
183 | { | ||
184 | u64 size = end - start; | ||
185 | |||
186 | if (uefi_debug) | ||
187 | pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1); | ||
188 | |||
189 | free_bootmem_late(start, size); | ||
190 | return size; | ||
191 | } | ||
192 | |||
193 | static u64 __init free_region(u64 start, u64 end) | ||
194 | { | ||
195 | u64 map_start, map_end, total = 0; | ||
196 | |||
197 | if (end <= start) | ||
198 | return total; | ||
199 | |||
200 | map_start = (u64)memmap.phys_map; | ||
201 | map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map)); | ||
202 | map_start &= PAGE_MASK; | ||
203 | |||
204 | if (start < map_end && end > map_start) { | ||
205 | /* region overlaps UEFI memmap */ | ||
206 | if (start < map_start) | ||
207 | total += free_one_region(start, map_start); | ||
208 | |||
209 | if (map_end < end) | ||
210 | total += free_one_region(map_end, end); | ||
211 | } else | ||
212 | total += free_one_region(start, end); | ||
213 | |||
214 | return total; | ||
215 | } | ||
216 | |||
217 | static void __init free_boot_services(void) | ||
218 | { | ||
219 | u64 total_freed = 0; | ||
220 | u64 keep_end, free_start, free_end; | ||
221 | efi_memory_desc_t *md; | ||
222 | |||
223 | /* | ||
224 | * If kernel uses larger pages than UEFI, we have to be careful | ||
225 | * not to inadvertantly free memory we want to keep if there is | ||
226 | * overlap at the kernel page size alignment. We do not want to | ||
227 | * free is_reserve_region() memory nor the UEFI memmap itself. | ||
228 | * | ||
229 | * The memory map is sorted, so we keep track of the end of | ||
230 | * any previous region we want to keep, remember any region | ||
231 | * we want to free and defer freeing it until we encounter | ||
232 | * the next region we want to keep. This way, before freeing | ||
233 | * it, we can clip it as needed to avoid freeing memory we | ||
234 | * want to keep for UEFI. | ||
235 | */ | ||
236 | |||
237 | keep_end = 0; | ||
238 | free_start = 0; | ||
239 | |||
240 | for_each_efi_memory_desc(&memmap, md) { | ||
241 | u64 paddr, npages, size; | ||
242 | |||
243 | if (is_reserve_region(md)) { | ||
244 | /* | ||
245 | * We don't want to free any memory from this region. | ||
246 | */ | ||
247 | if (free_start) { | ||
248 | /* adjust free_end then free region */ | ||
249 | if (free_end > md->phys_addr) | ||
250 | free_end -= PAGE_SIZE; | ||
251 | total_freed += free_region(free_start, free_end); | ||
252 | free_start = 0; | ||
253 | } | ||
254 | keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); | ||
255 | continue; | ||
256 | } | ||
257 | |||
258 | if (md->type != EFI_BOOT_SERVICES_CODE && | ||
259 | md->type != EFI_BOOT_SERVICES_DATA) { | ||
260 | /* no need to free this region */ | ||
261 | continue; | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * We want to free memory from this region. | ||
266 | */ | ||
267 | paddr = md->phys_addr; | ||
268 | npages = md->num_pages; | ||
269 | memrange_efi_to_native(&paddr, &npages); | ||
270 | size = npages << PAGE_SHIFT; | ||
271 | |||
272 | if (free_start) { | ||
273 | if (paddr <= free_end) | ||
274 | free_end = paddr + size; | ||
275 | else { | ||
276 | total_freed += free_region(free_start, free_end); | ||
277 | free_start = paddr; | ||
278 | free_end = paddr + size; | ||
279 | } | ||
280 | } else { | ||
281 | free_start = paddr; | ||
282 | free_end = paddr + size; | ||
283 | } | ||
284 | if (free_start < keep_end) { | ||
285 | free_start += PAGE_SIZE; | ||
286 | if (free_start >= free_end) | ||
287 | free_start = 0; | ||
288 | } | ||
289 | } | ||
290 | if (free_start) | ||
291 | total_freed += free_region(free_start, free_end); | ||
292 | |||
293 | if (total_freed) | ||
294 | pr_info("Freed 0x%llx bytes of EFI boot services memory", | ||
295 | total_freed); | ||
296 | } | ||
297 | |||
298 | void __init efi_init(void) | 207 | void __init efi_init(void) |
299 | { | 208 | { |
300 | struct efi_fdt_params params; | 209 | struct efi_fdt_params params; |
@@ -317,159 +226,100 @@ void __init efi_init(void) | |||
317 | return; | 226 | return; |
318 | 227 | ||
319 | reserve_regions(); | 228 | reserve_regions(); |
229 | early_memunmap(memmap.map, params.mmap_size); | ||
320 | } | 230 | } |
321 | 231 | ||
322 | void __init efi_idmap_init(void) | 232 | static bool __init efi_virtmap_init(void) |
323 | { | 233 | { |
324 | if (!efi_enabled(EFI_BOOT)) | 234 | efi_memory_desc_t *md; |
325 | return; | ||
326 | |||
327 | /* boot time idmap_pg_dir is incomplete, so fill in missing parts */ | ||
328 | efi_setup_idmap(); | ||
329 | early_memunmap(memmap.map, memmap.map_end - memmap.map); | ||
330 | } | ||
331 | |||
332 | static int __init remap_region(efi_memory_desc_t *md, void **new) | ||
333 | { | ||
334 | u64 paddr, vaddr, npages, size; | ||
335 | |||
336 | paddr = md->phys_addr; | ||
337 | npages = md->num_pages; | ||
338 | memrange_efi_to_native(&paddr, &npages); | ||
339 | size = npages << PAGE_SHIFT; | ||
340 | 235 | ||
341 | if (is_normal_ram(md)) | 236 | for_each_efi_memory_desc(&memmap, md) { |
342 | vaddr = (__force u64)ioremap_cache(paddr, size); | 237 | u64 paddr, npages, size; |
343 | else | 238 | pgprot_t prot; |
344 | vaddr = (__force u64)ioremap(paddr, size); | ||
345 | 239 | ||
346 | if (!vaddr) { | 240 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) |
347 | pr_err("Unable to remap 0x%llx pages @ %p\n", | 241 | continue; |
348 | npages, (void *)paddr); | 242 | if (md->virt_addr == 0) |
349 | return 0; | 243 | return false; |
350 | } | ||
351 | 244 | ||
352 | /* adjust for any rounding when EFI and system pagesize differs */ | 245 | paddr = md->phys_addr; |
353 | md->virt_addr = vaddr + (md->phys_addr - paddr); | 246 | npages = md->num_pages; |
247 | memrange_efi_to_native(&paddr, &npages); | ||
248 | size = npages << PAGE_SHIFT; | ||
354 | 249 | ||
355 | if (uefi_debug) | 250 | pr_info(" EFI remap 0x%016llx => %p\n", |
356 | pr_info(" EFI remap 0x%012llx => %p\n", | ||
357 | md->phys_addr, (void *)md->virt_addr); | 251 | md->phys_addr, (void *)md->virt_addr); |
358 | 252 | ||
359 | memcpy(*new, md, memmap.desc_size); | 253 | /* |
360 | *new += memmap.desc_size; | 254 | * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be |
361 | 255 | * executable, everything else can be mapped with the XN bits | |
362 | return 1; | 256 | * set. |
257 | */ | ||
258 | if (!is_normal_ram(md)) | ||
259 | prot = __pgprot(PROT_DEVICE_nGnRE); | ||
260 | else if (md->type == EFI_RUNTIME_SERVICES_CODE) | ||
261 | prot = PAGE_KERNEL_EXEC; | ||
262 | else | ||
263 | prot = PAGE_KERNEL; | ||
264 | |||
265 | create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot); | ||
266 | } | ||
267 | return true; | ||
363 | } | 268 | } |
364 | 269 | ||
365 | /* | 270 | /* |
366 | * Switch UEFI from an identity map to a kernel virtual map | 271 | * Enable the UEFI Runtime Services if all prerequisites are in place, i.e., |
272 | * non-early mapping of the UEFI system table and virtual mappings for all | ||
273 | * EFI_MEMORY_RUNTIME regions. | ||
367 | */ | 274 | */ |
368 | static int __init arm64_enter_virtual_mode(void) | 275 | static int __init arm64_enable_runtime_services(void) |
369 | { | 276 | { |
370 | efi_memory_desc_t *md; | ||
371 | phys_addr_t virtmap_phys; | ||
372 | void *virtmap, *virt_md; | ||
373 | efi_status_t status; | ||
374 | u64 mapsize; | 277 | u64 mapsize; |
375 | int count = 0; | ||
376 | unsigned long flags; | ||
377 | 278 | ||
378 | if (!efi_enabled(EFI_BOOT)) { | 279 | if (!efi_enabled(EFI_BOOT)) { |
379 | pr_info("EFI services will not be available.\n"); | 280 | pr_info("EFI services will not be available.\n"); |
380 | return -1; | 281 | return -1; |
381 | } | 282 | } |
382 | 283 | ||
383 | mapsize = memmap.map_end - memmap.map; | ||
384 | |||
385 | if (efi_runtime_disabled()) { | 284 | if (efi_runtime_disabled()) { |
386 | pr_info("EFI runtime services will be disabled.\n"); | 285 | pr_info("EFI runtime services will be disabled.\n"); |
387 | return -1; | 286 | return -1; |
388 | } | 287 | } |
389 | 288 | ||
390 | pr_info("Remapping and enabling EFI services.\n"); | 289 | pr_info("Remapping and enabling EFI services.\n"); |
391 | /* replace early memmap mapping with permanent mapping */ | 290 | |
291 | mapsize = memmap.map_end - memmap.map; | ||
392 | memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map, | 292 | memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map, |
393 | mapsize); | 293 | mapsize); |
394 | memmap.map_end = memmap.map + mapsize; | 294 | if (!memmap.map) { |
395 | 295 | pr_err("Failed to remap EFI memory map\n"); | |
396 | efi.memmap = &memmap; | ||
397 | |||
398 | /* Map the runtime regions */ | ||
399 | virtmap = kmalloc(mapsize, GFP_KERNEL); | ||
400 | if (!virtmap) { | ||
401 | pr_err("Failed to allocate EFI virtual memmap\n"); | ||
402 | return -1; | 296 | return -1; |
403 | } | 297 | } |
404 | virtmap_phys = virt_to_phys(virtmap); | 298 | memmap.map_end = memmap.map + mapsize; |
405 | virt_md = virtmap; | 299 | efi.memmap = &memmap; |
406 | |||
407 | for_each_efi_memory_desc(&memmap, md) { | ||
408 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) | ||
409 | continue; | ||
410 | if (!remap_region(md, &virt_md)) | ||
411 | goto err_unmap; | ||
412 | ++count; | ||
413 | } | ||
414 | 300 | ||
415 | efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table); | 301 | efi.systab = (__force void *)ioremap_cache(efi_system_table, |
302 | sizeof(efi_system_table_t)); | ||
416 | if (!efi.systab) { | 303 | if (!efi.systab) { |
417 | /* | 304 | pr_err("Failed to remap EFI System Table\n"); |
418 | * If we have no virtual mapping for the System Table at this | 305 | return -1; |
419 | * point, the memory map doesn't cover the physical offset where | ||
420 | * it resides. This means the System Table will be inaccessible | ||
421 | * to Runtime Services themselves once the virtual mapping is | ||
422 | * installed. | ||
423 | */ | ||
424 | pr_err("Failed to remap EFI System Table -- buggy firmware?\n"); | ||
425 | goto err_unmap; | ||
426 | } | 306 | } |
427 | set_bit(EFI_SYSTEM_TABLES, &efi.flags); | 307 | set_bit(EFI_SYSTEM_TABLES, &efi.flags); |
428 | 308 | ||
429 | local_irq_save(flags); | 309 | if (!efi_virtmap_init()) { |
430 | cpu_switch_mm(idmap_pg_dir, &init_mm); | 310 | pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); |
431 | |||
432 | /* Call SetVirtualAddressMap with the physical address of the map */ | ||
433 | runtime = efi.systab->runtime; | ||
434 | efi.set_virtual_address_map = runtime->set_virtual_address_map; | ||
435 | |||
436 | status = efi.set_virtual_address_map(count * memmap.desc_size, | ||
437 | memmap.desc_size, | ||
438 | memmap.desc_version, | ||
439 | (efi_memory_desc_t *)virtmap_phys); | ||
440 | cpu_set_reserved_ttbr0(); | ||
441 | flush_tlb_all(); | ||
442 | local_irq_restore(flags); | ||
443 | |||
444 | kfree(virtmap); | ||
445 | |||
446 | free_boot_services(); | ||
447 | |||
448 | if (status != EFI_SUCCESS) { | ||
449 | pr_err("Failed to set EFI virtual address map! [%lx]\n", | ||
450 | status); | ||
451 | return -1; | 311 | return -1; |
452 | } | 312 | } |
453 | 313 | ||
454 | /* Set up runtime services function pointers */ | 314 | /* Set up runtime services function pointers */ |
455 | runtime = efi.systab->runtime; | ||
456 | efi_native_runtime_setup(); | 315 | efi_native_runtime_setup(); |
457 | set_bit(EFI_RUNTIME_SERVICES, &efi.flags); | 316 | set_bit(EFI_RUNTIME_SERVICES, &efi.flags); |
458 | 317 | ||
459 | efi.runtime_version = efi.systab->hdr.revision; | 318 | efi.runtime_version = efi.systab->hdr.revision; |
460 | 319 | ||
461 | return 0; | 320 | return 0; |
462 | |||
463 | err_unmap: | ||
464 | /* unmap all mappings that succeeded: there are 'count' of those */ | ||
465 | for (virt_md = virtmap; count--; virt_md += memmap.desc_size) { | ||
466 | md = virt_md; | ||
467 | iounmap((__force void __iomem *)md->virt_addr); | ||
468 | } | ||
469 | kfree(virtmap); | ||
470 | return -1; | ||
471 | } | 321 | } |
472 | early_initcall(arm64_enter_virtual_mode); | 322 | early_initcall(arm64_enable_runtime_services); |
473 | 323 | ||
474 | static int __init arm64_dmi_init(void) | 324 | static int __init arm64_dmi_init(void) |
475 | { | 325 | { |
@@ -484,3 +334,23 @@ static int __init arm64_dmi_init(void) | |||
484 | return 0; | 334 | return 0; |
485 | } | 335 | } |
486 | core_initcall(arm64_dmi_init); | 336 | core_initcall(arm64_dmi_init); |
337 | |||
338 | static void efi_set_pgd(struct mm_struct *mm) | ||
339 | { | ||
340 | cpu_switch_mm(mm->pgd, mm); | ||
341 | flush_tlb_all(); | ||
342 | if (icache_is_aivivt()) | ||
343 | __flush_icache_all(); | ||
344 | } | ||
345 | |||
346 | void efi_virtmap_load(void) | ||
347 | { | ||
348 | preempt_disable(); | ||
349 | efi_set_pgd(&efi_mm); | ||
350 | } | ||
351 | |||
352 | void efi_virtmap_unload(void) | ||
353 | { | ||
354 | efi_set_pgd(current->active_mm); | ||
355 | preempt_enable(); | ||
356 | } | ||
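A sketch of how a runtime-services call is expected to be bracketed by the two helpers above (the actual efi_call_virt plumbing lives outside this hunk; get_time is just an illustrative service):

	efi_time_t tm;
	efi_status_t status;

	efi_virtmap_load();	/* install efi_mm's page tables in TTBR0 */
	status = efi.systab->runtime->get_time(&tm, NULL);
	efi_virtmap_unload();	/* back to current->active_mm, preempt re-enabled */

Keeping the runtime regions in a dedicated efi_mm, installed only for the duration of a call, means the kernel's own TTBR0 state is untouched the rest of the time.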
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index fd4fa374e5d2..cf21bb3bf752 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S | |||
@@ -269,18 +269,18 @@ ENDPROC(el1_error_invalid) | |||
269 | el1_sync: | 269 | el1_sync: |
270 | kernel_entry 1 | 270 | kernel_entry 1 |
271 | mrs x1, esr_el1 // read the syndrome register | 271 | mrs x1, esr_el1 // read the syndrome register |
272 | lsr x24, x1, #ESR_EL1_EC_SHIFT // exception class | 272 | lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class |
273 | cmp x24, #ESR_EL1_EC_DABT_EL1 // data abort in EL1 | 273 | cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1 |
274 | b.eq el1_da | 274 | b.eq el1_da |
275 | cmp x24, #ESR_EL1_EC_SYS64 // configurable trap | 275 | cmp x24, #ESR_ELx_EC_SYS64 // configurable trap |
276 | b.eq el1_undef | 276 | b.eq el1_undef |
277 | cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception | 277 | cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception |
278 | b.eq el1_sp_pc | 278 | b.eq el1_sp_pc |
279 | cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception | 279 | cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception |
280 | b.eq el1_sp_pc | 280 | b.eq el1_sp_pc |
281 | cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL1 | 281 | cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1 |
282 | b.eq el1_undef | 282 | b.eq el1_undef |
283 | cmp x24, #ESR_EL1_EC_BREAKPT_EL1 // debug exception in EL1 | 283 | cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1 |
284 | b.ge el1_dbg | 284 | b.ge el1_dbg |
285 | b el1_inv | 285 | b el1_inv |
286 | el1_da: | 286 | el1_da: |
@@ -318,7 +318,7 @@ el1_dbg: | |||
318 | /* | 318 | /* |
319 | * Debug exception handling | 319 | * Debug exception handling |
320 | */ | 320 | */ |
321 | cmp x24, #ESR_EL1_EC_BRK64 // if BRK64 | 321 | cmp x24, #ESR_ELx_EC_BRK64 // if BRK64 |
322 | cinc x24, x24, eq // set bit '0' | 322 | cinc x24, x24, eq // set bit '0' |
323 | tbz x24, #0, el1_inv // EL1 only | 323 | tbz x24, #0, el1_inv // EL1 only |
324 | mrs x0, far_el1 | 324 | mrs x0, far_el1 |
@@ -375,26 +375,26 @@ el1_preempt: | |||
375 | el0_sync: | 375 | el0_sync: |
376 | kernel_entry 0 | 376 | kernel_entry 0 |
377 | mrs x25, esr_el1 // read the syndrome register | 377 | mrs x25, esr_el1 // read the syndrome register |
378 | lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class | 378 | lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class |
379 | cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state | 379 | cmp x24, #ESR_ELx_EC_SVC64 // SVC in 64-bit state |
380 | b.eq el0_svc | 380 | b.eq el0_svc |
381 | cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 | 381 | cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0 |
382 | b.eq el0_da | 382 | b.eq el0_da |
383 | cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 | 383 | cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0 |
384 | b.eq el0_ia | 384 | b.eq el0_ia |
385 | cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access | 385 | cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access |
386 | b.eq el0_fpsimd_acc | 386 | b.eq el0_fpsimd_acc |
387 | cmp x24, #ESR_EL1_EC_FP_EXC64 // FP/ASIMD exception | 387 | cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception |
388 | b.eq el0_fpsimd_exc | 388 | b.eq el0_fpsimd_exc |
389 | cmp x24, #ESR_EL1_EC_SYS64 // configurable trap | 389 | cmp x24, #ESR_ELx_EC_SYS64 // configurable trap |
390 | b.eq el0_undef | 390 | b.eq el0_undef |
391 | cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception | 391 | cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception |
392 | b.eq el0_sp_pc | 392 | b.eq el0_sp_pc |
393 | cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception | 393 | cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception |
394 | b.eq el0_sp_pc | 394 | b.eq el0_sp_pc |
395 | cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0 | 395 | cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 |
396 | b.eq el0_undef | 396 | b.eq el0_undef |
397 | cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0 | 397 | cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0 |
398 | b.ge el0_dbg | 398 | b.ge el0_dbg |
399 | b el0_inv | 399 | b el0_inv |
400 | 400 | ||
@@ -403,37 +403,37 @@ el0_sync: | |||
403 | el0_sync_compat: | 403 | el0_sync_compat: |
404 | kernel_entry 0, 32 | 404 | kernel_entry 0, 32 |
405 | mrs x25, esr_el1 // read the syndrome register | 405 | mrs x25, esr_el1 // read the syndrome register |
406 | lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class | 406 | lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class |
407 | cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state | 407 | cmp x24, #ESR_ELx_EC_SVC32 // SVC in 32-bit state |
408 | b.eq el0_svc_compat | 408 | b.eq el0_svc_compat |
409 | cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 | 409 | cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0 |
410 | b.eq el0_da | 410 | b.eq el0_da |
411 | cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 | 411 | cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0 |
412 | b.eq el0_ia | 412 | b.eq el0_ia |
413 | cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access | 413 | cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access |
414 | b.eq el0_fpsimd_acc | 414 | b.eq el0_fpsimd_acc |
415 | cmp x24, #ESR_EL1_EC_FP_EXC32 // FP/ASIMD exception | 415 | cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception |
416 | b.eq el0_fpsimd_exc | 416 | b.eq el0_fpsimd_exc |
417 | cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0 | 417 | cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0 |
418 | b.eq el0_undef | 418 | b.eq el0_undef |
419 | cmp x24, #ESR_EL1_EC_CP15_32 // CP15 MRC/MCR trap | 419 | cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap |
420 | b.eq el0_undef | 420 | b.eq el0_undef |
421 | cmp x24, #ESR_EL1_EC_CP15_64 // CP15 MRRC/MCRR trap | 421 | cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap |
422 | b.eq el0_undef | 422 | b.eq el0_undef |
423 | cmp x24, #ESR_EL1_EC_CP14_MR // CP14 MRC/MCR trap | 423 | cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap |
424 | b.eq el0_undef | 424 | b.eq el0_undef |
425 | cmp x24, #ESR_EL1_EC_CP14_LS // CP14 LDC/STC trap | 425 | cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap |
426 | b.eq el0_undef | 426 | b.eq el0_undef |
427 | cmp x24, #ESR_EL1_EC_CP14_64 // CP14 MRRC/MCRR trap | 427 | cmp x24, #ESR_ELx_EC_CP14_64 // CP14 MRRC/MCRR trap |
428 | b.eq el0_undef | 428 | b.eq el0_undef |
429 | cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0 | 429 | cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0 |
430 | b.ge el0_dbg | 430 | b.ge el0_dbg |
431 | b el0_inv | 431 | b el0_inv |
432 | el0_svc_compat: | 432 | el0_svc_compat: |
433 | /* | 433 | /* |
434 | * AArch32 syscall handling | 434 | * AArch32 syscall handling |
435 | */ | 435 | */ |
436 | adr stbl, compat_sys_call_table // load compat syscall table pointer | 436 | adrp stbl, compat_sys_call_table // load compat syscall table pointer |
437 | uxtw scno, w7 // syscall number in w7 (r7) | 437 | uxtw scno, w7 // syscall number in w7 (r7) |
438 | mov sc_nr, #__NR_compat_syscalls | 438 | mov sc_nr, #__NR_compat_syscalls |
439 | b el0_svc_naked | 439 | b el0_svc_naked |
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/entry32.S index 423a5b3fc2be..9a8f6ae2530e 100644 --- a/arch/arm64/kernel/sys32.S +++ b/arch/arm64/kernel/entry32.S | |||
@@ -27,26 +27,26 @@ | |||
27 | * System call wrappers for the AArch32 compatibility layer. | 27 | * System call wrappers for the AArch32 compatibility layer. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | compat_sys_sigreturn_wrapper: | 30 | ENTRY(compat_sys_sigreturn_wrapper) |
31 | mov x0, sp | 31 | mov x0, sp |
32 | mov x27, #0 // prevent syscall restart handling (why) | 32 | mov x27, #0 // prevent syscall restart handling (why) |
33 | b compat_sys_sigreturn | 33 | b compat_sys_sigreturn |
34 | ENDPROC(compat_sys_sigreturn_wrapper) | 34 | ENDPROC(compat_sys_sigreturn_wrapper) |
35 | 35 | ||
36 | compat_sys_rt_sigreturn_wrapper: | 36 | ENTRY(compat_sys_rt_sigreturn_wrapper) |
37 | mov x0, sp | 37 | mov x0, sp |
38 | mov x27, #0 // prevent syscall restart handling (why) | 38 | mov x27, #0 // prevent syscall restart handling (why) |
39 | b compat_sys_rt_sigreturn | 39 | b compat_sys_rt_sigreturn |
40 | ENDPROC(compat_sys_rt_sigreturn_wrapper) | 40 | ENDPROC(compat_sys_rt_sigreturn_wrapper) |
41 | 41 | ||
42 | compat_sys_statfs64_wrapper: | 42 | ENTRY(compat_sys_statfs64_wrapper) |
43 | mov w3, #84 | 43 | mov w3, #84 |
44 | cmp w1, #88 | 44 | cmp w1, #88 |
45 | csel w1, w3, w1, eq | 45 | csel w1, w3, w1, eq |
46 | b compat_sys_statfs64 | 46 | b compat_sys_statfs64 |
47 | ENDPROC(compat_sys_statfs64_wrapper) | 47 | ENDPROC(compat_sys_statfs64_wrapper) |
48 | 48 | ||
49 | compat_sys_fstatfs64_wrapper: | 49 | ENTRY(compat_sys_fstatfs64_wrapper) |
50 | mov w3, #84 | 50 | mov w3, #84 |
51 | cmp w1, #88 | 51 | cmp w1, #88 |
52 | csel w1, w3, w1, eq | 52 | csel w1, w3, w1, eq |
@@ -58,33 +58,33 @@ ENDPROC(compat_sys_fstatfs64_wrapper) | |||
58 | * in registers or that take 32-bit parameters which require sign | 58 | * in registers or that take 32-bit parameters which require sign |
59 | * extension. | 59 | * extension. |
60 | */ | 60 | */ |
61 | compat_sys_pread64_wrapper: | 61 | ENTRY(compat_sys_pread64_wrapper) |
62 | regs_to_64 x3, x4, x5 | 62 | regs_to_64 x3, x4, x5 |
63 | b sys_pread64 | 63 | b sys_pread64 |
64 | ENDPROC(compat_sys_pread64_wrapper) | 64 | ENDPROC(compat_sys_pread64_wrapper) |
65 | 65 | ||
66 | compat_sys_pwrite64_wrapper: | 66 | ENTRY(compat_sys_pwrite64_wrapper) |
67 | regs_to_64 x3, x4, x5 | 67 | regs_to_64 x3, x4, x5 |
68 | b sys_pwrite64 | 68 | b sys_pwrite64 |
69 | ENDPROC(compat_sys_pwrite64_wrapper) | 69 | ENDPROC(compat_sys_pwrite64_wrapper) |
70 | 70 | ||
71 | compat_sys_truncate64_wrapper: | 71 | ENTRY(compat_sys_truncate64_wrapper) |
72 | regs_to_64 x1, x2, x3 | 72 | regs_to_64 x1, x2, x3 |
73 | b sys_truncate | 73 | b sys_truncate |
74 | ENDPROC(compat_sys_truncate64_wrapper) | 74 | ENDPROC(compat_sys_truncate64_wrapper) |
75 | 75 | ||
76 | compat_sys_ftruncate64_wrapper: | 76 | ENTRY(compat_sys_ftruncate64_wrapper) |
77 | regs_to_64 x1, x2, x3 | 77 | regs_to_64 x1, x2, x3 |
78 | b sys_ftruncate | 78 | b sys_ftruncate |
79 | ENDPROC(compat_sys_ftruncate64_wrapper) | 79 | ENDPROC(compat_sys_ftruncate64_wrapper) |
80 | 80 | ||
81 | compat_sys_readahead_wrapper: | 81 | ENTRY(compat_sys_readahead_wrapper) |
82 | regs_to_64 x1, x2, x3 | 82 | regs_to_64 x1, x2, x3 |
83 | mov w2, w4 | 83 | mov w2, w4 |
84 | b sys_readahead | 84 | b sys_readahead |
85 | ENDPROC(compat_sys_readahead_wrapper) | 85 | ENDPROC(compat_sys_readahead_wrapper) |
86 | 86 | ||
87 | compat_sys_fadvise64_64_wrapper: | 87 | ENTRY(compat_sys_fadvise64_64_wrapper) |
88 | mov w6, w1 | 88 | mov w6, w1 |
89 | regs_to_64 x1, x2, x3 | 89 | regs_to_64 x1, x2, x3 |
90 | regs_to_64 x2, x4, x5 | 90 | regs_to_64 x2, x4, x5 |
@@ -92,24 +92,14 @@ compat_sys_fadvise64_64_wrapper: | |||
92 | b sys_fadvise64_64 | 92 | b sys_fadvise64_64 |
93 | ENDPROC(compat_sys_fadvise64_64_wrapper) | 93 | ENDPROC(compat_sys_fadvise64_64_wrapper) |
94 | 94 | ||
95 | compat_sys_sync_file_range2_wrapper: | 95 | ENTRY(compat_sys_sync_file_range2_wrapper) |
96 | regs_to_64 x2, x2, x3 | 96 | regs_to_64 x2, x2, x3 |
97 | regs_to_64 x3, x4, x5 | 97 | regs_to_64 x3, x4, x5 |
98 | b sys_sync_file_range2 | 98 | b sys_sync_file_range2 |
99 | ENDPROC(compat_sys_sync_file_range2_wrapper) | 99 | ENDPROC(compat_sys_sync_file_range2_wrapper) |
100 | 100 | ||
101 | compat_sys_fallocate_wrapper: | 101 | ENTRY(compat_sys_fallocate_wrapper) |
102 | regs_to_64 x2, x2, x3 | 102 | regs_to_64 x2, x2, x3 |
103 | regs_to_64 x3, x4, x5 | 103 | regs_to_64 x3, x4, x5 |
104 | b sys_fallocate | 104 | b sys_fallocate |
105 | ENDPROC(compat_sys_fallocate_wrapper) | 105 | ENDPROC(compat_sys_fallocate_wrapper) |
106 | |||
107 | #undef __SYSCALL | ||
108 | #define __SYSCALL(x, y) .quad y // x | ||
109 | |||
110 | /* | ||
111 | * The system calls table must be 4KB aligned. | ||
112 | */ | ||
113 | .align 12 | ||
114 | ENTRY(compat_sys_call_table) | ||
115 | #include <asm/unistd32.h> | ||
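The table removed here reappears as a C array (in a sys32.c not shown in this diff), presumably along these lines, with the ENTRY() changes above making the wrappers visible to it:

	#undef __SYSCALL
	#define __SYSCALL(nr, sym)	[nr] = sym,

	/* 4K-aligned so entry.S can reach it with a bare adrp. */
	const void *compat_sys_call_table[__NR_compat_syscalls] __aligned(4096) = {
		[0 ... __NR_compat_syscalls - 1] = sys_ni_syscall,
	#include <asm/unistd32.h>
	};

The alignment also explains the adr -> adrp switch in entry.S above: adrp yields the 4K page base of the symbol, which is exact only because the table is page-aligned.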
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index df1cf15377b4..98bbe06e469c 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c | |||
@@ -894,7 +894,7 @@ static struct notifier_block hw_breakpoint_reset_nb = { | |||
894 | .notifier_call = hw_breakpoint_reset_notify, | 894 | .notifier_call = hw_breakpoint_reset_notify, |
895 | }; | 895 | }; |
896 | 896 | ||
897 | #ifdef CONFIG_ARM64_CPU_SUSPEND | 897 | #ifdef CONFIG_CPU_PM |
898 | extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)); | 898 | extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)); |
899 | #else | 899 | #else |
900 | static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) | 900 | static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) |
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 7e9327a0986d..27d4864577e5 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c | |||
@@ -17,14 +17,19 @@ | |||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ | 18 | */ |
19 | #include <linux/bitops.h> | 19 | #include <linux/bitops.h> |
20 | #include <linux/bug.h> | ||
20 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
21 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/mm.h> | ||
22 | #include <linux/smp.h> | 24 | #include <linux/smp.h> |
25 | #include <linux/spinlock.h> | ||
23 | #include <linux/stop_machine.h> | 26 | #include <linux/stop_machine.h> |
27 | #include <linux/types.h> | ||
24 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
25 | 29 | ||
26 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
27 | #include <asm/debug-monitors.h> | 31 | #include <asm/debug-monitors.h> |
32 | #include <asm/fixmap.h> | ||
28 | #include <asm/insn.h> | 33 | #include <asm/insn.h> |
29 | 34 | ||
30 | #define AARCH64_INSN_SF_BIT BIT(31) | 35 | #define AARCH64_INSN_SF_BIT BIT(31) |
@@ -72,6 +77,29 @@ bool __kprobes aarch64_insn_is_nop(u32 insn) | |||
72 | } | 77 | } |
73 | } | 78 | } |
74 | 79 | ||
80 | static DEFINE_SPINLOCK(patch_lock); | ||
81 | |||
82 | static void __kprobes *patch_map(void *addr, int fixmap) | ||
83 | { | ||
84 | unsigned long uintaddr = (uintptr_t) addr; | ||
85 | bool module = !core_kernel_text(uintaddr); | ||
86 | struct page *page; | ||
87 | |||
88 | if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) | ||
89 | page = vmalloc_to_page(addr); | ||
90 | else | ||
91 | page = virt_to_page(addr); | ||
92 | |||
93 | BUG_ON(!page); | ||
94 | set_fixmap(fixmap, page_to_phys(page)); | ||
95 | |||
96 | return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); | ||
97 | } | ||
98 | |||
99 | static void __kprobes patch_unmap(int fixmap) | ||
100 | { | ||
101 | clear_fixmap(fixmap); | ||
102 | } | ||
75 | /* | 103 | /* |
76 | * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always | 104 | * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always |
77 | * little-endian. | 105 | * little-endian. |
@@ -88,10 +116,27 @@ int __kprobes aarch64_insn_read(void *addr, u32 *insnp) | |||
88 | return ret; | 116 | return ret; |
89 | } | 117 | } |
90 | 118 | ||
119 | static int __kprobes __aarch64_insn_write(void *addr, u32 insn) | ||
120 | { | ||
121 | void *waddr = addr; | ||
122 | unsigned long flags = 0; | ||
123 | int ret; | ||
124 | |||
125 | spin_lock_irqsave(&patch_lock, flags); | ||
126 | waddr = patch_map(addr, FIX_TEXT_POKE0); | ||
127 | |||
128 | ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE); | ||
129 | |||
130 | patch_unmap(FIX_TEXT_POKE0); | ||
131 | spin_unlock_irqrestore(&patch_lock, flags); | ||
132 | |||
133 | return ret; | ||
134 | } | ||
135 | |||
91 | int __kprobes aarch64_insn_write(void *addr, u32 insn) | 136 | int __kprobes aarch64_insn_write(void *addr, u32 insn) |
92 | { | 137 | { |
93 | insn = cpu_to_le32(insn); | 138 | insn = cpu_to_le32(insn); |
94 | return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE); | 139 | return __aarch64_insn_write(addr, insn); |
95 | } | 140 | } |
96 | 141 | ||
97 | static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn) | 142 | static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn) |
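With the fixmap route above, writing an instruction no longer depends on kernel text being writable in place. A caller would do something like the following (aarch64_insn_gen_nop() is a helper elsewhere in this file; the target address is hypothetical):

	void *addr = some_patch_site;		/* hypothetical patch target */
	u32 nop = aarch64_insn_gen_nop();

	if (aarch64_insn_write(addr, nop))	/* returns 0 on success */
		pr_err("failed to patch instruction at %p\n", addr);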
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index f1dbca7d5c96..3425f311c49e 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c | |||
@@ -540,8 +540,6 @@ const struct cpu_operations cpu_psci_ops = { | |||
540 | .name = "psci", | 540 | .name = "psci", |
541 | #ifdef CONFIG_CPU_IDLE | 541 | #ifdef CONFIG_CPU_IDLE |
542 | .cpu_init_idle = cpu_psci_cpu_init_idle, | 542 | .cpu_init_idle = cpu_psci_cpu_init_idle, |
543 | #endif | ||
544 | #ifdef CONFIG_ARM64_CPU_SUSPEND | ||
545 | .cpu_suspend = cpu_psci_cpu_suspend, | 543 | .cpu_suspend = cpu_psci_cpu_suspend, |
546 | #endif | 544 | #endif |
547 | #ifdef CONFIG_SMP | 545 | #ifdef CONFIG_SMP |
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 20fe2932ad0c..e8420f635bd4 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/fs.h> | 40 | #include <linux/fs.h> |
41 | #include <linux/proc_fs.h> | 41 | #include <linux/proc_fs.h> |
42 | #include <linux/memblock.h> | 42 | #include <linux/memblock.h> |
43 | #include <linux/of_iommu.h> | ||
43 | #include <linux/of_fdt.h> | 44 | #include <linux/of_fdt.h> |
44 | #include <linux/of_platform.h> | 45 | #include <linux/of_platform.h> |
45 | #include <linux/efi.h> | 46 | #include <linux/efi.h> |
@@ -322,25 +323,6 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) | |||
322 | dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name()); | 323 | dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name()); |
323 | } | 324 | } |
324 | 325 | ||
325 | /* | ||
326 | * Limit the memory size that was specified via FDT. | ||
327 | */ | ||
328 | static int __init early_mem(char *p) | ||
329 | { | ||
330 | phys_addr_t limit; | ||
331 | |||
332 | if (!p) | ||
333 | return 1; | ||
334 | |||
335 | limit = memparse(p, &p) & PAGE_MASK; | ||
336 | pr_notice("Memory limited to %lldMB\n", limit >> 20); | ||
337 | |||
338 | memblock_enforce_memory_limit(limit); | ||
339 | |||
340 | return 0; | ||
341 | } | ||
342 | early_param("mem", early_mem); | ||
343 | |||
344 | static void __init request_standard_resources(void) | 326 | static void __init request_standard_resources(void) |
345 | { | 327 | { |
346 | struct memblock_region *region; | 328 | struct memblock_region *region; |
@@ -401,7 +383,6 @@ void __init setup_arch(char **cmdline_p) | |||
401 | paging_init(); | 383 | paging_init(); |
402 | request_standard_resources(); | 384 | request_standard_resources(); |
403 | 385 | ||
404 | efi_idmap_init(); | ||
405 | early_ioremap_reset(); | 386 | early_ioremap_reset(); |
406 | 387 | ||
407 | unflatten_device_tree(); | 388 | unflatten_device_tree(); |
@@ -425,6 +406,7 @@ void __init setup_arch(char **cmdline_p) | |||
425 | 406 | ||
426 | static int __init arm64_device_init(void) | 407 | static int __init arm64_device_init(void) |
427 | { | 408 | { |
409 | of_iommu_init(); | ||
428 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | 410 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); |
429 | return 0; | 411 | return 0; |
430 | } | 412 | } |
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 5a1ba6e80d4e..e299de396e9b 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c | |||
@@ -440,7 +440,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, | |||
440 | { | 440 | { |
441 | compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler); | 441 | compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler); |
442 | compat_ulong_t retcode; | 442 | compat_ulong_t retcode; |
443 | compat_ulong_t spsr = regs->pstate & ~PSR_f; | 443 | compat_ulong_t spsr = regs->pstate & ~(PSR_f | COMPAT_PSR_E_BIT); |
444 | int thumb; | 444 | int thumb; |
445 | 445 | ||
446 | /* Check if the handler is written for ARM or Thumb */ | 446 | /* Check if the handler is written for ARM or Thumb */ |
@@ -454,6 +454,9 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, | |||
454 | /* The IT state must be cleared for both ARM and Thumb-2 */ | 454 | /* The IT state must be cleared for both ARM and Thumb-2 */ |
455 | spsr &= ~COMPAT_PSR_IT_MASK; | 455 | spsr &= ~COMPAT_PSR_IT_MASK; |
456 | 456 | ||
457 | /* Restore the original endianness */ | ||
458 | spsr |= COMPAT_PSR_ENDSTATE; | ||
459 | |||
457 | if (ka->sa.sa_flags & SA_RESTORER) { | 460 | if (ka->sa.sa_flags & SA_RESTORER) { |
458 | retcode = ptr_to_compat(ka->sa.sa_restorer); | 461 | retcode = ptr_to_compat(ka->sa.sa_restorer); |
459 | } else { | 462 | } else { |
@@ -501,7 +504,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf, | |||
501 | 504 | ||
502 | __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err); | 505 | __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err); |
503 | /* set the compat FSR WnR */ | 506 | /* set the compat FSR WnR */ |
504 | __put_user_error(!!(current->thread.fault_code & ESR_EL1_WRITE) << | 507 | __put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) << |
505 | FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err); | 508 | FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err); |
506 | __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); | 509 | __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); |
507 | __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); | 510 | __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); |
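Once SETEND can flip PSTATE.E under the task's feet, signal delivery must not enter the handler with whatever endianness the task last selected: the E bit is masked out of the saved state and COMPAT_PSR_ENDSTATE, the build-time default for AArch32, is ORed back in. A condensed sketch of the SPSR fixup, using names from the patch with the surrounding function simplified:

	/* Sketch: SPSR used to enter a compat signal handler. */
	static u32 compat_handler_spsr(u64 pstate, bool thumb)
	{
		u32 spsr = pstate & ~(PSR_f | COMPAT_PSR_E_BIT);

		if (thumb)
			spsr |= COMPAT_PSR_T_BIT;	/* Thumb entry point */

		spsr &= ~COMPAT_PSR_IT_MASK;		/* no live IT block */
		spsr |= COMPAT_PSR_ENDSTATE;		/* default endianness */
		return spsr;
	}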
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 7ae6ee085261..328b8ce4b007 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c | |||
@@ -65,7 +65,6 @@ struct secondary_data secondary_data; | |||
65 | enum ipi_msg_type { | 65 | enum ipi_msg_type { |
66 | IPI_RESCHEDULE, | 66 | IPI_RESCHEDULE, |
67 | IPI_CALL_FUNC, | 67 | IPI_CALL_FUNC, |
68 | IPI_CALL_FUNC_SINGLE, | ||
69 | IPI_CPU_STOP, | 68 | IPI_CPU_STOP, |
70 | IPI_TIMER, | 69 | IPI_TIMER, |
71 | IPI_IRQ_WORK, | 70 | IPI_IRQ_WORK, |
@@ -483,7 +482,6 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = { | |||
483 | #define S(x,s) [x] = s | 482 | #define S(x,s) [x] = s |
484 | S(IPI_RESCHEDULE, "Rescheduling interrupts"), | 483 | S(IPI_RESCHEDULE, "Rescheduling interrupts"), |
485 | S(IPI_CALL_FUNC, "Function call interrupts"), | 484 | S(IPI_CALL_FUNC, "Function call interrupts"), |
486 | S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), | ||
487 | S(IPI_CPU_STOP, "CPU stop interrupts"), | 485 | S(IPI_CPU_STOP, "CPU stop interrupts"), |
488 | S(IPI_TIMER, "Timer broadcast interrupts"), | 486 | S(IPI_TIMER, "Timer broadcast interrupts"), |
489 | S(IPI_IRQ_WORK, "IRQ work interrupts"), | 487 | S(IPI_IRQ_WORK, "IRQ work interrupts"), |
@@ -527,7 +525,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) | |||
527 | 525 | ||
528 | void arch_send_call_function_single_ipi(int cpu) | 526 | void arch_send_call_function_single_ipi(int cpu) |
529 | { | 527 | { |
530 | smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); | 528 | smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC); |
531 | } | 529 | } |
532 | 530 | ||
533 | #ifdef CONFIG_IRQ_WORK | 531 | #ifdef CONFIG_IRQ_WORK |
@@ -585,12 +583,6 @@ void handle_IPI(int ipinr, struct pt_regs *regs) | |||
585 | irq_exit(); | 583 | irq_exit(); |
586 | break; | 584 | break; |
587 | 585 | ||
588 | case IPI_CALL_FUNC_SINGLE: | ||
589 | irq_enter(); | ||
590 | generic_smp_call_function_single_interrupt(); | ||
591 | irq_exit(); | ||
592 | break; | ||
593 | |||
594 | case IPI_CPU_STOP: | 586 | case IPI_CPU_STOP: |
595 | irq_enter(); | 587 | irq_enter(); |
596 | ipi_cpu_stop(cpu); | 588 | ipi_cpu_stop(cpu); |
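The single-function-call IPI could be retired because, by this point in the kernel's history, kernel/smp.c funnels smp_call_function_single() and smp_call_function_many() requests onto the same per-CPU llist, and generic_smp_call_function_single_interrupt() is merely an alias of the routine that drains it. The surviving dispatch, reduced to a sketch:

	/* One IPI now serves both single- and multi-target cross calls. */
	static void handle_call_function_ipi(void)
	{
		irq_enter();
		/* Drains the shared per-CPU queue, whichever API filled it. */
		generic_smp_call_function_interrupt();
		irq_exit();
	}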
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 2d6b6065fe7f..d7daf45ae7a2 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c | |||
@@ -1,7 +1,6 @@ | |||
1 | #include <linux/percpu.h> | 1 | #include <linux/percpu.h> |
2 | #include <linux/slab.h> | 2 | #include <linux/slab.h> |
3 | #include <asm/cacheflush.h> | 3 | #include <asm/cacheflush.h> |
4 | #include <asm/cpu_ops.h> | ||
5 | #include <asm/debug-monitors.h> | 4 | #include <asm/debug-monitors.h> |
6 | #include <asm/pgtable.h> | 5 | #include <asm/pgtable.h> |
7 | #include <asm/memory.h> | 6 | #include <asm/memory.h> |
@@ -51,26 +50,6 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) | |||
51 | hw_breakpoint_restore = hw_bp_restore; | 50 | hw_breakpoint_restore = hw_bp_restore; |
52 | } | 51 | } |
53 | 52 | ||
54 | /** | ||
55 | * cpu_suspend() - function to enter a low-power state | ||
56 | * @arg: argument to pass to CPU suspend operations | ||
57 | * | ||
58 | * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU | ||
59 | * operations back-end error code otherwise. | ||
60 | */ | ||
61 | int cpu_suspend(unsigned long arg) | ||
62 | { | ||
63 | int cpu = smp_processor_id(); | ||
64 | |||
65 | /* | ||
66 | * If cpu_ops have not been registered or suspend | ||
67 | * has not been initialized, cpu_suspend call fails early. | ||
68 | */ | ||
69 | if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) | ||
70 | return -EOPNOTSUPP; | ||
71 | return cpu_ops[cpu]->cpu_suspend(arg); | ||
72 | } | ||
73 | |||
74 | /* | 53 | /* |
75 | * __cpu_suspend | 54 | * __cpu_suspend |
76 | * | 55 | * |
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c index 3fa98ff14f0e..75151aaf1a52 100644 --- a/arch/arm64/kernel/sys.c +++ b/arch/arm64/kernel/sys.c | |||
@@ -39,10 +39,9 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len, | |||
39 | /* | 39 | /* |
40 | * Wrappers to pass the pt_regs argument. | 40 | * Wrappers to pass the pt_regs argument. |
41 | */ | 41 | */ |
42 | asmlinkage long sys_rt_sigreturn_wrapper(void); | ||
42 | #define sys_rt_sigreturn sys_rt_sigreturn_wrapper | 43 | #define sys_rt_sigreturn sys_rt_sigreturn_wrapper |
43 | 44 | ||
44 | #include <asm/syscalls.h> | ||
45 | |||
46 | #undef __SYSCALL | 45 | #undef __SYSCALL |
47 | #define __SYSCALL(nr, sym) [nr] = sym, | 46 | #define __SYSCALL(nr, sym) [nr] = sym, |
48 | 47 | ||
@@ -50,7 +49,7 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len, | |||
50 | * The sys_call_table array must be 4K aligned to be accessible from | 49 | * The sys_call_table array must be 4K aligned to be accessible from |
51 | * kernel/entry.S. | 50 | * kernel/entry.S. |
52 | */ | 51 | */ |
53 | void *sys_call_table[__NR_syscalls] __aligned(4096) = { | 52 | void * const sys_call_table[__NR_syscalls] __aligned(4096) = { |
54 | [0 ... __NR_syscalls - 1] = sys_ni_syscall, | 53 | [0 ... __NR_syscalls - 1] = sys_ni_syscall, |
55 | #include <asm/unistd.h> | 54 | #include <asm/unistd.h> |
56 | }; | 55 | }; |
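Qualifying the table const moves it from .data into .rodata, so once the kernel's read-only protections are in place the array of handler pointers, a classic rootkit target, cannot be silently rewritten. The construct itself is a GNU C designated range initializer; a self-contained userspace model:

	#include <errno.h>
	#include <stdio.h>

	typedef long (*syscall_fn_t)(void);

	static long sys_ni(void)    { return -ENOSYS; }
	static long sys_hello(void) { return 42; }	/* hypothetical entry */

	/* Default every slot to the ENOSYS stub, then let later
	 * designated initializers override individual numbers; the
	 * const qualifier places the array in .rodata. */
	static const syscall_fn_t table[8] __attribute__((aligned(4096))) = {
		[0 ... 7] = sys_ni,
		[3]       = sys_hello,
	};

	int main(void)
	{
		printf("%ld %ld\n", table[0](), table[3]());	/* -38 42 */
		return 0;
	}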
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c new file mode 100644 index 000000000000..2d5ab3c90b82 --- /dev/null +++ b/arch/arm64/kernel/sys32.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * arch/arm64/kernel/sys32.c | ||
3 | * | ||
4 | * Copyright (C) 2015 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | /* | ||
20 | * Needed to avoid conflicting __NR_* macros between uapi/asm/unistd.h and | ||
21 | * asm/unistd32.h. | ||
22 | */ | ||
23 | #define __COMPAT_SYSCALL_NR | ||
24 | |||
25 | #include <linux/compiler.h> | ||
26 | #include <linux/syscalls.h> | ||
27 | |||
28 | asmlinkage long compat_sys_sigreturn_wrapper(void); | ||
29 | asmlinkage long compat_sys_rt_sigreturn_wrapper(void); | ||
30 | asmlinkage long compat_sys_statfs64_wrapper(void); | ||
31 | asmlinkage long compat_sys_fstatfs64_wrapper(void); | ||
32 | asmlinkage long compat_sys_pread64_wrapper(void); | ||
33 | asmlinkage long compat_sys_pwrite64_wrapper(void); | ||
34 | asmlinkage long compat_sys_truncate64_wrapper(void); | ||
35 | asmlinkage long compat_sys_ftruncate64_wrapper(void); | ||
36 | asmlinkage long compat_sys_readahead_wrapper(void); | ||
37 | asmlinkage long compat_sys_fadvise64_64_wrapper(void); | ||
38 | asmlinkage long compat_sys_sync_file_range2_wrapper(void); | ||
39 | asmlinkage long compat_sys_fallocate_wrapper(void); | ||
40 | |||
41 | #undef __SYSCALL | ||
42 | #define __SYSCALL(nr, sym) [nr] = sym, | ||
43 | |||
44 | /* | ||
45 | * The compat_sys_call_table array must be 4K aligned to be accessible from | ||
46 | * kernel/entry.S. | ||
47 | */ | ||
48 | void * const compat_sys_call_table[__NR_compat_syscalls] __aligned(4096) = { | ||
49 | [0 ... __NR_compat_syscalls - 1] = sys_ni_syscall, | ||
50 | #include <asm/unistd32.h> | ||
51 | }; | ||
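The trick that makes a C implementation possible is that asm/unistd32.h already lists every compat syscall as __SYSCALL(nr, handler) invocations; redefining the macro, as above, turns that very header into table initializers. In miniature, with stand-in handlers:

	typedef long (*syscall_fn_t)(void);

	static long sys_one(void) { return 1; }	/* stand-ins for the */
	static long sys_two(void) { return 2; }	/* real entry points */

	#define __SYSCALL(nr, sym)	[nr] = sym,

	static const syscall_fn_t compat_table[4] = {
		__SYSCALL(1, sys_one)	/* expands to: [1] = sys_one, */
		__SYSCALL(2, sys_two)	/* expands to: [2] = sys_two, */
	};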
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 0a801e3743d5..1ef2940df13c 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
@@ -33,6 +33,7 @@ | |||
33 | 33 | ||
34 | #include <asm/atomic.h> | 34 | #include <asm/atomic.h> |
35 | #include <asm/debug-monitors.h> | 35 | #include <asm/debug-monitors.h> |
36 | #include <asm/esr.h> | ||
36 | #include <asm/traps.h> | 37 | #include <asm/traps.h> |
37 | #include <asm/stacktrace.h> | 38 | #include <asm/stacktrace.h> |
38 | #include <asm/exception.h> | 39 | #include <asm/exception.h> |
@@ -373,6 +374,51 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) | |||
373 | return sys_ni_syscall(); | 374 | return sys_ni_syscall(); |
374 | } | 375 | } |
375 | 376 | ||
377 | static const char *esr_class_str[] = { | ||
378 | [0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC", | ||
379 | [ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized", | ||
380 | [ESR_ELx_EC_WFx] = "WFI/WFE", | ||
381 | [ESR_ELx_EC_CP15_32] = "CP15 MCR/MRC", | ||
382 | [ESR_ELx_EC_CP15_64] = "CP15 MCRR/MRRC", | ||
383 | [ESR_ELx_EC_CP14_MR] = "CP14 MCR/MRC", | ||
384 | [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC", | ||
385 | [ESR_ELx_EC_FP_ASIMD] = "ASIMD", | ||
386 | [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS", | ||
387 | [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC", | ||
388 | [ESR_ELx_EC_ILL] = "PSTATE.IL", | ||
389 | [ESR_ELx_EC_SVC32] = "SVC (AArch32)", | ||
390 | [ESR_ELx_EC_HVC32] = "HVC (AArch32)", | ||
391 | [ESR_ELx_EC_SMC32] = "SMC (AArch32)", | ||
392 | [ESR_ELx_EC_SVC64] = "SVC (AArch64)", | ||
393 | [ESR_ELx_EC_HVC64] = "HVC (AArch64)", | ||
394 | [ESR_ELx_EC_SMC64] = "SMC (AArch64)", | ||
395 | [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)", | ||
396 | [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF", | ||
397 | [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)", | ||
398 | [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)", | ||
399 | [ESR_ELx_EC_PC_ALIGN] = "PC Alignment", | ||
400 | [ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)", | ||
401 | [ESR_ELx_EC_DABT_CUR] = "DABT (current EL)", | ||
402 | [ESR_ELx_EC_SP_ALIGN] = "SP Alignment", | ||
403 | [ESR_ELx_EC_FP_EXC32] = "FP (AArch32)", | ||
404 | [ESR_ELx_EC_FP_EXC64] = "FP (AArch64)", | ||
405 | [ESR_ELx_EC_SERROR] = "SError", | ||
406 | [ESR_ELx_EC_BREAKPT_LOW] = "Breakpoint (lower EL)", | ||
407 | [ESR_ELx_EC_BREAKPT_CUR] = "Breakpoint (current EL)", | ||
408 | [ESR_ELx_EC_SOFTSTP_LOW] = "Software Step (lower EL)", | ||
409 | [ESR_ELx_EC_SOFTSTP_CUR] = "Software Step (current EL)", | ||
410 | [ESR_ELx_EC_WATCHPT_LOW] = "Watchpoint (lower EL)", | ||
411 | [ESR_ELx_EC_WATCHPT_CUR] = "Watchpoint (current EL)", | ||
412 | [ESR_ELx_EC_BKPT32] = "BKPT (AArch32)", | ||
413 | [ESR_ELx_EC_VECTOR32] = "Vector catch (AArch32)", | ||
414 | [ESR_ELx_EC_BRK64] = "BRK (AArch64)", | ||
415 | }; | ||
416 | |||
417 | const char *esr_get_class_string(u32 esr) | ||
418 | { | ||
419 | return esr_class_str[esr >> ESR_ELx_EC_SHIFT]; | ||
420 | } | ||
421 | |||
376 | /* | 422 | /* |
377 | * bad_mode handles the impossible case in the exception vector. | 423 | * bad_mode handles the impossible case in the exception vector. |
378 | */ | 424 | */ |
@@ -382,8 +428,8 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) | |||
382 | void __user *pc = (void __user *)instruction_pointer(regs); | 428 | void __user *pc = (void __user *)instruction_pointer(regs); |
383 | console_verbose(); | 429 | console_verbose(); |
384 | 430 | ||
385 | pr_crit("Bad mode in %s handler detected, code 0x%08x\n", | 431 | pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n", |
386 | handler[reason], esr); | 432 | handler[reason], esr, esr_get_class_string(esr)); |
387 | __show_regs(regs); | 433 | __show_regs(regs); |
388 | 434 | ||
389 | info.si_signo = SIGILL; | 435 | info.si_signo = SIGILL; |
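esr_get_class_string() needs no bounds check: the exception class occupies ESR bits [31:26], so for a 32-bit ESR the value of esr >> ESR_ELx_EC_SHIFT is always within [0, ESR_ELx_EC_MAX] (0x3f), and any class without a dedicated entry resolves to the "UNRECOGNIZED EC" default supplied by the range initializer. The same idea in miniature:

	#include <stdint.h>

	#define EC_SHIFT 26	/* models ESR_ELx_EC_SHIFT */
	#define EC_MAX   0x3f	/* models ESR_ELx_EC_MAX   */

	static const char *class_str[EC_MAX + 1] = {
		[0 ... EC_MAX] = "UNRECOGNIZED EC",	/* safe default */
		[0x15]         = "SVC (AArch64)",	/* ESR_ELx_EC_SVC64 */
	};

	/* esr is 32 bits wide, so the shift alone bounds the index. */
	static const char *class_string(uint32_t esr)
	{
		return class_str[esr >> EC_SHIFT];
	}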
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 9965ec87cbec..5d9d2dca530d 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <asm/thread_info.h> | 8 | #include <asm/thread_info.h> |
9 | #include <asm/memory.h> | 9 | #include <asm/memory.h> |
10 | #include <asm/page.h> | 10 | #include <asm/page.h> |
11 | #include <asm/pgtable.h> | ||
11 | 12 | ||
12 | #include "image.h" | 13 | #include "image.h" |
13 | 14 | ||
@@ -49,6 +50,14 @@ PECOFF_FILE_ALIGNMENT = 0x200; | |||
49 | #define PECOFF_EDATA_PADDING | 50 | #define PECOFF_EDATA_PADDING |
50 | #endif | 51 | #endif |
51 | 52 | ||
53 | #ifdef CONFIG_DEBUG_ALIGN_RODATA | ||
54 | #define ALIGN_DEBUG_RO . = ALIGN(1<<SECTION_SHIFT); | ||
55 | #define ALIGN_DEBUG_RO_MIN(min) ALIGN_DEBUG_RO | ||
56 | #else | ||
57 | #define ALIGN_DEBUG_RO | ||
58 | #define ALIGN_DEBUG_RO_MIN(min) . = ALIGN(min); | ||
59 | #endif | ||
60 | |||
52 | SECTIONS | 61 | SECTIONS |
53 | { | 62 | { |
54 | /* | 63 | /* |
@@ -71,6 +80,7 @@ SECTIONS | |||
71 | _text = .; | 80 | _text = .; |
72 | HEAD_TEXT | 81 | HEAD_TEXT |
73 | } | 82 | } |
83 | ALIGN_DEBUG_RO | ||
74 | .text : { /* Real text segment */ | 84 | .text : { /* Real text segment */ |
75 | _stext = .; /* Text and read-only data */ | 85 | _stext = .; /* Text and read-only data */ |
76 | __exception_text_start = .; | 86 | __exception_text_start = .; |
@@ -87,19 +97,22 @@ SECTIONS | |||
87 | *(.got) /* Global offset table */ | 97 | *(.got) /* Global offset table */ |
88 | } | 98 | } |
89 | 99 | ||
100 | ALIGN_DEBUG_RO | ||
90 | RO_DATA(PAGE_SIZE) | 101 | RO_DATA(PAGE_SIZE) |
91 | EXCEPTION_TABLE(8) | 102 | EXCEPTION_TABLE(8) |
92 | NOTES | 103 | NOTES |
104 | ALIGN_DEBUG_RO | ||
93 | _etext = .; /* End of text and rodata section */ | 105 | _etext = .; /* End of text and rodata section */ |
94 | 106 | ||
95 | . = ALIGN(PAGE_SIZE); | 107 | ALIGN_DEBUG_RO_MIN(PAGE_SIZE) |
96 | __init_begin = .; | 108 | __init_begin = .; |
97 | 109 | ||
98 | INIT_TEXT_SECTION(8) | 110 | INIT_TEXT_SECTION(8) |
99 | .exit.text : { | 111 | .exit.text : { |
100 | ARM_EXIT_KEEP(EXIT_TEXT) | 112 | ARM_EXIT_KEEP(EXIT_TEXT) |
101 | } | 113 | } |
102 | . = ALIGN(16); | 114 | |
115 | ALIGN_DEBUG_RO_MIN(16) | ||
103 | .init.data : { | 116 | .init.data : { |
104 | INIT_DATA | 117 | INIT_DATA |
105 | INIT_SETUP(16) | 118 | INIT_SETUP(16) |
diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c index 124418d17049..f87d8fbaa48d 100644 --- a/arch/arm64/kvm/emulate.c +++ b/arch/arm64/kvm/emulate.c | |||
@@ -22,6 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | #include <asm/esr.h> | ||
25 | #include <asm/kvm_emulate.h> | 26 | #include <asm/kvm_emulate.h> |
26 | 27 | ||
27 | /* | 28 | /* |
@@ -55,8 +56,8 @@ static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) | |||
55 | { | 56 | { |
56 | u32 esr = kvm_vcpu_get_hsr(vcpu); | 57 | u32 esr = kvm_vcpu_get_hsr(vcpu); |
57 | 58 | ||
58 | if (esr & ESR_EL2_CV) | 59 | if (esr & ESR_ELx_CV) |
59 | return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT; | 60 | return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT; |
60 | 61 | ||
61 | return -1; | 62 | return -1; |
62 | } | 63 | } |
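For a trapped conditional AArch32 instruction the syndrome records the instruction's condition code: bit 24 (CV) flags whether bits [23:20] (COND) are valid. Only the spelling changes in this hunk; the extraction itself, written out with the architectural bit positions (taken from the ARMv8 ARM, not defined here):

	#include <stdint.h>

	#define CV_BIT     (1U << 24)	/* condition field valid */
	#define COND_MASK  (0xfU << 20)
	#define COND_SHIFT 20

	/* Returns the AArch32 condition code, or -1 if none was recorded. */
	static int hsr_condition(uint32_t hsr)
	{
		if (!(hsr & CV_BIT))
			return -1;
		return (int)((hsr & COND_MASK) >> COND_SHIFT);
	}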
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 34b8bd0711e9..29b184a8f3f8 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c | |||
@@ -21,8 +21,10 @@ | |||
21 | 21 | ||
22 | #include <linux/kvm.h> | 22 | #include <linux/kvm.h> |
23 | #include <linux/kvm_host.h> | 23 | #include <linux/kvm_host.h> |
24 | #include <asm/kvm_emulate.h> | 24 | |
25 | #include <asm/esr.h> | ||
25 | #include <asm/kvm_coproc.h> | 26 | #include <asm/kvm_coproc.h> |
27 | #include <asm/kvm_emulate.h> | ||
26 | #include <asm/kvm_mmu.h> | 28 | #include <asm/kvm_mmu.h> |
27 | #include <asm/kvm_psci.h> | 29 | #include <asm/kvm_psci.h> |
28 | 30 | ||
@@ -61,7 +63,7 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
61 | */ | 63 | */ |
62 | static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) | 64 | static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) |
63 | { | 65 | { |
64 | if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE) | 66 | if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) |
65 | kvm_vcpu_on_spin(vcpu); | 67 | kvm_vcpu_on_spin(vcpu); |
66 | else | 68 | else |
67 | kvm_vcpu_block(vcpu); | 69 | kvm_vcpu_block(vcpu); |
@@ -72,29 +74,30 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
72 | } | 74 | } |
73 | 75 | ||
74 | static exit_handle_fn arm_exit_handlers[] = { | 76 | static exit_handle_fn arm_exit_handlers[] = { |
75 | [ESR_EL2_EC_WFI] = kvm_handle_wfx, | 77 | [ESR_ELx_EC_WFx] = kvm_handle_wfx, |
76 | [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32, | 78 | [ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32, |
77 | [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64, | 79 | [ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64, |
78 | [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_32, | 80 | [ESR_ELx_EC_CP14_MR] = kvm_handle_cp14_32, |
79 | [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store, | 81 | [ESR_ELx_EC_CP14_LS] = kvm_handle_cp14_load_store, |
80 | [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_64, | 82 | [ESR_ELx_EC_CP14_64] = kvm_handle_cp14_64, |
81 | [ESR_EL2_EC_HVC32] = handle_hvc, | 83 | [ESR_ELx_EC_HVC32] = handle_hvc, |
82 | [ESR_EL2_EC_SMC32] = handle_smc, | 84 | [ESR_ELx_EC_SMC32] = handle_smc, |
83 | [ESR_EL2_EC_HVC64] = handle_hvc, | 85 | [ESR_ELx_EC_HVC64] = handle_hvc, |
84 | [ESR_EL2_EC_SMC64] = handle_smc, | 86 | [ESR_ELx_EC_SMC64] = handle_smc, |
85 | [ESR_EL2_EC_SYS64] = kvm_handle_sys_reg, | 87 | [ESR_ELx_EC_SYS64] = kvm_handle_sys_reg, |
86 | [ESR_EL2_EC_IABT] = kvm_handle_guest_abort, | 88 | [ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort, |
87 | [ESR_EL2_EC_DABT] = kvm_handle_guest_abort, | 89 | [ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort, |
88 | }; | 90 | }; |
89 | 91 | ||
90 | static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) | 92 | static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) |
91 | { | 93 | { |
92 | u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); | 94 | u32 hsr = kvm_vcpu_get_hsr(vcpu); |
95 | u8 hsr_ec = hsr >> ESR_ELx_EC_SHIFT; | ||
93 | 96 | ||
94 | if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) || | 97 | if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) || |
95 | !arm_exit_handlers[hsr_ec]) { | 98 | !arm_exit_handlers[hsr_ec]) { |
96 | kvm_err("Unknown exception class: hsr: %#08x\n", | 99 | kvm_err("Unknown exception class: hsr: %#08x -- %s\n", |
97 | (unsigned int)kvm_vcpu_get_hsr(vcpu)); | 100 | hsr, esr_get_class_string(hsr)); |
98 | BUG(); | 101 | BUG(); |
99 | } | 102 | } |
100 | 103 | ||
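Because the handler array is indexed directly by exception class, the lookup must reject two cases: classes beyond the end of the deliberately short array, and in-range classes for which nothing was registered — exactly the two conditions in the if above. The shape of the pattern:

	typedef int (*exit_fn)(void);

	/* Sparse dispatch table: unregistered classes stay NULL. */
	static exit_fn handlers[16];

	static exit_fn get_handler(unsigned int ec)
	{
		if (ec >= sizeof(handlers) / sizeof(handlers[0]) ||
		    !handlers[ec])
			return NULL;	/* the caller treats this as fatal */
		return handlers[ec];
	}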
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index c3ca89c27c6b..9bff671cc561 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S | |||
@@ -17,15 +17,16 @@ | |||
17 | 17 | ||
18 | #include <linux/linkage.h> | 18 | #include <linux/linkage.h> |
19 | 19 | ||
20 | #include <asm/assembler.h> | ||
21 | #include <asm/memory.h> | ||
22 | #include <asm/asm-offsets.h> | 20 | #include <asm/asm-offsets.h> |
21 | #include <asm/assembler.h> | ||
23 | #include <asm/debug-monitors.h> | 22 | #include <asm/debug-monitors.h> |
23 | #include <asm/esr.h> | ||
24 | #include <asm/fpsimdmacros.h> | 24 | #include <asm/fpsimdmacros.h> |
25 | #include <asm/kvm.h> | 25 | #include <asm/kvm.h> |
26 | #include <asm/kvm_asm.h> | ||
27 | #include <asm/kvm_arm.h> | 26 | #include <asm/kvm_arm.h> |
27 | #include <asm/kvm_asm.h> | ||
28 | #include <asm/kvm_mmu.h> | 28 | #include <asm/kvm_mmu.h> |
29 | #include <asm/memory.h> | ||
29 | 30 | ||
30 | #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) | 31 | #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) |
31 | #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) | 32 | #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) |
@@ -1141,9 +1142,9 @@ el1_sync: // Guest trapped into EL2 | |||
1141 | push x2, x3 | 1142 | push x2, x3 |
1142 | 1143 | ||
1143 | mrs x1, esr_el2 | 1144 | mrs x1, esr_el2 |
1144 | lsr x2, x1, #ESR_EL2_EC_SHIFT | 1145 | lsr x2, x1, #ESR_ELx_EC_SHIFT |
1145 | 1146 | ||
1146 | cmp x2, #ESR_EL2_EC_HVC64 | 1147 | cmp x2, #ESR_ELx_EC_HVC64 |
1147 | b.ne el1_trap | 1148 | b.ne el1_trap |
1148 | 1149 | ||
1149 | mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest | 1150 | mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest |
@@ -1178,13 +1179,13 @@ el1_trap: | |||
1178 | * x1: ESR | 1179 | * x1: ESR |
1179 | * x2: ESR_EC | 1180 | * x2: ESR_EC |
1180 | */ | 1181 | */ |
1181 | cmp x2, #ESR_EL2_EC_DABT | 1182 | cmp x2, #ESR_ELx_EC_DABT_LOW |
1182 | mov x0, #ESR_EL2_EC_IABT | 1183 | mov x0, #ESR_ELx_EC_IABT_LOW |
1183 | ccmp x2, x0, #4, ne | 1184 | ccmp x2, x0, #4, ne |
1184 | b.ne 1f // Not an abort we care about | 1185 | b.ne 1f // Not an abort we care about |
1185 | 1186 | ||
1186 | /* This is an abort. Check for permission fault */ | 1187 | /* This is an abort. Check for permission fault */ |
1187 | and x2, x1, #ESR_EL2_FSC_TYPE | 1188 | and x2, x1, #ESR_ELx_FSC_TYPE |
1188 | cmp x2, #FSC_PERM | 1189 | cmp x2, #FSC_PERM |
1189 | b.ne 1f // Not a permission fault | 1190 | b.ne 1f // Not a permission fault |
1190 | 1191 | ||
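The cmp/ccmp pair in el1_trap tests two exception classes with a single conditional branch: if the first compare already matched DABT_LOW, ccmp's "ne" condition fails and the flags are forced to #4 (Z set), so the following b.ne falls through into the abort path; otherwise ccmp really compares against IABT_LOW. A C model of the test, assuming the ESR_ELx_* constants from asm/esr.h:

	/* C equivalent of the cmp/ccmp/b.ne sequence above. */
	static bool is_guest_abort(unsigned long ec)
	{
		return ec == ESR_ELx_EC_DABT_LOW ||
		       ec == ESR_ELx_EC_IABT_LOW;
	}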
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 81a02a8762b0..f02530e726f6 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c | |||
@@ -118,27 +118,27 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr | |||
118 | * instruction set. Report an external synchronous abort. | 118 | * instruction set. Report an external synchronous abort. |
119 | */ | 119 | */ |
120 | if (kvm_vcpu_trap_il_is32bit(vcpu)) | 120 | if (kvm_vcpu_trap_il_is32bit(vcpu)) |
121 | esr |= ESR_EL1_IL; | 121 | esr |= ESR_ELx_IL; |
122 | 122 | ||
123 | /* | 123 | /* |
124 | * Here, the guest runs in AArch64 mode when in EL1. If we get | 124 | * Here, the guest runs in AArch64 mode when in EL1. If we get |
125 | * an AArch32 fault, it means we managed to trap an EL0 fault. | 125 | * an AArch32 fault, it means we managed to trap an EL0 fault. |
126 | */ | 126 | */ |
127 | if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t) | 127 | if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t) |
128 | esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT); | 128 | esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT); |
129 | else | 129 | else |
130 | esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT); | 130 | esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT); |
131 | 131 | ||
132 | if (!is_iabt) | 132 | if (!is_iabt) |
133 | esr |= ESR_EL1_EC_DABT_EL0; | 133 | esr |= ESR_ELx_EC_DABT_LOW; |
134 | 134 | ||
135 | vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT; | 135 | vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT; |
136 | } | 136 | } |
137 | 137 | ||
138 | static void inject_undef64(struct kvm_vcpu *vcpu) | 138 | static void inject_undef64(struct kvm_vcpu *vcpu) |
139 | { | 139 | { |
140 | unsigned long cpsr = *vcpu_cpsr(vcpu); | 140 | unsigned long cpsr = *vcpu_cpsr(vcpu); |
141 | u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT); | 141 | u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT); |
142 | 142 | ||
143 | *vcpu_spsr(vcpu) = cpsr; | 143 | *vcpu_spsr(vcpu) = cpsr; |
144 | *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); | 144 | *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); |
@@ -151,7 +151,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu) | |||
151 | * set. | 151 | * set. |
152 | */ | 152 | */ |
153 | if (kvm_vcpu_trap_il_is32bit(vcpu)) | 153 | if (kvm_vcpu_trap_il_is32bit(vcpu)) |
154 | esr |= ESR_EL1_IL; | 154 | esr |= ESR_ELx_IL; |
155 | 155 | ||
156 | vcpu_sys_reg(vcpu, ESR_EL1) = esr; | 156 | vcpu_sys_reg(vcpu, ESR_EL1) = esr; |
157 | } | 157 | } |
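inject_abt64() fabricates the ESR_EL1 value the guest will observe once the fault is injected: the EC field selects an instruction or data abort at the appropriate exception level, IL is set when the trapped instruction was 32 bits wide, and the fault status code reports a synchronous external abort. Reduced to the instruction-abort case as a sketch (names as renamed by this series):

	static u32 make_iabt_esr(bool il_is_32bit, bool fault_from_el0)
	{
		u32 esr = 0;

		if (il_is_32bit)	/* trapped instruction was 32-bit */
			esr |= ESR_ELx_IL;

		esr |= (fault_from_el0 ? ESR_ELx_EC_IABT_LOW
				       : ESR_ELx_EC_IABT_CUR)
						<< ESR_ELx_EC_SHIFT;

		/* FSC: synchronous external abort */
		return esr | ESR_ELx_FSC_EXTABT;
	}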
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index f31e8bb2bc5b..b96afdf6cee4 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -20,17 +20,20 @@ | |||
20 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 20 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/kvm_host.h> | 23 | #include <linux/kvm_host.h> |
24 | #include <linux/mm.h> | ||
25 | #include <linux/uaccess.h> | 25 | #include <linux/uaccess.h> |
26 | #include <asm/kvm_arm.h> | 26 | |
27 | #include <asm/kvm_host.h> | ||
28 | #include <asm/kvm_emulate.h> | ||
29 | #include <asm/kvm_coproc.h> | ||
30 | #include <asm/kvm_mmu.h> | ||
31 | #include <asm/cacheflush.h> | 27 | #include <asm/cacheflush.h> |
32 | #include <asm/cputype.h> | 28 | #include <asm/cputype.h> |
33 | #include <asm/debug-monitors.h> | 29 | #include <asm/debug-monitors.h> |
30 | #include <asm/esr.h> | ||
31 | #include <asm/kvm_arm.h> | ||
32 | #include <asm/kvm_coproc.h> | ||
33 | #include <asm/kvm_emulate.h> | ||
34 | #include <asm/kvm_host.h> | ||
35 | #include <asm/kvm_mmu.h> | ||
36 | |||
34 | #include <trace/events/kvm.h> | 37 | #include <trace/events/kvm.h> |
35 | 38 | ||
36 | #include "sys_regs.h" | 39 | #include "sys_regs.h" |
@@ -760,12 +763,12 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu, | |||
760 | int cp; | 763 | int cp; |
761 | 764 | ||
762 | switch (hsr_ec) { | 765 | switch (hsr_ec) { |
763 | case ESR_EL2_EC_CP15_32: | 766 | case ESR_ELx_EC_CP15_32: |
764 | case ESR_EL2_EC_CP15_64: | 767 | case ESR_ELx_EC_CP15_64: |
765 | cp = 15; | 768 | cp = 15; |
766 | break; | 769 | break; |
767 | case ESR_EL2_EC_CP14_MR: | 770 | case ESR_ELx_EC_CP14_MR: |
768 | case ESR_EL2_EC_CP14_64: | 771 | case ESR_ELx_EC_CP14_64: |
769 | cp = 14; | 772 | cp = 14; |
770 | break; | 773 | break; |
771 | default: | 774 | default: |
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index d92094203913..0a24b9b8c698 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
@@ -134,16 +134,17 @@ static void __dma_free_coherent(struct device *dev, size_t size, | |||
134 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); | 134 | swiotlb_free_coherent(dev, size, vaddr, dma_handle); |
135 | } | 135 | } |
136 | 136 | ||
137 | static void *__dma_alloc_noncoherent(struct device *dev, size_t size, | 137 | static void *__dma_alloc(struct device *dev, size_t size, |
138 | dma_addr_t *dma_handle, gfp_t flags, | 138 | dma_addr_t *dma_handle, gfp_t flags, |
139 | struct dma_attrs *attrs) | 139 | struct dma_attrs *attrs) |
140 | { | 140 | { |
141 | struct page *page; | 141 | struct page *page; |
142 | void *ptr, *coherent_ptr; | 142 | void *ptr, *coherent_ptr; |
143 | bool coherent = is_device_dma_coherent(dev); | ||
143 | 144 | ||
144 | size = PAGE_ALIGN(size); | 145 | size = PAGE_ALIGN(size); |
145 | 146 | ||
146 | if (!(flags & __GFP_WAIT)) { | 147 | if (!coherent && !(flags & __GFP_WAIT)) { |
147 | struct page *page = NULL; | 148 | struct page *page = NULL; |
148 | void *addr = __alloc_from_pool(size, &page); | 149 | void *addr = __alloc_from_pool(size, &page); |
149 | 150 | ||
@@ -151,13 +152,16 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size, | |||
151 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); | 152 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); |
152 | 153 | ||
153 | return addr; | 154 | return addr; |
154 | |||
155 | } | 155 | } |
156 | 156 | ||
157 | ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs); | 157 | ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs); |
158 | if (!ptr) | 158 | if (!ptr) |
159 | goto no_mem; | 159 | goto no_mem; |
160 | 160 | ||
161 | /* no need for non-cacheable mapping if coherent */ | ||
162 | if (coherent) | ||
163 | return ptr; | ||
164 | |||
161 | /* remove any dirty cache lines on the kernel alias */ | 165 | /* remove any dirty cache lines on the kernel alias */ |
162 | __dma_flush_range(ptr, ptr + size); | 166 | __dma_flush_range(ptr, ptr + size); |
163 | 167 | ||
@@ -179,15 +183,17 @@ no_mem: | |||
179 | return NULL; | 183 | return NULL; |
180 | } | 184 | } |
181 | 185 | ||
182 | static void __dma_free_noncoherent(struct device *dev, size_t size, | 186 | static void __dma_free(struct device *dev, size_t size, |
183 | void *vaddr, dma_addr_t dma_handle, | 187 | void *vaddr, dma_addr_t dma_handle, |
184 | struct dma_attrs *attrs) | 188 | struct dma_attrs *attrs) |
185 | { | 189 | { |
186 | void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); | 190 | void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); |
187 | 191 | ||
188 | if (__free_from_pool(vaddr, size)) | 192 | if (!is_device_dma_coherent(dev)) { |
189 | return; | 193 | if (__free_from_pool(vaddr, size)) |
190 | vunmap(vaddr); | 194 | return; |
195 | vunmap(vaddr); | ||
196 | } | ||
191 | __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs); | 197 | __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs); |
192 | } | 198 | } |
193 | 199 | ||
@@ -199,7 +205,8 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, | |||
199 | dma_addr_t dev_addr; | 205 | dma_addr_t dev_addr; |
200 | 206 | ||
201 | dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs); | 207 | dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs); |
202 | __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); | 208 | if (!is_device_dma_coherent(dev)) |
209 | __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); | ||
203 | 210 | ||
204 | return dev_addr; | 211 | return dev_addr; |
205 | } | 212 | } |
@@ -209,7 +216,8 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, | |||
209 | size_t size, enum dma_data_direction dir, | 216 | size_t size, enum dma_data_direction dir, |
210 | struct dma_attrs *attrs) | 217 | struct dma_attrs *attrs) |
211 | { | 218 | { |
212 | __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); | 219 | if (!is_device_dma_coherent(dev)) |
220 | __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); | ||
213 | swiotlb_unmap_page(dev, dev_addr, size, dir, attrs); | 221 | swiotlb_unmap_page(dev, dev_addr, size, dir, attrs); |
214 | } | 222 | } |
215 | 223 | ||
@@ -221,9 +229,10 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
221 | int i, ret; | 229 | int i, ret; |
222 | 230 | ||
223 | ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs); | 231 | ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs); |
224 | for_each_sg(sgl, sg, ret, i) | 232 | if (!is_device_dma_coherent(dev)) |
225 | __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), | 233 | for_each_sg(sgl, sg, ret, i) |
226 | sg->length, dir); | 234 | __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), |
235 | sg->length, dir); | ||
227 | 236 | ||
228 | return ret; | 237 | return ret; |
229 | } | 238 | } |
@@ -236,9 +245,10 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev, | |||
236 | struct scatterlist *sg; | 245 | struct scatterlist *sg; |
237 | int i; | 246 | int i; |
238 | 247 | ||
239 | for_each_sg(sgl, sg, nelems, i) | 248 | if (!is_device_dma_coherent(dev)) |
240 | __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), | 249 | for_each_sg(sgl, sg, nelems, i) |
241 | sg->length, dir); | 250 | __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), |
251 | sg->length, dir); | ||
242 | swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs); | 252 | swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs); |
243 | } | 253 | } |
244 | 254 | ||
@@ -246,7 +256,8 @@ static void __swiotlb_sync_single_for_cpu(struct device *dev, | |||
246 | dma_addr_t dev_addr, size_t size, | 256 | dma_addr_t dev_addr, size_t size, |
247 | enum dma_data_direction dir) | 257 | enum dma_data_direction dir) |
248 | { | 258 | { |
249 | __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); | 259 | if (!is_device_dma_coherent(dev)) |
260 | __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); | ||
250 | swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir); | 261 | swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir); |
251 | } | 262 | } |
252 | 263 | ||
@@ -255,7 +266,8 @@ static void __swiotlb_sync_single_for_device(struct device *dev, | |||
255 | enum dma_data_direction dir) | 266 | enum dma_data_direction dir) |
256 | { | 267 | { |
257 | swiotlb_sync_single_for_device(dev, dev_addr, size, dir); | 268 | swiotlb_sync_single_for_device(dev, dev_addr, size, dir); |
258 | __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); | 269 | if (!is_device_dma_coherent(dev)) |
270 | __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); | ||
259 | } | 271 | } |
260 | 272 | ||
261 | static void __swiotlb_sync_sg_for_cpu(struct device *dev, | 273 | static void __swiotlb_sync_sg_for_cpu(struct device *dev, |
@@ -265,9 +277,10 @@ static void __swiotlb_sync_sg_for_cpu(struct device *dev, | |||
265 | struct scatterlist *sg; | 277 | struct scatterlist *sg; |
266 | int i; | 278 | int i; |
267 | 279 | ||
268 | for_each_sg(sgl, sg, nelems, i) | 280 | if (!is_device_dma_coherent(dev)) |
269 | __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), | 281 | for_each_sg(sgl, sg, nelems, i) |
270 | sg->length, dir); | 282 | __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), |
283 | sg->length, dir); | ||
271 | swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir); | 284 | swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir); |
272 | } | 285 | } |
273 | 286 | ||
@@ -279,9 +292,10 @@ static void __swiotlb_sync_sg_for_device(struct device *dev, | |||
279 | int i; | 292 | int i; |
280 | 293 | ||
281 | swiotlb_sync_sg_for_device(dev, sgl, nelems, dir); | 294 | swiotlb_sync_sg_for_device(dev, sgl, nelems, dir); |
282 | for_each_sg(sgl, sg, nelems, i) | 295 | if (!is_device_dma_coherent(dev)) |
283 | __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), | 296 | for_each_sg(sgl, sg, nelems, i) |
284 | sg->length, dir); | 297 | __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), |
298 | sg->length, dir); | ||
285 | } | 299 | } |
286 | 300 | ||
287 | /* vma->vm_page_prot must be set appropriately before calling this function */ | 301 | /* vma->vm_page_prot must be set appropriately before calling this function */ |
@@ -308,28 +322,20 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | |||
308 | return ret; | 322 | return ret; |
309 | } | 323 | } |
310 | 324 | ||
311 | static int __swiotlb_mmap_noncoherent(struct device *dev, | 325 | static int __swiotlb_mmap(struct device *dev, |
312 | struct vm_area_struct *vma, | 326 | struct vm_area_struct *vma, |
313 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 327 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
314 | struct dma_attrs *attrs) | 328 | struct dma_attrs *attrs) |
315 | { | ||
316 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false); | ||
317 | return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
318 | } | ||
319 | |||
320 | static int __swiotlb_mmap_coherent(struct device *dev, | ||
321 | struct vm_area_struct *vma, | ||
322 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | ||
323 | struct dma_attrs *attrs) | ||
324 | { | 329 | { |
325 | /* Just use whatever page_prot attributes were specified */ | 330 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, |
331 | is_device_dma_coherent(dev)); | ||
326 | return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); | 332 | return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); |
327 | } | 333 | } |
328 | 334 | ||
329 | struct dma_map_ops noncoherent_swiotlb_dma_ops = { | 335 | static struct dma_map_ops swiotlb_dma_ops = { |
330 | .alloc = __dma_alloc_noncoherent, | 336 | .alloc = __dma_alloc, |
331 | .free = __dma_free_noncoherent, | 337 | .free = __dma_free, |
332 | .mmap = __swiotlb_mmap_noncoherent, | 338 | .mmap = __swiotlb_mmap, |
333 | .map_page = __swiotlb_map_page, | 339 | .map_page = __swiotlb_map_page, |
334 | .unmap_page = __swiotlb_unmap_page, | 340 | .unmap_page = __swiotlb_unmap_page, |
335 | .map_sg = __swiotlb_map_sg_attrs, | 341 | .map_sg = __swiotlb_map_sg_attrs, |
@@ -341,24 +347,6 @@ struct dma_map_ops noncoherent_swiotlb_dma_ops = { | |||
341 | .dma_supported = swiotlb_dma_supported, | 347 | .dma_supported = swiotlb_dma_supported, |
342 | .mapping_error = swiotlb_dma_mapping_error, | 348 | .mapping_error = swiotlb_dma_mapping_error, |
343 | }; | 349 | }; |
344 | EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops); | ||
345 | |||
346 | struct dma_map_ops coherent_swiotlb_dma_ops = { | ||
347 | .alloc = __dma_alloc_coherent, | ||
348 | .free = __dma_free_coherent, | ||
349 | .mmap = __swiotlb_mmap_coherent, | ||
350 | .map_page = swiotlb_map_page, | ||
351 | .unmap_page = swiotlb_unmap_page, | ||
352 | .map_sg = swiotlb_map_sg_attrs, | ||
353 | .unmap_sg = swiotlb_unmap_sg_attrs, | ||
354 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, | ||
355 | .sync_single_for_device = swiotlb_sync_single_for_device, | ||
356 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | ||
357 | .sync_sg_for_device = swiotlb_sync_sg_for_device, | ||
358 | .dma_supported = swiotlb_dma_supported, | ||
359 | .mapping_error = swiotlb_dma_mapping_error, | ||
360 | }; | ||
361 | EXPORT_SYMBOL(coherent_swiotlb_dma_ops); | ||
362 | 350 | ||
363 | extern int swiotlb_late_init_with_default_size(size_t default_size); | 351 | extern int swiotlb_late_init_with_default_size(size_t default_size); |
364 | 352 | ||
@@ -427,7 +415,7 @@ static int __init swiotlb_late_init(void) | |||
427 | { | 415 | { |
428 | size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); | 416 | size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); |
429 | 417 | ||
430 | dma_ops = &noncoherent_swiotlb_dma_ops; | 418 | dma_ops = &swiotlb_dma_ops; |
431 | 419 | ||
432 | return swiotlb_late_init_with_default_size(swiotlb_size); | 420 | return swiotlb_late_init_with_default_size(swiotlb_size); |
433 | } | 421 | } |
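Folding the two ops structures into one turns coherency into a per-device runtime property rather than a choice of ops table: each hook consults is_device_dma_coherent() and skips the cache maintenance for coherent masters. The flag itself typically comes from firmware; a sketch of the glue a bus layer might use, assuming the existing of_dma_is_coherent() helper (set_coherent() is hypothetical — arm64 actually records the flag in dev->archdata for is_device_dma_coherent() to read):

	#include <linux/device.h>
	#include <linux/of.h>

	/* Hypothetical glue: derive per-device coherency from the
	 * "dma-coherent" devicetree property before the device probes. */
	static void sketch_configure_dma(struct device *dev)
	{
		bool coherent = dev->of_node &&
				of_dma_is_coherent(dev->of_node);

		set_coherent(dev, coherent);	/* hypothetical helper */
	}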
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index d54dc9ac4b70..74c256744b25 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c | |||
@@ -14,14 +14,18 @@ | |||
14 | * of the License. | 14 | * of the License. |
15 | */ | 15 | */ |
16 | #include <linux/debugfs.h> | 16 | #include <linux/debugfs.h> |
17 | #include <linux/errno.h> | ||
17 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
18 | #include <linux/io.h> | 19 | #include <linux/io.h> |
20 | #include <linux/init.h> | ||
19 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
20 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
21 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
22 | 24 | ||
23 | #include <asm/fixmap.h> | 25 | #include <asm/fixmap.h> |
26 | #include <asm/memory.h> | ||
24 | #include <asm/pgtable.h> | 27 | #include <asm/pgtable.h> |
28 | #include <asm/pgtable-hwdef.h> | ||
25 | 29 | ||
26 | #define LOWEST_ADDR (UL(0xffffffffffffffff) << VA_BITS) | 30 | #define LOWEST_ADDR (UL(0xffffffffffffffff) << VA_BITS) |
27 | 31 | ||
@@ -37,10 +41,10 @@ enum address_markers_idx { | |||
37 | VMEMMAP_START_NR, | 41 | VMEMMAP_START_NR, |
38 | VMEMMAP_END_NR, | 42 | VMEMMAP_END_NR, |
39 | #endif | 43 | #endif |
40 | PCI_START_NR, | ||
41 | PCI_END_NR, | ||
42 | FIXADDR_START_NR, | 44 | FIXADDR_START_NR, |
43 | FIXADDR_END_NR, | 45 | FIXADDR_END_NR, |
46 | PCI_START_NR, | ||
47 | PCI_END_NR, | ||
44 | MODULES_START_NR, | 48 | MODULES_START_NR, |
45 | MODULES_END_NR, | 49 | MODULES_END_NR, |
46 | KERNEL_SPACE_NR, | 50 | KERNEL_SPACE_NR, |
@@ -53,10 +57,10 @@ static struct addr_marker address_markers[] = { | |||
53 | { 0, "vmemmap start" }, | 57 | { 0, "vmemmap start" }, |
54 | { 0, "vmemmap end" }, | 58 | { 0, "vmemmap end" }, |
55 | #endif | 59 | #endif |
56 | { (unsigned long) PCI_IOBASE, "PCI I/O start" }, | ||
57 | { (unsigned long) PCI_IOBASE + SZ_16M, "PCI I/O end" }, | ||
58 | { FIXADDR_START, "Fixmap start" }, | 60 | { FIXADDR_START, "Fixmap start" }, |
59 | { FIXADDR_TOP, "Fixmap end" }, | 61 | { FIXADDR_TOP, "Fixmap end" }, |
62 | { PCI_IO_START, "PCI I/O start" }, | ||
63 | { PCI_IO_END, "PCI I/O end" }, | ||
60 | { MODULES_VADDR, "Modules start" }, | 64 | { MODULES_VADDR, "Modules start" }, |
61 | { MODULES_END, "Modules end" }, | 65 | { MODULES_END, "Modules end" }, |
62 | { PAGE_OFFSET, "Kernel Mapping" }, | 66 | { PAGE_OFFSET, "Kernel Mapping" }, |
@@ -246,10 +250,12 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) | |||
246 | 250 | ||
247 | for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { | 251 | for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { |
248 | addr = start + i * PMD_SIZE; | 252 | addr = start + i * PMD_SIZE; |
249 | if (pmd_none(*pmd) || pmd_sect(*pmd) || pmd_bad(*pmd)) | 253 | if (pmd_none(*pmd) || pmd_sect(*pmd)) { |
250 | note_page(st, addr, 3, pmd_val(*pmd)); | 254 | note_page(st, addr, 3, pmd_val(*pmd)); |
251 | else | 255 | } else { |
256 | BUG_ON(pmd_bad(*pmd)); | ||
252 | walk_pte(st, pmd, addr); | 257 | walk_pte(st, pmd, addr); |
258 | } | ||
253 | } | 259 | } |
254 | } | 260 | } |
255 | 261 | ||
@@ -261,10 +267,12 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) | |||
261 | 267 | ||
262 | for (i = 0; i < PTRS_PER_PUD; i++, pud++) { | 268 | for (i = 0; i < PTRS_PER_PUD; i++, pud++) { |
263 | addr = start + i * PUD_SIZE; | 269 | addr = start + i * PUD_SIZE; |
264 | if (pud_none(*pud) || pud_sect(*pud) || pud_bad(*pud)) | 270 | if (pud_none(*pud) || pud_sect(*pud)) { |
265 | note_page(st, addr, 2, pud_val(*pud)); | 271 | note_page(st, addr, 2, pud_val(*pud)); |
266 | else | 272 | } else { |
273 | BUG_ON(pud_bad(*pud)); | ||
267 | walk_pmd(st, pud, addr); | 274 | walk_pmd(st, pud, addr); |
275 | } | ||
268 | } | 276 | } |
269 | } | 277 | } |
270 | 278 | ||
@@ -276,10 +284,12 @@ static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long st | |||
276 | 284 | ||
277 | for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { | 285 | for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { |
278 | addr = start + i * PGDIR_SIZE; | 286 | addr = start + i * PGDIR_SIZE; |
279 | if (pgd_none(*pgd) || pgd_bad(*pgd)) | 287 | if (pgd_none(*pgd)) { |
280 | note_page(st, addr, 1, pgd_val(*pgd)); | 288 | note_page(st, addr, 1, pgd_val(*pgd)); |
281 | else | 289 | } else { |
290 | BUG_ON(pgd_bad(*pgd)); | ||
282 | walk_pud(st, pgd, addr); | 291 | walk_pud(st, pgd, addr); |
292 | } | ||
283 | } | 293 | } |
284 | } | 294 | } |
285 | 295 | ||
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index c11cd27ca8f5..96da13167d4a 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
@@ -219,7 +219,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, | |||
219 | 219 | ||
220 | if (esr & ESR_LNX_EXEC) { | 220 | if (esr & ESR_LNX_EXEC) { |
221 | vm_flags = VM_EXEC; | 221 | vm_flags = VM_EXEC; |
222 | } else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) { | 222 | } else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) { |
223 | vm_flags = VM_WRITE; | 223 | vm_flags = VM_WRITE; |
224 | mm_flags |= FAULT_FLAG_WRITE; | 224 | mm_flags |= FAULT_FLAG_WRITE; |
225 | } | 225 | } |
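Only the spelling changes here, and the logic it preserves is worth noting: a data abort counts as a write fault when WnR is set, unless CM is also set, because cache maintenance operations report WnR=1 without being real writes (and must not demand VM_WRITE on the VMA). Spelled out with the architectural bit positions of the data-abort ISS:

	#include <stdbool.h>
	#include <stdint.h>

	#define WNR_BIT (1U << 6)	/* models ESR_ELx_WNR: write, not read */
	#define CM_BIT  (1U << 8)	/* models ESR_ELx_CM: cache maintenance */

	static bool abort_is_write(uint32_t esr)
	{
		return (esr & WNR_BIT) && !(esr & CM_BIT);
	}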
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index c95464a33f36..71145f952070 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/efi.h> | 35 | #include <linux/efi.h> |
36 | 36 | ||
37 | #include <asm/fixmap.h> | 37 | #include <asm/fixmap.h> |
38 | #include <asm/memory.h> | ||
38 | #include <asm/sections.h> | 39 | #include <asm/sections.h> |
39 | #include <asm/setup.h> | 40 | #include <asm/setup.h> |
40 | #include <asm/sizes.h> | 41 | #include <asm/sizes.h> |
@@ -136,10 +137,29 @@ static void arm64_memory_present(void) | |||
136 | } | 137 | } |
137 | #endif | 138 | #endif |
138 | 139 | ||
140 | static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX; | ||
141 | |||
142 | /* | ||
143 | * Limit the memory size that was specified via FDT. | ||
144 | */ | ||
145 | static int __init early_mem(char *p) | ||
146 | { | ||
147 | if (!p) | ||
148 | return 1; | ||
149 | |||
150 | memory_limit = memparse(p, &p) & PAGE_MASK; | ||
151 | pr_notice("Memory limited to %lldMB\n", memory_limit >> 20); | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | early_param("mem", early_mem); | ||
156 | |||
139 | void __init arm64_memblock_init(void) | 157 | void __init arm64_memblock_init(void) |
140 | { | 158 | { |
141 | phys_addr_t dma_phys_limit = 0; | 159 | phys_addr_t dma_phys_limit = 0; |
142 | 160 | ||
161 | memblock_enforce_memory_limit(memory_limit); | ||
162 | |||
143 | /* | 163 | /* |
144 | * Register the kernel text, kernel data, initrd, and initial | 164 | * Register the kernel text, kernel data, initrd, and initial |
145 | * pagetables with memblock. | 165 | * pagetables with memblock. |
@@ -277,8 +297,8 @@ void __init mem_init(void) | |||
277 | " vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n" | 297 | " vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n" |
278 | " 0x%16lx - 0x%16lx (%6ld MB actual)\n" | 298 | " 0x%16lx - 0x%16lx (%6ld MB actual)\n" |
279 | #endif | 299 | #endif |
280 | " PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n" | ||
281 | " fixed : 0x%16lx - 0x%16lx (%6ld KB)\n" | 300 | " fixed : 0x%16lx - 0x%16lx (%6ld KB)\n" |
301 | " PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n" | ||
282 | " modules : 0x%16lx - 0x%16lx (%6ld MB)\n" | 302 | " modules : 0x%16lx - 0x%16lx (%6ld MB)\n" |
283 | " memory : 0x%16lx - 0x%16lx (%6ld MB)\n" | 303 | " memory : 0x%16lx - 0x%16lx (%6ld MB)\n" |
284 | " .init : 0x%p" " - 0x%p" " (%6ld KB)\n" | 304 | " .init : 0x%p" " - 0x%p" " (%6ld KB)\n" |
@@ -291,8 +311,8 @@ void __init mem_init(void) | |||
291 | MLM((unsigned long)virt_to_page(PAGE_OFFSET), | 311 | MLM((unsigned long)virt_to_page(PAGE_OFFSET), |
292 | (unsigned long)virt_to_page(high_memory)), | 312 | (unsigned long)virt_to_page(high_memory)), |
293 | #endif | 313 | #endif |
294 | MLM((unsigned long)PCI_IOBASE, (unsigned long)PCI_IOBASE + SZ_16M), | ||
295 | MLK(FIXADDR_START, FIXADDR_TOP), | 314 | MLK(FIXADDR_START, FIXADDR_TOP), |
315 | MLM(PCI_IO_START, PCI_IO_END), | ||
296 | MLM(MODULES_VADDR, MODULES_END), | 316 | MLM(MODULES_VADDR, MODULES_END), |
297 | MLM(PAGE_OFFSET, (unsigned long)high_memory), | 317 | MLM(PAGE_OFFSET, (unsigned long)high_memory), |
298 | MLK_ROUNDUP(__init_begin, __init_end), | 318 | MLK_ROUNDUP(__init_begin, __init_end), |
@@ -325,6 +345,7 @@ void __init mem_init(void) | |||
325 | 345 | ||
326 | void free_initmem(void) | 346 | void free_initmem(void) |
327 | { | 347 | { |
348 | fixup_init(); | ||
328 | free_initmem_default(0); | 349 | free_initmem_default(0); |
329 | free_alternatives_memory(); | 350 | free_alternatives_memory(); |
330 | } | 351 | } |
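Relocating early_mem() next to arm64_memblock_init() also changes when the limit takes effect: the early_param handler now merely parses and stashes the value, and memblock_enforce_memory_limit() runs at one well-defined point after memblock is populated (the ULLONG_MAX default trims nothing when mem= is absent). memparse() accepts the usual K/M/G suffixes; a userspace model of the parsing step:

	#include <stdio.h>
	#include <stdlib.h>

	/* Userspace model of the kernel's memparse() suffix handling
	 * (the real helper also knows T/P/E). */
	static unsigned long long memparse_sketch(const char *p)
	{
		char *end;
		unsigned long long v = strtoull(p, &end, 0);

		switch (*end) {
		case 'G': case 'g': v <<= 10;	/* fall through */
		case 'M': case 'm': v <<= 10;	/* fall through */
		case 'K': case 'k': v <<= 10;
		}
		return v;
	}

	int main(void)
	{
		/* mem=512M on the command line yields a 512 MiB limit. */
		printf("%llu\n", memparse_sketch("512M"));
		return 0;
	}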
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c index cbb99c8f1e04..01e88c8bcab0 100644 --- a/arch/arm64/mm/ioremap.c +++ b/arch/arm64/mm/ioremap.c | |||
@@ -62,6 +62,7 @@ static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size, | |||
62 | if (!area) | 62 | if (!area) |
63 | return NULL; | 63 | return NULL; |
64 | addr = (unsigned long)area->addr; | 64 | addr = (unsigned long)area->addr; |
65 | area->phys_addr = phys_addr; | ||
65 | 66 | ||
66 | err = ioremap_page_range(addr, addr + size, phys_addr, prot); | 67 | err = ioremap_page_range(addr, addr + size, phys_addr, prot); |
67 | if (err) { | 68 | if (err) { |
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h index 50c3351df9c7..ef47d99b5cbc 100644 --- a/arch/arm64/mm/mm.h +++ b/arch/arm64/mm/mm.h | |||
@@ -1 +1,3 @@ | |||
1 | extern void __init bootmem_init(void); | 1 | extern void __init bootmem_init(void); |
2 | |||
3 | void fixup_init(void); | ||
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 6032f3e3056a..c6daaf6c6f97 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -26,6 +26,8 @@ | |||
26 | #include <linux/memblock.h> | 26 | #include <linux/memblock.h> |
27 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
28 | #include <linux/io.h> | 28 | #include <linux/io.h> |
29 | #include <linux/slab.h> | ||
30 | #include <linux/stop_machine.h> | ||
29 | 31 | ||
30 | #include <asm/cputype.h> | 32 | #include <asm/cputype.h> |
31 | #include <asm/fixmap.h> | 33 | #include <asm/fixmap.h> |
@@ -45,80 +47,6 @@ | |||
45 | struct page *empty_zero_page; | 47 | struct page *empty_zero_page; |
46 | EXPORT_SYMBOL(empty_zero_page); | 48 | EXPORT_SYMBOL(empty_zero_page); |
47 | 49 | ||
48 | struct cachepolicy { | ||
49 | const char policy[16]; | ||
50 | u64 mair; | ||
51 | u64 tcr; | ||
52 | }; | ||
53 | |||
54 | static struct cachepolicy cache_policies[] __initdata = { | ||
55 | { | ||
56 | .policy = "uncached", | ||
57 | .mair = 0x44, /* inner, outer non-cacheable */ | ||
58 | .tcr = TCR_IRGN_NC | TCR_ORGN_NC, | ||
59 | }, { | ||
60 | .policy = "writethrough", | ||
61 | .mair = 0xaa, /* inner, outer write-through, read-allocate */ | ||
62 | .tcr = TCR_IRGN_WT | TCR_ORGN_WT, | ||
63 | }, { | ||
64 | .policy = "writeback", | ||
65 | .mair = 0xee, /* inner, outer write-back, read-allocate */ | ||
66 | .tcr = TCR_IRGN_WBnWA | TCR_ORGN_WBnWA, | ||
67 | } | ||
68 | }; | ||
69 | |||
70 | /* | ||
71 | * These are useful for identifying cache coherency problems by allowing the | ||
72 | * cache or the cache and writebuffer to be turned off. It changes the Normal | ||
73 | * memory caching attributes in the MAIR_EL1 register. | ||
74 | */ | ||
75 | static int __init early_cachepolicy(char *p) | ||
76 | { | ||
77 | int i; | ||
78 | u64 tmp; | ||
79 | |||
80 | for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { | ||
81 | int len = strlen(cache_policies[i].policy); | ||
82 | |||
83 | if (memcmp(p, cache_policies[i].policy, len) == 0) | ||
84 | break; | ||
85 | } | ||
86 | if (i == ARRAY_SIZE(cache_policies)) { | ||
87 | pr_err("ERROR: unknown or unsupported cache policy: %s\n", p); | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | flush_cache_all(); | ||
92 | |||
93 | /* | ||
94 | * Modify MT_NORMAL attributes in MAIR_EL1. | ||
95 | */ | ||
96 | asm volatile( | ||
97 | " mrs %0, mair_el1\n" | ||
98 | " bfi %0, %1, %2, #8\n" | ||
99 | " msr mair_el1, %0\n" | ||
100 | " isb\n" | ||
101 | : "=&r" (tmp) | ||
102 | : "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8)); | ||
103 | |||
104 | /* | ||
105 | * Modify TCR PTW cacheability attributes. | ||
106 | */ | ||
107 | asm volatile( | ||
108 | " mrs %0, tcr_el1\n" | ||
109 | " bic %0, %0, %2\n" | ||
110 | " orr %0, %0, %1\n" | ||
111 | " msr tcr_el1, %0\n" | ||
112 | " isb\n" | ||
113 | : "=&r" (tmp) | ||
114 | : "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK)); | ||
115 | |||
116 | flush_cache_all(); | ||
117 | |||
118 | return 0; | ||
119 | } | ||
120 | early_param("cachepolicy", early_cachepolicy); | ||
121 | |||
122 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | 50 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
123 | unsigned long size, pgprot_t vma_prot) | 51 | unsigned long size, pgprot_t vma_prot) |
124 | { | 52 | { |
@@ -133,19 +61,42 @@ EXPORT_SYMBOL(phys_mem_access_prot); | |||
133 | static void __init *early_alloc(unsigned long sz) | 61 | static void __init *early_alloc(unsigned long sz) |
134 | { | 62 | { |
135 | void *ptr = __va(memblock_alloc(sz, sz)); | 63 | void *ptr = __va(memblock_alloc(sz, sz)); |
64 | BUG_ON(!ptr); | ||
136 | memset(ptr, 0, sz); | 65 | memset(ptr, 0, sz); |
137 | return ptr; | 66 | return ptr; |
138 | } | 67 | } |
139 | 68 | ||
140 | static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, | 69 | /* |
70 | * remap a PMD into pages | ||
71 | */ | ||
72 | static void split_pmd(pmd_t *pmd, pte_t *pte) | ||
73 | { | ||
74 | unsigned long pfn = pmd_pfn(*pmd); | ||
75 | int i = 0; | ||
76 | |||
77 | do { | ||
78 | /* | ||
79 | * Need to have the least restrictive permissions available; | ||
80 | * permissions will be fixed up later. | ||
81 | */ | ||
82 | set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); | ||
83 | pfn++; | ||
84 | } while (pte++, i++, i < PTRS_PER_PTE); | ||
85 | } | ||
86 | |||
87 | static void alloc_init_pte(pmd_t *pmd, unsigned long addr, | ||
141 | unsigned long end, unsigned long pfn, | 88 | unsigned long end, unsigned long pfn, |
142 | pgprot_t prot) | 89 | pgprot_t prot, |
90 | void *(*alloc)(unsigned long size)) | ||
143 | { | 91 | { |
144 | pte_t *pte; | 92 | pte_t *pte; |
145 | 93 | ||
146 | if (pmd_none(*pmd)) { | 94 | if (pmd_none(*pmd) || pmd_sect(*pmd)) { |
147 | pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t)); | 95 | pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); |
96 | if (pmd_sect(*pmd)) | ||
97 | split_pmd(pmd, pte); | ||
148 | __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE); | 98 | __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE); |
99 | flush_tlb_all(); | ||
149 | } | 100 | } |
150 | BUG_ON(pmd_bad(*pmd)); | 101 | BUG_ON(pmd_bad(*pmd)); |
151 | 102 | ||
@@ -156,30 +107,42 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, | |||
156 | } while (pte++, addr += PAGE_SIZE, addr != end); | 107 | } while (pte++, addr += PAGE_SIZE, addr != end); |
157 | } | 108 | } |
158 | 109 | ||
159 | static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, | 110 | void split_pud(pud_t *old_pud, pmd_t *pmd) |
160 | unsigned long end, phys_addr_t phys, | 111 | { |
161 | int map_io) | 112 | unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT; |
113 | pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr); | ||
114 | int i = 0; | ||
115 | |||
116 | do { | ||
117 | set_pmd(pmd, __pmd(addr | prot)); | ||
118 | addr += PMD_SIZE; | ||
119 | } while (pmd++, i++, i < PTRS_PER_PMD); | ||
120 | } | ||
121 | |||
122 | static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, | ||
123 | unsigned long addr, unsigned long end, | ||
124 | phys_addr_t phys, pgprot_t prot, | ||
125 | void *(*alloc)(unsigned long size)) | ||
162 | { | 126 | { |
163 | pmd_t *pmd; | 127 | pmd_t *pmd; |
164 | unsigned long next; | 128 | unsigned long next; |
165 | pmdval_t prot_sect; | ||
166 | pgprot_t prot_pte; | ||
167 | |||
168 | if (map_io) { | ||
169 | prot_sect = PROT_SECT_DEVICE_nGnRE; | ||
170 | prot_pte = __pgprot(PROT_DEVICE_nGnRE); | ||
171 | } else { | ||
172 | prot_sect = PROT_SECT_NORMAL_EXEC; | ||
173 | prot_pte = PAGE_KERNEL_EXEC; | ||
174 | } | ||
175 | 129 | ||
176 | /* | 130 | /* |
177 | * Check for initial section mappings in the pgd/pud and remove them. | 131 | * Check for initial section mappings in the pgd/pud and remove them. |
178 | */ | 132 | */ |
179 | if (pud_none(*pud) || pud_bad(*pud)) { | 133 | if (pud_none(*pud) || pud_sect(*pud)) { |
180 | pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t)); | 134 | pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t)); |
181 | pud_populate(&init_mm, pud, pmd); | 135 | if (pud_sect(*pud)) { |
136 | /* | ||
137 | * Need the existing 1G of mappings to remain | ||
138 | * present while the PUD is repopulated. | ||
139 | */ | ||
140 | split_pud(pud, pmd); | ||
141 | } | ||
142 | pud_populate(mm, pud, pmd); | ||
143 | flush_tlb_all(); | ||
182 | } | 144 | } |
145 | BUG_ON(pud_bad(*pud)); | ||
183 | 146 | ||
184 | pmd = pmd_offset(pud, addr); | 147 | pmd = pmd_offset(pud, addr); |
185 | do { | 148 | do { |
@@ -187,31 +150,51 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, | |||
187 | /* try section mapping first */ | 150 | /* try section mapping first */ |
188 | if (((addr | next | phys) & ~SECTION_MASK) == 0) { | 151 | if (((addr | next | phys) & ~SECTION_MASK) == 0) { |
189 | pmd_t old_pmd = *pmd; | 152 | pmd_t old_pmd = *pmd; |
190 | set_pmd(pmd, __pmd(phys | prot_sect)); | 153 | set_pmd(pmd, __pmd(phys | |
154 | pgprot_val(mk_sect_prot(prot)))); | ||
191 | /* | 155 | /* |
192 | * Check for previous table entries created during | 156 | * Check for previous table entries created during |
193 | * boot (__create_page_tables) and flush them. | 157 | * boot (__create_page_tables) and flush them. |
194 | */ | 158 | */ |
195 | if (!pmd_none(old_pmd)) | 159 | if (!pmd_none(old_pmd)) { |
196 | flush_tlb_all(); | 160 | flush_tlb_all(); |
161 | if (pmd_table(old_pmd)) { | ||
162 | phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0)); | ||
163 | if (!WARN_ON_ONCE(slab_is_available())) | ||
164 | memblock_free(table, PAGE_SIZE); | ||
165 | } | ||
166 | } | ||
197 | } else { | 167 | } else { |
198 | alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), | 168 | alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), |
199 | prot_pte); | 169 | prot, alloc); |
200 | } | 170 | } |
201 | phys += next - addr; | 171 | phys += next - addr; |
202 | } while (pmd++, addr = next, addr != end); | 172 | } while (pmd++, addr = next, addr != end); |
203 | } | 173 | } |
204 | 174 | ||
205 | static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | 175 | static inline bool use_1G_block(unsigned long addr, unsigned long next, |
206 | unsigned long end, phys_addr_t phys, | 176 | unsigned long phys) |
207 | int map_io) | 177 | { |
178 | if (PAGE_SHIFT != 12) | ||
179 | return false; | ||
180 | |||
181 | if (((addr | next | phys) & ~PUD_MASK) != 0) | ||
182 | return false; | ||
183 | |||
184 | return true; | ||
185 | } | ||
186 | |||
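use_1G_block relies on a compact alignment idiom: OR-ing the start address, the end of the step, and the physical address together and masking with ~PUD_MASK yields zero only if all three are 1GB-aligned, i.e. the step covers one whole, properly aligned gigabyte. The same test with ~SECTION_MASK gates the 2MB section case above. A sketch, assuming a 4KB granule (30-bit PUD shift):

        #include <stdio.h>
        #include <stdint.h>

        #define PUD_SHIFT 30                       /* 1GB blocks, 4KB granule */
        #define PUD_SIZE  (1ULL << PUD_SHIFT)
        #define PUD_MASK  (~(PUD_SIZE - 1))

        /* Zero iff addr, next and phys are all 1GB-aligned: any stray low
         * bit in any of the three survives the OR and fails the mask test. */
        static int can_use_1G_block(uint64_t addr, uint64_t next, uint64_t phys)
        {
                return ((addr | next | phys) & ~PUD_MASK) == 0;
        }

        int main(void)
        {
                printf("%d\n", can_use_1G_block(0x40000000, 0x80000000,
                                                0xc0000000));   /* 1: all aligned */
                printf("%d\n", can_use_1G_block(0x40000000, 0x80000000,
                                                0xc0200000));   /* 0: phys unaligned */
                return 0;
        }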
187 | static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, | ||
188 | unsigned long addr, unsigned long end, | ||
189 | phys_addr_t phys, pgprot_t prot, | ||
190 | void *(*alloc)(unsigned long size)) | ||
208 | { | 191 | { |
209 | pud_t *pud; | 192 | pud_t *pud; |
210 | unsigned long next; | 193 | unsigned long next; |
211 | 194 | ||
212 | if (pgd_none(*pgd)) { | 195 | if (pgd_none(*pgd)) { |
213 | pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t)); | 196 | pud = alloc(PTRS_PER_PUD * sizeof(pud_t)); |
214 | pgd_populate(&init_mm, pgd, pud); | 197 | pgd_populate(mm, pgd, pud); |
215 | } | 198 | } |
216 | BUG_ON(pgd_bad(*pgd)); | 199 | BUG_ON(pgd_bad(*pgd)); |
217 | 200 | ||
@@ -222,10 +205,10 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | |||
222 | /* | 205 | /* |
223 | * For 4K granule only, attempt to put down a 1GB block | 206 | * For 4K granule only, attempt to put down a 1GB block |
224 | */ | 207 | */ |
225 | if (!map_io && (PAGE_SHIFT == 12) && | 208 | if (use_1G_block(addr, next, phys)) { |
226 | ((addr | next | phys) & ~PUD_MASK) == 0) { | ||
227 | pud_t old_pud = *pud; | 209 | pud_t old_pud = *pud; |
228 | set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC)); | 210 | set_pud(pud, __pud(phys | |
211 | pgprot_val(mk_sect_prot(prot)))); | ||
229 | 212 | ||
230 | /* | 213 | /* |
231 | * If we have an old value for a pud, it will | 214 | * If we have an old value for a pud, it will |
@@ -235,12 +218,15 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | |||
235 | * Look up the old pmd table and free it. | 218 | * Look up the old pmd table and free it. |
236 | */ | 219 | */ |
237 | if (!pud_none(old_pud)) { | 220 | if (!pud_none(old_pud)) { |
238 | phys_addr_t table = __pa(pmd_offset(&old_pud, 0)); | ||
239 | memblock_free(table, PAGE_SIZE); | ||
240 | flush_tlb_all(); | 221 | flush_tlb_all(); |
222 | if (pud_table(old_pud)) { | ||
223 | phys_addr_t table = __pa(pmd_offset(&old_pud, 0)); | ||
224 | if (!WARN_ON_ONCE(slab_is_available())) | ||
225 | memblock_free(table, PAGE_SIZE); | ||
226 | } | ||
241 | } | 227 | } |
242 | } else { | 228 | } else { |
243 | alloc_init_pmd(pud, addr, next, phys, map_io); | 229 | alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc); |
244 | } | 230 | } |
245 | phys += next - addr; | 231 | phys += next - addr; |
246 | } while (pud++, addr = next, addr != end); | 232 | } while (pud++, addr = next, addr != end); |
@@ -250,9 +236,10 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | |||
250 | * Create the page directory entries and any necessary page tables for the | 236 | * Create the page directory entries and any necessary page tables for the |
251 | * mapping specified by 'md'. | 237 | * mapping specified by 'md'. |
252 | */ | 238 | */ |
253 | static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys, | 239 | static void __create_mapping(struct mm_struct *mm, pgd_t *pgd, |
254 | unsigned long virt, phys_addr_t size, | 240 | phys_addr_t phys, unsigned long virt, |
255 | int map_io) | 241 | phys_addr_t size, pgprot_t prot, |
242 | void *(*alloc)(unsigned long size)) | ||
256 | { | 243 | { |
257 | unsigned long addr, length, end, next; | 244 | unsigned long addr, length, end, next; |
258 | 245 | ||
@@ -262,31 +249,95 @@ static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys, | |||
262 | end = addr + length; | 249 | end = addr + length; |
263 | do { | 250 | do { |
264 | next = pgd_addr_end(addr, end); | 251 | next = pgd_addr_end(addr, end); |
265 | alloc_init_pud(pgd, addr, next, phys, map_io); | 252 | alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc); |
266 | phys += next - addr; | 253 | phys += next - addr; |
267 | } while (pgd++, addr = next, addr != end); | 254 | } while (pgd++, addr = next, addr != end); |
268 | } | 255 | } |
269 | 256 | ||
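__create_mapping walks the virtual range one pgd entry at a time: pgd_addr_end() clamps each step to the next pgd boundary or to the end of the range, whichever comes first, and the same pattern repeats at the pud and pmd levels. A standalone sketch of that range-walking idiom, with a small stand-in block size:

        #include <stdio.h>
        #include <stdint.h>

        #define BLOCK_SIZE 0x1000ULL   /* stand-in for PGDIR_SIZE */

        /* pgd_addr_end() analogue: next boundary after addr, clamped to end. */
        static uint64_t addr_end(uint64_t addr, uint64_t end)
        {
                uint64_t next = (addr + BLOCK_SIZE) & ~(BLOCK_SIZE - 1);
                return next < end ? next : end;
        }

        int main(void)
        {
                uint64_t addr = 0x0800, end = 0x3200;

                do {
                        uint64_t next = addr_end(addr, end);
                        printf("chunk [%#llx, %#llx)\n",
                               (unsigned long long)addr,
                               (unsigned long long)next);
                        addr = next;
                } while (addr != end);
                return 0;
        }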
270 | static void __init create_mapping(phys_addr_t phys, unsigned long virt, | 257 | static void *late_alloc(unsigned long size) |
271 | phys_addr_t size) | 258 | { |
259 | void *ptr; | ||
260 | |||
261 | BUG_ON(size > PAGE_SIZE); | ||
262 | ptr = (void *)__get_free_page(PGALLOC_GFP); | ||
263 | BUG_ON(!ptr); | ||
264 | return ptr; | ||
265 | } | ||
266 | |||
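With the alloc callback threaded through every level, one walker serves both boot-time mappings (early_alloc, backed by memblock) and post-boot ones (late_alloc, backed by the page allocator); create_mapping is marked __ref because it legitimately references the __init early_alloc. A hedged sketch of that strategy-callback pattern, with simulated allocators:

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        /* Two allocation strategies behind one signature, mirroring how
         * early_alloc/late_alloc are passed down to alloc_init_*(). */
        static void *early_alloc_sim(unsigned long size)
        {
                static char pool[1 << 16];      /* stand-in for memblock */
                static unsigned long off;       /* no bounds check in this sketch */
                void *p = &pool[off];
                off += size;
                return memset(p, 0, size);
        }

        static void *late_alloc_sim(unsigned long size)
        {
                return calloc(1, size);         /* stand-in for __get_free_page */
        }

        static void build_table(void *(*alloc)(unsigned long size))
        {
                void *table = alloc(512 * sizeof(void *));
                printf("table at %p\n", table);
        }

        int main(void)
        {
                build_table(early_alloc_sim);   /* boot-time path */
                build_table(late_alloc_sim);    /* post-boot path */
                return 0;
        }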
267 | static void __ref create_mapping(phys_addr_t phys, unsigned long virt, | ||
268 | phys_addr_t size, pgprot_t prot) | ||
272 | { | 269 | { |
273 | if (virt < VMALLOC_START) { | 270 | if (virt < VMALLOC_START) { |
274 | pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", | 271 | pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", |
275 | &phys, virt); | 272 | &phys, virt); |
276 | return; | 273 | return; |
277 | } | 274 | } |
278 | __create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0); | 275 | __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt, |
276 | size, prot, early_alloc); | ||
277 | } | ||
278 | |||
279 | void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, | ||
280 | unsigned long virt, phys_addr_t size, | ||
281 | pgprot_t prot) | ||
282 | { | ||
283 | __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot, | ||
284 | late_alloc); | ||
279 | } | 285 | } |
280 | 286 | ||
281 | void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io) | 287 | static void create_mapping_late(phys_addr_t phys, unsigned long virt, |
288 | phys_addr_t size, pgprot_t prot) | ||
282 | { | 289 | { |
283 | if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) { | 290 | if (virt < VMALLOC_START) { |
284 | pr_warn("BUG: not creating id mapping for %pa\n", &addr); | 291 | pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", |
292 | &phys, virt); | ||
285 | return; | 293 | return; |
286 | } | 294 | } |
287 | __create_mapping(&idmap_pg_dir[pgd_index(addr)], | 295 | |
288 | addr, addr, size, map_io); | 296 | __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), |
297 | phys, virt, size, prot, late_alloc); | ||
298 | } | ||
299 | |||
300 | #ifdef CONFIG_DEBUG_RODATA | ||
301 | static void __init __map_memblock(phys_addr_t start, phys_addr_t end) | ||
302 | { | ||
303 | /* | ||
304 | * Set up the executable regions using the existing section mappings | ||
305 | * for now. This will get more fine-grained later once all memory | ||
306 | * is mapped. | ||
307 | */ | ||
308 | unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); | ||
309 | unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); | ||
310 | |||
311 | if (end < kernel_x_start) { | ||
312 | create_mapping(start, __phys_to_virt(start), | ||
313 | end - start, PAGE_KERNEL); | ||
314 | } else if (start >= kernel_x_end) { | ||
315 | create_mapping(start, __phys_to_virt(start), | ||
316 | end - start, PAGE_KERNEL); | ||
317 | } else { | ||
318 | if (start < kernel_x_start) | ||
319 | create_mapping(start, __phys_to_virt(start), | ||
320 | kernel_x_start - start, | ||
321 | PAGE_KERNEL); | ||
322 | create_mapping(kernel_x_start, | ||
323 | __phys_to_virt(kernel_x_start), | ||
324 | kernel_x_end - kernel_x_start, | ||
325 | PAGE_KERNEL_EXEC); | ||
326 | if (kernel_x_end < end) | ||
327 | create_mapping(kernel_x_end, | ||
328 | __phys_to_virt(kernel_x_end), | ||
329 | end - kernel_x_end, | ||
330 | PAGE_KERNEL); | ||
331 | } | ||
332 | |||
289 | } | 333 | } |
334 | #else | ||
335 | static void __init __map_memblock(phys_addr_t start, phys_addr_t end) | ||
336 | { | ||
337 | create_mapping(start, __phys_to_virt(start), end - start, | ||
338 | PAGE_KERNEL_EXEC); | ||
339 | } | ||
340 | #endif | ||
290 | 341 | ||
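Under CONFIG_DEBUG_RODATA, __map_memblock splits each memblock region into up to three pieces relative to the kernel's executable window [kernel_x_start, kernel_x_end): the parts below and above it are mapped PAGE_KERNEL (non-executable), and only the executable window itself keeps PAGE_KERNEL_EXEC. A sketch of the interval split, with the boundary comparisons simplified slightly:

        #include <stdio.h>
        #include <stdint.h>

        static void map(uint64_t s, uint64_t e, const char *prot)
        {
                printf("  [%#llx, %#llx) %s\n",
                       (unsigned long long)s, (unsigned long long)e, prot);
        }

        /* Split [start, end) around the executable window [kx_s, kx_e),
         * as __map_memblock() does with PAGE_KERNEL vs PAGE_KERNEL_EXEC. */
        static void map_region(uint64_t start, uint64_t end,
                               uint64_t kx_s, uint64_t kx_e)
        {
                if (end <= kx_s || start >= kx_e) {
                        map(start, end, "PAGE_KERNEL");
                        return;
                }
                if (start < kx_s)
                        map(start, kx_s, "PAGE_KERNEL");
                map(kx_s, kx_e, "PAGE_KERNEL_EXEC");
                if (kx_e < end)
                        map(kx_e, end, "PAGE_KERNEL");
        }

        int main(void)
        {
                map_region(0x40000000, 0x48000000, 0x40080000, 0x40c00000);
                return 0;
        }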
291 | static void __init map_mem(void) | 342 | static void __init map_mem(void) |
292 | { | 343 | { |
@@ -332,14 +383,53 @@ static void __init map_mem(void) | |||
332 | memblock_set_current_limit(limit); | 383 | memblock_set_current_limit(limit); |
333 | } | 384 | } |
334 | #endif | 385 | #endif |
335 | 386 | __map_memblock(start, end); | |
336 | create_mapping(start, __phys_to_virt(start), end - start); | ||
337 | } | 387 | } |
338 | 388 | ||
339 | /* Limit no longer required. */ | 389 | /* Limit no longer required. */ |
340 | memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); | 390 | memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); |
341 | } | 391 | } |
342 | 392 | ||
393 | void __init fixup_executable(void) | ||
394 | { | ||
395 | #ifdef CONFIG_DEBUG_RODATA | ||
396 | /* Now that we are actually fully mapped, make the start/end more fine-grained. */ | ||
397 | if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) { | ||
398 | unsigned long aligned_start = round_down(__pa(_stext), | ||
399 | SECTION_SIZE); | ||
400 | |||
401 | create_mapping(aligned_start, __phys_to_virt(aligned_start), | ||
402 | __pa(_stext) - aligned_start, | ||
403 | PAGE_KERNEL); | ||
404 | } | ||
405 | |||
406 | if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) { | ||
407 | unsigned long aligned_end = round_up(__pa(__init_end), | ||
408 | SECTION_SIZE); | ||
409 | create_mapping(__pa(__init_end), (unsigned long)__init_end, | ||
410 | aligned_end - __pa(__init_end), | ||
411 | PAGE_KERNEL); | ||
412 | } | ||
413 | #endif | ||
414 | } | ||
415 | |||
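fixup_executable covers the case where _stext or __init_end is not section-aligned: the leading and trailing slivers inside the 2MB sections that bracket the executable window are re-mapped PAGE_KERNEL at page granularity. The rounding arithmetic reduces to this (the addresses below are invented examples):

        #include <stdio.h>
        #include <stdint.h>

        #define SECTION_SIZE 0x200000ULL    /* 2MB sections, 4KB granule */

        #define round_down(x, a) ((x) & ~((a) - 1))
        #define round_up(x, a)   round_down((x) + (a) - 1, (a))

        int main(void)
        {
                uint64_t stext    = 0x40081000;   /* example: not 2MB-aligned */
                uint64_t init_end = 0x40b42000;

                uint64_t head = round_down(stext, SECTION_SIZE);
                uint64_t tail = round_up(init_end, SECTION_SIZE);

                /* Slivers re-mapped PAGE_KERNEL by fixup_executable(). */
                printf("leading sliver:  [%#llx, %#llx)\n",
                       (unsigned long long)head, (unsigned long long)stext);
                printf("trailing sliver: [%#llx, %#llx)\n",
                       (unsigned long long)init_end, (unsigned long long)tail);
                return 0;
        }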
416 | #ifdef CONFIG_DEBUG_RODATA | ||
417 | void mark_rodata_ro(void) | ||
418 | { | ||
419 | create_mapping_late(__pa(_stext), (unsigned long)_stext, | ||
420 | (unsigned long)_etext - (unsigned long)_stext, | ||
421 | PAGE_KERNEL_EXEC | PTE_RDONLY); | ||
422 | |||
423 | } | ||
424 | #endif | ||
425 | |||
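mark_rodata_ro re-walks the already-mapped kernel text and drops write permission by re-applying the mapping with PTE_RDONLY set. The closest user-space analogue is mprotect() on an already-mapped region; a runnable sketch of that tighten-in-place pattern (an analogy, not what the kernel itself does):

        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>

        int main(void)
        {
                /* Map a page read-write and populate it... */
                char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (p == MAP_FAILED)
                        return 1;
                strcpy(p, "rodata");

                /* ...then revoke write permission in place, the way
                 * mark_rodata_ro() tightens the kernel text mapping. */
                if (mprotect(p, 4096, PROT_READ) != 0)
                        return 1;

                printf("still readable: %s\n", p);  /* writes would now fault */
                munmap(p, 4096);
                return 0;
        }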
426 | void fixup_init(void) | ||
427 | { | ||
428 | create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin, | ||
429 | (unsigned long)__init_end - (unsigned long)__init_begin, | ||
430 | PAGE_KERNEL); | ||
431 | } | ||
432 | |||
343 | /* | 433 | /* |
344 | * paging_init() sets up the page tables, initialises the zone memory | 434 | * paging_init() sets up the page tables, initialises the zone memory |
345 | * maps and sets up the zero page. | 435 | * maps and sets up the zero page. |
@@ -349,13 +439,7 @@ void __init paging_init(void) | |||
349 | void *zero_page; | 439 | void *zero_page; |
350 | 440 | ||
351 | map_mem(); | 441 | map_mem(); |
352 | 442 | fixup_executable(); | |
353 | /* | ||
354 | * Finally flush the caches and tlb to ensure that we're in a | ||
355 | * consistent state. | ||
356 | */ | ||
357 | flush_cache_all(); | ||
358 | flush_tlb_all(); | ||
359 | 443 | ||
360 | /* allocate the zero page. */ | 444 | /* allocate the zero page. */ |
361 | zero_page = early_alloc(PAGE_SIZE); | 445 | zero_page = early_alloc(PAGE_SIZE); |
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 4e778b13291b..28eebfb6af76 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S | |||
@@ -102,7 +102,7 @@ ENTRY(cpu_do_idle) | |||
102 | ret | 102 | ret |
103 | ENDPROC(cpu_do_idle) | 103 | ENDPROC(cpu_do_idle) |
104 | 104 | ||
105 | #ifdef CONFIG_ARM64_CPU_SUSPEND | 105 | #ifdef CONFIG_CPU_PM |
106 | /** | 106 | /** |
107 | * cpu_do_suspend - save CPU registers context | 107 | * cpu_do_suspend - save CPU registers context |
108 | * | 108 | * |
@@ -244,14 +244,18 @@ ENTRY(__cpu_setup) | |||
244 | ENDPROC(__cpu_setup) | 244 | ENDPROC(__cpu_setup) |
245 | 245 | ||
246 | /* | 246 | /* |
247 | * We set the desired value explicitly, including those of the | ||
248 | * reserved bits. The values of bits EE & E0E were set early in | ||
249 | * el2_setup, which are left untouched below. | ||
250 | * | ||
247 | * n n T | 251 | * n n T |
248 | * U E WT T UD US IHBS | 252 | * U E WT T UD US IHBS |
249 | * CE0 XWHW CZ ME TEEA S | 253 | * CE0 XWHW CZ ME TEEA S |
250 | * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM | 254 | * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM |
251 | * 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved | 255 | * 0011 0... 1101 ..0. ..0. 10.. .0.. .... < hardware reserved |
252 | * .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings | 256 | * .... .1.. .... 01.1 11.1 ..01 0.01 1101 < software settings |
253 | */ | 257 | */ |
254 | .type crval, #object | 258 | .type crval, #object |
255 | crval: | 259 | crval: |
256 | .word 0x000802e2 // clear | 260 | .word 0xfcffffff // clear |
257 | .word 0x0405d11d // set | 261 | .word 0x34d5d91d // set |
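The widened crval masks make __cpu_setup set every SCTLR_EL1 bit it cares about explicitly, reserved bits included, instead of inheriting whatever the boot state left behind; only the bits outside the clear mask (EE/E0E, set earlier in el2_setup) survive. The masks are applied clear-then-set, which in C terms is:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t clear = 0xfcffffff;   /* crval word 0 */
                uint32_t set   = 0x34d5d91d;   /* crval word 1 */

                /* Whatever value SCTLR_EL1 held at entry... */
                uint32_t sctlr = 0xdeadbeef;

                /* ...only the bits outside 'clear' (EE/E0E here) survive;
                 * everything inside it is forced to the 'set' pattern. */
                sctlr = (sctlr & ~clear) | set;

                printf("sctlr_el1 = %#x\n", sctlr);
                return 0;
        }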