Diffstat (limited to 'arch/arm64/include/asm'): 31 files changed, 488 insertions, 101 deletions
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 0b3fcf86e6ba..774a7c85e70f 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild | |||
| @@ -9,8 +9,8 @@ generic-y += current.h | |||
| 9 | generic-y += delay.h | 9 | generic-y += delay.h |
| 10 | generic-y += div64.h | 10 | generic-y += div64.h |
| 11 | generic-y += dma.h | 11 | generic-y += dma.h |
| 12 | generic-y += emergency-restart.h | ||
| 13 | generic-y += early_ioremap.h | 12 | generic-y += early_ioremap.h |
| 13 | generic-y += emergency-restart.h | ||
| 14 | generic-y += errno.h | 14 | generic-y += errno.h |
| 15 | generic-y += ftrace.h | 15 | generic-y += ftrace.h |
| 16 | generic-y += hash.h | 16 | generic-y += hash.h |
| @@ -29,6 +29,7 @@ generic-y += mman.h | |||
| 29 | generic-y += msgbuf.h | 29 | generic-y += msgbuf.h |
| 30 | generic-y += mutex.h | 30 | generic-y += mutex.h |
| 31 | generic-y += pci.h | 31 | generic-y += pci.h |
| 32 | generic-y += pci-bridge.h | ||
| 32 | generic-y += poll.h | 33 | generic-y += poll.h |
| 33 | generic-y += preempt.h | 34 | generic-y += preempt.h |
| 34 | generic-y += resource.h | 35 | generic-y += resource.h |
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index 9400596a0f39..f19097134b02 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h | |||
| @@ -104,37 +104,6 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl) | |||
| 104 | asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl)); | 104 | asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl)); |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | static inline void arch_counter_set_user_access(void) | ||
| 108 | { | ||
| 109 | u32 cntkctl = arch_timer_get_cntkctl(); | ||
| 110 | |||
| 111 | /* Disable user access to the timers and the physical counter */ | ||
| 112 | /* Also disable virtual event stream */ | ||
| 113 | cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN | ||
| 114 | | ARCH_TIMER_USR_VT_ACCESS_EN | ||
| 115 | | ARCH_TIMER_VIRT_EVT_EN | ||
| 116 | | ARCH_TIMER_USR_PCT_ACCESS_EN); | ||
| 117 | |||
| 118 | /* Enable user access to the virtual counter */ | ||
| 119 | cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN; | ||
| 120 | |||
| 121 | arch_timer_set_cntkctl(cntkctl); | ||
| 122 | } | ||
| 123 | |||
| 124 | static inline void arch_timer_evtstrm_enable(int divider) | ||
| 125 | { | ||
| 126 | u32 cntkctl = arch_timer_get_cntkctl(); | ||
| 127 | cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK; | ||
| 128 | /* Set the divider and enable virtual event stream */ | ||
| 129 | cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT) | ||
| 130 | | ARCH_TIMER_VIRT_EVT_EN; | ||
| 131 | arch_timer_set_cntkctl(cntkctl); | ||
| 132 | elf_hwcap |= HWCAP_EVTSTRM; | ||
| 133 | #ifdef CONFIG_COMPAT | ||
| 134 | compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM; | ||
| 135 | #endif | ||
| 136 | } | ||
| 137 | |||
| 138 | static inline u64 arch_counter_get_cntvct(void) | 107 | static inline u64 arch_counter_get_cntvct(void) |
| 139 | { | 108 | { |
| 140 | u64 cval; | 109 | u64 cval; |
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index f2defe1c380c..689b6379188c 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h | |||
| @@ -148,4 +148,8 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end) | |||
| 148 | { | 148 | { |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | int set_memory_ro(unsigned long addr, int numpages); | ||
| 152 | int set_memory_rw(unsigned long addr, int numpages); | ||
| 153 | int set_memory_x(unsigned long addr, int numpages); | ||
| 154 | int set_memory_nx(unsigned long addr, int numpages); | ||
| 151 | #endif | 155 | #endif |
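The set_memory_ro/rw/x/nx() prototypes added here give arm64 the page-attribute interface other architectures already export for tightening permissions on kernel mappings (DEBUG_SET_MODULE_RONX-style users). A minimal usage sketch, assuming a page-aligned buffer in the kernel mapping (the helper name is made up for illustration):

/* Write-protect a page-aligned region, then restore write access. */
static int write_protect_region(void *buf, size_t size)
{
        unsigned long addr = (unsigned long)buf;
        int numpages = DIV_ROUND_UP(size, PAGE_SIZE);
        int ret;

        ret = set_memory_ro(addr, numpages);    /* clear write permission */
        if (ret)
                return ret;

        /* ... region is now read-only ... */

        return set_memory_rw(addr, numpages);   /* restore write permission */
}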
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h index 7a2e0762cb40..4c631a0a3609 100644 --- a/arch/arm64/include/asm/cachetype.h +++ b/arch/arm64/include/asm/cachetype.h | |||
| @@ -39,6 +39,26 @@ | |||
| 39 | 39 | ||
| 40 | extern unsigned long __icache_flags; | 40 | extern unsigned long __icache_flags; |
| 41 | 41 | ||
| 42 | #define CCSIDR_EL1_LINESIZE_MASK 0x7 | ||
| 43 | #define CCSIDR_EL1_LINESIZE(x) ((x) & CCSIDR_EL1_LINESIZE_MASK) | ||
| 44 | |||
| 45 | #define CCSIDR_EL1_NUMSETS_SHIFT 13 | ||
| 46 | #define CCSIDR_EL1_NUMSETS_MASK (0x7fff << CCSIDR_EL1_NUMSETS_SHIFT) | ||
| 47 | #define CCSIDR_EL1_NUMSETS(x) \ | ||
| 48 | (((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT) | ||
| 49 | |||
| 50 | extern u64 __attribute_const__ icache_get_ccsidr(void); | ||
| 51 | |||
| 52 | static inline int icache_get_linesize(void) | ||
| 53 | { | ||
| 54 | return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr()); | ||
| 55 | } | ||
| 56 | |||
| 57 | static inline int icache_get_numsets(void) | ||
| 58 | { | ||
| 59 | return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr()); | ||
| 60 | } | ||
| 61 | |||
| 42 | /* | 62 | /* |
| 43 | * Whilst the D-side always behaves as PIPT on AArch64, aliasing is | 63 | * Whilst the D-side always behaves as PIPT on AArch64, aliasing is |
| 44 | * permitted in the I-cache. | 64 | * permitted in the I-cache. |
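The new CCSIDR_EL1 accessors expose the instruction-cache geometry to C code: LineSize is encoded as log2(words per line) - 2 (hence the 16 << n conversion to bytes) and NumSets as sets - 1 (hence the + 1). icache_get_ccsidr() is presumably expected to select the I-cache via CSSELR_EL1 before reading CCSIDR_EL1. A trivial usage sketch:

/* Illustrative only: report the I-cache geometry seen by this CPU. */
static void report_icache_geometry(void)
{
        pr_info("I-cache: %d-byte lines, %d sets\n",
                icache_get_linesize(), icache_get_numsets());
}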
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index d7b4b38a8e86..6f8e2ef9094a 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h | |||
| @@ -28,6 +28,8 @@ struct device_node; | |||
| 28 | * enable-method property. | 28 | * enable-method property. |
| 29 | * @cpu_init: Reads any data necessary for a specific enable-method from the | 29 | * @cpu_init: Reads any data necessary for a specific enable-method from the |
| 30 | * devicetree, for a given cpu node and proposed logical id. | 30 | * devicetree, for a given cpu node and proposed logical id. |
| 31 | * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from | ||
| 32 | * devicetree, for a given cpu node and proposed logical id. | ||
| 31 | * @cpu_prepare: Early one-time preparation step for a cpu. If there is a | 33 | * @cpu_prepare: Early one-time preparation step for a cpu. If there is a |
| 32 | * mechanism for doing so, tests whether it is possible to boot | 34 | * mechanism for doing so, tests whether it is possible to boot |
| 33 | * the given CPU. | 35 | * the given CPU. |
| @@ -47,6 +49,7 @@ struct device_node; | |||
| 47 | struct cpu_operations { | 49 | struct cpu_operations { |
| 48 | const char *name; | 50 | const char *name; |
| 49 | int (*cpu_init)(struct device_node *, unsigned int); | 51 | int (*cpu_init)(struct device_node *, unsigned int); |
| 52 | int (*cpu_init_idle)(struct device_node *, unsigned int); | ||
| 50 | int (*cpu_prepare)(unsigned int); | 53 | int (*cpu_prepare)(unsigned int); |
| 51 | int (*cpu_boot)(unsigned int); | 54 | int (*cpu_boot)(unsigned int); |
| 52 | void (*cpu_postboot)(void); | 55 | void (*cpu_postboot)(void); |
| @@ -61,7 +64,7 @@ struct cpu_operations { | |||
| 61 | }; | 64 | }; |
| 62 | 65 | ||
| 63 | extern const struct cpu_operations *cpu_ops[NR_CPUS]; | 66 | extern const struct cpu_operations *cpu_ops[NR_CPUS]; |
| 64 | extern int __init cpu_read_ops(struct device_node *dn, int cpu); | 67 | int __init cpu_read_ops(struct device_node *dn, int cpu); |
| 65 | extern void __init cpu_read_bootcpu_ops(void); | 68 | void __init cpu_read_bootcpu_ops(void); |
| 66 | 69 | ||
| 67 | #endif /* ifndef __ASM_CPU_OPS_H */ | 70 | #endif /* ifndef __ASM_CPU_OPS_H */ |
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h new file mode 100644 index 000000000000..b52a9932e2b1 --- /dev/null +++ b/arch/arm64/include/asm/cpuidle.h | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | #ifndef __ASM_CPUIDLE_H | ||
| 2 | #define __ASM_CPUIDLE_H | ||
| 3 | |||
| 4 | #ifdef CONFIG_CPU_IDLE | ||
| 5 | extern int cpu_init_idle(unsigned int cpu); | ||
| 6 | #else | ||
| 7 | static inline int cpu_init_idle(unsigned int cpu) | ||
| 8 | { | ||
| 9 | return -EOPNOTSUPP; | ||
| 10 | } | ||
| 11 | #endif | ||
| 12 | |||
| 13 | #endif | ||
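Together with the cpu_init_idle method added to struct cpu_operations above, this stub gives the CPUidle driver a per-CPU initialisation entry point. A sketch of what the CONFIG_CPU_IDLE implementation is expected to look like (the real body lives outside this diff; treat this as an assumption, not the actual code):

int cpu_init_idle(unsigned int cpu)
{
        int ret = -EOPNOTSUPP;
        struct device_node *cpu_node = of_get_cpu_node(cpu, NULL);

        if (!cpu_node)
                return -ENODEV;

        /* Defer to the enable-method (e.g. PSCI) if it provides the hook. */
        if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
                ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);

        of_node_put(cpu_node);
        return ret;
}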
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 7fb343779498..40ec68aa6870 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h | |||
| @@ -48,11 +48,13 @@ | |||
| 48 | /* | 48 | /* |
| 49 | * #imm16 values used for BRK instruction generation | 49 | * #imm16 values used for BRK instruction generation |
| 50 | * Allowed values for kgbd are 0x400 - 0x7ff | 50 | * Allowed values for kgbd are 0x400 - 0x7ff |
| 51 | * 0x100: for triggering a fault on purpose (reserved) | ||
| 51 | * 0x400: for dynamic BRK instruction | 52 | * 0x400: for dynamic BRK instruction |
| 52 | * 0x401: for compile time BRK instruction | 53 | * 0x401: for compile time BRK instruction |
| 53 | */ | 54 | */ |
| 54 | #define KGDB_DYN_DGB_BRK_IMM 0x400 | 55 | #define FAULT_BRK_IMM 0x100 |
| 55 | #define KDBG_COMPILED_DBG_BRK_IMM 0x401 | 56 | #define KGDB_DYN_DBG_BRK_IMM 0x400 |
| 57 | #define KGDB_COMPILED_DBG_BRK_IMM 0x401 | ||
| 56 | 58 | ||
| 57 | /* | 59 | /* |
| 58 | * BRK instruction encoding | 60 | * BRK instruction encoding |
| @@ -61,24 +63,30 @@ | |||
| 61 | #define AARCH64_BREAK_MON 0xd4200000 | 63 | #define AARCH64_BREAK_MON 0xd4200000 |
| 62 | 64 | ||
| 63 | /* | 65 | /* |
| 66 | * BRK instruction for provoking a fault on purpose | ||
| 67 | * Unlike kgdb, #imm16 value with unallocated handler is used for faulting. | ||
| 68 | */ | ||
| 69 | #define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5)) | ||
| 70 | |||
| 71 | /* | ||
| 64 | * Extract byte from BRK instruction | 72 | * Extract byte from BRK instruction |
| 65 | */ | 73 | */ |
| 66 | #define KGDB_DYN_DGB_BRK_INS_BYTE(x) \ | 74 | #define KGDB_DYN_DBG_BRK_INS_BYTE(x) \ |
| 67 | ((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff) | 75 | ((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff) |
| 68 | 76 | ||
| 69 | /* | 77 | /* |
| 70 | * Extract byte from BRK #imm16 | 78 | * Extract byte from BRK #imm16 |
| 71 | */ | 79 | */ |
| 72 | #define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \ | 80 | #define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \ |
| 73 | (((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff) | 81 | (((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff) |
| 74 | 82 | ||
| 75 | #define KGDB_DYN_DGB_BRK_BYTE(x) \ | 83 | #define KGDB_DYN_DBG_BRK_BYTE(x) \ |
| 76 | (KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x)) | 84 | (KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x)) |
| 77 | 85 | ||
| 78 | #define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0) | 86 | #define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DBG_BRK_BYTE(0) |
| 79 | #define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1) | 87 | #define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DBG_BRK_BYTE(1) |
| 80 | #define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2) | 88 | #define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DBG_BRK_BYTE(2) |
| 81 | #define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3) | 89 | #define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DBG_BRK_BYTE(3) |
| 82 | 90 | ||
| 83 | #define CACHE_FLUSH_IS_SAFE 1 | 91 | #define CACHE_FLUSH_IS_SAFE 1 |
| 84 | 92 | ||
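AARCH64_BREAK_FAULT is a BRK with the reserved immediate 0x100; as the new comment notes, no handler is ever installed for it, so executing the instruction reliably faults. A plausible use (a sketch, not code from this diff) is as the poison value an instruction encoder returns when handed operands it cannot encode:

/* Fail closed: emit an always-faulting BRK instead of a bogus encoding. */
static u32 encode_imm16_example(u32 imm)
{
        if (imm & ~0xffffU) {
                pr_err("%s: immediate 0x%x does not fit in 16 bits\n",
                       __func__, imm);
                return AARCH64_BREAK_FAULT;     /* traps if ever executed */
        }
        return AARCH64_BREAK_MON | (imm << 5);  /* BRK #imm */
}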
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index dc82e52acdb3..adeae3f6f0fc 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h | |||
| @@ -52,6 +52,13 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) | |||
| 52 | dev->archdata.dma_ops = ops; | 52 | dev->archdata.dma_ops = ops; |
| 53 | } | 53 | } |
| 54 | 54 | ||
| 55 | static inline int set_arch_dma_coherent_ops(struct device *dev) | ||
| 56 | { | ||
| 57 | set_dma_ops(dev, &coherent_swiotlb_dma_ops); | ||
| 58 | return 0; | ||
| 59 | } | ||
| 60 | #define set_arch_dma_coherent_ops set_arch_dma_coherent_ops | ||
| 61 | |||
| 55 | #include <asm-generic/dma-mapping-common.h> | 62 | #include <asm-generic/dma-mapping-common.h> |
| 56 | 63 | ||
| 57 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | 64 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) |
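set_arch_dma_coherent_ops() plugs into the generic hook of the same name: when firmware marks a device as cache-coherent (the "dma-coherent" devicetree property), core code can switch that device from the default non-coherent swiotlb ops to coherent_swiotlb_dma_ops. A sketch of the assumed caller (not part of this diff):

/* Illustrative: pick DMA ops for a device based on its DT node. */
static void example_of_dma_configure(struct device *dev,
                                     struct device_node *np)
{
        if (of_dma_is_coherent(np))
                set_arch_dma_coherent_ops(dev); /* cache-coherent DMA */
        /* otherwise the default non-coherent swiotlb ops remain in place */
}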
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index 0be67821f9ce..e8a3268a891c 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h | |||
| @@ -47,8 +47,6 @@ static inline void ack_bad_irq(unsigned int irq) | |||
| 47 | irq_err_count++; | 47 | irq_err_count++; |
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | extern void handle_IRQ(unsigned int, struct pt_regs *); | ||
| 51 | |||
| 52 | /* | 50 | /* |
| 53 | * No arch-specific IRQ flags. | 51 | * No arch-specific IRQ flags. |
| 54 | */ | 52 | */ |
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index d064047612b1..52b484b6aa1a 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h | |||
| @@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg, | |||
| 79 | */ | 79 | */ |
| 80 | #define ARM_MAX_BRP 16 | 80 | #define ARM_MAX_BRP 16 |
| 81 | #define ARM_MAX_WRP 16 | 81 | #define ARM_MAX_WRP 16 |
| 82 | #define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP) | ||
| 83 | 82 | ||
| 84 | /* Virtual debug register bases. */ | 83 | /* Virtual debug register bases. */ |
| 85 | #define AARCH64_DBG_REG_BVR 0 | 84 | #define AARCH64_DBG_REG_BVR 0 |
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index dc1f73b13e74..56a9e63b6c33 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h | |||
| @@ -2,6 +2,8 @@ | |||
| 2 | * Copyright (C) 2013 Huawei Ltd. | 2 | * Copyright (C) 2013 Huawei Ltd. |
| 3 | * Author: Jiang Liu <liuj97@gmail.com> | 3 | * Author: Jiang Liu <liuj97@gmail.com> |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com> | ||
| 6 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
| 7 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
| @@ -64,12 +66,155 @@ enum aarch64_insn_imm_type { | |||
| 64 | AARCH64_INSN_IMM_14, | 66 | AARCH64_INSN_IMM_14, |
| 65 | AARCH64_INSN_IMM_12, | 67 | AARCH64_INSN_IMM_12, |
| 66 | AARCH64_INSN_IMM_9, | 68 | AARCH64_INSN_IMM_9, |
| 69 | AARCH64_INSN_IMM_7, | ||
| 70 | AARCH64_INSN_IMM_6, | ||
| 71 | AARCH64_INSN_IMM_S, | ||
| 72 | AARCH64_INSN_IMM_R, | ||
| 67 | AARCH64_INSN_IMM_MAX | 73 | AARCH64_INSN_IMM_MAX |
| 68 | }; | 74 | }; |
| 69 | 75 | ||
| 76 | enum aarch64_insn_register_type { | ||
| 77 | AARCH64_INSN_REGTYPE_RT, | ||
| 78 | AARCH64_INSN_REGTYPE_RN, | ||
| 79 | AARCH64_INSN_REGTYPE_RT2, | ||
| 80 | AARCH64_INSN_REGTYPE_RM, | ||
| 81 | AARCH64_INSN_REGTYPE_RD, | ||
| 82 | AARCH64_INSN_REGTYPE_RA, | ||
| 83 | }; | ||
| 84 | |||
| 85 | enum aarch64_insn_register { | ||
| 86 | AARCH64_INSN_REG_0 = 0, | ||
| 87 | AARCH64_INSN_REG_1 = 1, | ||
| 88 | AARCH64_INSN_REG_2 = 2, | ||
| 89 | AARCH64_INSN_REG_3 = 3, | ||
| 90 | AARCH64_INSN_REG_4 = 4, | ||
| 91 | AARCH64_INSN_REG_5 = 5, | ||
| 92 | AARCH64_INSN_REG_6 = 6, | ||
| 93 | AARCH64_INSN_REG_7 = 7, | ||
| 94 | AARCH64_INSN_REG_8 = 8, | ||
| 95 | AARCH64_INSN_REG_9 = 9, | ||
| 96 | AARCH64_INSN_REG_10 = 10, | ||
| 97 | AARCH64_INSN_REG_11 = 11, | ||
| 98 | AARCH64_INSN_REG_12 = 12, | ||
| 99 | AARCH64_INSN_REG_13 = 13, | ||
| 100 | AARCH64_INSN_REG_14 = 14, | ||
| 101 | AARCH64_INSN_REG_15 = 15, | ||
| 102 | AARCH64_INSN_REG_16 = 16, | ||
| 103 | AARCH64_INSN_REG_17 = 17, | ||
| 104 | AARCH64_INSN_REG_18 = 18, | ||
| 105 | AARCH64_INSN_REG_19 = 19, | ||
| 106 | AARCH64_INSN_REG_20 = 20, | ||
| 107 | AARCH64_INSN_REG_21 = 21, | ||
| 108 | AARCH64_INSN_REG_22 = 22, | ||
| 109 | AARCH64_INSN_REG_23 = 23, | ||
| 110 | AARCH64_INSN_REG_24 = 24, | ||
| 111 | AARCH64_INSN_REG_25 = 25, | ||
| 112 | AARCH64_INSN_REG_26 = 26, | ||
| 113 | AARCH64_INSN_REG_27 = 27, | ||
| 114 | AARCH64_INSN_REG_28 = 28, | ||
| 115 | AARCH64_INSN_REG_29 = 29, | ||
| 116 | AARCH64_INSN_REG_FP = 29, /* Frame pointer */ | ||
| 117 | AARCH64_INSN_REG_30 = 30, | ||
| 118 | AARCH64_INSN_REG_LR = 30, /* Link register */ | ||
| 119 | AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */ | ||
| 120 | AARCH64_INSN_REG_SP = 31 /* Stack pointer: as load/store base reg */ | ||
| 121 | }; | ||
| 122 | |||
| 123 | enum aarch64_insn_variant { | ||
| 124 | AARCH64_INSN_VARIANT_32BIT, | ||
| 125 | AARCH64_INSN_VARIANT_64BIT | ||
| 126 | }; | ||
| 127 | |||
| 128 | enum aarch64_insn_condition { | ||
| 129 | AARCH64_INSN_COND_EQ = 0x0, /* == */ | ||
| 130 | AARCH64_INSN_COND_NE = 0x1, /* != */ | ||
| 131 | AARCH64_INSN_COND_CS = 0x2, /* unsigned >= */ | ||
| 132 | AARCH64_INSN_COND_CC = 0x3, /* unsigned < */ | ||
| 133 | AARCH64_INSN_COND_MI = 0x4, /* < 0 */ | ||
| 134 | AARCH64_INSN_COND_PL = 0x5, /* >= 0 */ | ||
| 135 | AARCH64_INSN_COND_VS = 0x6, /* overflow */ | ||
| 136 | AARCH64_INSN_COND_VC = 0x7, /* no overflow */ | ||
| 137 | AARCH64_INSN_COND_HI = 0x8, /* unsigned > */ | ||
| 138 | AARCH64_INSN_COND_LS = 0x9, /* unsigned <= */ | ||
| 139 | AARCH64_INSN_COND_GE = 0xa, /* signed >= */ | ||
| 140 | AARCH64_INSN_COND_LT = 0xb, /* signed < */ | ||
| 141 | AARCH64_INSN_COND_GT = 0xc, /* signed > */ | ||
| 142 | AARCH64_INSN_COND_LE = 0xd, /* signed <= */ | ||
| 143 | AARCH64_INSN_COND_AL = 0xe, /* always */ | ||
| 144 | }; | ||
| 145 | |||
| 70 | enum aarch64_insn_branch_type { | 146 | enum aarch64_insn_branch_type { |
| 71 | AARCH64_INSN_BRANCH_NOLINK, | 147 | AARCH64_INSN_BRANCH_NOLINK, |
| 72 | AARCH64_INSN_BRANCH_LINK, | 148 | AARCH64_INSN_BRANCH_LINK, |
| 149 | AARCH64_INSN_BRANCH_RETURN, | ||
| 150 | AARCH64_INSN_BRANCH_COMP_ZERO, | ||
| 151 | AARCH64_INSN_BRANCH_COMP_NONZERO, | ||
| 152 | }; | ||
| 153 | |||
| 154 | enum aarch64_insn_size_type { | ||
| 155 | AARCH64_INSN_SIZE_8, | ||
| 156 | AARCH64_INSN_SIZE_16, | ||
| 157 | AARCH64_INSN_SIZE_32, | ||
| 158 | AARCH64_INSN_SIZE_64, | ||
| 159 | }; | ||
| 160 | |||
| 161 | enum aarch64_insn_ldst_type { | ||
| 162 | AARCH64_INSN_LDST_LOAD_REG_OFFSET, | ||
| 163 | AARCH64_INSN_LDST_STORE_REG_OFFSET, | ||
| 164 | AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX, | ||
| 165 | AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX, | ||
| 166 | AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX, | ||
| 167 | AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX, | ||
| 168 | }; | ||
| 169 | |||
| 170 | enum aarch64_insn_adsb_type { | ||
| 171 | AARCH64_INSN_ADSB_ADD, | ||
| 172 | AARCH64_INSN_ADSB_SUB, | ||
| 173 | AARCH64_INSN_ADSB_ADD_SETFLAGS, | ||
| 174 | AARCH64_INSN_ADSB_SUB_SETFLAGS | ||
| 175 | }; | ||
| 176 | |||
| 177 | enum aarch64_insn_movewide_type { | ||
| 178 | AARCH64_INSN_MOVEWIDE_ZERO, | ||
| 179 | AARCH64_INSN_MOVEWIDE_KEEP, | ||
| 180 | AARCH64_INSN_MOVEWIDE_INVERSE | ||
| 181 | }; | ||
| 182 | |||
| 183 | enum aarch64_insn_bitfield_type { | ||
| 184 | AARCH64_INSN_BITFIELD_MOVE, | ||
| 185 | AARCH64_INSN_BITFIELD_MOVE_UNSIGNED, | ||
| 186 | AARCH64_INSN_BITFIELD_MOVE_SIGNED | ||
| 187 | }; | ||
| 188 | |||
| 189 | enum aarch64_insn_data1_type { | ||
| 190 | AARCH64_INSN_DATA1_REVERSE_16, | ||
| 191 | AARCH64_INSN_DATA1_REVERSE_32, | ||
| 192 | AARCH64_INSN_DATA1_REVERSE_64, | ||
| 193 | }; | ||
| 194 | |||
| 195 | enum aarch64_insn_data2_type { | ||
| 196 | AARCH64_INSN_DATA2_UDIV, | ||
| 197 | AARCH64_INSN_DATA2_SDIV, | ||
| 198 | AARCH64_INSN_DATA2_LSLV, | ||
| 199 | AARCH64_INSN_DATA2_LSRV, | ||
| 200 | AARCH64_INSN_DATA2_ASRV, | ||
| 201 | AARCH64_INSN_DATA2_RORV, | ||
| 202 | }; | ||
| 203 | |||
| 204 | enum aarch64_insn_data3_type { | ||
| 205 | AARCH64_INSN_DATA3_MADD, | ||
| 206 | AARCH64_INSN_DATA3_MSUB, | ||
| 207 | }; | ||
| 208 | |||
| 209 | enum aarch64_insn_logic_type { | ||
| 210 | AARCH64_INSN_LOGIC_AND, | ||
| 211 | AARCH64_INSN_LOGIC_BIC, | ||
| 212 | AARCH64_INSN_LOGIC_ORR, | ||
| 213 | AARCH64_INSN_LOGIC_ORN, | ||
| 214 | AARCH64_INSN_LOGIC_EOR, | ||
| 215 | AARCH64_INSN_LOGIC_EON, | ||
| 216 | AARCH64_INSN_LOGIC_AND_SETFLAGS, | ||
| 217 | AARCH64_INSN_LOGIC_BIC_SETFLAGS | ||
| 73 | }; | 218 | }; |
| 74 | 219 | ||
| 75 | #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ | 220 | #define __AARCH64_INSN_FUNCS(abbr, mask, val) \ |
| @@ -78,13 +223,58 @@ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ | |||
| 78 | static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ | 223 | static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ |
| 79 | { return (val); } | 224 | { return (val); } |
| 80 | 225 | ||
| 226 | __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800) | ||
| 227 | __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) | ||
| 228 | __AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000) | ||
| 229 | __AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000) | ||
| 230 | __AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000) | ||
| 231 | __AARCH64_INSN_FUNCS(ldp_pre, 0x7FC00000, 0x29C00000) | ||
| 232 | __AARCH64_INSN_FUNCS(add_imm, 0x7F000000, 0x11000000) | ||
| 233 | __AARCH64_INSN_FUNCS(adds_imm, 0x7F000000, 0x31000000) | ||
| 234 | __AARCH64_INSN_FUNCS(sub_imm, 0x7F000000, 0x51000000) | ||
| 235 | __AARCH64_INSN_FUNCS(subs_imm, 0x7F000000, 0x71000000) | ||
| 236 | __AARCH64_INSN_FUNCS(movn, 0x7F800000, 0x12800000) | ||
| 237 | __AARCH64_INSN_FUNCS(sbfm, 0x7F800000, 0x13000000) | ||
| 238 | __AARCH64_INSN_FUNCS(bfm, 0x7F800000, 0x33000000) | ||
| 239 | __AARCH64_INSN_FUNCS(movz, 0x7F800000, 0x52800000) | ||
| 240 | __AARCH64_INSN_FUNCS(ubfm, 0x7F800000, 0x53000000) | ||
| 241 | __AARCH64_INSN_FUNCS(movk, 0x7F800000, 0x72800000) | ||
| 242 | __AARCH64_INSN_FUNCS(add, 0x7F200000, 0x0B000000) | ||
| 243 | __AARCH64_INSN_FUNCS(adds, 0x7F200000, 0x2B000000) | ||
| 244 | __AARCH64_INSN_FUNCS(sub, 0x7F200000, 0x4B000000) | ||
| 245 | __AARCH64_INSN_FUNCS(subs, 0x7F200000, 0x6B000000) | ||
| 246 | __AARCH64_INSN_FUNCS(madd, 0x7FE08000, 0x1B000000) | ||
| 247 | __AARCH64_INSN_FUNCS(msub, 0x7FE08000, 0x1B008000) | ||
| 248 | __AARCH64_INSN_FUNCS(udiv, 0x7FE0FC00, 0x1AC00800) | ||
| 249 | __AARCH64_INSN_FUNCS(sdiv, 0x7FE0FC00, 0x1AC00C00) | ||
| 250 | __AARCH64_INSN_FUNCS(lslv, 0x7FE0FC00, 0x1AC02000) | ||
| 251 | __AARCH64_INSN_FUNCS(lsrv, 0x7FE0FC00, 0x1AC02400) | ||
| 252 | __AARCH64_INSN_FUNCS(asrv, 0x7FE0FC00, 0x1AC02800) | ||
| 253 | __AARCH64_INSN_FUNCS(rorv, 0x7FE0FC00, 0x1AC02C00) | ||
| 254 | __AARCH64_INSN_FUNCS(rev16, 0x7FFFFC00, 0x5AC00400) | ||
| 255 | __AARCH64_INSN_FUNCS(rev32, 0x7FFFFC00, 0x5AC00800) | ||
| 256 | __AARCH64_INSN_FUNCS(rev64, 0x7FFFFC00, 0x5AC00C00) | ||
| 257 | __AARCH64_INSN_FUNCS(and, 0x7F200000, 0x0A000000) | ||
| 258 | __AARCH64_INSN_FUNCS(bic, 0x7F200000, 0x0A200000) | ||
| 259 | __AARCH64_INSN_FUNCS(orr, 0x7F200000, 0x2A000000) | ||
| 260 | __AARCH64_INSN_FUNCS(orn, 0x7F200000, 0x2A200000) | ||
| 261 | __AARCH64_INSN_FUNCS(eor, 0x7F200000, 0x4A000000) | ||
| 262 | __AARCH64_INSN_FUNCS(eon, 0x7F200000, 0x4A200000) | ||
| 263 | __AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000) | ||
| 264 | __AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000) | ||
| 81 | __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) | 265 | __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) |
| 82 | __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) | 266 | __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) |
| 267 | __AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) | ||
| 268 | __AARCH64_INSN_FUNCS(cbnz, 0xFE000000, 0x35000000) | ||
| 269 | __AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000) | ||
| 83 | __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) | 270 | __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) |
| 84 | __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) | 271 | __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) |
| 85 | __AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003) | 272 | __AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003) |
| 86 | __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000) | 273 | __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000) |
| 87 | __AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F) | 274 | __AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F) |
| 275 | __AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000) | ||
| 276 | __AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000) | ||
| 277 | __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000) | ||
| 88 | 278 | ||
| 89 | #undef __AARCH64_INSN_FUNCS | 279 | #undef __AARCH64_INSN_FUNCS |
| 90 | 280 | ||
| @@ -97,8 +287,67 @@ u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, | |||
| 97 | u32 insn, u64 imm); | 287 | u32 insn, u64 imm); |
| 98 | u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, | 288 | u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, |
| 99 | enum aarch64_insn_branch_type type); | 289 | enum aarch64_insn_branch_type type); |
| 290 | u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, | ||
| 291 | enum aarch64_insn_register reg, | ||
| 292 | enum aarch64_insn_variant variant, | ||
| 293 | enum aarch64_insn_branch_type type); | ||
| 294 | u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr, | ||
| 295 | enum aarch64_insn_condition cond); | ||
| 100 | u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op); | 296 | u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op); |
| 101 | u32 aarch64_insn_gen_nop(void); | 297 | u32 aarch64_insn_gen_nop(void); |
| 298 | u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, | ||
| 299 | enum aarch64_insn_branch_type type); | ||
| 300 | u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, | ||
| 301 | enum aarch64_insn_register base, | ||
| 302 | enum aarch64_insn_register offset, | ||
| 303 | enum aarch64_insn_size_type size, | ||
| 304 | enum aarch64_insn_ldst_type type); | ||
| 305 | u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1, | ||
| 306 | enum aarch64_insn_register reg2, | ||
| 307 | enum aarch64_insn_register base, | ||
| 308 | int offset, | ||
| 309 | enum aarch64_insn_variant variant, | ||
| 310 | enum aarch64_insn_ldst_type type); | ||
| 311 | u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, | ||
| 312 | enum aarch64_insn_register src, | ||
| 313 | int imm, enum aarch64_insn_variant variant, | ||
| 314 | enum aarch64_insn_adsb_type type); | ||
| 315 | u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst, | ||
| 316 | enum aarch64_insn_register src, | ||
| 317 | int immr, int imms, | ||
| 318 | enum aarch64_insn_variant variant, | ||
| 319 | enum aarch64_insn_bitfield_type type); | ||
| 320 | u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst, | ||
| 321 | int imm, int shift, | ||
| 322 | enum aarch64_insn_variant variant, | ||
| 323 | enum aarch64_insn_movewide_type type); | ||
| 324 | u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst, | ||
| 325 | enum aarch64_insn_register src, | ||
| 326 | enum aarch64_insn_register reg, | ||
| 327 | int shift, | ||
| 328 | enum aarch64_insn_variant variant, | ||
| 329 | enum aarch64_insn_adsb_type type); | ||
| 330 | u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst, | ||
| 331 | enum aarch64_insn_register src, | ||
| 332 | enum aarch64_insn_variant variant, | ||
| 333 | enum aarch64_insn_data1_type type); | ||
| 334 | u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst, | ||
| 335 | enum aarch64_insn_register src, | ||
| 336 | enum aarch64_insn_register reg, | ||
| 337 | enum aarch64_insn_variant variant, | ||
| 338 | enum aarch64_insn_data2_type type); | ||
| 339 | u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst, | ||
| 340 | enum aarch64_insn_register src, | ||
| 341 | enum aarch64_insn_register reg1, | ||
| 342 | enum aarch64_insn_register reg2, | ||
| 343 | enum aarch64_insn_variant variant, | ||
| 344 | enum aarch64_insn_data3_type type); | ||
| 345 | u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, | ||
| 346 | enum aarch64_insn_register src, | ||
| 347 | enum aarch64_insn_register reg, | ||
| 348 | int shift, | ||
| 349 | enum aarch64_insn_variant variant, | ||
| 350 | enum aarch64_insn_logic_type type); | ||
| 102 | 351 | ||
| 103 | bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); | 352 | bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); |
| 104 | 353 | ||
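This hunk turns insn.h into a small runtime assembler: decode predicates via __AARCH64_INSN_FUNCS() plus aarch64_insn_gen_*() encoders for loads/stores, load/store pairs, add/sub, move-wide, bitfield, data-processing and logical operations — the building blocks a runtime code generator such as a JIT needs. A brief, illustrative sketch of the encoders (a real caller would also copy the words to executable memory and flush the I-cache):

/* Encode "add w0, w1, w2" followed by "ret" into buf[0..1]. */
static void emit_example(u32 *buf)
{
        buf[0] = aarch64_insn_gen_add_sub_shifted_reg(AARCH64_INSN_REG_0,
                                                      AARCH64_INSN_REG_1,
                                                      AARCH64_INSN_REG_2, 0,
                                                      AARCH64_INSN_VARIANT_32BIT,
                                                      AARCH64_INSN_ADSB_ADD);
        buf[1] = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_LR,
                                             AARCH64_INSN_BRANCH_RETURN);
}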
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index e0ecdcf6632d..79f1d519221f 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h | |||
| @@ -121,7 +121,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) | |||
| 121 | /* | 121 | /* |
| 122 | * I/O port access primitives. | 122 | * I/O port access primitives. |
| 123 | */ | 123 | */ |
| 124 | #define IO_SPACE_LIMIT 0xffff | 124 | #define arch_has_dev_port() (1) |
| 125 | #define IO_SPACE_LIMIT (SZ_32M - 1) | ||
| 125 | #define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M)) | 126 | #define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M)) |
| 126 | 127 | ||
| 127 | static inline u8 inb(unsigned long addr) | 128 | static inline u8 inb(unsigned long addr) |
| @@ -243,7 +244,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); | |||
| 243 | * (PHYS_OFFSET and PHYS_MASK taken into account). | 244 | * (PHYS_OFFSET and PHYS_MASK taken into account). |
| 244 | */ | 245 | */ |
| 245 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE | 246 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE |
| 246 | extern int valid_phys_addr_range(unsigned long addr, size_t size); | 247 | extern int valid_phys_addr_range(phys_addr_t addr, size_t size); |
| 247 | extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); | 248 | extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); |
| 248 | 249 | ||
| 249 | extern int devmem_is_allowed(unsigned long pfn); | 250 | extern int devmem_is_allowed(unsigned long pfn); |
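The IO_SPACE_LIMIT bump goes hand in hand with the new asm/pci.h below: PCI_IOBASE already reserves a 32MB virtual window just under MODULES_VADDR, so the port-number limit is raised from 64K to cover that whole window, and arch_has_dev_port() advertises /dev/port support. Schematically (derived from the constants above):

/*
 *   PCI_IOBASE           = MODULES_VADDR - SZ_32M
 *   inb(port)/outb(port)  access PCI_IOBASE + port
 *   valid ports           0 .. IO_SPACE_LIMIT (= SZ_32M - 1)
 */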
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h new file mode 100644 index 000000000000..8e24ef3f7c82 --- /dev/null +++ b/arch/arm64/include/asm/irq_work.h | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | #ifndef __ASM_IRQ_WORK_H | ||
| 2 | #define __ASM_IRQ_WORK_H | ||
| 3 | |||
| 4 | #include <asm/smp.h> | ||
| 5 | |||
| 6 | static inline bool arch_irq_work_has_interrupt(void) | ||
| 7 | { | ||
| 8 | return !!__smp_cross_call; | ||
| 9 | } | ||
| 10 | |||
| 11 | #endif /* __ASM_IRQ_WORK_H */ | ||
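arch_irq_work_has_interrupt() answers the generic irq_work question "can this architecture raise a self-IPI right now?". __smp_cross_call (newly exported from asm/smp.h, see that hunk below) is only set once set_smp_cross_call() has registered an IPI mechanism, so early in boot — or when no such mechanism exists — irq_work is presumably left to be processed from the next timer tick instead:

/* Assumed semantics on the generic side (not code from this diff):
 *
 *      if (arch_irq_work_has_interrupt())
 *              arch_irq_work_raise();  // self-IPI, work runs promptly
 *      else
 *              ;                       // work waits for the next tick
 */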
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h index 3c8aafc1082f..f69f69c8120c 100644 --- a/arch/arm64/include/asm/kgdb.h +++ b/arch/arm64/include/asm/kgdb.h | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | 29 | ||
| 30 | static inline void arch_kgdb_breakpoint(void) | 30 | static inline void arch_kgdb_breakpoint(void) |
| 31 | { | 31 | { |
| 32 | asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM)); | 32 | asm ("brk %0" : : "I" (KGDB_COMPILED_DBG_BRK_IMM)); |
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | extern void kgdb_handle_bus_error(void); | 35 | extern void kgdb_handle_bus_error(void); |
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index cc83520459ed..7fd3e27e3ccc 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h | |||
| @@ -122,6 +122,17 @@ | |||
| 122 | #define VTCR_EL2_T0SZ_MASK 0x3f | 122 | #define VTCR_EL2_T0SZ_MASK 0x3f |
| 123 | #define VTCR_EL2_T0SZ_40B 24 | 123 | #define VTCR_EL2_T0SZ_40B 24 |
| 124 | 124 | ||
| 125 | /* | ||
| 126 | * We configure the Stage-2 page tables to always restrict the IPA space to be | ||
| 127 | * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are | ||
| 128 | * not known to exist and will break with this configuration. | ||
| 129 | * | ||
| 130 | * Note that when using 4K pages, we concatenate two first level page tables | ||
| 131 | * together. | ||
| 132 | * | ||
| 133 | * The magic numbers used for VTTBR_X in this patch can be found in Tables | ||
| 134 | * D4-23 and D4-25 in ARM DDI 0487A.b. | ||
| 135 | */ | ||
| 125 | #ifdef CONFIG_ARM64_64K_PAGES | 136 | #ifdef CONFIG_ARM64_64K_PAGES |
| 126 | /* | 137 | /* |
| 127 | * Stage2 translation configuration: | 138 | * Stage2 translation configuration: |
| @@ -149,7 +160,7 @@ | |||
| 149 | #endif | 160 | #endif |
| 150 | 161 | ||
| 151 | #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) | 162 | #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) |
| 152 | #define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) | 163 | #define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) |
| 153 | #define VTTBR_VMID_SHIFT (48LLU) | 164 | #define VTTBR_VMID_SHIFT (48LLU) |
| 154 | #define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) | 165 | #define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) |
| 155 | 166 | ||
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index fdc3e21abd8d..5674a55b5518 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h | |||
| @@ -174,6 +174,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) | |||
| 174 | 174 | ||
| 175 | static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) | 175 | static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) |
| 176 | { | 176 | { |
| 177 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC; | ||
| 178 | } | ||
| 179 | |||
| 180 | static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) | ||
| 181 | { | ||
| 177 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; | 182 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; |
| 178 | } | 183 | } |
| 179 | 184 | ||
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e10c45a578e3..2012c4ba8d67 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
| @@ -22,6 +22,8 @@ | |||
| 22 | #ifndef __ARM64_KVM_HOST_H__ | 22 | #ifndef __ARM64_KVM_HOST_H__ |
| 23 | #define __ARM64_KVM_HOST_H__ | 23 | #define __ARM64_KVM_HOST_H__ |
| 24 | 24 | ||
| 25 | #include <linux/types.h> | ||
| 26 | #include <linux/kvm_types.h> | ||
| 25 | #include <asm/kvm.h> | 27 | #include <asm/kvm.h> |
| 26 | #include <asm/kvm_asm.h> | 28 | #include <asm/kvm_asm.h> |
| 27 | #include <asm/kvm_mmio.h> | 29 | #include <asm/kvm_mmio.h> |
| @@ -41,8 +43,7 @@ | |||
| 41 | 43 | ||
| 42 | #define KVM_VCPU_MAX_FEATURES 3 | 44 | #define KVM_VCPU_MAX_FEATURES 3 |
| 43 | 45 | ||
| 44 | struct kvm_vcpu; | 46 | int __attribute_const__ kvm_target_cpu(void); |
| 45 | int kvm_target_cpu(void); | ||
| 46 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); | 47 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); |
| 47 | int kvm_arch_dev_ioctl_check_extension(long ext); | 48 | int kvm_arch_dev_ioctl_check_extension(long ext); |
| 48 | 49 | ||
| @@ -164,25 +165,23 @@ struct kvm_vcpu_stat { | |||
| 164 | u32 halt_wakeup; | 165 | u32 halt_wakeup; |
| 165 | }; | 166 | }; |
| 166 | 167 | ||
| 167 | struct kvm_vcpu_init; | ||
| 168 | int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, | 168 | int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, |
| 169 | const struct kvm_vcpu_init *init); | 169 | const struct kvm_vcpu_init *init); |
| 170 | int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); | 170 | int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); |
| 171 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); | 171 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); |
| 172 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | 172 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); |
| 173 | struct kvm_one_reg; | ||
| 174 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | 173 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); |
| 175 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | 174 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); |
| 176 | 175 | ||
| 177 | #define KVM_ARCH_WANT_MMU_NOTIFIER | 176 | #define KVM_ARCH_WANT_MMU_NOTIFIER |
| 178 | struct kvm; | ||
| 179 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | 177 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); |
| 180 | int kvm_unmap_hva_range(struct kvm *kvm, | 178 | int kvm_unmap_hva_range(struct kvm *kvm, |
| 181 | unsigned long start, unsigned long end); | 179 | unsigned long start, unsigned long end); |
| 182 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | 180 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); |
| 183 | 181 | ||
| 184 | /* We do not have shadow page tables, hence the empty hooks */ | 182 | /* We do not have shadow page tables, hence the empty hooks */ |
| 185 | static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva) | 183 | static inline int kvm_age_hva(struct kvm *kvm, unsigned long start, |
| 184 | unsigned long end) | ||
| 186 | { | 185 | { |
| 187 | return 0; | 186 | return 0; |
| 188 | } | 187 | } |
| @@ -192,8 +191,13 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | |||
| 192 | return 0; | 191 | return 0; |
| 193 | } | 192 | } |
| 194 | 193 | ||
| 194 | static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | ||
| 195 | unsigned long address) | ||
| 196 | { | ||
| 197 | } | ||
| 198 | |||
| 195 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); | 199 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); |
| 196 | struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); | 200 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); |
| 197 | 201 | ||
| 198 | u64 kvm_call_hyp(void *hypfn, ...); | 202 | u64 kvm_call_hyp(void *hypfn, ...); |
| 199 | 203 | ||
| @@ -244,4 +248,10 @@ static inline void vgic_arch_setup(const struct vgic_params *vgic) | |||
| 244 | } | 248 | } |
| 245 | } | 249 | } |
| 246 | 250 | ||
| 251 | static inline void kvm_arch_hardware_disable(void) {} | ||
| 252 | static inline void kvm_arch_hardware_unsetup(void) {} | ||
| 253 | static inline void kvm_arch_sync_events(struct kvm *kvm) {} | ||
| 254 | static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} | ||
| 255 | static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} | ||
| 256 | |||
| 247 | #endif /* __ARM64_KVM_HOST_H__ */ | 257 | #endif /* __ARM64_KVM_HOST_H__ */ |
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 8e138c7c53ac..a030d163840b 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
| @@ -59,10 +59,9 @@ | |||
| 59 | #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) | 59 | #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) |
| 60 | 60 | ||
| 61 | /* | 61 | /* |
| 62 | * Align KVM with the kernel's view of physical memory. Should be | 62 | * We currently only support a 40bit IPA. |
| 63 | * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration. | ||
| 64 | */ | 63 | */ |
| 65 | #define KVM_PHYS_SHIFT PHYS_MASK_SHIFT | 64 | #define KVM_PHYS_SHIFT (40) |
| 66 | #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) | 65 | #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) |
| 67 | #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) | 66 | #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) |
| 68 | 67 | ||
| @@ -93,19 +92,6 @@ void kvm_clear_hyp_idmap(void); | |||
| 93 | #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) | 92 | #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) |
| 94 | #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) | 93 | #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) |
| 95 | 94 | ||
| 96 | static inline bool kvm_is_write_fault(unsigned long esr) | ||
| 97 | { | ||
| 98 | unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT; | ||
| 99 | |||
| 100 | if (esr_ec == ESR_EL2_EC_IABT) | ||
| 101 | return false; | ||
| 102 | |||
| 103 | if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR)) | ||
| 104 | return false; | ||
| 105 | |||
| 106 | return true; | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline void kvm_clean_pgd(pgd_t *pgd) {} | 95 | static inline void kvm_clean_pgd(pgd_t *pgd) {} |
| 110 | static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} | 96 | static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} |
| 111 | static inline void kvm_clean_pte(pte_t *pte) {} | 97 | static inline void kvm_clean_pte(pte_t *pte) {} |
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h new file mode 100644 index 000000000000..872ba939fcb2 --- /dev/null +++ b/arch/arm64/include/asm/pci.h | |||
| @@ -0,0 +1,37 @@ | |||
| 1 | #ifndef __ASM_PCI_H | ||
| 2 | #define __ASM_PCI_H | ||
| 3 | #ifdef __KERNEL__ | ||
| 4 | |||
| 5 | #include <linux/types.h> | ||
| 6 | #include <linux/slab.h> | ||
| 7 | #include <linux/dma-mapping.h> | ||
| 8 | |||
| 9 | #include <asm/io.h> | ||
| 10 | #include <asm-generic/pci-bridge.h> | ||
| 11 | #include <asm-generic/pci-dma-compat.h> | ||
| 12 | |||
| 13 | #define PCIBIOS_MIN_IO 0x1000 | ||
| 14 | #define PCIBIOS_MIN_MEM 0 | ||
| 15 | |||
| 16 | /* | ||
| 17 | * Set to 1 if the kernel should re-assign all PCI bus numbers | ||
| 18 | */ | ||
| 19 | #define pcibios_assign_all_busses() \ | ||
| 20 | (pci_has_flag(PCI_REASSIGN_ALL_BUS)) | ||
| 21 | |||
| 22 | /* | ||
| 23 | * PCI address space differs from physical memory address space | ||
| 24 | */ | ||
| 25 | #define PCI_DMA_BUS_IS_PHYS (0) | ||
| 26 | |||
| 27 | extern int isa_dma_bridge_buggy; | ||
| 28 | |||
| 29 | #ifdef CONFIG_PCI | ||
| 30 | static inline int pci_proc_domain(struct pci_bus *bus) | ||
| 31 | { | ||
| 32 | return 1; | ||
| 33 | } | ||
| 34 | #endif /* CONFIG_PCI */ | ||
| 35 | |||
| 36 | #endif /* __KERNEL__ */ | ||
| 37 | #endif /* __ASM_PCI_H */ | ||
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 453a179469a3..5279e5733386 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h | |||
| @@ -26,13 +26,13 @@ static inline void set_my_cpu_offset(unsigned long off) | |||
| 26 | static inline unsigned long __my_cpu_offset(void) | 26 | static inline unsigned long __my_cpu_offset(void) |
| 27 | { | 27 | { |
| 28 | unsigned long off; | 28 | unsigned long off; |
| 29 | register unsigned long *sp asm ("sp"); | ||
| 30 | 29 | ||
| 31 | /* | 30 | /* |
| 32 | * We want to allow caching the value, so avoid using volatile and | 31 | * We want to allow caching the value, so avoid using volatile and |
| 33 | * instead use a fake stack read to hazard against barrier(). | 32 | * instead use a fake stack read to hazard against barrier(). |
| 34 | */ | 33 | */ |
| 35 | asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp)); | 34 | asm("mrs %0, tpidr_el1" : "=r" (off) : |
| 35 | "Q" (*(const unsigned long *)current_stack_pointer)); | ||
| 36 | 36 | ||
| 37 | return off; | 37 | return off; |
| 38 | } | 38 | } |
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index ffe1ba0506d1..cefd3e825612 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
| @@ -149,46 +149,51 @@ extern struct page *empty_zero_page; | |||
| 149 | #define pte_valid_not_user(pte) \ | 149 | #define pte_valid_not_user(pte) \ |
| 150 | ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) | 150 | ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) |
| 151 | 151 | ||
| 152 | static inline pte_t pte_wrprotect(pte_t pte) | 152 | static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) |
| 153 | { | 153 | { |
| 154 | pte_val(pte) &= ~PTE_WRITE; | 154 | pte_val(pte) &= ~pgprot_val(prot); |
| 155 | return pte; | 155 | return pte; |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | static inline pte_t pte_mkwrite(pte_t pte) | 158 | static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) |
| 159 | { | 159 | { |
| 160 | pte_val(pte) |= PTE_WRITE; | 160 | pte_val(pte) |= pgprot_val(prot); |
| 161 | return pte; | 161 | return pte; |
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | static inline pte_t pte_wrprotect(pte_t pte) | ||
| 165 | { | ||
| 166 | return clear_pte_bit(pte, __pgprot(PTE_WRITE)); | ||
| 167 | } | ||
| 168 | |||
| 169 | static inline pte_t pte_mkwrite(pte_t pte) | ||
| 170 | { | ||
| 171 | return set_pte_bit(pte, __pgprot(PTE_WRITE)); | ||
| 172 | } | ||
| 173 | |||
| 164 | static inline pte_t pte_mkclean(pte_t pte) | 174 | static inline pte_t pte_mkclean(pte_t pte) |
| 165 | { | 175 | { |
| 166 | pte_val(pte) &= ~PTE_DIRTY; | 176 | return clear_pte_bit(pte, __pgprot(PTE_DIRTY)); |
| 167 | return pte; | ||
| 168 | } | 177 | } |
| 169 | 178 | ||
| 170 | static inline pte_t pte_mkdirty(pte_t pte) | 179 | static inline pte_t pte_mkdirty(pte_t pte) |
| 171 | { | 180 | { |
| 172 | pte_val(pte) |= PTE_DIRTY; | 181 | return set_pte_bit(pte, __pgprot(PTE_DIRTY)); |
| 173 | return pte; | ||
| 174 | } | 182 | } |
| 175 | 183 | ||
| 176 | static inline pte_t pte_mkold(pte_t pte) | 184 | static inline pte_t pte_mkold(pte_t pte) |
| 177 | { | 185 | { |
| 178 | pte_val(pte) &= ~PTE_AF; | 186 | return clear_pte_bit(pte, __pgprot(PTE_AF)); |
| 179 | return pte; | ||
| 180 | } | 187 | } |
| 181 | 188 | ||
| 182 | static inline pte_t pte_mkyoung(pte_t pte) | 189 | static inline pte_t pte_mkyoung(pte_t pte) |
| 183 | { | 190 | { |
| 184 | pte_val(pte) |= PTE_AF; | 191 | return set_pte_bit(pte, __pgprot(PTE_AF)); |
| 185 | return pte; | ||
| 186 | } | 192 | } |
| 187 | 193 | ||
| 188 | static inline pte_t pte_mkspecial(pte_t pte) | 194 | static inline pte_t pte_mkspecial(pte_t pte) |
| 189 | { | 195 | { |
| 190 | pte_val(pte) |= PTE_SPECIAL; | 196 | return set_pte_bit(pte, __pgprot(PTE_SPECIAL)); |
| 191 | return pte; | ||
| 192 | } | 197 | } |
| 193 | 198 | ||
| 194 | static inline void set_pte(pte_t *ptep, pte_t pte) | 199 | static inline void set_pte(pte_t *ptep, pte_t pte) |
| @@ -239,6 +244,16 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
| 239 | 244 | ||
| 240 | #define __HAVE_ARCH_PTE_SPECIAL | 245 | #define __HAVE_ARCH_PTE_SPECIAL |
| 241 | 246 | ||
| 247 | static inline pte_t pud_pte(pud_t pud) | ||
| 248 | { | ||
| 249 | return __pte(pud_val(pud)); | ||
| 250 | } | ||
| 251 | |||
| 252 | static inline pmd_t pud_pmd(pud_t pud) | ||
| 253 | { | ||
| 254 | return __pmd(pud_val(pud)); | ||
| 255 | } | ||
| 256 | |||
| 242 | static inline pte_t pmd_pte(pmd_t pmd) | 257 | static inline pte_t pmd_pte(pmd_t pmd) |
| 243 | { | 258 | { |
| 244 | return __pte(pmd_val(pmd)); | 259 | return __pte(pmd_val(pmd)); |
| @@ -256,7 +271,13 @@ static inline pmd_t pte_pmd(pte_t pte) | |||
| 256 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 271 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 257 | #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) | 272 | #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) |
| 258 | #define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd)) | 273 | #define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd)) |
| 259 | #endif | 274 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE |
| 275 | #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH | ||
| 276 | struct vm_area_struct; | ||
| 277 | void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, | ||
| 278 | pmd_t *pmdp); | ||
| 279 | #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ | ||
| 280 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
| 260 | 281 | ||
| 261 | #define pmd_young(pmd) pte_young(pmd_pte(pmd)) | 282 | #define pmd_young(pmd) pte_young(pmd_pte(pmd)) |
| 262 | #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) | 283 | #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) |
| @@ -277,6 +298,7 @@ static inline pmd_t pte_pmd(pte_t pte) | |||
| 277 | #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) | 298 | #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) |
| 278 | 299 | ||
| 279 | #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) | 300 | #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) |
| 301 | #define pud_write(pud) pte_write(pud_pte(pud)) | ||
| 280 | #define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT) | 302 | #define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT) |
| 281 | 303 | ||
| 282 | #define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd)) | 304 | #define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd)) |
| @@ -296,6 +318,8 @@ static inline int has_transparent_hugepage(void) | |||
| 296 | __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN) | 318 | __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN) |
| 297 | #define pgprot_writecombine(prot) \ | 319 | #define pgprot_writecombine(prot) \ |
| 298 | __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) | 320 | __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) |
| 321 | #define pgprot_device(prot) \ | ||
| 322 | __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN) | ||
| 299 | #define __HAVE_PHYS_MEM_ACCESS_PROT | 323 | #define __HAVE_PHYS_MEM_ACCESS_PROT |
| 300 | struct file; | 324 | struct file; |
| 301 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | 325 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
| @@ -376,6 +400,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) | |||
| 376 | return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr); | 400 | return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr); |
| 377 | } | 401 | } |
| 378 | 402 | ||
| 403 | #define pud_page(pud) pmd_page(pud_pmd(pud)) | ||
| 404 | |||
| 379 | #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */ | 405 | #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */ |
| 380 | 406 | ||
| 381 | #if CONFIG_ARM64_PGTABLE_LEVELS > 3 | 407 | #if CONFIG_ARM64_PGTABLE_LEVELS > 3 |
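The clear_pte_bit()/set_pte_bit() helpers factor the repeated mask-and-return pattern out of the pte_mk*() modifiers, so each one collapses to a one-liner and future modifiers follow for free. For example, an accessor for a hypothetical PTE_CONT bit (not defined in this diff) would be written as:

/* Hypothetical example only: PTE_CONT is not part of this change. */
static inline pte_t pte_mkcont(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(PTE_CONT));
}

The same hunk also adds pud_pte()/pud_pmd() conversions (enabling pud_write() and pud_page()), pgprot_device() (presumably for device/PCI resource mappings), and a pmdp_splitting_flush() declaration used when CONFIG_HAVE_RCU_TABLE_FREE is enabled.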
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 0c657bb54597..9a8fd84f8fb2 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h | |||
| @@ -32,6 +32,8 @@ extern void cpu_cache_off(void); | |||
| 32 | extern void cpu_do_idle(void); | 32 | extern void cpu_do_idle(void); |
| 33 | extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); | 33 | extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); |
| 34 | extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); | 34 | extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); |
| 35 | void cpu_soft_restart(phys_addr_t cpu_reset, | ||
| 36 | unsigned long addr) __attribute__((noreturn)); | ||
| 35 | extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr); | 37 | extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr); |
| 36 | extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); | 38 | extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); |
| 37 | 39 | ||
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 3df21feeabdd..286b1bec547c 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h | |||
| @@ -139,7 +139,7 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev, | |||
| 139 | ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) | 139 | ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) |
| 140 | 140 | ||
| 141 | #define KSTK_EIP(tsk) ((unsigned long)task_pt_regs(tsk)->pc) | 141 | #define KSTK_EIP(tsk) ((unsigned long)task_pt_regs(tsk)->pc) |
| 142 | #define KSTK_ESP(tsk) ((unsigned long)task_pt_regs(tsk)->sp) | 142 | #define KSTK_ESP(tsk) user_stack_pointer(task_pt_regs(tsk)) |
| 143 | 143 | ||
| 144 | /* | 144 | /* |
| 145 | * Prefetching support | 145 | * Prefetching support |
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 501000fadb6f..41ed9e13795e 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h | |||
| @@ -137,7 +137,7 @@ struct pt_regs { | |||
| 137 | (!((regs)->pstate & PSR_F_BIT)) | 137 | (!((regs)->pstate & PSR_F_BIT)) |
| 138 | 138 | ||
| 139 | #define user_stack_pointer(regs) \ | 139 | #define user_stack_pointer(regs) \ |
| 140 | (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp) | 140 | (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp) |
| 141 | 141 | ||
| 142 | static inline unsigned long regs_return_value(struct pt_regs *regs) | 142 | static inline unsigned long regs_return_value(struct pt_regs *regs) |
| 143 | { | 143 | { |
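The user_stack_pointer() change is a correctness fix rather than a cleanup: the old definition left the conditional expression unparenthesised, so the macro mis-associated when embedded in a larger expression. A worked illustration:

/* With the old macro,
 *
 *      base + user_stack_pointer(regs)
 *
 * expanded to
 *
 *      base + (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)
 *
 * which C parses as
 *
 *      (base + !compat_user_mode(regs)) ? (regs)->sp : (regs)->compat_sp
 *
 * because '+' binds tighter than '?:'. Wrapping the whole conditional in
 * parentheses makes the macro behave as a single value, which users such
 * as the new KSTK_ESP() definition in processor.h depend on.
 */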
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index a498f2cd2c2a..780f82c827b6 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h | |||
| @@ -48,6 +48,8 @@ extern void smp_init_cpus(void); | |||
| 48 | */ | 48 | */ |
| 49 | extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int)); | 49 | extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int)); |
| 50 | 50 | ||
| 51 | extern void (*__smp_cross_call)(const struct cpumask *, unsigned int); | ||
| 52 | |||
| 51 | /* | 53 | /* |
| 52 | * Called from the secondary holding pen, this is the secondary CPU entry point. | 54 | * Called from the secondary holding pen, this is the secondary CPU entry point. |
| 53 | */ | 55 | */ |
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h index 1be62bcb9d47..74a9d301819f 100644 --- a/arch/arm64/include/asm/sparsemem.h +++ b/arch/arm64/include/asm/sparsemem.h | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | #define __ASM_SPARSEMEM_H | 17 | #define __ASM_SPARSEMEM_H |
| 18 | 18 | ||
| 19 | #ifdef CONFIG_SPARSEMEM | 19 | #ifdef CONFIG_SPARSEMEM |
| 20 | #define MAX_PHYSMEM_BITS 40 | 20 | #define MAX_PHYSMEM_BITS 48 |
| 21 | #define SECTION_SIZE_BITS 30 | 21 | #define SECTION_SIZE_BITS 30 |
| 22 | #endif | 22 | #endif |
| 23 | 23 | ||
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h index e9c149c042e0..456d67c1f0fa 100644 --- a/arch/arm64/include/asm/suspend.h +++ b/arch/arm64/include/asm/suspend.h | |||
| @@ -21,6 +21,7 @@ struct sleep_save_sp { | |||
| 21 | phys_addr_t save_ptr_stash_phys; | 21 | phys_addr_t save_ptr_stash_phys; |
| 22 | }; | 22 | }; |
| 23 | 23 | ||
| 24 | extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)); | ||
| 24 | extern void cpu_resume(void); | 25 | extern void cpu_resume(void); |
| 25 | extern int cpu_suspend(unsigned long); | 26 | extern int cpu_suspend(unsigned long); |
| 26 | 27 | ||
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 45108d802f5e..459bf8e53208 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h | |||
| @@ -69,14 +69,19 @@ struct thread_info { | |||
| 69 | #define init_stack (init_thread_union.stack) | 69 | #define init_stack (init_thread_union.stack) |
| 70 | 70 | ||
| 71 | /* | 71 | /* |
| 72 | * how to get the current stack pointer from C | ||
| 73 | */ | ||
| 74 | register unsigned long current_stack_pointer asm ("sp"); | ||
| 75 | |||
| 76 | /* | ||
| 72 | * how to get the thread information struct from C | 77 | * how to get the thread information struct from C |
| 73 | */ | 78 | */ |
| 74 | static inline struct thread_info *current_thread_info(void) __attribute_const__; | 79 | static inline struct thread_info *current_thread_info(void) __attribute_const__; |
| 75 | 80 | ||
| 76 | static inline struct thread_info *current_thread_info(void) | 81 | static inline struct thread_info *current_thread_info(void) |
| 77 | { | 82 | { |
| 78 | register unsigned long sp asm ("sp"); | 83 | return (struct thread_info *) |
| 79 | return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); | 84 | (current_stack_pointer & ~(THREAD_SIZE - 1)); |
| 80 | } | 85 | } |
| 81 | 86 | ||
| 82 | #define thread_saved_pc(tsk) \ | 87 | #define thread_saved_pc(tsk) \ |
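current_stack_pointer gives C code a single, named way to read SP; current_thread_info() here and __my_cpu_offset() in percpu.h (above) now use it instead of declaring their own local register variables. A small sketch of another possible use (this helper is hypothetical, not part of the change):

/* Hypothetical: rough estimate of the kernel stack space still free. */
static inline unsigned long stack_bytes_free(void)
{
        unsigned long base = (unsigned long)current_thread_info();

        return current_stack_pointer - (base + sizeof(struct thread_info));
}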
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 62731ef9749a..a82c0c5c8b52 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h | |||
| @@ -23,6 +23,20 @@ | |||
| 23 | 23 | ||
| 24 | #include <asm-generic/tlb.h> | 24 | #include <asm-generic/tlb.h> |
| 25 | 25 | ||
| 26 | #include <linux/pagemap.h> | ||
| 27 | #include <linux/swap.h> | ||
| 28 | |||
| 29 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | ||
| 30 | |||
| 31 | #define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry) | ||
| 32 | static inline void __tlb_remove_table(void *_table) | ||
| 33 | { | ||
| 34 | free_page_and_swap_cache((struct page *)_table); | ||
| 35 | } | ||
| 36 | #else | ||
| 37 | #define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry) | ||
| 38 | #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ | ||
| 39 | |||
| 26 | /* | 40 | /* |
| 27 | * There's three ways the TLB shootdown code is used: | 41 | * There's three ways the TLB shootdown code is used: |
| 28 | * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). | 42 | * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). |
| @@ -88,7 +102,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | |||
| 88 | { | 102 | { |
| 89 | pgtable_page_dtor(pte); | 103 | pgtable_page_dtor(pte); |
| 90 | tlb_add_flush(tlb, addr); | 104 | tlb_add_flush(tlb, addr); |
| 91 | tlb_remove_page(tlb, pte); | 105 | tlb_remove_entry(tlb, pte); |
| 92 | } | 106 | } |
| 93 | 107 | ||
| 94 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 | 108 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 |
| @@ -96,7 +110,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | |||
| 96 | unsigned long addr) | 110 | unsigned long addr) |
| 97 | { | 111 | { |
| 98 | tlb_add_flush(tlb, addr); | 112 | tlb_add_flush(tlb, addr); |
| 99 | tlb_remove_page(tlb, virt_to_page(pmdp)); | 113 | tlb_remove_entry(tlb, virt_to_page(pmdp)); |
| 100 | } | 114 | } |
| 101 | #endif | 115 | #endif |
| 102 | 116 | ||
| @@ -105,7 +119,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, | |||
| 105 | unsigned long addr) | 119 | unsigned long addr) |
| 106 | { | 120 | { |
| 107 | tlb_add_flush(tlb, addr); | 121 | tlb_add_flush(tlb, addr); |
| 108 | tlb_remove_page(tlb, virt_to_page(pudp)); | 122 | tlb_remove_entry(tlb, virt_to_page(pudp)); |
| 109 | } | 123 | } |
| 110 | #endif | 124 | #endif |
| 111 | 125 | ||
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 4bc95d27e063..6d2bf419431d 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h | |||
| @@ -41,7 +41,7 @@ | |||
| 41 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) | 41 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) |
| 42 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) | 42 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) |
| 43 | 43 | ||
| 44 | #define __NR_compat_syscalls 383 | 44 | #define __NR_compat_syscalls 386 |
| 45 | #endif | 45 | #endif |
| 46 | 46 | ||
| 47 | #define __ARCH_WANT_SYS_CLONE | 47 | #define __ARCH_WANT_SYS_CLONE |
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index e242600c4046..da1f06b535e3 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h | |||
| @@ -787,3 +787,8 @@ __SYSCALL(__NR_sched_setattr, sys_sched_setattr) | |||
| 787 | __SYSCALL(__NR_sched_getattr, sys_sched_getattr) | 787 | __SYSCALL(__NR_sched_getattr, sys_sched_getattr) |
| 788 | #define __NR_renameat2 382 | 788 | #define __NR_renameat2 382 |
| 789 | __SYSCALL(__NR_renameat2, sys_renameat2) | 789 | __SYSCALL(__NR_renameat2, sys_renameat2) |
| 790 | /* 383 for seccomp */ | ||
| 791 | #define __NR_getrandom 384 | ||
| 792 | __SYSCALL(__NR_getrandom, sys_getrandom) | ||
| 793 | #define __NR_memfd_create 385 | ||
| 794 | __SYSCALL(__NR_memfd_create, sys_memfd_create) | ||
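Slot 383 is deliberately skipped (reserved for seccomp, as the comment says), the two new compat syscalls take 384 and 385, and __NR_compat_syscalls in unistd.h becomes 386, i.e. highest number plus one. A further compat syscall would follow the same two-step pattern (hypothetical example; the name and number are illustrative):

/*
 *   unistd32.h:   #define __NR_example 386
 *                 __SYSCALL(__NR_example, sys_example)
 *   unistd.h:     #define __NR_compat_syscalls 387
 */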
