-rw-r--r--  arch/arc/boot/dts/haps_hs.dts         |  30
-rw-r--r--  arch/arc/boot/dts/hsdk.dts            |  14
-rw-r--r--  arch/arc/configs/haps_hs_defconfig    |   5
-rw-r--r--  arch/arc/configs/hsdk_defconfig       |   5
-rw-r--r--  arch/arc/include/asm/entry-arcv2.h    | 361
-rw-r--r--  arch/arc/include/asm/entry-compact.h  |   4
-rw-r--r--  arch/arc/include/asm/linkage.h        |  18
-rw-r--r--  arch/arc/kernel/asm-offsets.c         |   7
-rw-r--r--  arch/arc/kernel/entry-arcv2.S         |  62
-rw-r--r--  arch/arc/kernel/entry-compact.S       |   2
-rw-r--r--  arch/arc/kernel/entry.S               |   4
-rw-r--r--  arch/arc/kernel/unwind.c              |   9
-rw-r--r--  arch/arc/mm/fault.c                   | 185
-rw-r--r--  arch/arc/mm/tlbex.S                   |  11
14 files changed, 377 insertions, 340 deletions
diff --git a/arch/arc/boot/dts/haps_hs.dts b/arch/arc/boot/dts/haps_hs.dts
index 1ebfa046492b..44bc522fdec8 100644
--- a/arch/arc/boot/dts/haps_hs.dts
+++ b/arch/arc/boot/dts/haps_hs.dts
@@ -62,5 +62,35 @@
| 62 | #interrupt-cells = <1>; | 62 | #interrupt-cells = <1>; |
| 63 | interrupts = <20>; | 63 | interrupts = <20>; |
| 64 | }; | 64 | }; |
| 65 | |||
| 66 | virtio0: virtio@f0100000 { | ||
| 67 | compatible = "virtio,mmio"; | ||
| 68 | reg = <0xf0100000 0x2000>; | ||
| 69 | interrupts = <31>; | ||
| 70 | }; | ||
| 71 | |||
| 72 | virtio1: virtio@f0102000 { | ||
| 73 | compatible = "virtio,mmio"; | ||
| 74 | reg = <0xf0102000 0x2000>; | ||
| 75 | interrupts = <32>; | ||
| 76 | }; | ||
| 77 | |||
| 78 | virtio2: virtio@f0104000 { | ||
| 79 | compatible = "virtio,mmio"; | ||
| 80 | reg = <0xf0104000 0x2000>; | ||
| 81 | interrupts = <33>; | ||
| 82 | }; | ||
| 83 | |||
| 84 | virtio3: virtio@f0106000 { | ||
| 85 | compatible = "virtio,mmio"; | ||
| 86 | reg = <0xf0106000 0x2000>; | ||
| 87 | interrupts = <34>; | ||
| 88 | }; | ||
| 89 | |||
| 90 | virtio4: virtio@f0108000 { | ||
| 91 | compatible = "virtio,mmio"; | ||
| 92 | reg = <0xf0108000 0x2000>; | ||
| 93 | interrupts = <35>; | ||
| 94 | }; | ||
| 65 | }; | 95 | }; |
| 66 | }; | 96 | }; |
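The five nodes added above describe memory-mapped virtio transports (virtio-mmio), letting the HAPS build discover block/net backends supplied by the host environment and matching the VIRTIO_BLK/VIRTIO_NET options enabled in haps_hs_defconfig further down. For reference only (not part of this patch), the same transport can also be declared without a device tree via the kernel's documented virtio_mmio.device= parameter; a hypothetical command-line equivalent of the first node, assuming CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is enabled, would be:

        virtio_mmio.device=0x2000@0xf0100000:31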
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 9a45cb093096..bfc7f5f5d6f2 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -8,6 +8,7 @@
| 8 | */ | 8 | */ |
| 9 | /dts-v1/; | 9 | /dts-v1/; |
| 10 | 10 | ||
| 11 | #include <dt-bindings/gpio/gpio.h> | ||
| 11 | #include <dt-bindings/reset/snps,hsdk-reset.h> | 12 | #include <dt-bindings/reset/snps,hsdk-reset.h> |
| 12 | 13 | ||
| 13 | / { | 14 | / { |
@@ -252,6 +253,19 @@
| 252 | dma-coherent; | 253 | dma-coherent; |
| 253 | }; | 254 | }; |
| 254 | 255 | ||
| 256 | spi0: spi@20000 { | ||
| 257 | compatible = "snps,dw-apb-ssi"; | ||
| 258 | reg = <0x20000 0x100>; | ||
| 259 | #address-cells = <1>; | ||
| 260 | #size-cells = <0>; | ||
| 261 | interrupts = <16>; | ||
| 262 | num-cs = <2>; | ||
| 263 | reg-io-width = <4>; | ||
| 264 | clocks = <&input_clk>; | ||
| 265 | cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>, | ||
| 266 | <&creg_gpio 1 GPIO_ACTIVE_LOW>; | ||
| 267 | }; | ||
| 268 | |||
| 255 | creg_gpio: gpio@14b0 { | 269 | creg_gpio: gpio@14b0 { |
| 256 | compatible = "snps,creg-gpio-hsdk"; | 270 | compatible = "snps,creg-gpio-hsdk"; |
| 257 | reg = <0x14b0 0x4>; | 271 | reg = <0x14b0 0x4>; |
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
index b117e6c16d41..436f2135bdc1 100644
--- a/arch/arc/configs/haps_hs_defconfig
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -35,10 +35,12 @@ CONFIG_INET=y
| 35 | # CONFIG_IPV6 is not set | 35 | # CONFIG_IPV6 is not set |
| 36 | # CONFIG_WIRELESS is not set | 36 | # CONFIG_WIRELESS is not set |
| 37 | CONFIG_DEVTMPFS=y | 37 | CONFIG_DEVTMPFS=y |
| 38 | CONFIG_DEVTMPFS_MOUNT=y | ||
| 38 | # CONFIG_STANDALONE is not set | 39 | # CONFIG_STANDALONE is not set |
| 39 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 40 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
| 40 | # CONFIG_BLK_DEV is not set | 41 | CONFIG_VIRTIO_BLK=y |
| 41 | CONFIG_NETDEVICES=y | 42 | CONFIG_NETDEVICES=y |
| 43 | CONFIG_VIRTIO_NET=y | ||
| 42 | # CONFIG_NET_VENDOR_ARC is not set | 44 | # CONFIG_NET_VENDOR_ARC is not set |
| 43 | # CONFIG_NET_VENDOR_BROADCOM is not set | 45 | # CONFIG_NET_VENDOR_BROADCOM is not set |
| 44 | # CONFIG_NET_VENDOR_INTEL is not set | 46 | # CONFIG_NET_VENDOR_INTEL is not set |
@@ -68,6 +70,7 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
| 68 | CONFIG_LOGO=y | 70 | CONFIG_LOGO=y |
| 69 | # CONFIG_HID is not set | 71 | # CONFIG_HID is not set |
| 70 | # CONFIG_USB_SUPPORT is not set | 72 | # CONFIG_USB_SUPPORT is not set |
| 73 | CONFIG_VIRTIO_MMIO=y | ||
| 71 | # CONFIG_IOMMU_SUPPORT is not set | 74 | # CONFIG_IOMMU_SUPPORT is not set |
| 72 | CONFIG_EXT2_FS=y | 75 | CONFIG_EXT2_FS=y |
| 73 | CONFIG_EXT2_FS_XATTR=y | 76 | CONFIG_EXT2_FS_XATTR=y |
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index c8fb5d60c53f..403125d9c9a3 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -46,6 +46,9 @@ CONFIG_SERIAL_8250_CONSOLE=y
| 46 | CONFIG_SERIAL_8250_DW=y | 46 | CONFIG_SERIAL_8250_DW=y |
| 47 | CONFIG_SERIAL_OF_PLATFORM=y | 47 | CONFIG_SERIAL_OF_PLATFORM=y |
| 48 | # CONFIG_HW_RANDOM is not set | 48 | # CONFIG_HW_RANDOM is not set |
| 49 | CONFIG_SPI=y | ||
| 50 | CONFIG_SPI_DESIGNWARE=y | ||
| 51 | CONFIG_SPI_DW_MMIO=y | ||
| 49 | CONFIG_GPIOLIB=y | 52 | CONFIG_GPIOLIB=y |
| 50 | CONFIG_GPIO_SYSFS=y | 53 | CONFIG_GPIO_SYSFS=y |
| 51 | CONFIG_GPIO_DWAPB=y | 54 | CONFIG_GPIO_DWAPB=y |
@@ -66,6 +69,8 @@ CONFIG_MMC=y
| 66 | CONFIG_MMC_SDHCI=y | 69 | CONFIG_MMC_SDHCI=y |
| 67 | CONFIG_MMC_SDHCI_PLTFM=y | 70 | CONFIG_MMC_SDHCI_PLTFM=y |
| 68 | CONFIG_MMC_DW=y | 71 | CONFIG_MMC_DW=y |
| 72 | CONFIG_DMADEVICES=y | ||
| 73 | CONFIG_DW_AXI_DMAC=y | ||
| 69 | CONFIG_EXT3_FS=y | 74 | CONFIG_EXT3_FS=y |
| 70 | CONFIG_VFAT_FS=y | 75 | CONFIG_VFAT_FS=y |
| 71 | CONFIG_TMPFS=y | 76 | CONFIG_TMPFS=y |
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 225e7df2d8ed..f5ae394ebe06 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -7,232 +7,251 @@
| 7 | #include <asm/irqflags-arcv2.h> | 7 | #include <asm/irqflags-arcv2.h> |
| 8 | #include <asm/thread_info.h> /* For THREAD_SIZE */ | 8 | #include <asm/thread_info.h> /* For THREAD_SIZE */ |
| 9 | 9 | ||
| 10 | /* | ||
| 11 | * Interrupt/Exception stack layout (pt_regs) for ARCv2 | ||
| 12 | * (End of struct aligned to end of page [unless nested]) | ||
| 13 | * | ||
| 14 | * INTERRUPT EXCEPTION | ||
| 15 | * | ||
| 16 | * manual --------------------- manual | ||
| 17 | * | orig_r0 | | ||
| 18 | * | event/ECR | | ||
| 19 | * | bta | | ||
| 20 | * | user_r25 | | ||
| 21 | * | gp | | ||
| 22 | * | fp | | ||
| 23 | * | sp | | ||
| 24 | * | r12 | | ||
| 25 | * | r30 | | ||
| 26 | * | r58 | | ||
| 27 | * | r59 | | ||
| 28 | * hw autosave --------------------- | ||
| 29 | * optional | r0 | | ||
| 30 | * | r1 | | ||
| 31 | * ~ ~ | ||
| 32 | * | r9 | | ||
| 33 | * | r10 | | ||
| 34 | * | r11 | | ||
| 35 | * | blink | | ||
| 36 | * | lpe | | ||
| 37 | * | lps | | ||
| 38 | * | lpc | | ||
| 39 | * | ei base | | ||
| 40 | * | ldi base | | ||
| 41 | * | jli base | | ||
| 42 | * --------------------- | ||
| 43 | * hw autosave | pc / eret | | ||
| 44 | * mandatory | stat32 / erstatus | | ||
| 45 | * --------------------- | ||
| 46 | */ | ||
| 47 | |||
| 10 | /*------------------------------------------------------------------------*/ | 48 | /*------------------------------------------------------------------------*/ |
| 11 | .macro INTERRUPT_PROLOGUE called_from | 49 | .macro INTERRUPT_PROLOGUE |
| 12 | 50 | ||
| 13 | ; Before jumping to Interrupt Vector, hardware micro-ops did following: | 51 | ; (A) Before jumping to Interrupt Vector, hardware micro-ops did following: |
| 14 | ; 1. SP auto-switched to kernel mode stack | 52 | ; 1. SP auto-switched to kernel mode stack |
| 15 | ; 2. STATUS32.Z flag set to U mode at time of interrupt (U:1, K:0) | 53 | ; 2. STATUS32.Z flag set if in U mode at time of interrupt (U:1,K:0) |
| 16 | ; 3. Auto saved: r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI, PC, STAT32 | 54 | ; 3. Auto save: (mandatory) Push PC and STAT32 on stack |
| 55 | ; hardware does even if CONFIG_ARC_IRQ_NO_AUTOSAVE | ||
| 56 | ; 4. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI | ||
| 17 | ; | 57 | ; |
| 18 | ; Now manually save: r12, sp, fp, gp, r25 | 58 | ; (B) Manually saved some regs: r12,r25,r30, sp,fp,gp, ACCL pair |
| 19 | 59 | ||
| 20 | #ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE | 60 | #ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE |
| 21 | .ifnc \called_from, exception | 61 | ; carve pt_regs on stack (case #3), PC/STAT32 already on stack |
| 22 | st.as r9, [sp, -10] ; save r9 in it's final stack slot | 62 | sub sp, sp, SZ_PT_REGS - 8 |
| 23 | sub sp, sp, 12 ; skip JLI, LDI, EI | ||
| 24 | |||
| 25 | PUSH lp_count | ||
| 26 | PUSHAX lp_start | ||
| 27 | PUSHAX lp_end | ||
| 28 | PUSH blink | ||
| 29 | |||
| 30 | PUSH r11 | ||
| 31 | PUSH r10 | ||
| 32 | |||
| 33 | sub sp, sp, 4 ; skip r9 | ||
| 34 | |||
| 35 | PUSH r8 | ||
| 36 | PUSH r7 | ||
| 37 | PUSH r6 | ||
| 38 | PUSH r5 | ||
| 39 | PUSH r4 | ||
| 40 | PUSH r3 | ||
| 41 | PUSH r2 | ||
| 42 | PUSH r1 | ||
| 43 | PUSH r0 | ||
| 44 | .endif | ||
| 45 | #endif | ||
| 46 | 63 | ||
| 47 | #ifdef CONFIG_ARC_HAS_ACCL_REGS | 64 | __SAVE_REGFILE_HARD |
| 48 | PUSH r59 | 65 | #else |
| 49 | PUSH r58 | 66 | ; carve pt_regs on stack (case #4), which grew partially already |
| 67 | sub sp, sp, PT_r0 | ||
| 50 | #endif | 68 | #endif |
| 51 | 69 | ||
| 52 | PUSH r30 | 70 | __SAVE_REGFILE_SOFT |
| 53 | PUSH r12 | 71 | .endm |
| 72 | |||
| 73 | /*------------------------------------------------------------------------*/ | ||
| 74 | .macro EXCEPTION_PROLOGUE | ||
| 75 | |||
| 76 | ; (A) Before jumping to Exception Vector, hardware micro-ops did following: | ||
| 77 | ; 1. SP auto-switched to kernel mode stack | ||
| 78 | ; 2. STATUS32.Z flag set if in U mode at time of exception (U:1,K:0) | ||
| 79 | ; | ||
| 80 | ; (B) Manually save the complete reg file below | ||
| 81 | |||
| 82 | sub sp, sp, SZ_PT_REGS ; carve pt_regs | ||
| 83 | |||
| 84 | ; _HARD saves r10 clobbered by _SOFT as scratch hence comes first | ||
| 85 | |||
| 86 | __SAVE_REGFILE_HARD | ||
| 87 | __SAVE_REGFILE_SOFT | ||
| 88 | |||
| 89 | st r0, [sp] ; orig_r0 | ||
| 90 | |||
| 91 | lr r10, [eret] | ||
| 92 | lr r11, [erstatus] | ||
| 93 | ST2 r10, r11, PT_ret | ||
| 94 | |||
| 95 | lr r10, [ecr] | ||
| 96 | lr r11, [erbta] | ||
| 97 | ST2 r10, r11, PT_event | ||
| 98 | |||
| 99 | ; OUTPUT: r10 has ECR expected by EV_Trap | ||
| 100 | .endm | ||
| 101 | |||
| 102 | /*------------------------------------------------------------------------ | ||
| 103 | * This macro saves the registers manually which would normally be autosaved | ||
| 104 | * by hardware on taken interrupts. It is used by | ||
| 105 | * - exception handlers (which don't have autosave) | ||
| 106 | * - interrupt autosave disabled due to CONFIG_ARC_IRQ_NO_AUTOSAVE | ||
| 107 | */ | ||
| 108 | .macro __SAVE_REGFILE_HARD | ||
| 109 | |||
| 110 | ST2 r0, r1, PT_r0 | ||
| 111 | ST2 r2, r3, PT_r2 | ||
| 112 | ST2 r4, r5, PT_r4 | ||
| 113 | ST2 r6, r7, PT_r6 | ||
| 114 | ST2 r8, r9, PT_r8 | ||
| 115 | ST2 r10, r11, PT_r10 | ||
| 116 | |||
| 117 | st blink, [sp, PT_blink] | ||
| 118 | |||
| 119 | lr r10, [lp_end] | ||
| 120 | lr r11, [lp_start] | ||
| 121 | ST2 r10, r11, PT_lpe | ||
| 122 | |||
| 123 | st lp_count, [sp, PT_lpc] | ||
| 124 | |||
| 125 | ; skip JLI, LDI, EI for now | ||
| 126 | .endm | ||
| 127 | |||
| 128 | /*------------------------------------------------------------------------ | ||
| 129 | * This macros saves a bunch of other registers which can't be autosaved for | ||
| 130 | * various reasons: | ||
| 131 | * - r12: the last caller saved scratch reg since hardware saves in pairs so r0-r11 | ||
| 132 | * - r30: free reg, used by gcc as scratch | ||
| 133 | * - ACCL/ACCH pair when they exist | ||
| 134 | */ | ||
| 135 | .macro __SAVE_REGFILE_SOFT | ||
| 136 | |||
| 137 | ST2 gp, fp, PT_r26 ; gp (r26), fp (r27) | ||
| 138 | |||
| 139 | st r12, [sp, PT_sp + 4] | ||
| 140 | st r30, [sp, PT_sp + 8] | ||
| 54 | 141 | ||
| 55 | ; Saving pt_regs->sp correctly requires some extra work due to the way | 142 | ; Saving pt_regs->sp correctly requires some extra work due to the way |
| 56 | ; Auto stack switch works | 143 | ; Auto stack switch works |
| 57 | ; - U mode: retrieve it from AUX_USER_SP | 144 | ; - U mode: retrieve it from AUX_USER_SP |
| 58 | ; - K mode: add the offset from current SP where H/w starts auto push | 145 | ; - K mode: add the offset from current SP where H/w starts auto push |
| 59 | ; | 146 | ; |
| 60 | ; Utilize the fact that Z bit is set if Intr taken in U mode | 147 | ; 1. Utilize the fact that Z bit is set if Intr taken in U mode |
| 61 | mov.nz r9, sp | 148 | ; 2. Upon entry SP is always saved (for any inspection, unwinding etc), |
| 62 | add.nz r9, r9, SZ_PT_REGS - PT_sp - 4 | 149 | ; but on return, restored only if U mode |
| 63 | bnz 1f | ||
| 64 | 150 | ||
| 65 | lr r9, [AUX_USER_SP] | 151 | lr r10, [AUX_USER_SP] ; U mode SP |
| 66 | 1: | 152 | |
| 67 | PUSH r9 ; SP | 153 | ; ISA requires ADD.nz to have same dest and src reg operands |
| 154 | mov.nz r10, sp | ||
| 155 | add.nz r10, r10, SZ_PT_REGS ; K mode SP | ||
| 68 | 156 | ||
| 69 | PUSH fp | 157 | st r10, [sp, PT_sp] ; SP (pt_regs->sp) |
| 70 | PUSH gp | ||
| 71 | 158 | ||
| 72 | #ifdef CONFIG_ARC_CURR_IN_REG | 159 | #ifdef CONFIG_ARC_CURR_IN_REG |
| 73 | PUSH r25 ; user_r25 | 160 | st r25, [sp, PT_user_r25] |
| 74 | GET_CURR_TASK_ON_CPU r25 | 161 | GET_CURR_TASK_ON_CPU r25 |
| 75 | #else | ||
| 76 | sub sp, sp, 4 | ||
| 77 | #endif | 162 | #endif |
| 78 | 163 | ||
| 79 | .ifnc \called_from, exception | 164 | #ifdef CONFIG_ARC_HAS_ACCL_REGS |
| 80 | sub sp, sp, 12 ; BTA/ECR/orig_r0 placeholder per pt_regs | 165 | ST2 r58, r59, PT_sp + 12 |
| 81 | .endif | 166 | #endif |
| 82 | 167 | ||
| 83 | .endm | 168 | .endm |
| 84 | 169 | ||
| 85 | /*------------------------------------------------------------------------*/ | 170 | /*------------------------------------------------------------------------*/ |
| 86 | .macro INTERRUPT_EPILOGUE called_from | 171 | .macro __RESTORE_REGFILE_SOFT |
| 87 | 172 | ||
| 88 | .ifnc \called_from, exception | 173 | LD2 gp, fp, PT_r26 ; gp (r26), fp (r27) |
| 89 | add sp, sp, 12 ; skip BTA/ECR/orig_r0 placeholderss | ||
| 90 | .endif | ||
| 91 | 174 | ||
| 92 | #ifdef CONFIG_ARC_CURR_IN_REG | 175 | ld r12, [sp, PT_sp + 4] |
| 93 | POP r25 | 176 | ld r30, [sp, PT_sp + 8] |
| 94 | #else | ||
| 95 | add sp, sp, 4 | ||
| 96 | #endif | ||
| 97 | 177 | ||
| 98 | POP gp | 178 | ; Restore SP (into AUX_USER_SP) only if returning to U mode |
| 99 | POP fp | 179 | ; - for K mode, it will be implicitly restored as stack is unwound |
| 100 | 180 | ; - Z flag set on K is inverse of what hardware does on interrupt entry | |
| 101 | ; Don't touch AUX_USER_SP if returning to K mode (Z bit set) | 181 | ; but that doesn't really matter |
| 102 | ; (Z bit set on K mode is inverse of INTERRUPT_PROLOGUE) | ||
| 103 | add.z sp, sp, 4 | ||
| 104 | bz 1f | 182 | bz 1f |
| 105 | 183 | ||
| 106 | POPAX AUX_USER_SP | 184 | ld r10, [sp, PT_sp] ; SP (pt_regs->sp) |
| 185 | sr r10, [AUX_USER_SP] | ||
| 107 | 1: | 186 | 1: |
| 108 | POP r12 | ||
| 109 | POP r30 | ||
| 110 | 187 | ||
| 111 | #ifdef CONFIG_ARC_HAS_ACCL_REGS | 188 | #ifdef CONFIG_ARC_CURR_IN_REG |
| 112 | POP r58 | 189 | ld r25, [sp, PT_user_r25] |
| 113 | POP r59 | ||
| 114 | #endif | 190 | #endif |
| 115 | 191 | ||
| 116 | #ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE | 192 | #ifdef CONFIG_ARC_HAS_ACCL_REGS |
| 117 | .ifnc \called_from, exception | 193 | LD2 r58, r59, PT_sp + 12 |
| 118 | POP r0 | ||
| 119 | POP r1 | ||
| 120 | POP r2 | ||
| 121 | POP r3 | ||
| 122 | POP r4 | ||
| 123 | POP r5 | ||
| 124 | POP r6 | ||
| 125 | POP r7 | ||
| 126 | POP r8 | ||
| 127 | POP r9 | ||
| 128 | POP r10 | ||
| 129 | POP r11 | ||
| 130 | |||
| 131 | POP blink | ||
| 132 | POPAX lp_end | ||
| 133 | POPAX lp_start | ||
| 134 | |||
| 135 | POP r9 | ||
| 136 | mov lp_count, r9 | ||
| 137 | |||
| 138 | add sp, sp, 12 ; skip JLI, LDI, EI | ||
| 139 | ld.as r9, [sp, -10] ; reload r9 which got clobbered | ||
| 140 | .endif | ||
| 141 | #endif | 194 | #endif |
| 195 | .endm | ||
| 142 | 196 | ||
| 197 | /*------------------------------------------------------------------------*/ | ||
| 198 | .macro __RESTORE_REGFILE_HARD | ||
| 199 | |||
| 200 | ld blink, [sp, PT_blink] | ||
| 201 | |||
| 202 | LD2 r10, r11, PT_lpe | ||
| 203 | sr r10, [lp_end] | ||
| 204 | sr r11, [lp_start] | ||
| 205 | |||
| 206 | ld r10, [sp, PT_lpc] ; lp_count can't be target of LD | ||
| 207 | mov lp_count, r10 | ||
| 208 | |||
| 209 | LD2 r0, r1, PT_r0 | ||
| 210 | LD2 r2, r3, PT_r2 | ||
| 211 | LD2 r4, r5, PT_r4 | ||
| 212 | LD2 r6, r7, PT_r6 | ||
| 213 | LD2 r8, r9, PT_r8 | ||
| 214 | LD2 r10, r11, PT_r10 | ||
| 143 | .endm | 215 | .endm |
| 144 | 216 | ||
| 217 | |||
| 145 | /*------------------------------------------------------------------------*/ | 218 | /*------------------------------------------------------------------------*/ |
| 146 | .macro EXCEPTION_PROLOGUE | 219 | .macro INTERRUPT_EPILOGUE |
| 147 | 220 | ||
| 148 | ; Before jumping to Exception Vector, hardware micro-ops did following: | 221 | ; INPUT: r0 has STAT32 of calling context |
| 149 | ; 1. SP auto-switched to kernel mode stack | 222 | ; INPUT: Z flag set if returning to K mode |
| 150 | ; 2. STATUS32.Z flag set to U mode at time of interrupt (U:1,K:0) | ||
| 151 | ; | ||
| 152 | ; Now manually save the complete reg file | ||
| 153 | |||
| 154 | PUSH r9 ; freeup a register: slot of erstatus | ||
| 155 | |||
| 156 | PUSHAX eret | ||
| 157 | sub sp, sp, 12 ; skip JLI, LDI, EI | ||
| 158 | PUSH lp_count | ||
| 159 | PUSHAX lp_start | ||
| 160 | PUSHAX lp_end | ||
| 161 | PUSH blink | ||
| 162 | |||
| 163 | PUSH r11 | ||
| 164 | PUSH r10 | ||
| 165 | |||
| 166 | ld.as r9, [sp, 10] ; load stashed r9 (status32 stack slot) | ||
| 167 | lr r10, [erstatus] | ||
| 168 | st.as r10, [sp, 10] ; save status32 at it's right stack slot | ||
| 169 | |||
| 170 | PUSH r9 | ||
| 171 | PUSH r8 | ||
| 172 | PUSH r7 | ||
| 173 | PUSH r6 | ||
| 174 | PUSH r5 | ||
| 175 | PUSH r4 | ||
| 176 | PUSH r3 | ||
| 177 | PUSH r2 | ||
| 178 | PUSH r1 | ||
| 179 | PUSH r0 | ||
| 180 | |||
| 181 | ; -- for interrupts, regs above are auto-saved by h/w in that order -- | ||
| 182 | ; Now do what ISR prologue does (manually save r12, sp, fp, gp, r25) | ||
| 183 | ; | ||
| 184 | ; Set Z flag if this was from U mode (expected by INTERRUPT_PROLOGUE) | ||
| 185 | ; Although H/w exception micro-ops do set Z flag for U mode (just like | ||
| 186 | ; for interrupts), it could get clobbered in case we soft land here from | ||
| 187 | ; a TLB Miss exception handler (tlbex.S) | ||
| 188 | 223 | ||
| 189 | and r10, r10, STATUS_U_MASK | 224 | ; _SOFT clobbers r10 restored by _HARD hence the order |
| 190 | xor.f 0, r10, STATUS_U_MASK | ||
| 191 | 225 | ||
| 192 | INTERRUPT_PROLOGUE exception | 226 | __RESTORE_REGFILE_SOFT |
| 193 | 227 | ||
| 194 | PUSHAX erbta | 228 | #ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE |
| 195 | PUSHAX ecr ; r9 contains ECR, expected by EV_Trap | 229 | __RESTORE_REGFILE_HARD |
| 230 | add sp, sp, SZ_PT_REGS - 8 | ||
| 231 | #else | ||
| 232 | add sp, sp, PT_r0 | ||
| 233 | #endif | ||
| 196 | 234 | ||
| 197 | PUSH r0 ; orig_r0 | ||
| 198 | .endm | 235 | .endm |
| 199 | 236 | ||
| 200 | /*------------------------------------------------------------------------*/ | 237 | /*------------------------------------------------------------------------*/ |
| 201 | .macro EXCEPTION_EPILOGUE | 238 | .macro EXCEPTION_EPILOGUE |
| 202 | 239 | ||
| 203 | ; Assumes r0 has PT_status32 | 240 | ; INPUT: r0 has STAT32 of calling context |
| 204 | btst r0, STATUS_U_BIT ; Z flag set if K, used in INTERRUPT_EPILOGUE | ||
| 205 | |||
| 206 | add sp, sp, 8 ; orig_r0/ECR don't need restoring | ||
| 207 | POPAX erbta | ||
| 208 | |||
| 209 | INTERRUPT_EPILOGUE exception | ||
| 210 | 241 | ||
| 211 | POP r0 | 242 | btst r0, STATUS_U_BIT ; Z flag set if K, used in restoring SP |
| 212 | POP r1 | ||
| 213 | POP r2 | ||
| 214 | POP r3 | ||
| 215 | POP r4 | ||
| 216 | POP r5 | ||
| 217 | POP r6 | ||
| 218 | POP r7 | ||
| 219 | POP r8 | ||
| 220 | POP r9 | ||
| 221 | POP r10 | ||
| 222 | POP r11 | ||
| 223 | 243 | ||
| 224 | POP blink | 244 | ld r10, [sp, PT_event + 4] |
| 225 | POPAX lp_end | 245 | sr r10, [erbta] |
| 226 | POPAX lp_start | ||
| 227 | 246 | ||
| 228 | POP r9 | 247 | LD2 r10, r11, PT_ret |
| 229 | mov lp_count, r9 | 248 | sr r10, [eret] |
| 249 | sr r11, [erstatus] | ||
| 230 | 250 | ||
| 231 | add sp, sp, 12 ; skip JLI, LDI, EI | 251 | __RESTORE_REGFILE_SOFT |
| 232 | POPAX eret | 252 | __RESTORE_REGFILE_HARD |
| 233 | POPAX erstatus | ||
| 234 | 253 | ||
| 235 | ld.as r9, [sp, -12] ; reload r9 which got clobbered | 254 | add sp, sp, SZ_PT_REGS |
| 236 | .endm | 255 | .endm |
| 237 | 256 | ||
| 238 | .macro FAKE_RET_FROM_EXCPN | 257 | .macro FAKE_RET_FROM_EXCPN |
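The ST2/LD2 pairs and PT_* offsets used throughout these macros all assume the pt_regs layout sketched in the new block comment at the top of this file. For readers who prefer C, a hedged reconstruction of that layout (the authoritative definition lives in arch/arc/include/asm/ptrace.h, which this patch does not show) looks roughly like:

        /* Reconstructed from the layout comment and asm-offsets; simplified
         * (e.g. 'event' is really a union carrying the ECR bit-fields). */
        struct pt_regs {
                unsigned long orig_r0;
                unsigned long event;            /* ECR */
                unsigned long bta;
                unsigned long user_r25;
                unsigned long r26;              /* gp */
                unsigned long fp;
                unsigned long sp;               /* SP of the interrupted context */
                unsigned long r12, r30;
        #ifdef CONFIG_ARC_HAS_ACCL_REGS
                unsigned long r58, r59;         /* ACCL/ACCH */
        #endif
                /* everything below can be auto-saved by hardware on interrupts */
                unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
                unsigned long blink;
                unsigned long lp_end, lp_start, lp_count;
                unsigned long ei, ldi, jli;     /* not yet saved/restored */
                unsigned long ret;              /* PC / eret */
                unsigned long status32;         /* STAT32 / erstatus */
        };

Under this layout, the "SZ_PT_REGS - 8" carve in INTERRUPT_PROLOGUE is exactly the space left to allocate once hardware has already pushed the two mandatory autosave words (PC and STAT32).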
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 66ba1bf21d28..66a292335ee6 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -195,8 +195,8 @@
| 195 | PUSHAX CTOP_AUX_EFLAGS | 195 | PUSHAX CTOP_AUX_EFLAGS |
| 196 | #endif | 196 | #endif |
| 197 | 197 | ||
| 198 | lr r9, [ecr] | 198 | lr r10, [ecr] |
| 199 | st r9, [sp, PT_event] /* EV_Trap expects r9 to have ECR */ | 199 | st r10, [sp, PT_event] /* EV_Trap expects r10 to have ECR */ |
| 200 | .endm | 200 | .endm |
| 201 | 201 | ||
| 202 | /*-------------------------------------------------------------- | 202 | /*-------------------------------------------------------------- |
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index 54f5ec5c1759..a0eeb9f8f0a9 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -10,6 +10,24 @@
| 10 | 10 | ||
| 11 | #ifdef __ASSEMBLY__ | 11 | #ifdef __ASSEMBLY__ |
| 12 | 12 | ||
| 13 | .macro ST2 e, o, off | ||
| 14 | #ifdef CONFIG_ARC_HAS_LL64 | ||
| 15 | std \e, [sp, \off] | ||
| 16 | #else | ||
| 17 | st \e, [sp, \off] | ||
| 18 | st \o, [sp, \off+4] | ||
| 19 | #endif | ||
| 20 | .endm | ||
| 21 | |||
| 22 | .macro LD2 e, o, off | ||
| 23 | #ifdef CONFIG_ARC_HAS_LL64 | ||
| 24 | ldd \e, [sp, \off] | ||
| 25 | #else | ||
| 26 | ld \e, [sp, \off] | ||
| 27 | ld \o, [sp, \off+4] | ||
| 28 | #endif | ||
| 29 | .endm | ||
| 30 | |||
| 13 | #define ASM_NL ` /* use '`' to mark new line in macro */ | 31 | #define ASM_NL ` /* use '`' to mark new line in macro */ |
| 14 | 32 | ||
| 15 | /* annotation for data we want in DCCM - if enabled in .config */ | 33 | /* annotation for data we want in DCCM - if enabled in .config */ |
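ST2/LD2 keep the entry code readable while still exploiting 64-bit memory operations where the core supports them: with CONFIG_ARC_HAS_LL64, ST2 r0, r1, PT_r0 collapses to the single std r0, [sp, PT_r0], otherwise it expands to the two 32-bit stores st r0, [sp, PT_r0] and st r1, [sp, PT_r0+4] (LD2 mirrors this for loads). The e/o argument names suggest callers are expected to pass an even/odd register pair, which is what the double-word forms require.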
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
index dba116535005..1f621e416521 100644
--- a/arch/arc/kernel/asm-offsets.c
+++ b/arch/arc/kernel/asm-offsets.c
@@ -55,7 +55,14 @@ int main(void)
| 55 | DEFINE(PT_r5, offsetof(struct pt_regs, r5)); | 55 | DEFINE(PT_r5, offsetof(struct pt_regs, r5)); |
| 56 | DEFINE(PT_r6, offsetof(struct pt_regs, r6)); | 56 | DEFINE(PT_r6, offsetof(struct pt_regs, r6)); |
| 57 | DEFINE(PT_r7, offsetof(struct pt_regs, r7)); | 57 | DEFINE(PT_r7, offsetof(struct pt_regs, r7)); |
| 58 | DEFINE(PT_r8, offsetof(struct pt_regs, r8)); | ||
| 59 | DEFINE(PT_r10, offsetof(struct pt_regs, r10)); | ||
| 60 | DEFINE(PT_r26, offsetof(struct pt_regs, r26)); | ||
| 58 | DEFINE(PT_ret, offsetof(struct pt_regs, ret)); | 61 | DEFINE(PT_ret, offsetof(struct pt_regs, ret)); |
| 62 | DEFINE(PT_blink, offsetof(struct pt_regs, blink)); | ||
| 63 | DEFINE(PT_lpe, offsetof(struct pt_regs, lp_end)); | ||
| 64 | DEFINE(PT_lpc, offsetof(struct pt_regs, lp_count)); | ||
| 65 | DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25)); | ||
| 59 | 66 | ||
| 60 | DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs)); | 67 | DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs)); |
| 61 | DEFINE(SZ_PT_REGS, sizeof(struct pt_regs)); | 68 | DEFINE(SZ_PT_REGS, sizeof(struct pt_regs)); |
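The new PT_* symbols are not hand-maintained constants; asm-offsets.c is compiled at build time and each DEFINE() turns a C structure offset into a marker that Kbuild post-processes into include/generated/asm-offsets.h. For context (this helper lives in include/linux/kbuild.h, not in this patch), DEFINE() is roughly:

        #define DEFINE(sym, val) \
                asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

That generated header is what lets the entry macros write st blink, [sp, PT_blink] without hard-coding blink's position in pt_regs.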
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 14254b866fdc..12d5f12d10d2 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -67,7 +67,7 @@ reserved:
| 67 | 67 | ||
| 68 | ENTRY(handle_interrupt) | 68 | ENTRY(handle_interrupt) |
| 69 | 69 | ||
| 70 | INTERRUPT_PROLOGUE irq | 70 | INTERRUPT_PROLOGUE |
| 71 | 71 | ||
| 72 | # irq control APIs local_irq_save/restore/disable/enable fiddle with | 72 | # irq control APIs local_irq_save/restore/disable/enable fiddle with |
| 73 | # global interrupt enable bits in STATUS32 (.IE for 1 prio, .E[] for 2 prio) | 73 | # global interrupt enable bits in STATUS32 (.IE for 1 prio, .E[] for 2 prio) |
@@ -79,7 +79,7 @@ ENTRY(handle_interrupt)
| 79 | # | 79 | # |
| 80 | # Note this disable is only for consistent book-keeping as further interrupts | 80 | # Note this disable is only for consistent book-keeping as further interrupts |
| 81 | # will be disabled anyways even w/o this. Hardware tracks active interrupts | 81 | # will be disabled anyways even w/o this. Hardware tracks active interrupts |
| 82 | # seperately in AUX_IRQ_ACTIVE.active and will not take new interrupts | 82 | # seperately in AUX_IRQ_ACT.active and will not take new interrupts |
| 83 | # unless this one returns (or higher prio becomes pending in 2-prio scheme) | 83 | # unless this one returns (or higher prio becomes pending in 2-prio scheme) |
| 84 | 84 | ||
| 85 | IRQ_DISABLE | 85 | IRQ_DISABLE |
@@ -200,17 +200,18 @@ restore_regs:
| 200 | ld r0, [sp, PT_status32] ; U/K mode at time of entry | 200 | ld r0, [sp, PT_status32] ; U/K mode at time of entry |
| 201 | lr r10, [AUX_IRQ_ACT] | 201 | lr r10, [AUX_IRQ_ACT] |
| 202 | 202 | ||
| 203 | bmsk r11, r10, 15 ; AUX_IRQ_ACT.ACTIVE | 203 | bmsk r11, r10, 15 ; extract AUX_IRQ_ACT.active |
| 204 | breq r11, 0, .Lexcept_ret ; No intr active, ret from Exception | 204 | breq r11, 0, .Lexcept_ret ; No intr active, ret from Exception |
| 205 | 205 | ||
| 206 | ;####### Return from Intr ####### | 206 | ;####### Return from Intr ####### |
| 207 | 207 | ||
| 208 | .Lisr_ret: | ||
| 209 | |||
| 208 | debug_marker_l1: | 210 | debug_marker_l1: |
| 209 | ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot | 211 | ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot |
| 210 | btst r0, STATUS_DE_BIT ; Z flag set if bit clear | 212 | btst r0, STATUS_DE_BIT ; Z flag set if bit clear |
| 211 | bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set | 213 | bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set |
| 212 | 214 | ||
| 213 | .Lisr_ret_fast_path: | ||
| 214 | ; Handle special case #1: (Entry via Exception, Return via IRQ) | 215 | ; Handle special case #1: (Entry via Exception, Return via IRQ) |
| 215 | ; | 216 | ; |
| 216 | ; Exception in U mode, preempted in kernel, Intr taken (K mode), orig | 217 | ; Exception in U mode, preempted in kernel, Intr taken (K mode), orig |
@@ -223,7 +224,7 @@ debug_marker_l1:
| 223 | bset.nz r11, r11, AUX_IRQ_ACT_BIT_U ; NZ means U | 224 | bset.nz r11, r11, AUX_IRQ_ACT_BIT_U ; NZ means U |
| 224 | sr r11, [AUX_IRQ_ACT] | 225 | sr r11, [AUX_IRQ_ACT] |
| 225 | 226 | ||
| 226 | INTERRUPT_EPILOGUE irq | 227 | INTERRUPT_EPILOGUE |
| 227 | rtie | 228 | rtie |
| 228 | 229 | ||
| 229 | ;####### Return from Exception / pure kernel mode ####### | 230 | ;####### Return from Exception / pure kernel mode ####### |
@@ -244,8 +245,8 @@ debug_marker_syscall:
| 244 | ; | 245 | ; |
| 245 | ; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround | 246 | ; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround |
| 246 | ; | 247 | ; |
| 247 | ; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline | 248 | ; Solution is to drop out of interrupt context into pure kernel mode |
| 248 | ; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly | 249 | ; and return from pure kernel mode which does right things for delay slot |
| 249 | 250 | ||
| 250 | .Lintr_ret_to_delay_slot: | 251 | .Lintr_ret_to_delay_slot: |
| 251 | debug_marker_ds: | 252 | debug_marker_ds: |
@@ -254,48 +255,9 @@ debug_marker_ds:
| 254 | add r2, r2, 1 | 255 | add r2, r2, 1 |
| 255 | st r2, [@intr_to_DE_cnt] | 256 | st r2, [@intr_to_DE_cnt] |
| 256 | 257 | ||
| 257 | ld r2, [sp, PT_ret] | 258 | ; drop out of interrupt context (clear AUX_IRQ_ACT.active) |
| 258 | ld r3, [sp, PT_status32] | 259 | bmskn r11, r10, 15 |
| 259 | 260 | sr r11, [AUX_IRQ_ACT] | |
| 260 | ; STAT32 for Int return created from scratch | 261 | b .Lexcept_ret |
| 261 | ; (No delay dlot, disable Further intr in trampoline) | ||
| 262 | |||
| 263 | bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK | ||
| 264 | st r0, [sp, PT_status32] | ||
| 265 | |||
| 266 | mov r1, .Lintr_ret_to_delay_slot_2 | ||
| 267 | st r1, [sp, PT_ret] | ||
| 268 | |||
| 269 | ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots | ||
| 270 | st r2, [sp, 0] | ||
| 271 | st r3, [sp, 4] | ||
| 272 | |||
| 273 | b .Lisr_ret_fast_path | ||
| 274 | |||
| 275 | .Lintr_ret_to_delay_slot_2: | ||
| 276 | ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP | ||
| 277 | sub sp, sp, SZ_PT_REGS | ||
| 278 | st r9, [sp, -4] | ||
| 279 | |||
| 280 | ld r9, [sp, 0] | ||
| 281 | sr r9, [eret] | ||
| 282 | |||
| 283 | ld r9, [sp, 4] | ||
| 284 | sr r9, [erstatus] | ||
| 285 | |||
| 286 | ; restore AUX_USER_SP if returning to U mode | ||
| 287 | bbit0 r9, STATUS_U_BIT, 1f | ||
| 288 | ld r9, [sp, PT_sp] | ||
| 289 | sr r9, [AUX_USER_SP] | ||
| 290 | |||
| 291 | 1: | ||
| 292 | ld r9, [sp, 8] | ||
| 293 | sr r9, [erbta] | ||
| 294 | |||
| 295 | ld r9, [sp, -4] | ||
| 296 | add sp, sp, SZ_PT_REGS | ||
| 297 | |||
| 298 | ; return from pure kernel mode to delay slot | ||
| 299 | rtie | ||
| 300 | 262 | ||
| 301 | END(ret_from_exception) | 263 | END(ret_from_exception) |
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 7fe59880c16b..5cb0cd7e4eab 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -256,7 +256,7 @@ ENTRY(EV_TLBProtV)
| 256 | 256 | ||
| 257 | EXCEPTION_PROLOGUE | 257 | EXCEPTION_PROLOGUE |
| 258 | 258 | ||
| 259 | mov r2, r9 ; ECR set into r9 already | 259 | mov r2, r10 ; ECR set into r10 already |
| 260 | lr r0, [efa] ; Faulting Data address (not part of pt_regs saved above) | 260 | lr r0, [efa] ; Faulting Data address (not part of pt_regs saved above) |
| 261 | 261 | ||
| 262 | ; Exception auto-disables further Intr/exceptions. | 262 | ; Exception auto-disables further Intr/exceptions. |
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index a2bfacbcfce1..72be01270e24 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -232,8 +232,8 @@ ENTRY(EV_Trap)
| 232 | EXCEPTION_PROLOGUE | 232 | EXCEPTION_PROLOGUE |
| 233 | 233 | ||
| 234 | ;============ TRAP 1 :breakpoints | 234 | ;============ TRAP 1 :breakpoints |
| 235 | ; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR) | 235 | ; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR) |
| 236 | bmsk.f 0, r9, 7 | 236 | bmsk.f 0, r10, 7 |
| 237 | bnz trap_with_param | 237 | bnz trap_with_param |
| 238 | 238 | ||
| 239 | ;============ TRAP (no param): syscall top level | 239 | ;============ TRAP (no param): syscall top level |
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 182ce67dfe10..c2663fce7f6c 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -181,11 +181,6 @@ static void *__init unw_hdr_alloc_early(unsigned long sz)
| 181 | return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS); | 181 | return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS); |
| 182 | } | 182 | } |
| 183 | 183 | ||
| 184 | static void *unw_hdr_alloc(unsigned long sz) | ||
| 185 | { | ||
| 186 | return kmalloc(sz, GFP_KERNEL); | ||
| 187 | } | ||
| 188 | |||
| 189 | static void init_unwind_table(struct unwind_table *table, const char *name, | 184 | static void init_unwind_table(struct unwind_table *table, const char *name, |
| 190 | const void *core_start, unsigned long core_size, | 185 | const void *core_start, unsigned long core_size, |
| 191 | const void *init_start, unsigned long init_size, | 186 | const void *init_start, unsigned long init_size, |
@@ -366,6 +361,10 @@ ret_err:
| 366 | } | 361 | } |
| 367 | 362 | ||
| 368 | #ifdef CONFIG_MODULES | 363 | #ifdef CONFIG_MODULES |
| 364 | static void *unw_hdr_alloc(unsigned long sz) | ||
| 365 | { | ||
| 366 | return kmalloc(sz, GFP_KERNEL); | ||
| 367 | } | ||
| 369 | 368 | ||
| 370 | static struct unwind_table *last_table; | 369 | static struct unwind_table *last_table; |
| 371 | 370 | ||
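Moving unw_hdr_alloc() inside the CONFIG_MODULES block does not change behaviour; presumably its only user is the module table-registration path guarded by the same #ifdef, so on CONFIG_MODULES=n builds the helper was a defined-but-unreferenced static function. A minimal stand-alone illustration of that warning pattern, with hypothetical names and assuming -Wall (which enables -Wunused-function):

        #include <stdlib.h>

        static void *helper_alloc(unsigned long sz)     /* cf. unw_hdr_alloc() */
        {
                return malloc(sz);
        }

        #ifdef HAVE_TABLES                              /* cf. CONFIG_MODULES */
        void *add_table(unsigned long sz)
        {
                return helper_alloc(sz);                /* only reference */
        }
        #endif

With HAVE_TABLES undefined, helper_alloc() draws an unused-function warning; fencing it with the same #ifdef as its caller is the usual fix.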
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 81e84426fe21..3861543b66a0 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -63,24 +63,19 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
| 63 | struct vm_area_struct *vma = NULL; | 63 | struct vm_area_struct *vma = NULL; |
| 64 | struct task_struct *tsk = current; | 64 | struct task_struct *tsk = current; |
| 65 | struct mm_struct *mm = tsk->mm; | 65 | struct mm_struct *mm = tsk->mm; |
| 66 | int si_code = SEGV_MAPERR; | 66 | int sig, si_code = SEGV_MAPERR; |
| 67 | int ret; | 67 | unsigned int write = 0, exec = 0, mask; |
| 68 | vm_fault_t fault; | 68 | vm_fault_t fault = VM_FAULT_SIGSEGV; /* handle_mm_fault() output */ |
| 69 | int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */ | 69 | unsigned int flags; /* handle_mm_fault() input */ |
| 70 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | ||
| 71 | 70 | ||
| 72 | /* | 71 | /* |
| 73 | * We fault-in kernel-space virtual memory on-demand. The | ||
| 74 | * 'reference' page table is init_mm.pgd. | ||
| 75 | * | ||
| 76 | * NOTE! We MUST NOT take any locks for this case. We may | 72 | * NOTE! We MUST NOT take any locks for this case. We may |
| 77 | * be in an interrupt or a critical region, and should | 73 | * be in an interrupt or a critical region, and should |
| 78 | * only copy the information from the master page table, | 74 | * only copy the information from the master page table, |
| 79 | * nothing more. | 75 | * nothing more. |
| 80 | */ | 76 | */ |
| 81 | if (address >= VMALLOC_START && !user_mode(regs)) { | 77 | if (address >= VMALLOC_START && !user_mode(regs)) { |
| 82 | ret = handle_kernel_vaddr_fault(address); | 78 | if (unlikely(handle_kernel_vaddr_fault(address))) |
| 83 | if (unlikely(ret)) | ||
| 84 | goto no_context; | 79 | goto no_context; |
| 85 | else | 80 | else |
| 86 | return; | 81 | return; |
@@ -93,143 +88,117 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
| 93 | if (faulthandler_disabled() || !mm) | 88 | if (faulthandler_disabled() || !mm) |
| 94 | goto no_context; | 89 | goto no_context; |
| 95 | 90 | ||
| 91 | if (regs->ecr_cause & ECR_C_PROTV_STORE) /* ST/EX */ | ||
| 92 | write = 1; | ||
| 93 | else if ((regs->ecr_vec == ECR_V_PROTV) && | ||
| 94 | (regs->ecr_cause == ECR_C_PROTV_INST_FETCH)) | ||
| 95 | exec = 1; | ||
| 96 | |||
| 97 | flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | ||
| 96 | if (user_mode(regs)) | 98 | if (user_mode(regs)) |
| 97 | flags |= FAULT_FLAG_USER; | 99 | flags |= FAULT_FLAG_USER; |
| 100 | if (write) | ||
| 101 | flags |= FAULT_FLAG_WRITE; | ||
| 102 | |||
| 98 | retry: | 103 | retry: |
| 99 | down_read(&mm->mmap_sem); | 104 | down_read(&mm->mmap_sem); |
| 105 | |||
| 100 | vma = find_vma(mm, address); | 106 | vma = find_vma(mm, address); |
| 101 | if (!vma) | 107 | if (!vma) |
| 102 | goto bad_area; | 108 | goto bad_area; |
| 103 | if (vma->vm_start <= address) | 109 | if (unlikely(address < vma->vm_start)) { |
| 104 | goto good_area; | 110 | if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address)) |
| 105 | if (!(vma->vm_flags & VM_GROWSDOWN)) | 111 | goto bad_area; |
| 106 | goto bad_area; | 112 | } |
| 107 | if (expand_stack(vma, address)) | ||
| 108 | goto bad_area; | ||
| 109 | 113 | ||
| 110 | /* | 114 | /* |
| 111 | * Ok, we have a good vm_area for this memory access, so | 115 | * vm_area is good, now check permissions for this memory access |
| 112 | * we can handle it.. | ||
| 113 | */ | 116 | */ |
| 114 | good_area: | 117 | mask = VM_READ; |
| 115 | si_code = SEGV_ACCERR; | 118 | if (write) |
| 116 | 119 | mask = VM_WRITE; | |
| 117 | /* Handle protection violation, execute on heap or stack */ | 120 | if (exec) |
| 118 | 121 | mask = VM_EXEC; | |
| 119 | if ((regs->ecr_vec == ECR_V_PROTV) && | 122 | |
| 120 | (regs->ecr_cause == ECR_C_PROTV_INST_FETCH)) | 123 | if (!(vma->vm_flags & mask)) { |
| 124 | si_code = SEGV_ACCERR; | ||
| 121 | goto bad_area; | 125 | goto bad_area; |
| 122 | |||
| 123 | if (write) { | ||
| 124 | if (!(vma->vm_flags & VM_WRITE)) | ||
| 125 | goto bad_area; | ||
| 126 | flags |= FAULT_FLAG_WRITE; | ||
| 127 | } else { | ||
| 128 | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) | ||
| 129 | goto bad_area; | ||
| 130 | } | 126 | } |
| 131 | 127 | ||
| 132 | /* | ||
| 133 | * If for any reason at all we couldn't handle the fault, | ||
| 134 | * make sure we exit gracefully rather than endlessly redo | ||
| 135 | * the fault. | ||
| 136 | */ | ||
| 137 | fault = handle_mm_fault(vma, address, flags); | 128 | fault = handle_mm_fault(vma, address, flags); |
| 138 | 129 | ||
| 139 | if (fatal_signal_pending(current)) { | 130 | /* |
| 131 | * Fault retry nuances | ||
| 132 | */ | ||
| 133 | if (unlikely(fault & VM_FAULT_RETRY)) { | ||
| 140 | 134 | ||
| 141 | /* | 135 | /* |
| 142 | * if fault retry, mmap_sem already relinquished by core mm | 136 | * If fault needs to be retried, handle any pending signals |
| 143 | * so OK to return to user mode (with signal handled first) | 137 | * first (by returning to user mode). |
| 138 | * mmap_sem already relinquished by core mm for RETRY case | ||
| 144 | */ | 139 | */ |
| 145 | if (fault & VM_FAULT_RETRY) { | 140 | if (fatal_signal_pending(current)) { |
| 146 | if (!user_mode(regs)) | 141 | if (!user_mode(regs)) |
| 147 | goto no_context; | 142 | goto no_context; |
| 148 | return; | 143 | return; |
| 149 | } | 144 | } |
| 150 | } | 145 | /* |
| 151 | 146 | * retry state machine | |
| 152 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | 147 | */ |
| 153 | |||
| 154 | if (likely(!(fault & VM_FAULT_ERROR))) { | ||
| 155 | if (flags & FAULT_FLAG_ALLOW_RETRY) { | 148 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
| 156 | /* To avoid updating stats twice for retry case */ | 149 | flags &= ~FAULT_FLAG_ALLOW_RETRY; |
| 157 | if (fault & VM_FAULT_MAJOR) { | 150 | flags |= FAULT_FLAG_TRIED; |
| 158 | tsk->maj_flt++; | 151 | goto retry; |
| 159 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, | ||
| 160 | regs, address); | ||
| 161 | } else { | ||
| 162 | tsk->min_flt++; | ||
| 163 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, | ||
| 164 | regs, address); | ||
| 165 | } | ||
| 166 | |||
| 167 | if (fault & VM_FAULT_RETRY) { | ||
| 168 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | ||
| 169 | flags |= FAULT_FLAG_TRIED; | ||
| 170 | goto retry; | ||
| 171 | } | ||
| 172 | } | 152 | } |
| 173 | |||
| 174 | /* Fault Handled Gracefully */ | ||
| 175 | up_read(&mm->mmap_sem); | ||
| 176 | return; | ||
| 177 | } | 153 | } |
| 178 | 154 | ||
| 179 | if (fault & VM_FAULT_OOM) | 155 | bad_area: |
| 180 | goto out_of_memory; | 156 | up_read(&mm->mmap_sem); |
| 181 | else if (fault & VM_FAULT_SIGSEGV) | ||
| 182 | goto bad_area; | ||
| 183 | else if (fault & VM_FAULT_SIGBUS) | ||
| 184 | goto do_sigbus; | ||
| 185 | |||
| 186 | /* no man's land */ | ||
| 187 | BUG(); | ||
| 188 | 157 | ||
| 189 | /* | 158 | /* |
| 190 | * Something tried to access memory that isn't in our memory map.. | 159 | * Major/minor page fault accounting |
| 191 | * Fix it, but check if it's kernel or user first.. | 160 | * (in case of retry we only land here once) |
| 192 | */ | 161 | */ |
| 193 | bad_area: | 162 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
| 194 | up_read(&mm->mmap_sem); | ||
| 195 | 163 | ||
| 196 | /* User mode accesses just cause a SIGSEGV */ | 164 | if (likely(!(fault & VM_FAULT_ERROR))) { |
| 197 | if (user_mode(regs)) { | 165 | if (fault & VM_FAULT_MAJOR) { |
| 198 | tsk->thread.fault_address = address; | 166 | tsk->maj_flt++; |
| 199 | force_sig_fault(SIGSEGV, si_code, (void __user *)address); | 167 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, |
| 200 | return; | 168 | regs, address); |
| 201 | } | 169 | } else { |
| 170 | tsk->min_flt++; | ||
| 171 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, | ||
| 172 | regs, address); | ||
| 173 | } | ||
| 202 | 174 | ||
| 203 | no_context: | 175 | /* Normal return path: fault Handled Gracefully */ |
| 204 | /* Are we prepared to handle this kernel fault? | ||
| 205 | * | ||
| 206 | * (The kernel has valid exception-points in the source | ||
| 207 | * when it accesses user-memory. When it fails in one | ||
| 208 | * of those points, we find it in a table and do a jump | ||
| 209 | * to some fixup code that loads an appropriate error | ||
| 210 | * code) | ||
| 211 | */ | ||
| 212 | if (fixup_exception(regs)) | ||
| 213 | return; | 176 | return; |
| 177 | } | ||
| 214 | 178 | ||
| 215 | die("Oops", regs, address); | 179 | if (!user_mode(regs)) |
| 216 | 180 | goto no_context; | |
| 217 | out_of_memory: | ||
| 218 | up_read(&mm->mmap_sem); | ||
| 219 | 181 | ||
| 220 | if (user_mode(regs)) { | 182 | if (fault & VM_FAULT_OOM) { |
| 221 | pagefault_out_of_memory(); | 183 | pagefault_out_of_memory(); |
| 222 | return; | 184 | return; |
| 223 | } | 185 | } |
| 224 | 186 | ||
| 225 | goto no_context; | 187 | if (fault & VM_FAULT_SIGBUS) { |
| 188 | sig = SIGBUS; | ||
| 189 | si_code = BUS_ADRERR; | ||
| 190 | } | ||
| 191 | else { | ||
| 192 | sig = SIGSEGV; | ||
| 193 | } | ||
| 226 | 194 | ||
| 227 | do_sigbus: | 195 | tsk->thread.fault_address = address; |
| 228 | up_read(&mm->mmap_sem); | 196 | force_sig_fault(sig, si_code, (void __user *)address); |
| 197 | return; | ||
| 229 | 198 | ||
| 230 | if (!user_mode(regs)) | 199 | no_context: |
| 231 | goto no_context; | 200 | if (fixup_exception(regs)) |
| 201 | return; | ||
| 232 | 202 | ||
| 233 | tsk->thread.fault_address = address; | 203 | die("Oops", regs, address); |
| 234 | force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address); | ||
| 235 | } | 204 | } |
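Two details worth noting in the rewritten handler: fault is pre-initialised to VM_FAULT_SIGSEGV, so every goto bad_area converges on the single exit sequence after the bad_area: label and still lands in the SIGSEGV branch there; and si_code keeps its SEGV_MAPERR default unless the VMA permission check flips it to SEGV_ACCERR, so the one force_sig_fault() call at the end covers missing mappings, permission violations and (via BUS_ADRERR) bus errors alike.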
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 471a97bf492d..c55d95dd2f39 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -393,6 +393,17 @@ EV_TLBMissD_fast_ret: ; additional label for VDK OS-kit instrumentation
| 393 | ;-------- Common routine to call Linux Page Fault Handler ----------- | 393 | ;-------- Common routine to call Linux Page Fault Handler ----------- |
| 394 | do_slow_path_pf: | 394 | do_slow_path_pf: |
| 395 | 395 | ||
| 396 | #ifdef CONFIG_ISA_ARCV2 | ||
| 397 | ; Set Z flag if exception in U mode. Hardware micro-ops do this on any | ||
| 398 | ; taken interrupt/exception, and thus is already the case at the entry | ||
| 399 | ; above, but ensuing code would have already clobbered. | ||
| 400 | ; EXCEPTION_PROLOGUE called in slow path, relies on correct Z flag set | ||
| 401 | |||
| 402 | lr r2, [erstatus] | ||
| 403 | and r2, r2, STATUS_U_MASK | ||
| 404 | bxor.f 0, r2, STATUS_U_BIT | ||
| 405 | #endif | ||
| 406 | |||
| 396 | ; Restore the 4-scratch regs saved by fast path miss handler | 407 | ; Restore the 4-scratch regs saved by fast path miss handler |
| 397 | TLBMISS_RESTORE_REGS | 408 | TLBMISS_RESTORE_REGS |
| 398 | 409 | ||
